From c67dde550cf9e8913beeef339e41c58da4dcd59b Mon Sep 17 00:00:00 2001 From: <> Date: Fri, 14 Jun 2024 15:47:29 +0000 Subject: [PATCH] Deployed 2a94e0a1 with MkDocs version: 1.6.0 --- client_api/Client/index.html | 3842 ++++++++--------- client_api/Dataset/index.html | 1264 +++--- client_api/Evaluation/index.html | 805 ++-- client_api/Groundtruth/index.html | 182 +- client_api/Model/index.html | 2136 ++++----- client_api/Prediction/index.html | 160 +- .../EvaluationParameters/index.html | 57 +- search/search_index.json | 2 +- sitemap.xml.gz | Bin 127 -> 127 bytes static/openapi.json | 2 +- 10 files changed, 4307 insertions(+), 4143 deletions(-) diff --git a/client_api/Client/index.html b/client_api/Client/index.html index a03ba8ce3..d8b8e3d31 100644 --- a/client_api/Client/index.html +++ b/client_api/Client/index.html @@ -838,44 +838,7 @@

Client

Source code in valor/coretypes.py
@@ -1504,673 +1467,710 @@ 

Client

-class Client:
-    """
-    Valor client object for interacting with the API.
-
-    Parameters
-    ----------
-    connection : ClientConnection, optional
-        Option to use an existing connection object.
-    """
-
-    def __init__(self, connection: Optional[ClientConnection] = None):
-        if not connection:
-            connection = get_connection()
-        self.conn = connection
-
-    @classmethod
-    def connect(
-        cls,
-        host: str,
-        access_token: Optional[str] = None,
-        reconnect: bool = False,
-    ) -> Client:
-        """
-        Establishes a connection to the Valor API.
-
-        Parameters
-        ----------
-        host : str
-            The host to connect to. Should start with "http://" or "https://".
-        access_token : str
-            The access token for the host (if the host requires authentication).
-        """
-        connect(host=host, access_token=access_token, reconnect=reconnect)
-        return cls(get_connection())
-
-    def get_labels(
-        self,
-        filter_by: Optional[FilterType] = None,
-    ) -> List[Label]:
-        """
-        Gets all labels using an optional filter.
-
-        Parameters
-        ----------
-        filter_by : FilterType, optional
-            Optional constraints to filter by.
+class Client:
+    """
+    Valor client object for interacting with the API.
+
+    Parameters
+    ----------
+    connection : ClientConnection, optional
+        Option to use an existing connection object.
+    """
 
-        Returns
-        -------
-        List[valor.Label]
-            A list of labels.
-        """
-        filter_ = _format_filter(filter_by)
-        filter_ = asdict(filter_)
-        return [Label(**label) for label in self.conn.get_labels(filter_)]
-
-    def get_labels_from_dataset(
-        self, dataset: Union[Dataset, str]
-    ) -> List[Label]:
+    def __init__(self, connection: Optional[ClientConnection] = None):
+        if not connection:
+            connection = get_connection()
+        self.conn = connection
+
+    @classmethod
+    def connect(
+        cls,
+        host: str,
+        access_token: Optional[str] = None,
+        reconnect: bool = False,
+    ) -> Client:
         """
-        Get all labels associated with a dataset's ground truths.
+        Establishes a connection to the Valor API.
 
         Parameters
         ----------
-        dataset : valor.Dataset
-            The dataset to search by.
-
-        Returns
-        -------
-        List[valor.Label]
-            A list of labels.
-        """
-        dataset_name = (
-            dataset.name if isinstance(dataset, Dataset) else dataset
-        )
-        return [
-            Label(**label)
-            for label in self.conn.get_labels_from_dataset(dataset_name)  # type: ignore
-        ]
-
-    def get_labels_from_model(self, model: Union[Model, str]) -> List[Label]:
-        """
-        Get all labels associated with a model's predictions.
+        host : str
+            The host to connect to. Should start with "http://" or "https://".
+        access_token : str
+            The access token for the host (if the host requires authentication).
+        """
+        connect(host=host, access_token=access_token, reconnect=reconnect)
+        return cls(get_connection())
+
+    def get_labels(
+        self,
+        filter_by: Optional[FilterType] = None,
+    ) -> List[Label]:
+        """
+        Gets all labels using an optional filter.
+
+        Parameters
+        ----------
+        filter_by : FilterType, optional
+            Optional constraints to filter by.
 
-        Parameters
-        ----------
-        model : valor.Model
-            The model to search by.
-
-        Returns
-        -------
-        List[valor.Label]
-            A list of labels.
-        """
-        model_name = model.name if isinstance(model, Model) else model
-        return [
-            Label(**label)
-            for label in self.conn.get_labels_from_model(model_name)  # type: ignore
-        ]
-
-    def create_dataset(
-        self,
-        dataset: Union[Dataset, dict],
-    ) -> None:
-        """
-        Creates a dataset.
-
-        Parameters
-        ----------
-        dataset : valor.Dataset
-            The dataset to create.
-        """
-        if isinstance(dataset, Dataset):
-            dataset = dataset.encode_value()
-        self.conn.create_dataset(dataset)
-
-    def create_groundtruths(
-        self,
-        dataset: Dataset,
-        groundtruths: List[GroundTruth],
-        ignore_existing_datums: bool = False,
-    ):
-        """
-        Creates ground truths.
-
-        Parameters
-        ----------
-
-        dataset : valor.Dataset
-            The dataset to create the ground truth for.
-        groundtruths : List[valor.GroundTruth]
-            The ground truths to create.
-        ignore_existing_datums : bool, default=False
-            If True, will ignore datums that already exist in the backend.
-            If False, will raise an error if any datums already exist.
-            Default is False.
-        """
-        groundtruths_json = []
-        for groundtruth in groundtruths:
-            if not isinstance(groundtruth, GroundTruth):
-                raise TypeError(
-                    f"Expected ground truth to be of type 'valor.GroundTruth' not '{type(groundtruth)}'."
-                )
-            if not isinstance(groundtruth.annotations._value, list):
-                raise TypeError
-            groundtruth_dict = groundtruth.encode_value()
-            groundtruth_dict["dataset_name"] = dataset.name
-            groundtruths_json.append(groundtruth_dict)
-        self.conn.create_groundtruths(
-            groundtruths_json, ignore_existing_datums=ignore_existing_datums
-        )
-
-    def get_groundtruth(
-        self,
-        dataset: Union[Dataset, str],
-        datum: Union[Datum, str],
-    ) -> Union[GroundTruth, None]:
-        """
-        Get a particular ground truth.
-
-        Parameters
-        ----------
-        dataset: Union[Dataset, str]
-            The dataset the datum belongs to.
-        datum: Union[Datum, str]
-            The desired datum.
-
-        Returns
-        -------
-        Union[GroundTruth, None]
-            The matching ground truth or 'None' if it doesn't exist.
-        """
-        dataset_name = (
-            dataset.name if isinstance(dataset, Dataset) else dataset
-        )
-        datum_uid = datum.uid if isinstance(datum, Datum) else datum
-        try:
-            resp = self.conn.get_groundtruth(
-                dataset_name=dataset_name, datum_uid=datum_uid  # type: ignore
-            )
-            resp.pop("dataset_name")
-            return GroundTruth.decode_value(resp)
-        except ClientException as e:
-            if e.status_code == 404:
-                return None
-            raise e
-
-    def finalize_dataset(self, dataset: Union[Dataset, str]) -> None:
-        """
-        Finalizes a dataset such that new ground truths cannot be added to it.
-
-        Parameters
-        ----------
-        dataset : Union[Dataset, str]
-            The dataset to be finalized.
-        """
-        dataset_name = (
-            dataset.name if isinstance(dataset, Dataset) else dataset
-        )
-        return self.conn.finalize_dataset(name=dataset_name)  # type: ignore
-
-    def get_dataset(
-        self,
-        name: str,
-    ) -> Union[Dataset, None]:
-        """
-        Gets a dataset by name.
-
-        Parameters
-        ----------
-        name : str
-            The name of the dataset to fetch.
-
-        Returns
-        -------
-        Union[Dataset, None]
-            A Dataset with a matching name, or 'None' if one doesn't exist.
-        """
-        dataset = Dataset.decode_value(
-            {
-                **self.conn.get_dataset(name),
-                "connection": self.conn,
-            }
-        )
-        return dataset
-
-    def get_datasets(
-        self,
-        filter_by: Optional[FilterType] = None,
-    ) -> List[Dataset]:
-        """
-        Get all datasets, with an option to filter results according to some user-defined parameters.
-
-        Parameters
-        ----------
-        filter_by : FilterType, optional
-            Optional constraints to filter by.
+        Returns
+        -------
+        List[valor.Label]
+            A list of labels.
+        """
+        filters = _format_filter(filter_by)
+        filters = asdict(filters)
+        return [Label(**label) for label in self.conn.get_labels(filters)]
+
+    def get_labels_from_dataset(
+        self, dataset: Union[Dataset, str]
+    ) -> List[Label]:
+        """
+        Get all labels associated with a dataset's ground truths.
+
+        Parameters
+        ----------
+        dataset : valor.Dataset
+            The dataset to search by.
+
+        Returns
+        -------
+        List[valor.Label]
+            A list of labels.
+        """
+        dataset_name = (
+            dataset.name if isinstance(dataset, Dataset) else dataset
+        )
+        return [
+            Label(**label)
+            for label in self.conn.get_labels_from_dataset(dataset_name)  # type: ignore
+        ]
+
+    def get_labels_from_model(self, model: Union[Model, str]) -> List[Label]:
+        """
+        Get all labels associated with a model's predictions.
+
+        Parameters
+        ----------
+        model : valor.Model
+            The model to search by.
+
+        Returns
+        -------
+        List[valor.Label]
+            A list of labels.
+        """
+        model_name = model.name if isinstance(model, Model) else model
+        return [
+            Label(**label)
+            for label in self.conn.get_labels_from_model(model_name)  # type: ignore
+        ]
+
+    def create_dataset(
+        self,
+        dataset: Union[Dataset, dict],
+    ) -> None:
+        """
+        Creates a dataset.
+
+        Parameters
+        ----------
+        dataset : valor.Dataset
+            The dataset to create.
+        """
+        if isinstance(dataset, Dataset):
+            dataset = dataset.encode_value()
+        self.conn.create_dataset(dataset)
+
+    def create_groundtruths(
+        self,
+        dataset: Dataset,
+        groundtruths: List[GroundTruth],
+        ignore_existing_datums: bool = False,
+    ):
+        """
+        Creates ground truths.
+
+        Parameters
+        ----------
+
+        dataset : valor.Dataset
+            The dataset to create the ground truth for.
+        groundtruths : List[valor.GroundTruth]
+            The ground truths to create.
+        ignore_existing_datums : bool, default=False
+            If True, will ignore datums that already exist in the backend.
+            If False, will raise an error if any datums already exist.
+            Default is False.
+        """
+        groundtruths_json = []
+        for groundtruth in groundtruths:
+            if not isinstance(groundtruth, GroundTruth):
+                raise TypeError(
+                    f"Expected ground truth to be of type 'valor.GroundTruth' not '{type(groundtruth)}'."
+                )
+            if not isinstance(groundtruth.annotations._value, list):
+                raise TypeError
+            groundtruth_dict = groundtruth.encode_value()
+            groundtruth_dict["dataset_name"] = dataset.name
+            groundtruths_json.append(groundtruth_dict)
+        self.conn.create_groundtruths(
+            groundtruths_json, ignore_existing_datums=ignore_existing_datums
+        )
+
+    def get_groundtruth(
+        self,
+        dataset: Union[Dataset, str],
+        datum: Union[Datum, str],
+    ) -> Union[GroundTruth, None]:
+        """
+        Get a particular ground truth.
+
+        Parameters
+        ----------
+        dataset: Union[Dataset, str]
+            The dataset the datum belongs to.
+        datum: Union[Datum, str]
+            The desired datum.
+
+        Returns
+        -------
+        Union[GroundTruth, None]
+            The matching ground truth or 'None' if it doesn't exist.
+        """
+        dataset_name = (
+            dataset.name if isinstance(dataset, Dataset) else dataset
+        )
+        datum_uid = datum.uid if isinstance(datum, Datum) else datum
+        try:
+            resp = self.conn.get_groundtruth(
+                dataset_name=dataset_name, datum_uid=datum_uid  # type: ignore
+            )
+            resp.pop("dataset_name")
+            return GroundTruth.decode_value(resp)
+        except ClientException as e:
+            if e.status_code == 404:
+                return None
+            raise e
+
+    def finalize_dataset(self, dataset: Union[Dataset, str]) -> None:
+        """
+        Finalizes a dataset such that new ground truths cannot be added to it.
+
+        Parameters
+        ----------
+        dataset : Union[Dataset, str]
+            The dataset to be finalized.
+        """
+        dataset_name = (
+            dataset.name if isinstance(dataset, Dataset) else dataset
+        )
+        return self.conn.finalize_dataset(name=dataset_name)  # type: ignore
 
-        Returns
-        -------
-        List[valor.Dataset]
-            A list of datasets.
-        """
-        filter_ = _format_filter(filter_by)
-        if isinstance(filter_, Filter):
-            filter_ = asdict(filter_)
-        dataset_list = []
-        for kwargs in self.conn.get_datasets(filter_):
-            dataset = Dataset.decode_value({**kwargs, "connection": self.conn})
-            dataset_list.append(dataset)
-        return dataset_list
-
-    def get_datums(
-        self,
-        filter_by: Optional[FilterType] = None,
-    ) -> List[Datum]:
-        """
-        Get all datums using an optional filter.
-
-        Parameters
-        ----------
-        filter_by : FilterType, optional
-            Optional constraints to filter by.
-
-        Returns
-        -------
-        List[valor.Datum]
-            A list of datums.
-        """
-        filter_ = _format_filter(filter_by)
-        if isinstance(filter_, Filter):
-            filter_ = asdict(filter_)
-        return [
-            Datum.decode_value(datum)
-            for datum in self.conn.get_datums(filter_)
-        ]
-
-    def get_datum(
-        self,
-        dataset: Union[Dataset, str],
-        uid: str,
-    ) -> Union[Datum, None]:
-        """
-        Get datum.
-        `GET` endpoint.
-
-        Parameters
-        ----------
-        dataset : valor.Dataset
-            The dataset the datum belongs to.
-        uid : str
-            The UID of the datum.
-
-        Returns
-        -------
-        valor.Datum
-            The requested datum or 'None' if it doesn't exist.
-        """
-        dataset_name = (
-            dataset.name if isinstance(dataset, Dataset) else dataset
-        )
-        resp = self.conn.get_datum(dataset_name=dataset_name, uid=uid)  # type: ignore
-        return Datum.decode_value(resp)
-
-    def get_dataset_status(
-        self,
-        name: str,
-    ) -> Union[TableStatus, None]:
-        """
-        Get the state of a given dataset.
-
-        Parameters
-        ----------
-        name : str
-            The name of the dataset we want to fetch the state of.
+    def get_dataset(
+        self,
+        name: str,
+    ) -> Union[Dataset, None]:
+        """
+        Gets a dataset by name.
+
+        Parameters
+        ----------
+        name : str
+            The name of the dataset to fetch.
+
+        Returns
+        -------
+        Union[Dataset, None]
+            A Dataset with a matching name, or 'None' if one doesn't exist.
+        """
+        dataset = Dataset.decode_value(
+            {
+                **self.conn.get_dataset(name),
+                "connection": self.conn,
+            }
+        )
+        return dataset
+
+    def get_datasets(
+        self,
+        filter_by: Optional[FilterType] = None,
+    ) -> List[Dataset]:
+        """
+        Get all datasets, with an option to filter results according to some user-defined parameters.
+
+        Parameters
+        ----------
+        filter_by : FilterType, optional
+            Optional constraints to filter by.
+
+        Returns
+        -------
+        List[valor.Dataset]
+            A list of datasets.
+        """
+        filters = _format_filter(filter_by)
+        if isinstance(filters, Filter):
+            filters = asdict(filters)
+        dataset_list = []
+        for kwargs in self.conn.get_datasets(filters):
+            dataset = Dataset.decode_value({**kwargs, "connection": self.conn})
+            dataset_list.append(dataset)
+        return dataset_list
+
+    def get_datums(
+        self,
+        filter_by: Optional[FilterType] = None,
+    ) -> List[Datum]:
+        """
+        Get all datums using an optional filter.
+
+        Parameters
+        ----------
+        filter_by : FilterType, optional
+            Optional constraints to filter by.
+
+        Returns
+        -------
+        List[valor.Datum]
+            A list of datums.
+        """
+        filters = _format_filter(filter_by)
+        if isinstance(filters, Filter):
+            filters = asdict(filters)
+        return [
+            Datum.decode_value(datum)
+            for datum in self.conn.get_datums(filters)
+        ]
 
-        Returns
-        -------
-        TableStatus | None
-            The state of the dataset, or 'None' if the dataset does not exist.
-        """
-        try:
-            return self.conn.get_dataset_status(name)
-        except ClientException as e:
-            if e.status_code == 404:
-                return None
-            raise e
-
-    def get_dataset_summary(self, name: str) -> DatasetSummary:
-        """
-        Gets the summary of a dataset.
-
-        Parameters
-        ----------
-        name : str
-            The name of the dataset to create a summary for.
-
-        Returns
-        -------
-        DatasetSummary
-            A dataclass containing the dataset summary.
-        """
-        return DatasetSummary(**self.conn.get_dataset_summary(name))
-
-    def delete_dataset(self, name: str, timeout: int = 0) -> None:
+    def get_datum(
+        self,
+        dataset: Union[Dataset, str],
+        uid: str,
+    ) -> Union[Datum, None]:
+        """
+        Get datum.
+        `GET` endpoint.
+
+        Parameters
+        ----------
+        dataset : valor.Dataset
+            The dataset the datum belongs to.
+        uid : str
+            The UID of the datum.
+
+        Returns
+        -------
+        valor.Datum
+            The requested datum or 'None' if it doesn't exist.
+        """
+        dataset_name = (
+            dataset.name if isinstance(dataset, Dataset) else dataset
+        )
+        resp = self.conn.get_datum(dataset_name=dataset_name, uid=uid)  # type: ignore
+        return Datum.decode_value(resp)
+
+    def get_dataset_status(
+        self,
+        name: str,
+    ) -> Union[TableStatus, None]:
         """
-        Deletes a dataset.
+        Get the state of a given dataset.
 
         Parameters
         ----------
         name : str
-            The name of the dataset to be deleted.
-        timeout : int
-            The number of seconds to wait in order to confirm that the dataset was deleted.
-        """
-        self.conn.delete_dataset(name)
-        if timeout:
-            for _ in range(timeout):
-                try:
-                    self.get_dataset(name)
-                except DatasetDoesNotExistError:
-                    break
-                time.sleep(1)
-            else:
-                raise TimeoutError(
-                    "Dataset wasn't deleted within timeout interval"
-                )
-
-    def create_model(
-        self,
-        model: Union[Model, dict],
-    ):
-        """
-        Creates a model.
-
-        Parameters
-        ----------
-        model : valor.Model
-            The model to create.
-        """
-        if isinstance(model, Model):
-            model = model.encode_value()
-        self.conn.create_model(model)
-
-    def create_predictions(
-        self,
-        dataset: Dataset,
-        model: Model,
-        predictions: List[Prediction],
-    ) -> None:
-        """
-        Creates predictions.
-
-        Parameters
-        ----------
-        dataset : valor.Dataset
-            The dataset that is being operated over.
-        model : valor.Model
-            The model making the prediction.
-        predictions : List[valor.Prediction]
-            The predictions to create.
-        """
-        predictions_json = []
-        for prediction in predictions:
-            if not isinstance(prediction, Prediction):
-                raise TypeError(
-                    f"Expected prediction to be of type 'valor.Prediction' not '{type(prediction)}'."
-                )
-            if not isinstance(prediction.annotations._value, list):
-                raise TypeError
-            prediction_dict = prediction.encode_value()
-            prediction_dict["dataset_name"] = dataset.name
-            prediction_dict["model_name"] = model.name
-            predictions_json.append(prediction_dict)
-        self.conn.create_predictions(predictions_json)
-
-    def get_prediction(
-        self,
-        dataset: Union[Dataset, str],
-        model: Union[Model, str],
-        datum: Union[Datum, str],
-    ) -> Union[Prediction, None]:
-        """
-        Get a particular prediction.
-
-        Parameters
-        ----------
-        dataset: Union[Dataset, str]
-            The dataset the datum belongs to.
-        model: Union[Model, str]
-            The model that made the prediction.
-        datum: Union[Datum, str]
-            The desired datum.
-
-        Returns
-        -------
-        Union[Prediction, None]
-            The matching prediction or 'None' if it doesn't exist.
+            The name of the dataset we want to fetch the state of.
+
+        Returns
+        -------
+        TableStatus | None
+            The state of the dataset, or 'None' if the dataset does not exist.
+        """
+        try:
+            return self.conn.get_dataset_status(name)
+        except ClientException as e:
+            if e.status_code == 404:
+                return None
+            raise e
+
+    def get_dataset_summary(self, name: str) -> DatasetSummary:
+        """
+        Gets the summary of a dataset.
+
+        Parameters
+        ----------
+        name : str
+            The name of the dataset to create a summary for.
+
+        Returns
+        -------
+        DatasetSummary
+            A dataclass containing the dataset summary.
+        """
+        return DatasetSummary(**self.conn.get_dataset_summary(name))
+
+    def delete_dataset(self, name: str, timeout: int = 0) -> None:
+        """
+        Deletes a dataset.
+
+        Parameters
+        ----------
+        name : str
+            The name of the dataset to be deleted.
+        timeout : int
+            The number of seconds to wait in order to confirm that the dataset was deleted.
+        """
+        self.conn.delete_dataset(name)
+        if timeout:
+            for _ in range(timeout):
+                try:
+                    self.get_dataset(name)
+                except DatasetDoesNotExistError:
+                    break
+                time.sleep(1)
+            else:
+                raise TimeoutError(
+                    "Dataset wasn't deleted within timeout interval"
+                )
+
+    def create_model(
+        self,
+        model: Union[Model, dict],
+    ):
+        """
+        Creates a model.
+
+        Parameters
+        ----------
+        model : valor.Model
+            The model to create.
+        """
+        if isinstance(model, Model):
+            model = model.encode_value()
+        self.conn.create_model(model)
+
+    def create_predictions(
+        self,
+        dataset: Dataset,
+        model: Model,
+        predictions: List[Prediction],
+    ) -> None:
+        """
+        Creates predictions.
+
+        Parameters
+        ----------
+        dataset : valor.Dataset
+            The dataset that is being operated over.
+        model : valor.Model
+            The model making the prediction.
+        predictions : List[valor.Prediction]
+            The predictions to create.
         """
-        dataset_name = (
-            dataset.name if isinstance(dataset, Dataset) else dataset
-        )
-        model_name = model.name if isinstance(model, Model) else model
-        datum_uid = datum.uid if isinstance(datum, Datum) else datum
-
-        resp = self.conn.get_prediction(
-            dataset_name=dataset_name,  # type: ignore
-            model_name=model_name,  # type: ignore
-            datum_uid=datum_uid,  # type: ignore
-        )
-        resp.pop("dataset_name")
-        resp.pop("model_name")
-        return Prediction.decode_value(resp)
-
-    def finalize_inferences(
-        self, dataset: Union[Dataset, str], model: Union[Model, str]
-    ) -> None:
-        """
-        Finalizes a model-dataset pairing such that new predictions cannot be added to it.
-        """
-        dataset_name = (
-            dataset.name if isinstance(dataset, Dataset) else dataset
-        )
-        model_name = model.name if isinstance(model, Model) else model
-        return self.conn.finalize_inferences(
-            dataset_name=dataset_name,  # type: ignore
-            model_name=model_name,  # type: ignore
-        )
-
-    def get_model(
-        self,
-        name: str,
-    ) -> Union[Model, None]:
-        """
-        Gets a model by name.
-
-        Parameters
-        ----------
-        name : str
-            The name of the model to fetch.
-
-        Returns
-        -------
-        Union[valor.Model, None]
-            A Model with a matching name, or 'None' if one doesn't exist.
-        """
-        return Model.decode_value(
-            {
-                **self.conn.get_model(name),
-                "connection": self.conn,
-            }
-        )
-
-    def get_models(
-        self,
-        filter_by: Optional[FilterType] = None,
-    ) -> List[Model]:
-        """
-        Get all models using an optional filter.
-
-        Parameters
-        ----------
-        filter_by : FilterType, optional
-            Optional constraints to filter by.
-
-        Returns
-        -------
-        List[valor.Model]
-            A list of models.
-        """
-        filter_ = _format_filter(filter_by)
-        if isinstance(filter_, Filter):
-            filter_ = asdict(filter_)
-        model_list = []
-        for kwargs in self.conn.get_models(filter_):
-            model = Model.decode_value({**kwargs, "connection": self.conn})
-            model_list.append(model)
-        return model_list
-
-    def get_model_status(
-        self,
-        dataset_name: str,
-        model_name: str,
-    ) -> Optional[TableStatus]:
-        """
-        Get the state of a given model over a dataset.
-
-        Parameters
-        ----------
-        dataset_name : str
-            The name of the dataset that the model is operating over.
-        model_name : str
-            The name of the model we want to fetch the state of.
-
-        Returns
-        -------
-        Union[TableStatus, None]
-            The state of the model or 'None' if the model doesn't exist.
-        """
-        try:
-            return self.conn.get_model_status(dataset_name, model_name)
-        except ClientException as e:
-            if e.status_code == 404:
-                return None
-            raise e
-
-    def get_model_eval_requests(
-        self, model: Union[Model, str]
-    ) -> List[Evaluation]:
-        """
-        Get all evaluations that have been created for a model.
-
-        This does not return evaluation results.
-
-        `GET` endpoint.
+        predictions_json = []
+        for prediction in predictions:
+            if not isinstance(prediction, Prediction):
+                raise TypeError(
+                    f"Expected prediction to be of type 'valor.Prediction' not '{type(prediction)}'."
+                )
+            if not isinstance(prediction.annotations._value, list):
+                raise TypeError
+            prediction_dict = prediction.encode_value()
+            prediction_dict["dataset_name"] = dataset.name
+            prediction_dict["model_name"] = model.name
+            predictions_json.append(prediction_dict)
+        self.conn.create_predictions(predictions_json)
+
+    def get_prediction(
+        self,
+        dataset: Union[Dataset, str],
+        model: Union[Model, str],
+        datum: Union[Datum, str],
+    ) -> Union[Prediction, None]:
+        """
+        Get a particular prediction.
+
+        Parameters
+        ----------
+        dataset: Union[Dataset, str]
+            The dataset the datum belongs to.
+        model: Union[Model, str]
+            The model that made the prediction.
+        datum: Union[Datum, str]
+            The desired datum.
+
+        Returns
+        -------
+        Union[Prediction, None]
+            The matching prediction or 'None' if it doesn't exist.
+        """
+        dataset_name = (
+            dataset.name if isinstance(dataset, Dataset) else dataset
+        )
+        model_name = model.name if isinstance(model, Model) else model
+        datum_uid = datum.uid if isinstance(datum, Datum) else datum
+
+        resp = self.conn.get_prediction(
+            dataset_name=dataset_name,  # type: ignore
+            model_name=model_name,  # type: ignore
+            datum_uid=datum_uid,  # type: ignore
+        )
+        resp.pop("dataset_name")
+        resp.pop("model_name")
+        return Prediction.decode_value(resp)
+
+    def finalize_inferences(
+        self, dataset: Union[Dataset, str], model: Union[Model, str]
+    ) -> None:
+        """
+        Finalizes a model-dataset pairing such that new predictions cannot be added to it.
+        """
+        dataset_name = (
+            dataset.name if isinstance(dataset, Dataset) else dataset
+        )
+        model_name = model.name if isinstance(model, Model) else model
+        return self.conn.finalize_inferences(
+            dataset_name=dataset_name,  # type: ignore
+            model_name=model_name,  # type: ignore
+        )
+
+    def get_model(
+        self,
+        name: str,
+    ) -> Union[Model, None]:
+        """
+        Gets a model by name.
+
+        Parameters
+        ----------
+        name : str
+            The name of the model to fetch.
+
+        Returns
+        -------
+        Union[valor.Model, None]
+            A Model with a matching name, or 'None' if one doesn't exist.
+        """
+        return Model.decode_value(
+            {
+                **self.conn.get_model(name),
+                "connection": self.conn,
+            }
+        )
+
+    def get_models(
+        self,
+        filter_by: Optional[FilterType] = None,
+    ) -> List[Model]:
+        """
+        Get all models using an optional filter.
+
+        Parameters
+        ----------
+        filter_by : FilterType, optional
+            Optional constraints to filter by.
+
+        Returns
+        -------
+        List[valor.Model]
+            A list of models.
+        """
+        filters = _format_filter(filter_by)
+        if isinstance(filters, Filter):
+            filters = asdict(filters)
+        model_list = []
+        for kwargs in self.conn.get_models(filters):
+            model = Model.decode_value({**kwargs, "connection": self.conn})
+            model_list.append(model)
+        return model_list
 
-        Parameters
-        ----------
-        model : Union[Model, str]
-            The model to search by.
-
-        Returns
-        -------
-        List[Evaluation]
-            A list of evaluations.
-        """
-        model_name = model.name if isinstance(model, Model) else model
-        return [
-            Evaluation(**evaluation, connection=self.conn)
-            for evaluation in self.conn.get_model_eval_requests(model_name)  # type: ignore
-        ]
-
-    def delete_model(self, name: str, timeout: int = 0) -> None:
-        """
-        Deletes a model.
-
-        Parameters
-        ----------
-        name : str
-            The name of the model to be deleted.
-        timeout : int
-            The number of seconds to wait in order to confirm that the model was deleted.
-        """
-        self.conn.delete_model(name)
-        if timeout:
-            for _ in range(timeout):
-                try:
-                    self.get_model(name)
-                except ModelDoesNotExistError:
-                    break
-                time.sleep(1)
-            else:
-                raise TimeoutError(
-                    "Model wasn't deleted within timeout interval"
-                )
-
-    def get_evaluations(
-        self,
-        *,
-        evaluation_ids: Optional[List[int]] = None,
-        models: Union[List[Model], List[str], None] = None,
-        datasets: Union[List[Dataset], List[str], None] = None,
-        metrics_to_sort_by: Optional[
-            Dict[str, Union[Dict[str, str], str]]
-        ] = None,
-    ) -> List[Evaluation]:
-        """
-        Returns all evaluations associated with user-supplied dataset and/or model names.
+    def get_model_status(
+        self,
+        dataset_name: str,
+        model_name: str,
+    ) -> Optional[TableStatus]:
+        """
+        Get the state of a given model over a dataset.
+
+        Parameters
+        ----------
+        dataset_name : str
+            The name of the dataset that the model is operating over.
+        model_name : str
+            The name of the model we want to fetch the state of.
+
+        Returns
+        -------
+        Union[TableStatus, None]
+            The state of the model or 'None' if the model doesn't exist.
+        """
+        try:
+            return self.conn.get_model_status(dataset_name, model_name)
+        except ClientException as e:
+            if e.status_code == 404:
+                return None
+            raise e
+
+    def get_model_eval_requests(
+        self, model: Union[Model, str]
+    ) -> List[Evaluation]:
+        """
+        Get all evaluations that have been created for a model.
+
+        This does not return evaluation results.
+
+        `GET` endpoint.
+
+        Parameters
+        ----------
+        model : Union[Model, str]
+            The model to search by.
+
+        Returns
+        -------
+        List[Evaluation]
+            A list of evaluations.
+        """
+        model_name = model.name if isinstance(model, Model) else model
+        return [
+            Evaluation(**evaluation, connection=self.conn)
+            for evaluation in self.conn.get_model_eval_requests(model_name)  # type: ignore
+        ]
 
-        Parameters
-        ----------
-        evaluation_ids : List[int], optional
-            A list of job IDs to return metrics for.
-        models : Union[List[valor.Model], List[str]], optional
-            A list of model names that we want to return metrics for.
-        datasets : Union[List[valor.Dataset], List[str]], optional
-            A list of dataset names that we want to return metrics for.
-        metrics_to_sort_by: dict[str, str | dict[str, str]], optional
-            An optional dict of metric types to sort the evaluations by.
-
-        Returns
-        -------
-        List[valor.Evaluation]
-            A list of evaluations.
-        """
-        if isinstance(datasets, list):
-            datasets = [  # type: ignore
-                element.name if isinstance(element, Dataset) else element
-                for element in datasets
-            ]
-        if isinstance(models, list):
-            models = [  # type: ignore
-                element.name if isinstance(element, Model) else element
-                for element in models
-            ]
-        return [
-            Evaluation(connection=self.conn, **evaluation)
-            for evaluation in self.conn.get_evaluations(
-                evaluation_ids=evaluation_ids,
-                models=models,  # type: ignore
-                datasets=datasets,  # type: ignore
-                metrics_to_sort_by=metrics_to_sort_by,
-            )
-        ]
-
-    def evaluate(
-        self, request: EvaluationRequest, allow_retries: bool = False
-    ) -> List[Evaluation]:
-        """
-        Creates as many evaluations as necessary to fulfill the request.
-
-        Parameters
-        ----------
-        request : schemas.EvaluationRequest
-            The requested evaluation parameters.
-        allow_retries : bool, default = False
-            Option to retry previously failed evaluations.
-
-        Returns
-        -------
-        List[Evaluation]
-            A list of evaluations that meet the parameters.
-        """
-        return [
-            Evaluation(**evaluation)
-            for evaluation in self.conn.evaluate(
-                request, allow_retries=allow_retries
-            )
-        ]
+    def delete_model(self, name: str, timeout: int = 0) -> None:
+        """
+        Deletes a model.
+
+        Parameters
+        ----------
+        name : str
+            The name of the model to be deleted.
+        timeout : int
+            The number of seconds to wait in order to confirm that the model was deleted.
+        """
+        self.conn.delete_model(name)
+        if timeout:
+            for _ in range(timeout):
+                try:
+                    self.get_model(name)
+                except ModelDoesNotExistError:
+                    break
+                time.sleep(1)
+            else:
+                raise TimeoutError(
+                    "Model wasn't deleted within timeout interval"
+                )
+
+    def get_evaluations(
+        self,
+        *,
+        evaluation_ids: Optional[List[int]] = None,
+        models: Union[List[Model], List[str], None] = None,
+        datasets: Union[List[Dataset], List[str], None] = None,
+        metrics_to_sort_by: Optional[
+            Dict[str, Union[Dict[str, str], str]]
+        ] = None,
+    ) -> List[Evaluation]:
+        """
+        Returns all evaluations associated with user-supplied dataset and/or model names.
+
+        Parameters
+        ----------
+        evaluation_ids : List[int], optional
+            A list of job IDs to return metrics for.
+        models : Union[List[valor.Model], List[str]], optional
+            A list of model names that we want to return metrics for.
+        datasets : Union[List[valor.Dataset], List[str]], optional
+            A list of dataset names that we want to return metrics for.
+        metrics_to_sort_by: dict[str, str | dict[str, str]], optional
+            An optional dict of metric types to sort the evaluations by.
+
+        Returns
+        -------
+        List[valor.Evaluation]
+            A list of evaluations.
+        """
+        if isinstance(datasets, list):
+            datasets = [  # type: ignore
+                element.name if isinstance(element, Dataset) else element
+                for element in datasets
+            ]
+        if isinstance(models, list):
+            models = [  # type: ignore
+                element.name if isinstance(element, Model) else element
+                for element in models
+            ]
+        return [
+            Evaluation(connection=self.conn, **evaluation)
+            for evaluation in self.conn.get_evaluations(
+                evaluation_ids=evaluation_ids,
+                models=models,  # type: ignore
+                datasets=datasets,  # type: ignore
+                metrics_to_sort_by=metrics_to_sort_by,
+            )
+        ]
+
+    def evaluate(
+        self, request: EvaluationRequest, allow_retries: bool = False
+    ) -> List[Evaluation]:
+        """
+        Creates as many evaluations as necessary to fulfill the request.
+
+        Parameters
+        ----------
+        request : schemas.EvaluationRequest
+            The requested evaluation parameters.
+        allow_retries : bool, default = False
+            Option to retry previously failed evaluations.
+
+        Returns
+        -------
+        List[Evaluation]
+            A list of evaluations that meet the parameters.
+        """
+        return [
+            Evaluation(**evaluation)
+            for evaluation in self.conn.evaluate(
+                request, allow_retries=allow_retries
+            )
+        ]
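
A minimal usage sketch (not from the source): the host URL below is a placeholder, and `Client` is assumed to be importable from the top-level valor package.

from valor import Client

# Connect once, then reuse the client for all subsequent calls.
client = Client.connect("http://localhost:8000")
print([dataset.name for dataset in client.get_datasets()])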
 
@@ -2227,43 +2227,43 @@

Source code in valor/coretypes.py

-@classmethod
-def connect(
-    cls,
-    host: str,
-    access_token: Optional[str] = None,
-    reconnect: bool = False,
-) -> Client:
-    """
-    Establishes a connection to the Valor API.
-
-    Parameters
-    ----------
-    host : str
-        The host to connect to. Should start with "http://" or "https://".
-    access_token : str
-        The access token for the host (if the host requires authentication).
-    """
-    connect(host=host, access_token=access_token, reconnect=reconnect)
-    return cls(get_connection())
+@classmethod
+def connect(
+    cls,
+    host: str,
+    access_token: Optional[str] = None,
+    reconnect: bool = False,
+) -> Client:
+    """
+    Establishes a connection to the Valor API.
+
+    Parameters
+    ----------
+    host : str
+        The host to connect to. Should start with "http://" or "https://".
+    access_token : str
+        The access token for the host (if the host requires authentication).
+    """
+    connect(host=host, access_token=access_token, reconnect=reconnect)
+    return cls(get_connection())
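
For example, a hedged sketch (the host and token are placeholders):

client = Client.connect(
    host="http://localhost:8000",  # placeholder host
    access_token=None,             # only required for authenticated hosts
)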
 
@@ -2303,35 +2303,35 @@

Source code in valor/coretypes.py

-def create_dataset(
-    self,
-    dataset: Union[Dataset, dict],
-) -> None:
-    """
-    Creates a dataset.
-
-    Parameters
-    ----------
-    dataset : valor.Dataset
-        The dataset to create.
-    """
-    if isinstance(dataset, Dataset):
-        dataset = dataset.encode_value()
-    self.conn.create_dataset(dataset)
+def create_dataset(
+    self,
+    dataset: Union[Dataset, dict],
+) -> None:
+    """
+    Creates a dataset.
+
+    Parameters
+    ----------
+    dataset : valor.Dataset
+        The dataset to create.
+    """
+    if isinstance(dataset, Dataset):
+        dataset = dataset.encode_value()
+    self.conn.create_dataset(dataset)
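
A minimal sketch, assuming `dataset` is a valor.Dataset constructed elsewhere; per the Union[Dataset, dict] signature, the encoded dict form is accepted as well.

client.create_dataset(dataset)
# Equivalently, pass the already-encoded form:
client.create_dataset(dataset.encode_value())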
 
@@ -2401,75 +2401,75 @@

Source code in valor/coretypes.py

-def create_groundtruths(
-    self,
-    dataset: Dataset,
-    groundtruths: List[GroundTruth],
-    ignore_existing_datums: bool = False,
-):
-    """
-    Creates ground truths.
-
-    Parameters
-    ----------
-
-    dataset : valor.Dataset
-        The dataset to create the ground truth for.
-    groundtruths : List[valor.GroundTruth]
-        The ground truths to create.
-    ignore_existing_datums : bool, default=False
-        If True, will ignore datums that already exist in the backend.
-        If False, will raise an error if any datums already exist.
-        Default is False.
-    """
-    groundtruths_json = []
-    for groundtruth in groundtruths:
-        if not isinstance(groundtruth, GroundTruth):
-            raise TypeError(
-                f"Expected ground truth to be of type 'valor.GroundTruth' not '{type(groundtruth)}'."
-            )
-        if not isinstance(groundtruth.annotations._value, list):
-            raise TypeError
-        groundtruth_dict = groundtruth.encode_value()
-        groundtruth_dict["dataset_name"] = dataset.name
-        groundtruths_json.append(groundtruth_dict)
-    self.conn.create_groundtruths(
-        groundtruths_json, ignore_existing_datums=ignore_existing_datums
-    )
+def create_groundtruths(
+    self,
+    dataset: Dataset,
+    groundtruths: List[GroundTruth],
+    ignore_existing_datums: bool = False,
+):
+    """
+    Creates ground truths.
+
+    Parameters
+    ----------
+
+    dataset : valor.Dataset
+        The dataset to create the ground truth for.
+    groundtruths : List[valor.GroundTruth]
+        The ground truths to create.
+    ignore_existing_datums : bool, default=False
+        If True, will ignore datums that already exist in the backend.
+        If False, will raise an error if any datums already exist.
+        Default is False.
+    """
+    groundtruths_json = []
+    for groundtruth in groundtruths:
+        if not isinstance(groundtruth, GroundTruth):
+            raise TypeError(
+                f"Expected ground truth to be of type 'valor.GroundTruth' not '{type(groundtruth)}'."
+            )
+        if not isinstance(groundtruth.annotations._value, list):
+            raise TypeError
+        groundtruth_dict = groundtruth.encode_value()
+        groundtruth_dict["dataset_name"] = dataset.name
+        groundtruths_json.append(groundtruth_dict)
+    self.conn.create_groundtruths(
+        groundtruths_json, ignore_existing_datums=ignore_existing_datums
+    )
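
A sketch, assuming `dataset` and the list `gts` of valor.GroundTruth objects were built elsewhere:

client.create_groundtruths(
    dataset=dataset,
    groundtruths=gts,
    ignore_existing_datums=True,  # skip datums already present instead of raising
)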
 
@@ -2509,35 +2509,35 @@

Source code in valor/coretypes.py

-def create_model(
-    self,
-    model: Union[Model, dict],
-):
-    """
-    Creates a model.
-
-    Parameters
-    ----------
-    model : valor.Model
-        The model to create.
-    """
-    if isinstance(model, Model):
-        model = model.encode_value()
-    self.conn.create_model(model)
+def create_model(
+    self,
+    model: Union[Model, dict],
+):
+    """
+    Creates a model.
+
+    Parameters
+    ----------
+    model : valor.Model
+        The model to create.
+    """
+    if isinstance(model, Model):
+        model = model.encode_value()
+    self.conn.create_model(model)
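
As with create_dataset, either a valor.Model or its encoded dict is accepted; a sketch assuming `model` was built elsewhere:

client.create_model(model)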
 
@@ -2605,67 +2605,67 @@

Source code in valor/coretypes.py

-def create_predictions(
-    self,
-    dataset: Dataset,
-    model: Model,
-    predictions: List[Prediction],
-) -> None:
-    """
-    Creates predictions.
-
-    Parameters
-    ----------
-    dataset : valor.Dataset
-        The dataset that is being operated over.
-    model : valor.Model
-        The model making the prediction.
-    predictions : List[valor.Prediction]
-        The predictions to create.
-    """
-    predictions_json = []
-    for prediction in predictions:
-        if not isinstance(prediction, Prediction):
-            raise TypeError(
-                f"Expected prediction to be of type 'valor.Prediction' not '{type(prediction)}'."
-            )
-        if not isinstance(prediction.annotations._value, list):
-            raise TypeError
-        prediction_dict = prediction.encode_value()
-        prediction_dict["dataset_name"] = dataset.name
-        prediction_dict["model_name"] = model.name
-        predictions_json.append(prediction_dict)
-    self.conn.create_predictions(predictions_json)
+def create_predictions(
+    self,
+    dataset: Dataset,
+    model: Model,
+    predictions: List[Prediction],
+) -> None:
+    """
+    Creates predictions.
+
+    Parameters
+    ----------
+    dataset : valor.Dataset
+        The dataset that is being operated over.
+    model : valor.Model
+        The model making the prediction.
+    predictions : List[valor.Prediction]
+        The predictions to create.
+    """
+    predictions_json = []
+    for prediction in predictions:
+        if not isinstance(prediction, Prediction):
+            raise TypeError(
+                f"Expected prediction to be of type 'valor.Prediction' not '{type(prediction)}'."
+            )
+        if not isinstance(prediction.annotations._value, list):
+            raise TypeError
+        prediction_dict = prediction.encode_value()
+        prediction_dict["dataset_name"] = dataset.name
+        prediction_dict["model_name"] = model.name
+        predictions_json.append(prediction_dict)
+    self.conn.create_predictions(predictions_json)
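
A sketch, assuming `dataset`, `model`, and the list `preds` of valor.Prediction objects already exist:

client.create_predictions(dataset=dataset, model=model, predictions=preds)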
 
@@ -2719,51 +2719,51 @@

Source code in valor/coretypes.py

-def delete_dataset(self, name: str, timeout: int = 0) -> None:
-    """
-    Deletes a dataset.
-
-    Parameters
-    ----------
-    name : str
-        The name of the dataset to be deleted.
-    timeout : int
-        The number of seconds to wait in order to confirm that the dataset was deleted.
-    """
-    self.conn.delete_dataset(name)
-    if timeout:
-        for _ in range(timeout):
-            try:
-                self.get_dataset(name)
-            except DatasetDoesNotExistError:
-                break
-            time.sleep(1)
-        else:
-            raise TimeoutError(
-                "Dataset wasn't deleted within timeout interval"
-            )
+def delete_dataset(self, name: str, timeout: int = 0) -> None:
+    """
+    Deletes a dataset.
+
+    Parameters
+    ----------
+    name : str
+        The name of the dataset to be deleted.
+    timeout : int
+        The number of seconds to wait in order to confirm that the dataset was deleted.
+    """
+    self.conn.delete_dataset(name)
+    if timeout:
+        for _ in range(timeout):
+            try:
+                self.get_dataset(name)
+            except DatasetDoesNotExistError:
+                break
+            time.sleep(1)
+        else:
+            raise TimeoutError(
+                "Dataset wasn't deleted within timeout interval"
+            )
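
For example (the dataset name is a placeholder):

# Block for up to 30 seconds until the deletion is confirmed;
# with the default timeout=0 the call returns without confirmation.
client.delete_dataset("my-dataset", timeout=30)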
 
@@ -2817,51 +2817,51 @@

Source code in valor/coretypes.py

-def delete_model(self, name: str, timeout: int = 0) -> None:
-    """
-    Deletes a model.
-
-    Parameters
-    ----------
-    name : str
-        The name of the model to be deleted.
-    timeout : int
-        The number of seconds to wait in order to confirm that the model was deleted.
-    """
-    self.conn.delete_model(name)
-    if timeout:
-        for _ in range(timeout):
-            try:
-                self.get_model(name)
-            except ModelDoesNotExistError:
-                break
-            time.sleep(1)
-        else:
-            raise TimeoutError(
-                "Model wasn't deleted within timeout interval"
-            )
+def delete_model(self, name: str, timeout: int = 0) -> None:
+    """
+    Deletes a model.
+
+    Parameters
+    ----------
+    name : str
+        The name of the model to be deleted.
+    timeout : int
+        The number of seconds to wait in order to confirm that the model was deleted.
+    """
+    self.conn.delete_model(name)
+    if timeout:
+        for _ in range(timeout):
+            try:
+                self.get_model(name)
+            except ModelDoesNotExistError:
+                break
+            time.sleep(1)
+        else:
+            raise TimeoutError(
+                "Model wasn't deleted within timeout interval"
+            )
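
Usage mirrors delete_dataset (placeholder name):

client.delete_model("my-model", timeout=30)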
 
@@ -2936,53 +2936,53 @@

Source code in valor/coretypes.py

-def evaluate(
-    self, request: EvaluationRequest, allow_retries: bool = False
-) -> List[Evaluation]:
-    """
-    Creates as many evaluations as necessary to fulfill the request.
-
-    Parameters
-    ----------
-    request : schemas.EvaluationRequest
-        The requested evaluation parameters.
-    allow_retries : bool, default = False
-        Option to retry previously failed evaluations.
-
-    Returns
-    -------
-    List[Evaluation]
-        A list of evaluations that meet the parameters.
-    """
-    return [
-        Evaluation(**evaluation)
-        for evaluation in self.conn.evaluate(
-            request, allow_retries=allow_retries
-        )
-    ]
+def evaluate(
+    self, request: EvaluationRequest, allow_retries: bool = False
+) -> List[Evaluation]:
+    """
+    Creates as many evaluations as necessary to fulfill the request.
+
+    Parameters
+    ----------
+    request : schemas.EvaluationRequest
+        The requested evaluation parameters.
+    allow_retries : bool, default = False
+        Option to retry previously failed evaluations.
+
+    Returns
+    -------
+    List[Evaluation]
+        A list of evaluations that meet the parameters.
+    """
+    return [
+        Evaluation(**evaluation)
+        for evaluation in self.conn.evaluate(
+            request, allow_retries=allow_retries
+        )
+    ]
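
A sketch, assuming `request` is an EvaluationRequest assembled elsewhere:

evaluations = client.evaluate(request, allow_retries=True)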
 
@@ -3022,31 +3022,31 @@

Source code in valor/coretypes.py

-def finalize_dataset(self, dataset: Union[Dataset, str]) -> None:
-    """
-    Finalizes a dataset such that new ground truths cannot be added to it.
-
-    Parameters
-    ----------
-    dataset : Union[Dataset, str]
-        The dataset to be finalized.
-    """
-    dataset_name = (
-        dataset.name if isinstance(dataset, Dataset) else dataset
-    )
-    return self.conn.finalize_dataset(name=dataset_name)  # type: ignore
+def finalize_dataset(self, dataset: Union[Dataset, str]) -> None:
+    """
+    Finalizes a dataset such that new ground truths cannot be added to it.
+
+    Parameters
+    ----------
+    dataset : Union[Dataset, str]
+        The dataset to be finalized.
+    """
+    dataset_name = (
+        dataset.name if isinstance(dataset, Dataset) else dataset
+    )
+    return self.conn.finalize_dataset(name=dataset_name)  # type: ignore
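
For example (placeholder name; a Dataset object is accepted in place of the name):

client.finalize_dataset("my-dataset")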
 
@@ -3059,33 +3059,33 @@

Finalizes a model-dataset pairing such that new predictions cannot be added to it.

Source code in valor/coretypes.py

-def finalize_inferences(
-    self, dataset: Union[Dataset, str], model: Union[Model, str]
-) -> None:
-    """
-    Finalizes a model-dataset pairing such that new predictions cannot be added to it.
-    """
-    dataset_name = (
-        dataset.name if isinstance(dataset, Dataset) else dataset
-    )
-    model_name = model.name if isinstance(model, Model) else model
-    return self.conn.finalize_inferences(
-        dataset_name=dataset_name,  # type: ignore
-        model_name=model_name,  # type: ignore
-    )
+def finalize_inferences(
+    self, dataset: Union[Dataset, str], model: Union[Model, str]
+) -> None:
+    """
+    Finalizes a model-dataset pairing such that new predictions cannot be added to it.
+    """
+    dataset_name = (
+        dataset.name if isinstance(dataset, Dataset) else dataset
+    )
+    model_name = model.name if isinstance(model, Model) else model
+    return self.conn.finalize_inferences(
+        dataset_name=dataset_name,  # type: ignore
+        model_name=model_name,  # type: ignore
+    )
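
For example (placeholder names; Dataset and Model objects are accepted too):

client.finalize_inferences(dataset="my-dataset", model="my-model")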
 
@@ -3146,53 +3146,53 @@

Source code in valor/coretypes.py

-def get_dataset(
-    self,
-    name: str,
-) -> Union[Dataset, None]:
-    """
-    Gets a dataset by name.
-
-    Parameters
-    ----------
-    name : str
-        The name of the dataset to fetch.
-
-    Returns
-    -------
-    Union[Dataset, None]
-        A Dataset with a matching name, or 'None' if one doesn't exist.
-    """
-    dataset = Dataset.decode_value(
-        {
-            **self.conn.get_dataset(name),
-            "connection": self.conn,
-        }
-    )
-    return dataset
+def get_dataset(
+    self,
+    name: str,
+) -> Union[Dataset, None]:
+    """
+    Gets a dataset by name.
+
+    Parameters
+    ----------
+    name : str
+        The name of the dataset to fetch.
+
+    Returns
+    -------
+    Union[Dataset, None]
+        A Dataset with a matching name, or 'None' if one doesn't exist.
+    """
+    dataset = Dataset.decode_value(
+        {
+            **self.conn.get_dataset(name),
+            "connection": self.conn,
+        }
+    )
+    return dataset
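For example (hypothetical name, `client` as above):

    dataset = client.get_dataset("dataset1")
    if dataset is not None:
        print(dataset.name, dataset.metadata)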
 
@@ -3253,51 +3253,51 @@

Source code in valor/coretypes.py -
def get_dataset_status(
-    self,
-    name: str,
-) -> Union[TableStatus, None]:
-    """
-    Get the state of a given dataset.
-
-    Parameters
-    ----------
-    name : str
-        The name of the dataset we want to fetch the state of.
-
-    Returns
-    ------
-    TableStatus | None
-        The state of the dataset, or 'None' if the dataset does not exist.
-    """
-    try:
-        return self.conn.get_dataset_status(name)
-    except ClientException as e:
-        if e.status_code == 404:
-            return None
-        raise e
+
def get_dataset_status(
+    self,
+    name: str,
+) -> Union[TableStatus, None]:
+    """
+    Get the state of a given dataset.
+
+    Parameters
+    ----------
+    name : str
+        The name of the dataset to fetch the state of.
+
+    Returns
+    -------
+    TableStatus | None
+        The state of the dataset, or 'None' if the dataset does not exist.
+    """
+    try:
+        return self.conn.get_dataset_status(name)
+    except ClientException as e:
+        if e.status_code == 404:
+            return None
+        raise e
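A sketch of the "'None' means missing" contract (hypothetical name):

    status = client.get_dataset_status("dataset1")
    if status is None:
        print("dataset does not exist")
    else:
        print(status)  # a TableStatus member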
 
@@ -3358,35 +3358,35 @@

Source code in valor/coretypes.py -
def get_dataset_summary(self, name: str) -> DatasetSummary:
-    """
-    Gets the summary of a dataset.
-
-    Parameters
-    ----------
-    name : str
-        The name of the dataset to create a summary for.
-
-    Returns
-    -------
-    DatasetSummary
-        A dataclass containing the dataset summary.
-    """
-    return DatasetSummary(**self.conn.get_dataset_summary(name))
+
def get_dataset_summary(self, name: str) -> DatasetSummary:
+    """
+    Gets the summary of a dataset.
+
+    Parameters
+    ----------
+    name : str
+        The name of the dataset to create a summary for.
+
+    Returns
+    -------
+    DatasetSummary
+        A dataclass containing the dataset summary.
+    """
+    return DatasetSummary(**self.conn.get_dataset_summary(name))
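For example, assuming the summary fields listed under `Dataset.get_summary` below (hypothetical name):

    summary = client.get_dataset_summary("dataset1")
    print(summary.num_datums, summary.num_annotations)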
 
@@ -3447,68 +3447,68 @@

Source code in valor/coretypes.py -
def get_datasets(
-    self,
-    filter_by: Optional[FilterType] = None,
-) -> List[Dataset]:
-    """
-    Get all datasets, with an option to filter results according to some user-defined parameters.
-
-    Parameters
-    ----------
-    filter_by : FilterType, optional
-        Optional constraints to filter by.
-
-    Returns
-    ------
-    List[valor.Dataset]
-        A list of datasets.
-    """
-    filter_ = _format_filter(filter_by)
-    if isinstance(filter_, Filter):
-        filter_ = asdict(filter_)
-    dataset_list = []
-    for kwargs in self.conn.get_datasets(filter_):
-        dataset = Dataset.decode_value({**kwargs, "connection": self.conn})
-        dataset_list.append(dataset)
-    return dataset_list
-
-
-
-
-
-

-valor.Client.get_datum(dataset, uid) -

-
-

Get datum. -GET endpoint.

-

Parameters:

- +
def get_datasets(
+    self,
+    filter_by: Optional[FilterType] = None,
+) -> List[Dataset]:
+    """
+    Get all datasets, with an option to filter results according to some user-defined parameters.
+
+    Parameters
+    ----------
+    filter_by : FilterType, optional
+        Optional constraints to filter by.
+
+    Returns
+    -------
+    List[valor.Dataset]
+        A list of datasets.
+    """
+    filters = _format_filter(filter_by)
+    if isinstance(filters, Filter):
+        filters = asdict(filters)
+    dataset_list = []
+    for kwargs in self.conn.get_datasets(filters):
+        dataset = Dataset.decode_value({**kwargs, "connection": self.conn})
+        dataset_list.append(dataset)
+    return dataset_list
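A sketch without a filter (the exact FilterType shape is not documented in this section, so none is passed):

    for dataset in client.get_datasets():
        print(dataset.name)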
+
+
+
+
+
+

+valor.Client.get_datum(dataset, uid) +

+
+

Get datum. +GET endpoint.

+

Parameters:

+ @@ -3571,53 +3571,53 @@

Name
Source code in valor/coretypes.py -
def get_datum(
-    self,
-    dataset: Union[Dataset, str],
-    uid: str,
-) -> Union[Datum, None]:
-    """
-    Get datum.
-    `GET` endpoint.
-    Parameters
-    ----------
-    dataset : valor.Dataset
-        The dataset the datum belongs to.
-    uid : str
-        The UID of the datum.
-    Returns
-    -------
-    valor.Datum
-        The requested datum or 'None' if it doesn't exist.
-    """
-    dataset_name = (
-        dataset.name if isinstance(dataset, Dataset) else dataset
-    )
-    resp = self.conn.get_datum(dataset_name=dataset_name, uid=uid)  # type: ignore
-    return Datum.decode_value(resp)
+
def get_datum(
+    self,
+    dataset: Union[Dataset, str],
+    uid: str,
+) -> Union[Datum, None]:
+    """
+    Get datum.
+    `GET` endpoint.
+
+    Parameters
+    ----------
+    dataset : Union[Dataset, str]
+        The dataset the datum belongs to.
+    uid : str
+        The UID of the datum.
+
+    Returns
+    -------
+    valor.Datum
+        The requested datum or 'None' if it doesn't exist.
+    """
+    dataset_name = (
+        dataset.name if isinstance(dataset, Dataset) else dataset
+    )
+    resp = self.conn.get_datum(dataset_name=dataset_name, uid=uid)  # type: ignore
+    return Datum.decode_value(resp)
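For example (hypothetical dataset name and datum UID):

    datum = client.get_datum(dataset="dataset1", uid="uid1")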
 
@@ -3678,53 +3678,53 @@

Source code in valor/coretypes.py -
def get_datums(
-    self,
-    filter_by: Optional[FilterType] = None,
-) -> List[Datum]:
-    """
-    Get all datums using an optional filter.
-
-    Parameters
-    ----------
-    filter_by : FilterType, optional
-        Optional constraints to filter by.
-
-    Returns
-    -------
-    List[valor.Datum]
-        A list datums.
-    """
-    filter_ = _format_filter(filter_by)
-    if isinstance(filter_, Filter):
-        filter_ = asdict(filter_)
-    return [
-        Datum.decode_value(datum)
-        for datum in self.conn.get_datums(filter_)
-    ]
+
def get_datums(
+    self,
+    filter_by: Optional[FilterType] = None,
+) -> List[Datum]:
+    """
+    Get all datums using an optional filter.
+
+    Parameters
+    ----------
+    filter_by : FilterType, optional
+        Optional constraints to filter by.
+
+    Returns
+    -------
+    List[valor.Datum]
+        A list of datums.
+    """
+    filters = _format_filter(filter_by)
+    if isinstance(filters, Filter):
+        filters = asdict(filters)
+    return [
+        Datum.decode_value(datum)
+        for datum in self.conn.get_datums(filters)
+    ]
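A quick sketch, again passing no filter:

    datums = client.get_datums()
    print(len(datums))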
 
@@ -3827,44 +3827,7 @@

Source code in valor/coretypes.py -
@@ -3874,54 +3837,91 @@ 

def get_evaluations(
-    self,
-    *,
-    evaluation_ids: Optional[List[int]] = None,
-    models: Union[List[Model], List[str], None] = None,
-    datasets: Union[List[Dataset], List[str], None] = None,
-    metrics_to_sort_by: Optional[
-        Dict[str, Union[Dict[str, str], str]]
-    ] = None,
-) -> List[Evaluation]:
-    """
-    Returns all evaluations associated with user-supplied dataset and/or model names.
-
-    Parameters
-    ----------
-    evaluation_ids : List[int], optional.
-        A list of job IDs to return metrics for.
-    models : Union[List[valor.Model], List[str]], optional
-        A list of model names that we want to return metrics for.
-    datasets : Union[List[valor.Dataset], List[str]], optional
-        A list of dataset names that we want to return metrics for.
-    metrics_to_sort_by: dict[str, str | dict[str, str]], optional
-        An optional dict of metric types to sort the evaluations by.
-
-    Returns
-    -------
-    List[valor.Evaluation]
-        A list of evaluations.
-    """
-    if isinstance(datasets, list):
-        datasets = [  # type: ignore
-            element.name if isinstance(element, Dataset) else element
-            for element in datasets
-        ]
-    if isinstance(models, list):
-        models = [  # type: ignore
-            element.name if isinstance(element, Model) else element
-            for element in models
-        ]
-    return [
-        Evaluation(connection=self.conn, **evaluation)
-        for evaluation in self.conn.get_evaluations(
-            evaluation_ids=evaluation_ids,
-            models=models,  # type: ignore
-            datasets=datasets,  # type: ignore
-            metrics_to_sort_by=metrics_to_sort_by,
-        )
-    ]
def get_evaluations(
+    self,
+    *,
+    evaluation_ids: Optional[List[int]] = None,
+    models: Union[List[Model], List[str], None] = None,
+    datasets: Union[List[Dataset], List[str], None] = None,
+    metrics_to_sort_by: Optional[
+        Dict[str, Union[Dict[str, str], str]]
+    ] = None,
+) -> List[Evaluation]:
+    """
+    Returns all evaluations associated with user-supplied dataset and/or model names.
+
+    Parameters
+    ----------
+    evaluation_ids : List[int], optional
+        A list of job IDs to return metrics for.
+    models : Union[List[valor.Model], List[str]], optional
+        A list of model names to return metrics for.
+    datasets : Union[List[valor.Dataset], List[str]], optional
+        A list of dataset names to return metrics for.
+    metrics_to_sort_by: dict[str, str | dict[str, str]], optional
+        An optional dict of metric types to sort the evaluations by.
+
+    Returns
+    -------
+    List[valor.Evaluation]
+        A list of evaluations.
+    """
+    if isinstance(datasets, list):
+        datasets = [  # type: ignore
+            element.name if isinstance(element, Dataset) else element
+            for element in datasets
+        ]
+    if isinstance(models, list):
+        models = [  # type: ignore
+            element.name if isinstance(element, Model) else element
+            for element in models
+        ]
+    return [
+        Evaluation(connection=self.conn, **evaluation)
+        for evaluation in self.conn.get_evaluations(
+            evaluation_ids=evaluation_ids,
+            models=models,  # type: ignore
+            datasets=datasets,  # type: ignore
+            metrics_to_sort_by=metrics_to_sort_by,
+        )
+    ]
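For example (hypothetical names):

    evaluations = client.get_evaluations(
        datasets=["dataset1"], models=["model1"]
    )
    for evaluation in evaluations:
        print(evaluation.id, evaluation.status)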
 
@@ -3996,73 +3996,73 @@

Source code in valor/coretypes.py -
def get_groundtruth(
-    self,
-    dataset: Union[Dataset, str],
-    datum: Union[Datum, str],
-) -> Union[GroundTruth, None]:
-    """
-    Get a particular ground truth.
-
-    Parameters
-    ----------
-    dataset: Union[Dataset, str]
-        The dataset the datum belongs to.
-    datum: Union[Datum, str]
-        The desired datum.
-
-    Returns
-    ----------
-    Union[GroundTruth, None]
-        The matching ground truth or 'None' if it doesn't exist.
-    """
-    dataset_name = (
-        dataset.name if isinstance(dataset, Dataset) else dataset
-    )
-    datum_uid = datum.uid if isinstance(datum, Datum) else datum
-    try:
-        resp = self.conn.get_groundtruth(
-            dataset_name=dataset_name, datum_uid=datum_uid  # type: ignore
-        )
-        resp.pop("dataset_name")
-        return GroundTruth.decode_value(resp)
-    except ClientException as e:
-        if e.status_code == 404:
-            return None
-        raise e
+
def get_groundtruth(
+    self,
+    dataset: Union[Dataset, str],
+    datum: Union[Datum, str],
+) -> Union[GroundTruth, None]:
+    """
+    Get a particular ground truth.
+
+    Parameters
+    ----------
+    dataset: Union[Dataset, str]
+        The dataset the datum belongs to.
+    datum: Union[Datum, str]
+        The desired datum.
+
+    Returns
+    -------
+    Union[GroundTruth, None]
+        The matching ground truth or 'None' if it doesn't exist.
+    """
+    dataset_name = (
+        dataset.name if isinstance(dataset, Dataset) else dataset
+    )
+    datum_uid = datum.uid if isinstance(datum, Datum) else datum
+    try:
+        resp = self.conn.get_groundtruth(
+            dataset_name=dataset_name, datum_uid=datum_uid  # type: ignore
+        )
+        resp.pop("dataset_name")
+        return GroundTruth.decode_value(resp)
+    except ClientException as e:
+        if e.status_code == 404:
+            return None
+        raise e
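A sketch (hypothetical names):

    groundtruth = client.get_groundtruth(dataset="dataset1", datum="uid1")
    if groundtruth is None:
        print("no ground truth for this datum")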
 
@@ -4123,45 +4123,45 @@

Source code in valor/coretypes.py -
def get_labels(
-    self,
-    filter_by: Optional[FilterType] = None,
-) -> List[Label]:
-    """
-    Gets all labels using an optional filter.
-
-    Parameters
-    ----------
-    filter_by : FilterType, optional
-        Optional constraints to filter by.
-
-    Returns
-    ------
-    List[valor.Label]
-        A list of labels.
-    """
-    filter_ = _format_filter(filter_by)
-    filter_ = asdict(filter_)
-    return [Label(**label) for label in self.conn.get_labels(filter_)]
+
def get_labels(
+    self,
+    filter_by: Optional[FilterType] = None,
+) -> List[Label]:
+    """
+    Gets all labels using an optional filter.
+
+    Parameters
+    ----------
+    filter_by : FilterType, optional
+        Optional constraints to filter by.
+
+    Returns
+    -------
+    List[valor.Label]
+        A list of labels.
+    """
+    filters = _format_filter(filter_by)
+    filters = asdict(filters)
+    return [Label(**label) for label in self.conn.get_labels(filters)]
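For example, fetching every label with no filter:

    labels = client.get_labels()
    print(len(labels))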
 
@@ -4222,51 +4222,51 @@

Source code in valor/coretypes.py -
def get_labels_from_dataset(
-    self, dataset: Union[Dataset, str]
-) -> List[Label]:
-    """
-    Get all labels associated with a dataset's ground truths.
-
-    Parameters
-    ----------
-    dataset : valor.Dataset
-        The dataset to search by.
-
-    Returns
-    ------
-    List[valor.Label]
-        A list of labels.
-    """
-    dataset_name = (
-        dataset.name if isinstance(dataset, Dataset) else dataset
-    )
-    return [
-        Label(**label)
-        for label in self.conn.get_labels_from_dataset(dataset_name)  # type: ignore
-    ]
+
def get_labels_from_dataset(
+    self, dataset: Union[Dataset, str]
+) -> List[Label]:
+    """
+    Get all labels associated with a dataset's ground truths.
+
+    Parameters
+    ----------
+    dataset : Union[Dataset, str]
+        The dataset to search by.
+
+    Returns
+    -------
+    List[valor.Label]
+        A list of labels.
+    """
+    dataset_name = (
+        dataset.name if isinstance(dataset, Dataset) else dataset
+    )
+    return [
+        Label(**label)
+        for label in self.conn.get_labels_from_dataset(dataset_name)  # type: ignore
+    ]
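For example (hypothetical name):

    labels = client.get_labels_from_dataset("dataset1")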
 
@@ -4327,43 +4327,43 @@

Source code in valor/coretypes.py -
def get_labels_from_model(self, model: Union[Model, str]) -> List[Label]:
-    """
-    Get all labels associated with a model's ground truths.
-
-    Parameters
-    ----------
-    model : valor.Model
-        The model to search by.
-
-    Returns
-    ------
-    List[valor.Label]
-        A list of labels.
-    """
-    model_name = model.name if isinstance(model, Model) else model
-    return [
-        Label(**label)
-        for label in self.conn.get_labels_from_model(model_name)  # type: ignore
-    ]
+
def get_labels_from_model(self, model: Union[Model, str]) -> List[Label]:
+    """
+    Get all labels associated with a model's ground truths.
+
+    Parameters
+    ----------
+    model : Union[Model, str]
+        The model to search by.
+
+    Returns
+    -------
+    List[valor.Label]
+        A list of labels.
+    """
+    model_name = model.name if isinstance(model, Model) else model
+    return [
+        Label(**label)
+        for label in self.conn.get_labels_from_model(model_name)  # type: ignore
+    ]
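And the model-side counterpart (hypothetical name):

    labels = client.get_labels_from_model("model1")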
 
@@ -4424,51 +4424,51 @@

Source code in valor/coretypes.py -
def get_model(
-    self,
-    name: str,
-) -> Union[Model, None]:
-    """
-    Gets a model by name.
-
-    Parameters
-    ----------
-    name : str
-        The name of the model to fetch.
-
-    Returns
-    -------
-    Union[valor.Model, None]
-        A Model with matching name or 'None' if one doesn't exist.
-    """
-    return Model.decode_value(
-        {
-            **self.conn.get_model(name),
-            "connection": self.conn,
-        }
-    )
+
def get_model(
+    self,
+    name: str,
+) -> Union[Model, None]:
+    """
+    Gets a model by name.
+
+    Parameters
+    ----------
+    name : str
+        The name of the model to fetch.
+
+    Returns
+    -------
+    Union[valor.Model, None]
+        A Model with a matching name, or 'None' if one doesn't exist.
+    """
+    return Model.decode_value(
+        {
+            **self.conn.get_model(name),
+            "connection": self.conn,
+        }
+    )
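For example (hypothetical name):

    model = client.get_model("model1")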
 
@@ -4531,55 +4531,55 @@

Source code in valor/coretypes.py -
def get_model_eval_requests(
-    self, model: Union[Model, str]
-) -> List[Evaluation]:
-    """
-    Get all evaluations that have been created for a model.
-
-    This does not return evaluation results.
-
-    `GET` endpoint.
-
-    Parameters
-    ----------
-    model : str
-        The model to search by.
-
-    Returns
-    -------
-    List[Evaluation]
-        A list of evaluations.
-    """
-    model_name = model.name if isinstance(model, Model) else model
-    return [
-        Evaluation(**evaluation, connection=self.conn)
-        for evaluation in self.conn.get_model_eval_requests(model_name)  # type: ignore
-    ]
+
def get_model_eval_requests(
+    self, model: Union[Model, str]
+) -> List[Evaluation]:
+    """
+    Get all evaluations that have been created for a model.
+
+    This does not return evaluation results.
+
+    `GET` endpoint.
+
+    Parameters
+    ----------
+    model : Union[Model, str]
+        The model to search by.
+
+    Returns
+    -------
+    List[Evaluation]
+        A list of evaluations.
+    """
+    model_name = model.name if isinstance(model, Model) else model
+    return [
+        Evaluation(**evaluation, connection=self.conn)
+        for evaluation in self.conn.get_model_eval_requests(model_name)  # type: ignore
+    ]
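A sketch (hypothetical name); note that the returned objects carry no metrics:

    eval_requests = client.get_model_eval_requests("model1")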
 
@@ -4654,57 +4654,57 @@

Source code in valor/coretypes.py -
def get_model_status(
-    self,
-    dataset_name: str,
-    model_name: str,
-) -> Optional[TableStatus]:
-    """
-    Get the state of a given model over a dataset.
-
-    Parameters
-    ----------
-    dataset_name : str
-        The name of the dataset that the model is operating over.
-    model_name : str
-        The name of the model we want to fetch the state of.
-
-    Returns
-    ------
-    Union[TableStatus, None]
-        The state of the model or 'None' if the model doesn't exist.
-    """
-    try:
-        return self.conn.get_model_status(dataset_name, model_name)
-    except ClientException as e:
-        if e.status_code == 404:
-            return None
-        raise e
+
def get_model_status(
+    self,
+    dataset_name: str,
+    model_name: str,
+) -> Optional[TableStatus]:
+    """
+    Get the state of a given model over a dataset.
+
+    Parameters
+    ----------
+    dataset_name : str
+        The name of the dataset that the model is operating over.
+    model_name : str
+        The name of the model to fetch the state of.
+
+    Returns
+    -------
+    Union[TableStatus, None]
+        The state of the model or 'None' if the model doesn't exist.
+    """
+    try:
+        return self.conn.get_model_status(dataset_name, model_name)
+    except ClientException as e:
+        if e.status_code == 404:
+            return None
+        raise e
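For example (hypothetical names):

    status = client.get_model_status(dataset_name="dataset1", model_name="model1")
    if status is None:
        print("model does not exist")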
 
@@ -4765,55 +4765,55 @@

Source code in valor/coretypes.py -
def get_models(
-    self,
-    filter_by: Optional[FilterType] = None,
-) -> List[Model]:
-    """
-    Get all models using an optional filter.
-
-    Parameters
-    ----------
-    filter_by : FilterType, optional
-        Optional constraints to filter by.
-
-    Returns
-    ------
-    List[valor.Model]
-        A list of models.
-    """
-    filter_ = _format_filter(filter_by)
-    if isinstance(filter_, Filter):
-        filter_ = asdict(filter_)
-    model_list = []
-    for kwargs in self.conn.get_models(filter_):
-        model = Model.decode_value({**kwargs, "connection": self.conn})
-        model_list.append(model)
-    return model_list
+
def get_models(
+    self,
+    filter_by: Optional[FilterType] = None,
+) -> List[Model]:
+    """
+    Get all models using an optional filter.
+
+    Parameters
+    ----------
+    filter_by : FilterType, optional
+        Optional constraints to filter by.
+
+    Returns
+    -------
+    List[valor.Model]
+        A list of models.
+    """
+    filters = _format_filter(filter_by)
+    if isinstance(filters, Filter):
+        filters = asdict(filters)
+    model_list = []
+    for kwargs in self.conn.get_models(filters):
+        model = Model.decode_value({**kwargs, "connection": self.conn})
+        model_list.append(model)
+    return model_list
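A sketch without a filter:

    for model in client.get_models():
        print(model.name)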
 
@@ -4902,79 +4902,79 @@

Source code in valor/coretypes.py -
def get_prediction(
-    self,
-    dataset: Union[Dataset, str],
-    model: Union[Model, str],
-    datum: Union[Datum, str],
-) -> Union[Prediction, None]:
-    """
-    Get a particular prediction.
-
-    Parameters
-    ----------
-    dataset: Union[Dataset, str]
-        The dataset the datum belongs to.
-    model: Union[Model, str]
-        The model that made the prediction.
-    datum: Union[Datum, str]
-        The desired datum.
-
-    Returns
-    ----------
-    Union[Prediction, None]
-        The matching prediction or 'None' if it doesn't exist.
-    """
-    dataset_name = (
-        dataset.name if isinstance(dataset, Dataset) else dataset
-    )
-    model_name = model.name if isinstance(model, Model) else model
-    datum_uid = datum.uid if isinstance(datum, Datum) else datum
-
-    resp = self.conn.get_prediction(
-        dataset_name=dataset_name,  # type: ignore
-        model_name=model_name,  # type: ignore
-        datum_uid=datum_uid,  # type: ignore
-    )
-    resp.pop("dataset_name")
-    resp.pop("model_name")
-    return Prediction.decode_value(resp)
+
def get_prediction(
+    self,
+    dataset: Union[Dataset, str],
+    model: Union[Model, str],
+    datum: Union[Datum, str],
+) -> Union[Prediction, None]:
+    """
+    Get a particular prediction.
+
+    Parameters
+    ----------
+    dataset: Union[Dataset, str]
+        The dataset the datum belongs to.
+    model: Union[Model, str]
+        The model that made the prediction.
+    datum: Union[Datum, str]
+        The desired datum.
+
+    Returns
+    -------
+    Union[Prediction, None]
+        The matching prediction or 'None' if it doesn't exist.
+    """
+    dataset_name = (
+        dataset.name if isinstance(dataset, Dataset) else dataset
+    )
+    model_name = model.name if isinstance(model, Model) else model
+    datum_uid = datum.uid if isinstance(datum, Datum) else datum
+
+    resp = self.conn.get_prediction(
+        dataset_name=dataset_name,  # type: ignore
+        model_name=model_name,  # type: ignore
+        datum_uid=datum_uid,  # type: ignore
+    )
+    resp.pop("dataset_name")
+    resp.pop("model_name")
+    return Prediction.decode_value(resp)
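For example (hypothetical names):

    prediction = client.get_prediction(
        dataset="dataset1", model="model1", datum="uid1"
    )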
 
diff --git a/client_api/Dataset/index.html b/client_api/Dataset/index.html index 2d43d0a7b..8a24c47ac 100644 --- a/client_api/Dataset/index.html +++ b/client_api/Dataset/index.html @@ -656,17 +656,7 @@

Dataset

Source code in valor/coretypes.py -
@@ -921,272 +911,282 @@ 

Dataset

class Dataset(StaticCollection):
-    """
-    A class describing a given dataset.
-
-    Attributes
-    ----------
-    name : String
-        The name of the dataset.
-    metadata : Dictionary
-        A dictionary of metadata that describes the dataset.
-
-    Examples
-    --------
-    >>> Dataset.create(name="dataset1")
-    >>> Dataset.create(name="dataset1", metadata={})
-    >>> Dataset.create(name="dataset1", metadata={"foo": "bar", "pi": 3.14})
-    """
-
-    name: String = String.symbolic(owner="dataset", name="name")
-    metadata: Dictionary = Dictionary.symbolic(
-        owner="dataset", name="metadata"
-    )
-
-    def __init__(
-        self,
-        *,
-        name: str,
-        metadata: Optional[dict] = None,
-        connection: Optional[ClientConnection] = None,
-    ):
-        """
-        Creates a local instance of a dataset.
class Dataset(StaticCollection):
+    """
+    A class describing a given dataset.
+
+    Attributes
+    ----------
+    name : String
+        The name of the dataset.
+    metadata : Dictionary
+        A dictionary of metadata that describes the dataset.
+
+    Examples
+    --------
+    >>> Dataset.create(name="dataset1")
+    >>> Dataset.create(name="dataset1", metadata={})
+    >>> Dataset.create(name="dataset1", metadata={"foo": "bar", "pi": 3.14})
+    """
+
+    name: String = String.symbolic(owner="dataset", name="name")
+    metadata: Dictionary = Dictionary.symbolic(
+        owner="dataset", name="metadata"
+    )
 
-        Use 'Dataset.create' classmethod to create a dataset with persistence.
-
-        Parameters
-        ----------
-        name : str
-            The name of the dataset.
-        metadata : dict, optional
-            A dictionary of metadata that describes the dataset.
-        connection : ClientConnection, optional
-            An initialized client connection.
-        """
-        self.conn = connection
-        super().__init__(name=name, metadata=metadata if metadata else dict())
-
-    @classmethod
-    def create(
-        cls,
-        name: str,
-        metadata: Optional[Dict[str, Any]] = None,
-        connection: Optional[ClientConnection] = None,
-    ) -> Dataset:
-        """
-        Creates a dataset that persists in the back end.
+    def __init__(
+        self,
+        *,
+        name: str,
+        metadata: Optional[dict] = None,
+        connection: Optional[ClientConnection] = None,
+    ):
+        """
+        Creates a local instance of a dataset.
+
+        Use 'Dataset.create' classmethod to create a dataset with persistence.
+
+        Parameters
+        ----------
+        name : str
+            The name of the dataset.
+        metadata : dict, optional
+            A dictionary of metadata that describes the dataset.
+        connection : ClientConnection, optional
+            An initialized client connection.
+        """
+        self.conn = connection
+        super().__init__(name=name, metadata=metadata if metadata else dict())
 
-        Parameters
-        ----------
-        name : str
-            The name of the dataset.
-        metadata : dict, optional
-            A dictionary of metadata that describes the dataset.
-        connection : ClientConnection, optional
-            An initialized client connection.
-        """
-        dataset = cls(name=name, metadata=metadata, connection=connection)
-        Client(dataset.conn).create_dataset(dataset)
-        return dataset
-
-    @classmethod
-    def get(
-        cls,
-        name: str,
-        connection: Optional[ClientConnection] = None,
-    ) -> Union[Dataset, None]:
-        """
-        Retrieves a dataset from the back end database.
-
-        Parameters
-        ----------
-        name : str
-            The name of the dataset.
-
-        Returns
-        -------
-        Union[valor.Dataset, None]
-            The dataset or 'None' if it doesn't exist.
-        """
-        return Client(connection).get_dataset(name)
-
-    def add_groundtruth(
-        self,
-        groundtruth: GroundTruth,
-    ) -> None:
-        """
-        Add a ground truth to the dataset.
-
-        Parameters
-        ----------
-        groundtruth : GroundTruth
-            The ground truth to create.
-        """
-        Client(self.conn).create_groundtruths(
-            dataset=self,
-            groundtruths=[groundtruth],
-        )
+    @classmethod
+    def create(
+        cls,
+        name: str,
+        metadata: Optional[Dict[str, Any]] = None,
+        connection: Optional[ClientConnection] = None,
+    ) -> Dataset:
+        """
+        Creates a dataset that persists in the back end.
+
+        Parameters
+        ----------
+        name : str
+            The name of the dataset.
+        metadata : dict, optional
+            A dictionary of metadata that describes the dataset.
+        connection : ClientConnection, optional
+            An initialized client connection.
+        """
+        dataset = cls(name=name, metadata=metadata, connection=connection)
+        Client(dataset.conn).create_dataset(dataset)
+        return dataset
+
+    @classmethod
+    def get(
+        cls,
+        name: str,
+        connection: Optional[ClientConnection] = None,
+    ) -> Union[Dataset, None]:
+        """
+        Retrieves a dataset from the back end database.
+
+        Parameters
+        ----------
+        name : str
+            The name of the dataset.
+
+        Returns
+        -------
+        Union[valor.Dataset, None]
+            The dataset or 'None' if it doesn't exist.
+        """
+        return Client(connection).get_dataset(name)
+
+    def add_groundtruth(
+        self,
+        groundtruth: GroundTruth,
+    ) -> None:
+        """
+        Add a ground truth to the dataset.
 
-    def add_groundtruths(
-        self,
-        groundtruths: List[GroundTruth],
-        ignore_existing_datums: bool = False,
-    ) -> None:
-        """
-        Add multiple ground truths to the dataset.
-
-        Parameters
-        ----------
-        groundtruths : List[GroundTruth]
-            The ground truths to create.
-        ignore_existing_datums : bool, default=False
-            If True, will ignore datums that already exist in the backend.
-            If False, will raise an error if any datums already exist.
-            Default is False.
-        """
-        Client(self.conn).create_groundtruths(
-            dataset=self,
-            groundtruths=groundtruths,
-            ignore_existing_datums=ignore_existing_datums,
-        )
-
-    def get_groundtruth(
-        self,
-        datum: Union[Datum, str],
-    ) -> Union[GroundTruth, None]:
-        """
-        Get a particular ground truth.
-
-        Parameters
-        ----------
-        datum: Union[Datum, str]
-            The desired datum.
-
-        Returns
-        ----------
-        Union[GroundTruth, None]
-            The matching ground truth or 'None' if it doesn't exist.
-        """
-        return Client(self.conn).get_groundtruth(dataset=self, datum=datum)
-
-    def get_labels(
-        self,
-    ) -> List[Label]:
-        """
-        Get all labels associated with a given dataset.
-
-        Returns
-        ----------
-        List[Label]
-            A list of `Labels` associated with the dataset.
-        """
-        return Client(self.conn).get_labels_from_dataset(self)
-
-    def get_datums(
-        self, filter_by: Optional[FilterType] = None
-    ) -> List[Datum]:
-        """
-        Get all datums associated with a given dataset.
-
-        Parameters
-        ----------
-        filter_by
-            Optional constraints to filter by.
-
-        Returns
-        ----------
-        List[Datum]
-            A list of `Datums` associated with the dataset.
-        """
-        filter_ = _format_filter(filter_by)
-        if isinstance(filter_, Filter):
-            filter_ = asdict(filter_)
-
-        if filter_.get("dataset_names"):
-            raise ValueError(
-                "Cannot filter by dataset_names when calling `Dataset.get_datums`."
-            )
-        filter_["dataset_names"] = [self.name]  # type: ignore
-        return Client(self.conn).get_datums(filter_by=filter_)
-
-    def get_evaluations(
-        self,
-        metrics_to_sort_by: Optional[
-            Dict[str, Union[Dict[str, str], str]]
-        ] = None,
-    ) -> List[Evaluation]:
-        """
-        Get all evaluations associated with a given dataset.
-
-        Parameters
-        ----------
-        metrics_to_sort_by: dict[str, str | dict[str, str]], optional
-            An optional dict of metric types to sort the evaluations by.
-
-        Returns
-        ----------
-        List[Evaluation]
-            A list of `Evaluations` associated with the dataset.
-        """
-        return Client(self.conn).get_evaluations(
-            datasets=[self], metrics_to_sort_by=metrics_to_sort_by
-        )
-
-    def get_summary(self) -> DatasetSummary:
-        """
-        Get the summary of a given dataset.
-
-        Returns
-        -------
-        DatasetSummary
-            The summary of the dataset. This class has the following fields:
-
-            name: name of the dataset
-
-            num_datums: total number of datums in the dataset
-
-            num_annotations: total number of labeled annotations in the dataset; if an
-            object (such as a bounding box) has multiple labels, then each label is counted separately
-
-            num_bounding_boxes: total number of bounding boxes in the dataset
-
-            num_polygons: total number of polygons in the dataset
-
-            num_rasters: total number of rasters in the dataset
-
-            labels: list of the unique labels in the dataset
-
-            datum_metadata: list of the unique metadata dictionaries in the dataset that are associated
-            to datums
-
-            groundtruth_annotation_metadata: list of the unique metadata dictionaries in the dataset that are
-            associated to annotations
-        """
-        return Client(self.conn).get_dataset_summary(self.name)  # type: ignore
+        Parameters
+        ----------
+        groundtruth : GroundTruth
+            The ground truth to create.
+        """
+        Client(self.conn).create_groundtruths(
+            dataset=self,
+            groundtruths=[groundtruth],
+        )
+
+    def add_groundtruths(
+        self,
+        groundtruths: List[GroundTruth],
+        ignore_existing_datums: bool = False,
+    ) -> None:
+        """
+        Add multiple ground truths to the dataset.
+
+        Parameters
+        ----------
+        groundtruths : List[GroundTruth]
+            The ground truths to create.
+        ignore_existing_datums : bool, default=False
+            If True, will ignore datums that already exist in the backend.
+            If False, will raise an error if any datums already exist.
+            Default is False.
+        """
+        Client(self.conn).create_groundtruths(
+            dataset=self,
+            groundtruths=groundtruths,
+            ignore_existing_datums=ignore_existing_datums,
+        )
+
+    def get_groundtruth(
+        self,
+        datum: Union[Datum, str],
+    ) -> Union[GroundTruth, None]:
+        """
+        Get a particular ground truth.
+
+        Parameters
+        ----------
+        datum: Union[Datum, str]
+            The desired datum.
+
+        Returns
+        -------
+        Union[GroundTruth, None]
+            The matching ground truth or 'None' if it doesn't exist.
+        """
+        return Client(self.conn).get_groundtruth(dataset=self, datum=datum)
+
+    def get_labels(
+        self,
+    ) -> List[Label]:
+        """
+        Get all labels associated with a given dataset.
+
+        Returns
+        -------
+        List[Label]
+            A list of `Labels` associated with the dataset.
+        """
+        return Client(self.conn).get_labels_from_dataset(self)
+
+    def get_datums(
+        self, filter_by: Optional[FilterType] = None
+    ) -> List[Datum]:
+        """
+        Get all datums associated with a given dataset.
+
+        Parameters
+        ----------
+        filter_by : FilterType, optional
+            Optional constraints to filter by.
+
+        Returns
+        -------
+        List[Datum]
+            A list of `Datums` associated with the dataset.
+        """
+        filters = _format_filter(filter_by)
+        if isinstance(filters, Filter):
+            filters = asdict(filters)
+
+        if filters.get("dataset_names"):
+            raise ValueError(
+                "Cannot filter by dataset_names when calling `Dataset.get_datums`."
+            )
+        filters["dataset_names"] = [self.name]  # type: ignore
+        return Client(self.conn).get_datums(filter_by=filters)
+
+    def get_evaluations(
+        self,
+        metrics_to_sort_by: Optional[
+            Dict[str, Union[Dict[str, str], str]]
+        ] = None,
+    ) -> List[Evaluation]:
+        """
+        Get all evaluations associated with a given dataset.
+
+        Parameters
+        ----------
+        metrics_to_sort_by: dict[str, str | dict[str, str]], optional
+            An optional dict of metric types to sort the evaluations by.
+
+        Returns
+        -------
+        List[Evaluation]
+            A list of `Evaluations` associated with the dataset.
+        """
+        return Client(self.conn).get_evaluations(
+            datasets=[self], metrics_to_sort_by=metrics_to_sort_by
+        )
+
+    def get_summary(self) -> DatasetSummary:
+        """
+        Get the summary of a given dataset.
+
+        Returns
+        -------
+        DatasetSummary
+            The summary of the dataset. This class has the following fields:
+
+            name: name of the dataset
+
+            num_datums: total number of datums in the dataset
+
+            num_annotations: total number of labeled annotations in the dataset; if an
+            object (such as a bounding box) has multiple labels, then each label is counted separately
+
+            num_bounding_boxes: total number of bounding boxes in the dataset
+
+            num_polygons: total number of polygons in the dataset
+
+            num_rasters: total number of rasters in the dataset
 
-    def finalize(
-        self,
-    ):
-        """
-        Finalizes the dataset such that new ground truths cannot be added to it.
-        """
-        return Client(self.conn).finalize_dataset(self)
-
-    def delete(
-        self,
-        timeout: int = 0,
-    ):
-        """
-        Delete the dataset from the back end.
-
-        Parameters
-        ----------
-        timeout : int, default=0
-            Sets a timeout in seconds.
-        """
-        Client(self.conn).delete_dataset(self.name, timeout)  # type: ignore
+            labels: list of the unique labels in the dataset
+
+            datum_metadata: list of the unique metadata dictionaries in the dataset that are associated
+            with datums
+
+            groundtruth_annotation_metadata: list of the unique metadata dictionaries in the dataset that are
+            associated with annotations
+        """
+        return Client(self.conn).get_dataset_summary(self.name)  # type: ignore
+
+    def finalize(
+        self,
+    ):
+        """
+        Finalizes the dataset such that new ground truths cannot be added to it.
+        """
+        return Client(self.conn).finalize_dataset(self)
+
+    def delete(
+        self,
+        timeout: int = 0,
+    ):
+        """
+        Delete the dataset from the back end.
+
+        Parameters
+        ----------
+        timeout : int, default=0
+            Sets a timeout in seconds.
+        """
+        Client(self.conn).delete_dataset(self.name, timeout)  # type: ignore
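Putting the class together, a minimal end-to-end sketch (hypothetical names; `groundtruths` stands in for a list of GroundTruth objects built elsewhere):

    from valor import Dataset

    dataset = Dataset.create(name="dataset1", metadata={"split": "train"})
    dataset.add_groundtruths(groundtruths)  # `groundtruths` is hypothetical
    dataset.finalize()
    print(dataset.get_summary().num_datums)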
 
@@ -1255,17 +1255,7 @@

Source code in valor/coretypes.py -
@@ -1277,29 +1267,39 @@ 

def __init__(
-    self,
-    *,
-    name: str,
-    metadata: Optional[dict] = None,
-    connection: Optional[ClientConnection] = None,
-):
-    """
-    Creates a local instance of a dataset.
-
-    Use 'Dataset.create' classmethod to create a dataset with persistence.
-
-    Parameters
-    ----------
-    name : str
-        The name of the dataset.
-    metadata : dict, optional
-        A dictionary of metadata that describes the dataset.
-    connection : ClientConnection, optional
-        An initialized client connection.
-    """
-    self.conn = connection
-    super().__init__(name=name, metadata=metadata if metadata else dict())
def __init__(
+    self,
+    *,
+    name: str,
+    metadata: Optional[dict] = None,
+    connection: Optional[ClientConnection] = None,
+):
+    """
+    Creates a local instance of a dataset.
+
+    Use 'Dataset.create' classmethod to create a dataset with persistence.
+
+    Parameters
+    ----------
+    name : str
+        The name of the dataset.
+    metadata : dict, optional
+        A dictionary of metadata that describes the dataset.
+    connection : ClientConnection, optional
+        An initialized client connection.
+    """
+    self.conn = connection
+    super().__init__(name=name, metadata=metadata if metadata else dict())
 
@@ -1339,37 +1339,37 @@

Source code in valor/coretypes.py -
def add_groundtruth(
-    self,
-    groundtruth: GroundTruth,
-) -> None:
-    """
-    Add a ground truth to the dataset.
-
-    Parameters
-    ----------
-    groundtruth : GroundTruth
-        The ground truth to create.
-    """
-    Client(self.conn).create_groundtruths(
-        dataset=self,
-        groundtruths=[groundtruth],
-    )
def add_groundtruth(
+    self,
+    groundtruth: GroundTruth,
+) -> None:
+    """
+    Add a ground truth to the dataset.
+
+    Parameters
+    ----------
+    groundtruth : GroundTruth
+        The ground truth to create.
+    """
+    Client(self.conn).create_groundtruths(
+        dataset=self,
+        groundtruths=[groundtruth],
+    )
 
@@ -1425,17 +1425,7 @@

Source code in valor/coretypes.py -
@@ -1446,28 +1436,38 @@ 

501 502 503 -504

def add_groundtruths(
-    self,
-    groundtruths: List[GroundTruth],
-    ignore_existing_datums: bool = False,
-) -> None:
-    """
-    Add multiple ground truths to the dataset.
-
-    Parameters
-    ----------
-    groundtruths : List[GroundTruth]
-        The ground truths to create.
-    ignore_existing_datums : bool, default=False
-        If True, will ignore datums that already exist in the backend.
-        If False, will raise an error if any datums already exist.
-        Default is False.
-    """
-    Client(self.conn).create_groundtruths(
-        dataset=self,
-        groundtruths=groundtruths,
-        ignore_existing_datums=ignore_existing_datums,
-    )
def add_groundtruths(
+    self,
+    groundtruths: List[GroundTruth],
+    ignore_existing_datums: bool = False,
+) -> None:
+    """
+    Add multiple ground truths to the dataset.
+
+    Parameters
+    ----------
+    groundtruths : List[GroundTruth]
+        The ground truths to create.
+    ignore_existing_datums : bool, default=False
+        If True, will ignore datums that already exist in the backend.
+        If False, will raise an error if any datums already exist.
+        Default is False.
+    """
+    Client(self.conn).create_groundtruths(
+        dataset=self,
+        groundtruths=groundtruths,
+        ignore_existing_datums=ignore_existing_datums,
+    )
 
@@ -1538,17 +1538,7 @@

Source code in valor/coretypes.py -
@@ -1559,28 +1549,38 @@ 

440 441 442 -443

@classmethod
-def create(
-    cls,
-    name: str,
-    metadata: Optional[Dict[str, Any]] = None,
-    connection: Optional[ClientConnection] = None,
-) -> Dataset:
-    """
-    Creates a dataset that persists in the back end.
-
-    Parameters
-    ----------
-    name : str
-        The name of the dataset.
-    metadata : dict, optional
-        A dictionary of metadata that describes the dataset.
-    connection : ClientConnection, optional
-        An initialized client connection.
-    """
-    dataset = cls(name=name, metadata=metadata, connection=connection)
-    Client(dataset.conn).create_dataset(dataset)
-    return dataset
@classmethod
+def create(
+    cls,
+    name: str,
+    metadata: Optional[Dict[str, Any]] = None,
+    connection: Optional[ClientConnection] = None,
+) -> Dataset:
+    """
+    Creates a dataset that persists in the back end.
+
+    Parameters
+    ----------
+    name : str
+        The name of the dataset.
+    metadata : dict, optional
+        A dictionary of metadata that describes the dataset.
+    connection : ClientConnection, optional
+        An initialized client connection.
+    """
+    dataset = cls(name=name, metadata=metadata, connection=connection)
+    Client(dataset.conn).create_dataset(dataset)
+    return dataset
 
@@ -1620,31 +1620,31 @@

Source code in valor/coretypes.py -
def delete(
-    self,
-    timeout: int = 0,
-):
-    """
-    Delete the dataset from the back end.
-
-    Parameters
-    ----------
-    timeout : int, default=0
-        Sets a timeout in seconds.
-    """
-    Client(self.conn).delete_dataset(self.name, timeout)  # type: ignore
def delete(
+    self,
+    timeout: int = 0,
+):
+    """
+    Delete the dataset from the back end.
+
+    Parameters
+    ----------
+    timeout : int, default=0
+        Sets a timeout in seconds.
+    """
+    Client(self.conn).delete_dataset(self.name, timeout)  # type: ignore
 
@@ -1657,19 +1657,19 @@

Finalizes the dataset such that new ground truths cannot be added to it.

Source code in valor/coretypes.py -
def finalize(
-    self,
-):
-    """
-    Finalizes the dataset such that new ground truths cannot be added to it.
-    """
-    return Client(self.conn).finalize_dataset(self)
+
def finalize(
+    self,
+):
+    """
+    Finalizes the dataset such that new ground truths cannot be added to it.
+    """
+    return Client(self.conn).finalize_dataset(self)
 
@@ -1733,17 +1733,7 @@

Source code in valor/coretypes.py -
@@ -1752,26 +1742,36 @@ 

461 462 463 -464

@classmethod
-def get(
-    cls,
-    name: str,
-    connection: Optional[ClientConnection] = None,
-) -> Union[Dataset, None]:
-    """
-    Retrieves a dataset from the back end database.
-
-    Parameters
-    ----------
-    name : str
-        The name of the dataset.
-
-    Returns
-    -------
-    Union[valor.Dataset, None]
-        The dataset or 'None' if it doesn't exist.
-    """
-    return Client(connection).get_dataset(name)
@classmethod
+def get(
+    cls,
+    name: str,
+    connection: Optional[ClientConnection] = None,
+) -> Union[Dataset, None]:
+    """
+    Retrieves a dataset from the back end database.
+
+    Parameters
+    ----------
+    name : str
+        The name of the dataset.
+
+    Returns
+    -------
+    Union[valor.Dataset, None]
+        The dataset or 'None' if it doesn't exist.
+    """
+    return Client(connection).get_dataset(name)
 
@@ -1832,17 +1832,7 @@

Source code in valor/coretypes.py -
@@ -1857,32 +1847,42 @@ 

560 561 562 -563

def get_datums(
-    self, filter_by: Optional[FilterType] = None
-) -> List[Datum]:
-    """
-    Get all datums associated with a given dataset.
-
-    Parameters
-    ----------
-    filter_by
-        Optional constraints to filter by.
-
-    Returns
-    ----------
-    List[Datum]
-        A list of `Datums` associated with the dataset.
-    """
-    filter_ = _format_filter(filter_by)
-    if isinstance(filter_, Filter):
-        filter_ = asdict(filter_)
-
-    if filter_.get("dataset_names"):
-        raise ValueError(
-            "Cannot filter by dataset_names when calling `Dataset.get_datums`."
-        )
-    filter_["dataset_names"] = [self.name]  # type: ignore
-    return Client(self.conn).get_datums(filter_by=filter_)
def get_datums(
+    self, filter_by: Optional[FilterType] = None
+) -> List[Datum]:
+    """
+    Get all datums associated with a given dataset.
+
+    Parameters
+    ----------
+    filter_by : FilterType, optional
+        Optional constraints to filter by.
+
+    Returns
+    -------
+    List[Datum]
+        A list of `Datums` associated with the dataset.
+    """
+    filters = _format_filter(filter_by)
+    if isinstance(filters, Filter):
+        filters = asdict(filters)
+
+    if filters.get("dataset_names"):
+        raise ValueError(
+            "Cannot filter by dataset_names when calling `Dataset.get_datums`."
+        )
+    filters["dataset_names"] = [self.name]  # type: ignore
+    return Client(self.conn).get_datums(filter_by=filters)
 
@@ -1943,17 +1943,7 @@

Source code in valor/coretypes.py -
@@ -1964,28 +1954,38 @@ 

583 584 585 -586

def get_evaluations(
-    self,
-    metrics_to_sort_by: Optional[
-        Dict[str, Union[Dict[str, str], str]]
-    ] = None,
-) -> List[Evaluation]:
-    """
-    Get all evaluations associated with a given dataset.
-
-    Parameters
-    ----------
-    metrics_to_sort_by: dict[str, str | dict[str, str]], optional
-        An optional dict of metric types to sort the evaluations by.
-
-    Returns
-    ----------
-    List[Evaluation]
-        A list of `Evaluations` associated with the dataset.
-    """
-    return Client(self.conn).get_evaluations(
-        datasets=[self], metrics_to_sort_by=metrics_to_sort_by
-    )
def get_evaluations(
+    self,
+    metrics_to_sort_by: Optional[
+        Dict[str, Union[Dict[str, str], str]]
+    ] = None,
+) -> List[Evaluation]:
+    """
+    Get all evaluations associated with a given dataset.
+
+    Parameters
+    ----------
+    metrics_to_sort_by: dict[str, str | dict[str, str]], optional
+        An optional dict of metric types to sort the evaluations by.
+
+    Returns
+    -------
+    List[Evaluation]
+        A list of `Evaluations` associated with the dataset.
+    """
+    return Client(self.conn).get_evaluations(
+        datasets=[self], metrics_to_sort_by=metrics_to_sort_by
+    )
 
@@ -2046,41 +2046,41 @@

Source code in valor/coretypes.py -
def get_groundtruth(
-    self,
-    datum: Union[Datum, str],
-) -> Union[GroundTruth, None]:
-    """
-    Get a particular ground truth.
-
-    Parameters
-    ----------
-    datum: Union[Datum, str]
-        The desired datum.
-
-    Returns
-    ----------
-    Union[GroundTruth, None]
-        The matching ground truth or 'None' if it doesn't exist.
-    """
-    return Client(self.conn).get_groundtruth(dataset=self, datum=datum)
def get_groundtruth(
+    self,
+    datum: Union[Datum, str],
+) -> Union[GroundTruth, None]:
+    """
+    Get a particular ground truth.
+
+    Parameters
+    ----------
+    datum: Union[Datum, str]
+        The desired datum.
+
+    Returns
+    -------
+    Union[GroundTruth, None]
+        The matching ground truth or 'None' if it doesn't exist.
+    """
+    return Client(self.conn).get_groundtruth(dataset=self, datum=datum)
 
@@ -2114,29 +2114,29 @@

Source code in valor/coretypes.py -
def get_labels(
-    self,
-) -> List[Label]:
-    """
-    Get all labels associated with a given dataset.
-
-    Returns
-    ----------
-    List[Label]
-        A list of `Labels` associated with the dataset.
-    """
-    return Client(self.conn).get_labels_from_dataset(self)
+
def get_labels(
+    self,
+) -> List[Label]:
+    """
+    Get all labels associated with a given dataset.
+
+    Returns
+    -------
+    List[Label]
+        A list of `Labels` associated with the dataset.
+    """
+    return Client(self.conn).get_labels_from_dataset(self)
 
@@ -2182,17 +2182,7 @@

Source code in valor/coretypes.py -
@@ -2212,37 +2202,47 @@ 

615 616 617 -618

def get_summary(self) -> DatasetSummary:
-    """
-    Get the summary of a given dataset.
-
-    Returns
-    -------
-    DatasetSummary
-        The summary of the dataset. This class has the following fields:
-
-        name: name of the dataset
-
-        num_datums: total number of datums in the dataset
-
-        num_annotations: total number of labeled annotations in the dataset; if an
-        object (such as a bounding box) has multiple labels, then each label is counted separately
-
-        num_bounding_boxes: total number of bounding boxes in the dataset
-
-        num_polygons: total number of polygons in the dataset
-
-        num_rasters: total number of rasters in the dataset
-
-        labels: list of the unique labels in the dataset
-
-        datum_metadata: list of the unique metadata dictionaries in the dataset that are associated
-        to datums
-
-        groundtruth_annotation_metadata: list of the unique metadata dictionaries in the dataset that are
-        associated to annotations
-    """
-    return Client(self.conn).get_dataset_summary(self.name)  # type: ignore
def get_summary(self) -> DatasetSummary:
+    """
+    Get the summary of a given dataset.
+
+    Returns
+    -------
+    DatasetSummary
+        The summary of the dataset. This class has the following fields:
+
+        name: name of the dataset
+
+        num_datums: total number of datums in the dataset
+
+        num_annotations: total number of labeled annotations in the dataset; if an
+        object (such as a bounding box) has multiple labels, then each label is counted separately
+
+        num_bounding_boxes: total number of bounding boxes in the dataset
+
+        num_polygons: total number of polygons in the dataset
+
+        num_rasters: total number of rasters in the dataset
+
+        labels: list of the unique labels in the dataset
+
+        datum_metadata: list of the unique metadata dictionaries in the dataset that are associated
+        with datums
+
+        groundtruth_annotation_metadata: list of the unique metadata dictionaries in the dataset that are
+        associated with annotations
+    """
+    return Client(self.conn).get_dataset_summary(self.name)  # type: ignore
 
diff --git a/client_api/Evaluation/index.html b/client_api/Evaluation/index.html index 50f3a10a8..f19d5ad6b 100644 --- a/client_api/Evaluation/index.html +++ b/client_api/Evaluation/index.html @@ -531,14 +531,7 @@

Evaluation

Wraps valor.client.Job to provide evaluation-specific members.

Source code in valor/coretypes.py -
@@ -1015,14 +1032,7 @@

@@ -722,198 +715,211 @@ 

Evaluation

class Evaluation:
-    """
-    Wraps `valor.client.Job` to provide evaluation-specifc members.
-    """
-
-    def __init__(
-        self, connection: Optional[ClientConnection] = None, **kwargs
-    ):
-        """
-        Defines important attributes of the API's `EvaluationResult`.
-
-        Attributes
-        ----------
-        id : int
-            The ID of the evaluation.
-        model_name : str
-            The name of the evaluated model.
-        datum_filter : schemas.Filter
-            The filter used to select the datums for evaluation.
-        status : EvaluationStatus
-            The status of the evaluation.
-        metrics : List[dict]
-            A list of metric dictionaries returned by the job.
-        confusion_matrices : List[dict]
-            A list of confusion matrix dictionaries returned by the job.
-        meta: dict[str, str | float | dict], optional
-            A dictionary of metadata describing the evaluation run.
-        """
-        if not connection:
-            connection = get_connection()
-        self.conn = connection
-        self.update(**kwargs)
-
-    def update(
-        self,
-        *_,
-        id: int,
-        model_name: str,
-        datum_filter: Filter,
-        parameters: EvaluationParameters,
-        status: EvaluationStatus,
-        metrics: List[Dict],
-        confusion_matrices: List[Dict],
-        created_at: str,
-        meta: dict[str, str | float | dict] | None,
-        **kwargs,
-    ):
-        self.id = id
-        self.model_name = model_name
-        self.datum_filter = (
-            Filter(**datum_filter)
-            if isinstance(datum_filter, dict)
-            else datum_filter
-        )
-        self.parameters = (
-            EvaluationParameters(**parameters)
-            if isinstance(parameters, dict)
-            else parameters
-        )
-        self.status = EvaluationStatus(status)
-        self.metrics = metrics
-        self.meta = meta
-        self.confusion_matrices = confusion_matrices
-        self.kwargs = kwargs
-        self.ignored_pred_labels: Optional[List[Label]] = None
-        self.missing_pred_labels: Optional[List[Label]] = None
-        self.created_at = datetime.datetime.strptime(
-            created_at, "%Y-%m-%dT%H:%M:%S.%fZ"
-        ).replace(tzinfo=datetime.timezone.utc)
-
-        for k, v in kwargs.items():
-            setattr(self, k, v)
-
-    def poll(self) -> EvaluationStatus:
-        """
-        Poll the back end.
-
-        Updates the evaluation with the latest state from the back end.
class Evaluation:
+    """
+    Wraps `valor.client.Job` to provide evaluation-specific members.
+    """
+
+    def __init__(
+        self, connection: Optional[ClientConnection] = None, **kwargs
+    ):
+        """
+        Defines important attributes of the API's `EvaluationResult`.
+
+        Attributes
+        ----------
+        id : int
+            The ID of the evaluation.
+        dataset_names : list[str]
+            The names of the datasets the model was evaluated over.
+        model_name : str
+            The name of the evaluated model.
+        filters : schemas.Filter
+            The filter used to select data partitions for evaluation.
+        status : EvaluationStatus
+            The status of the evaluation.
+        metrics : List[dict]
+            A list of metric dictionaries returned by the job.
+        confusion_matrices : List[dict]
+            A list of confusion matrix dictionaries returned by the job.
+        meta: dict[str, str | float | dict], optional
+            A dictionary of metadata describing the evaluation run.
+        """
+        if not connection:
+            connection = get_connection()
+        self.conn = connection
+        self.update(**kwargs)
+
+    def update(
+        self,
+        *_,
+        id: int,
+        dataset_names: list[str],
+        model_name: str,
+        filters: Filter,
+        parameters: EvaluationParameters,
+        status: EvaluationStatus,
+        metrics: List[Dict],
+        confusion_matrices: List[Dict],
+        created_at: str,
+        meta: dict[str, str | float | dict] | None,
+        **kwargs,
+    ):
+        self.id = id
+        self.dataset_names = dataset_names
+        self.model_name = model_name
+        self.filters = (
+            Filter(**filters) if isinstance(filters, dict) else filters
+        )
+        self.parameters = (
+            EvaluationParameters(**parameters)
+            if isinstance(parameters, dict)
+            else parameters
+        )
+        self.status = EvaluationStatus(status)
+        self.metrics = metrics
+        self.meta = meta
+        self.confusion_matrices = confusion_matrices
+        self.kwargs = kwargs
+        self.ignored_pred_labels: Optional[List[Label]] = None
+        self.missing_pred_labels: Optional[List[Label]] = None
+        self.created_at = datetime.datetime.strptime(
+            created_at, "%Y-%m-%dT%H:%M:%S.%fZ"
+        ).replace(tzinfo=datetime.timezone.utc)
 
-        Returns
-        -------
-        enums.EvaluationStatus
-            The status of the evaluation.
-
-        Raises
-        ----------
-        ClientException
-            If an Evaluation with the given `evaluation_id` is not found.
-        """
-        response = self.conn.get_evaluations(evaluation_ids=[self.id])
-        if not response:
-            raise ClientException("Not Found")
-        self.update(**response[0])
-        return self.status
-
-    def wait_for_completion(
-        self,
-        *,
-        timeout: Optional[int] = None,
-        interval: float = 1.0,
-    ) -> EvaluationStatus:
-        """
-        Blocking function that waits for evaluation to finish.
+        for k, v in kwargs.items():
+            setattr(self, k, v)
+
+    def poll(self) -> EvaluationStatus:
+        """
+        Poll the back end.
+
+        Updates the evaluation with the latest state from the back end.
+
+        Returns
+        -------
+        enums.EvaluationStatus
+            The status of the evaluation.
+
+        Raises
+        ----------
+    EvaluationDoesNotExist
+            If an Evaluation with the given `evaluation_id` is not found.
+        """
+        response = self.conn.get_evaluations(evaluation_ids=[self.id])
+        if not response:
+            raise EvaluationDoesNotExist(self.id)
+        self.update(**response[0])
+        return self.status
 
-        Parameters
-        ----------
-        timeout : int, optional
-            Length of timeout in seconds.
-        interval : float, default=1.0
-            Polling interval in seconds.
-        """
-        t_start = time.time()
-        while self.poll() not in [
-            EvaluationStatus.DONE,
-            EvaluationStatus.FAILED,
-        ]:
-            time.sleep(interval)
-            if timeout and time.time() - t_start > timeout:
-                raise TimeoutError
-        return self.status
-
-    def __str__(self) -> str:
-        """Dumps the object into a JSON formatted string."""
-        return json.dumps(self.to_dict(), indent=4)
-
-    def to_dict(self) -> dict:
-        """
-        Defines how a `valor.Evaluation` object is serialized into a dictionary.
-
-        Returns
-        ----------
-        dict
-            A dictionary describing an evaluation.
-        """
-        return {
-            "id": self.id,
-            "model_name": self.model_name,
-            "datum_filter": asdict(self.datum_filter),
-            "parameters": asdict(self.parameters),
-            "status": self.status.value,
-            "metrics": self.metrics,
-            "confusion_matrices": self.confusion_matrices,
-            "meta": self.meta,
-            **self.kwargs,
-        }
-
-    def to_dataframe(
-        self,
-        stratify_by: Optional[Tuple[str, str]] = None,
-    ):
-        """
-        Get all metrics associated with a Model and return them in a `pd.DataFrame`.
-
-        Returns
-        ----------
-        pd.DataFrame
-            Evaluation metrics being displayed in a `pd.DataFrame`.
-
-        Raises
-        ------
-        ModuleNotFoundError
-            This function requires the use of `pandas.DataFrame`.
+    def wait_for_completion(
+        self,
+        *,
+        timeout: Optional[int] = None,
+        interval: float = 1.0,
+    ) -> EvaluationStatus:
+        """
+        Blocking function that waits for evaluation to finish.
+
+        Parameters
+        ----------
+        timeout : int, optional
+            Length of timeout in seconds.
+        interval : float, default=1.0
+            Polling interval in seconds.
+        """
+        t_start = time.time()
+        while self.poll() not in [
+            EvaluationStatus.DONE,
+            EvaluationStatus.FAILED,
+        ]:
+            time.sleep(interval)
+            if timeout and time.time() - t_start > timeout:
+                raise TimeoutError
+        return self.status
+
+    def __str__(self) -> str:
+        """Dumps the object into a JSON formatted string."""
+        return json.dumps(self.to_dict(), indent=4)
+
+    def to_dict(self) -> dict:
+        """
+        Defines how a `valor.Evaluation` object is serialized into a dictionary.
+
+        Returns
+        ----------
+        dict
+            A dictionary describing an evaluation.
+        """
+        return {
+            "id": self.id,
+            "dataset_names": self.dataset_names,
+            "model_name": self.model_name,
+            "filters": asdict(self.filters),
+            "parameters": asdict(self.parameters),
+            "status": self.status.value,
+            "metrics": self.metrics,
+            "confusion_matrices": self.confusion_matrices,
+            "meta": self.meta,
+            **self.kwargs,
+        }
+
+    def to_dataframe(
+        self,
+        stratify_by: Optional[Tuple[str, str]] = None,
+    ):
+        """
+        Get all metrics associated with a Model and return them in a `pd.DataFrame`.
 
-        """
-        try:
-            import pandas as pd
-        except ModuleNotFoundError:
-            raise ModuleNotFoundError(
-                "Must have pandas installed to use `get_metric_dataframes`."
-            )
-
-        if not stratify_by:
-            column_type = "evaluation"
-            column_name = self.id
-        else:
-            column_type = stratify_by[0]
-            column_name = stratify_by[1]
-
-        metrics = [
-            {**metric, column_type: column_name} for metric in self.metrics
-        ]
-        df = pd.DataFrame(metrics)
-        for k in ["label", "parameters"]:
-            df[k] = df[k].fillna("n/a")
-        df["parameters"] = df["parameters"].apply(json.dumps)
-        df["label"] = df["label"].apply(
-            lambda x: f"{x['key']}: {x['value']}" if x != "n/a" else x
-        )
-        df = df.pivot(
-            index=["type", "parameters", "label"], columns=[column_type]
-        )
-        return df
+        Returns
+        ----------
+        pd.DataFrame
+            Evaluation metrics being displayed in a `pd.DataFrame`.
+
+        Raises
+        ------
+        ModuleNotFoundError
+            This function requires the use of `pandas.DataFrame`.
+
+        """
+        try:
+            import pandas as pd
+        except ModuleNotFoundError:
+            raise ModuleNotFoundError(
+                "Must have pandas installed to use `to_dataframe`."
+            )
+
+        if not stratify_by:
+            column_type = "evaluation"
+            column_name = self.id
+        else:
+            column_type = stratify_by[0]
+            column_name = stratify_by[1]
+
+        metrics = [
+            {**metric, column_type: column_name} for metric in self.metrics
+        ]
+        df = pd.DataFrame(metrics)
+        for k in ["label", "parameters"]:
+            df[k] = df[k].fillna("n/a")
+        df["parameters"] = df["parameters"].apply(json.dumps)
+        df["label"] = df["label"].apply(
+            lambda x: f"{x['key']}: {x['value']}" if x != "n/a" else x
+        )
+        df = df.pivot(
+            index=["type", "parameters", "label"], columns=[column_type]
+        )
+        return df
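
A sketch of the typical lifecycle of an Evaluation object (editorial addition; `model` and `dataset` are assumed to be existing, populated objects):

from valor.enums import EvaluationStatus

evaluation = model.evaluate_classification(datasets=dataset)
status = evaluation.wait_for_completion(timeout=60)
if status == EvaluationStatus.DONE:
    print(evaluation.metrics)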
 
@@ -946,6 +952,17 @@

+dataset_names : list[str]
+    The names of the datasets the model was evaluated over.
 model_name : str
@@ -957,13 +974,13 @@
-datum_filter : Filter
-    The filter used to select the datums for evaluation.
+filters : Filter
+    The filter used to select data partitions for evaluation.
Source code in valor/coretypes.py
@@ -1041,33 +1051,44 @@ 


def __init__(
-    self, connection: Optional[ClientConnection] = None, **kwargs
-):
-    """
-    Defines important attributes of the API's `EvaluationResult`.
-
-    Attributes
-    ----------
-    id : int
-        The ID of the evaluation.
-    model_name : str
-        The name of the evaluated model.
-    datum_filter : schemas.Filter
-        The filter used to select the datums for evaluation.
-    status : EvaluationStatus
-        The status of the evaluation.
-    metrics : List[dict]
-        A list of metric dictionaries returned by the job.
-    confusion_matrices : List[dict]
-        A list of confusion matrix dictionaries returned by the job.
-    meta: dict[str, str | float | dict], optional
-        A dictionary of metadata describing the evaluation run.
-    """
-    if not connection:
-        connection = get_connection()
-    self.conn = connection
-    self.update(**kwargs)
def __init__(
+    self, connection: Optional[ClientConnection] = None, **kwargs
+):
+    """
+    Defines important attributes of the API's `EvaluationResult`.
+
+    Attributes
+    ----------
+    id : int
+        The ID of the evaluation.
+    dataset_names : list[str]
+        The names of the datasets the model was evaluated over.
+    model_name : str
+        The name of the evaluated model.
+    filters : schemas.Filter
+        The filter used to select data partitions for evaluation.
+    status : EvaluationStatus
+        The status of the evaluation.
+    metrics : List[dict]
+        A list of metric dictionaries returned by the job.
+    confusion_matrices : List[dict]
+        A list of confusion matrix dictionaries returned by the job.
+    meta: dict[str, str | float | dict], optional
+        A dictionary of metadata describing the evaluation run.
+    """
+    if not connection:
+        connection = get_connection()
+    self.conn = connection
+    self.update(**kwargs)
 
@@ -1080,11 +1101,11 @@

Dumps the object into a JSON formatted string.

Source code in valor/coretypes.py -
def __str__(self) -> str:
-    """Dumps the object into a JSON formatted string."""
-    return json.dumps(self.to_dict(), indent=4)
+
def __str__(self) -> str:
+    """Dumps the object into a JSON formatted string."""
+    return json.dumps(self.to_dict(), indent=4)
 
@@ -1140,16 +1161,7 @@

Source code in valor/coretypes.py
@@ -1160,27 +1172,36 @@ 


def poll(self) -> EvaluationStatus:
-    """
-    Poll the back end.
-
-    Updates the evaluation with the latest state from the back end.
-
-    Returns
-    -------
-    enums.EvaluationStatus
-        The status of the evaluation.
-
-    Raises
-    ----------
-    ClientException
-        If an Evaluation with the given `evaluation_id` is not found.
-    """
-    response = self.conn.get_evaluations(evaluation_ids=[self.id])
-    if not response:
-        raise ClientException("Not Found")
-    self.update(**response[0])
-    return self.status
def poll(self) -> EvaluationStatus:
+    """
+    Poll the back end.
+
+    Updates the evaluation with the latest state from the back end.
+
+    Returns
+    -------
+    enums.EvaluationStatus
+        The status of the evaluation.
+
+    Raises
+    ----------
+    EvaluationDoesNotExist
+        If an Evaluation with the given `evaluation_id` is not found.
+    """
+    response = self.conn.get_evaluations(evaluation_ids=[self.id])
+    if not response:
+        raise EvaluationDoesNotExist(self.id)
+    self.update(**response[0])
+    return self.status
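
A hedged example of manual polling with the method above, assuming `evaluation` is an Evaluation whose job is in flight; `EvaluationStatus.PENDING` and `RUNNING` are assumed members of the status enum (the source above only shows DONE and FAILED):

import time

from valor.enums import EvaluationStatus

while evaluation.poll() in (EvaluationStatus.PENDING, EvaluationStatus.RUNNING):
    time.sleep(1.0)  # back off between polls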
 
@@ -1235,17 +1256,7 @@

Source code in valor/coretypes.py
@@ -1280,52 +1291,62 @@ 


def to_dataframe(
-    self,
-    stratify_by: Optional[Tuple[str, str]] = None,
-):
-    """
-    Get all metrics associated with a Model and return them in a `pd.DataFrame`.
-
-    Returns
-    ----------
-    pd.DataFrame
-        Evaluation metrics being displayed in a `pd.DataFrame`.
-
-    Raises
-    ------
-    ModuleNotFoundError
-        This function requires the use of `pandas.DataFrame`.
def to_dataframe(
+    self,
+    stratify_by: Optional[Tuple[str, str]] = None,
+):
+    """
+    Get all metrics associated with a Model and return them in a `pd.DataFrame`.
 
-    """
-    try:
-        import pandas as pd
-    except ModuleNotFoundError:
-        raise ModuleNotFoundError(
-            "Must have pandas installed to use `get_metric_dataframes`."
-        )
-
-    if not stratify_by:
-        column_type = "evaluation"
-        column_name = self.id
-    else:
-        column_type = stratify_by[0]
-        column_name = stratify_by[1]
-
-    metrics = [
-        {**metric, column_type: column_name} for metric in self.metrics
-    ]
-    df = pd.DataFrame(metrics)
-    for k in ["label", "parameters"]:
-        df[k] = df[k].fillna("n/a")
-    df["parameters"] = df["parameters"].apply(json.dumps)
-    df["label"] = df["label"].apply(
-        lambda x: f"{x['key']}: {x['value']}" if x != "n/a" else x
-    )
-    df = df.pivot(
-        index=["type", "parameters", "label"], columns=[column_type]
-    )
-    return df
+    Returns
+    ----------
+    pd.DataFrame
+        Evaluation metrics being displayed in a `pd.DataFrame`.
+
+    Raises
+    ------
+    ModuleNotFoundError
+        This function requires the use of `pandas.DataFrame`.
+
+    """
+    try:
+        import pandas as pd
+    except ModuleNotFoundError:
+        raise ModuleNotFoundError(
+            "Must have pandas installed to use `to_dataframe`."
+        )
+
+    if not stratify_by:
+        column_type = "evaluation"
+        column_name = self.id
+    else:
+        column_type = stratify_by[0]
+        column_name = stratify_by[1]
+
+    metrics = [
+        {**metric, column_type: column_name} for metric in self.metrics
+    ]
+    df = pd.DataFrame(metrics)
+    for k in ["label", "parameters"]:
+        df[k] = df[k].fillna("n/a")
+    df["parameters"] = df["parameters"].apply(json.dumps)
+    df["label"] = df["label"].apply(
+        lambda x: f"{x['key']}: {x['value']}" if x != "n/a" else x
+    )
+    df = df.pivot(
+        index=["type", "parameters", "label"], columns=[column_type]
+    )
+    return df
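
Illustrative usage (editorial; requires pandas and assumes `evaluation` has finished):

df = evaluation.to_dataframe()
print(df.head())

# Stratify columns by model name instead of the default evaluation id:
df_by_model = evaluation.to_dataframe(stratify_by=("model", evaluation.model_name))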
 
@@ -1359,16 +1380,7 @@

Source code in valor/coretypes.py
@@ -1378,26 +1390,37 @@ 


def to_dict(self) -> dict:
-    """
-    Defines how a `valor.Evaluation` object is serialized into a dictionary.
-
-    Returns
-    ----------
-    dict
-        A dictionary describing an evaluation.
-    """
-    return {
-        "id": self.id,
-        "model_name": self.model_name,
-        "datum_filter": asdict(self.datum_filter),
-        "parameters": asdict(self.parameters),
-        "status": self.status.value,
-        "metrics": self.metrics,
-        "confusion_matrices": self.confusion_matrices,
-        "meta": self.meta,
-        **self.kwargs,
-    }
def to_dict(self) -> dict:
+    """
+    Defines how a `valor.Evaluation` object is serialized into a dictionary.
+
+    Returns
+    ----------
+    dict
+        A dictionary describing an evaluation.
+    """
+    return {
+        "id": self.id,
+        "dataset_names": self.dataset_names,
+        "model_name": self.model_name,
+        "filters": asdict(self.filters),
+        "parameters": asdict(self.parameters),
+        "status": self.status.value,
+        "metrics": self.metrics,
+        "confusion_matrices": self.confusion_matrices,
+        "meta": self.meta,
+        **self.kwargs,
+    }
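
Since `__str__` delegates to `to_dict`, both lines below yield the same JSON content (editorial sketch, assuming a finished `evaluation`):

import json

print(evaluation)                           # pretty-printed via __str__
payload = json.dumps(evaluation.to_dict())  # compact JSON string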
 
@@ -1451,16 +1474,7 @@

Source code in valor/coretypes.py
@@ -1475,31 +1489,40 @@ 


def wait_for_completion(
-    self,
-    *,
-    timeout: Optional[int] = None,
-    interval: float = 1.0,
-) -> EvaluationStatus:
-    """
-    Blocking function that waits for evaluation to finish.
-
-    Parameters
-    ----------
-    timeout : int, optional
-        Length of timeout in seconds.
-    interval : float, default=1.0
-        Polling interval in seconds.
-    """
-    t_start = time.time()
-    while self.poll() not in [
-        EvaluationStatus.DONE,
-        EvaluationStatus.FAILED,
-    ]:
-        time.sleep(interval)
-        if timeout and time.time() - t_start > timeout:
-            raise TimeoutError
-    return self.status
def wait_for_completion(
+    self,
+    *,
+    timeout: Optional[int] = None,
+    interval: float = 1.0,
+) -> EvaluationStatus:
+    """
+    Blocking function that waits for evaluation to finish.
+
+    Parameters
+    ----------
+    timeout : int, optional
+        Length of timeout in seconds.
+    interval : float, default=1.0
+        Polling interval in seconds.
+    """
+    t_start = time.time()
+    while self.poll() not in [
+        EvaluationStatus.DONE,
+        EvaluationStatus.FAILED,
+    ]:
+        time.sleep(interval)
+        if timeout and time.time() - t_start > timeout:
+            raise TimeoutError
+    return self.status
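
A sketch of guarding against long-running jobs (editorial; the 30-second budget is arbitrary):

try:
    status = evaluation.wait_for_completion(timeout=30, interval=0.5)
except TimeoutError:
    status = evaluation.poll()  # job is still pending or running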
 
diff --git a/client_api/Groundtruth/index.html b/client_api/Groundtruth/index.html
index 0bd532785..f9571116b 100644
--- a/client_api/Groundtruth/index.html
+++ b/client_api/Groundtruth/index.html
@@ -507,14 +507,7 @@

Groundtruth

Source code in valor/coretypes.py
@@ -558,58 +551,65 @@ 

Groundtruth

class GroundTruth(StaticCollection):
-    """
-    An object describing a ground truth (e.g., a human-drawn bounding box on an image).
-
-    Attributes
-    ----------
-    datum : Datum
-        The datum associated with the groundtruth.
-    annotations : List[Annotation]
-        The list of annotations associated with the groundtruth.
class GroundTruth(StaticCollection):
+    """
+    An object describing a ground truth (e.g., a human-drawn bounding box on an image).
 
-    Examples
-    --------
-    >>> GroundTruth(
-    ...     datum=Datum(uid="uid1"),
-    ...     annotations=[
-    ...         Annotation(
-    ...             labels=[Label(key="k1", value="v1")],
-    ...         )
-    ...     ]
-    ... )
-    """
-
-    datum: Datum = Datum.symbolic(owner="groundtruth", name="datum")
-    annotations: SymbolicList[Annotation] = SymbolicList[Annotation].symbolic(
-        owner="groundtruth", name="annotations"
-    )
-
-    def __init__(
-        self,
-        *,
-        datum: Datum,
-        annotations: List[Annotation],
-    ):
-        """
-        Creates a ground truth.
-
-        Parameters
-        ----------
-        datum : Datum
-            The datum that the ground truth is operating over.
-        annotations : List[Annotation]
-            The list of ground truth annotations.
-        """
-        super().__init__(datum=datum, annotations=annotations)
-
-        for annotation in self.annotations:
-            for label in annotation.labels:
-                if label.score is not None:
-                    raise ValueError(
-                        "GroundTruth labels should not have scores."
-                    )
+    Attributes
+    ----------
+    datum : Datum
+        The datum associated with the groundtruth.
+    annotations : List[Annotation]
+        The list of annotations associated with the groundtruth.
+
+    Examples
+    --------
+    >>> GroundTruth(
+    ...     datum=Datum(uid="uid1"),
+    ...     annotations=[
+    ...         Annotation(
+    ...             labels=[Label(key="k1", value="v1")],
+    ...         )
+    ...     ]
+    ... )
+    """
+
+    datum: Datum = Datum.symbolic(owner="groundtruth", name="datum")
+    annotations: SymbolicList[Annotation] = SymbolicList[Annotation].symbolic(
+        owner="groundtruth", name="annotations"
+    )
+
+    def __init__(
+        self,
+        *,
+        datum: Datum,
+        annotations: List[Annotation],
+    ):
+        """
+        Creates a ground truth.
+
+        Parameters
+        ----------
+        datum : Datum
+            The datum that the ground truth is operating over.
+        annotations : List[Annotation]
+            The list of ground truth annotations.
+        """
+        super().__init__(datum=datum, annotations=annotations)
+
+        for annotation in self.annotations:
+            for label in annotation.labels:
+                if label.score is not None:
+                    raise ValueError(
+                        "GroundTruth labels should not have scores."
+                    )
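
A short sketch of the score check above (editorial; the labels are made up):

from valor import Annotation, Datum, GroundTruth, Label

# Valid: ground truth labels carry no scores.
gt = GroundTruth(
    datum=Datum(uid="uid1"),
    annotations=[Annotation(labels=[Label(key="k1", value="v1")])],
)

# Invalid: a scored label raises ValueError("GroundTruth labels should not have scores.").
GroundTruth(
    datum=Datum(uid="uid1"),
    annotations=[Annotation(labels=[Label(key="k1", value="v1", score=0.9)])],
)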
 
@@ -663,14 +663,7 @@

Source code in valor/coretypes.py
@@ -686,30 +679,37 @@ 


def __init__(
-    self,
-    *,
-    datum: Datum,
-    annotations: List[Annotation],
-):
-    """
-    Creates a ground truth.
-
-    Parameters
-    ----------
-    datum : Datum
-        The datum that the ground truth is operating over.
-    annotations : List[Annotation]
-        The list of ground truth annotations.
-    """
-    super().__init__(datum=datum, annotations=annotations)
-
-    for annotation in self.annotations:
-        for label in annotation.labels:
-            if label.score is not None:
-                raise ValueError(
-                    "GroundTruth labels should not have scores."
-                )
def __init__(
+    self,
+    *,
+    datum: Datum,
+    annotations: List[Annotation],
+):
+    """
+    Creates a ground truth.
+
+    Parameters
+    ----------
+    datum : Datum
+        The datum that the ground truth is operating over.
+    annotations : List[Annotation]
+        The list of ground truth annotations.
+    """
+    super().__init__(datum=datum, annotations=annotations)
+
+    for annotation in self.annotations:
+        for label in annotation.labels:
+            if label.score is not None:
+                raise ValueError(
+                    "GroundTruth labels should not have scores."
+                )
 
diff --git a/client_api/Model/index.html b/client_api/Model/index.html
index 3c5e3cba4..28477b614 100644
--- a/client_api/Model/index.html
+++ b/client_api/Model/index.html
@@ -670,17 +670,7 @@

Model

Source code in valor/coretypes.py
@@ -1127,464 +1117,528 @@ 

Model

class Model(StaticCollection):
-    """
-    A class describing a model that was trained on a particular dataset.
-
-    Attributes
-    ----------
-    name : String
-        The name of the model.
-    metadata : Dictionary
-        A dictionary of metadata that describes the model.
-
-    Examples
-    --------
-    >>> Model.create(name="model1")
-    >>> Model.create(name="model1", metadata={})
-    >>> Model.create(name="model1", metadata={"foo": "bar", "pi": 3.14})
-    """
-
-    name: String = String.symbolic(owner="model", name="name")
-    metadata: Dictionary = Dictionary.symbolic(owner="model", name="metadata")
class Model(StaticCollection):
+    """
+    A class describing a model that was trained on a particular dataset.
+
+    Attributes
+    ----------
+    name : String
+        The name of the model.
+    metadata : Dictionary
+        A dictionary of metadata that describes the model.
 
-    def __init__(
-        self,
-        *,
-        name: str,
-        metadata: Optional[dict] = None,
-        connection: Optional[ClientConnection] = None,
-    ):
-        """
-        Creates a local instance of a model.
+    Examples
+    --------
+    >>> Model.create(name="model1")
+    >>> Model.create(name="model1", metadata={})
+    >>> Model.create(name="model1", metadata={"foo": "bar", "pi": 3.14})
+    """
+
+    name: String = String.symbolic(owner="model", name="name")
+    metadata: Dictionary = Dictionary.symbolic(owner="model", name="metadata")
 
-        Use 'Model.create' classmethod to create a model with persistence.
-
-        Parameters
-        ----------
-        name : String
-            The name of the model.
-        metadata : Dictionary
-            A dictionary of metadata that describes the model.
-        connection : ClientConnection, optional
-            An initialized client connection.
-        """
-        self.conn = connection
-        super().__init__(name=name, metadata=metadata if metadata else dict())
-
-    @classmethod
-    def create(
-        cls,
-        name: str,
-        metadata: Optional[Dict[str, Any]] = None,
-        connection: Optional[ClientConnection] = None,
-        **_,
-    ) -> Model:
-        """
-        Creates a model that persists in the back end.
-
-        Parameters
-        ----------
-        name : str
-            The name of the model.
-        metadata : dict, optional
-            A dictionary of metadata that describes the model.
-        connection : ClientConnection, optional
-            An initialized client connection.
-        """
-        model = cls(name=name, metadata=metadata, connection=connection)
-        Client(connection).create_model(model)
-        return model
-
-    @classmethod
-    def get(
-        cls,
-        name: str,
-        connection: Optional[ClientConnection] = None,
-    ) -> Union[Model, None]:
-        """
-        Retrieves a model from the back end database.
-
-        Parameters
-        ----------
-        name : str
-            The name of the model.
-        connection : ClientConnnetion, optional
-            An optional Valor client object for interacting with the API.
-
-        Returns
-        -------
-        Union[valor.Model, None]
-            The model or 'None' if it doesn't exist.
-        """
-        return Client(connection).get_model(name)
-
-    def add_prediction(
-        self,
-        dataset: Dataset,
-        prediction: Prediction,
-    ) -> None:
-        """
-        Add a prediction to the model.
-
-        Parameters
-        ----------
-        dataset : valor.Dataset
-            The dataset that is being operated over.
-        prediction : valor.Prediction
-            The prediction to create.
-        """
-        Client(self.conn).create_predictions(
-            dataset=dataset,
-            model=self,
-            predictions=[prediction],
-        )
-
-    def add_predictions(
-        self,
-        dataset: Dataset,
-        predictions: List[Prediction],
-    ) -> None:
-        """
-        Add multiple predictions to the model.
-
-        Parameters
-        ----------
-        dataset : valor.Dataset
-            The dataset that is being operated over.
-        predictions : List[valor.Prediction]
-            The predictions to create.
-        """
-        Client(self.conn).create_predictions(
-            dataset=dataset,
-            model=self,
-            predictions=predictions,
-        )
-
-    def get_prediction(
-        self, dataset: Union[Dataset, str], datum: Union[Datum, str]
-    ) -> Union[Prediction, None]:
-        """
-        Get a particular prediction.
-
-        Parameters
-        ----------
-        dataset: Union[Dataset, str]
-            The dataset the datum belongs to.
-        datum: Union[Datum, str]
-            The desired datum.
-
-        Returns
-        ----------
-        Union[Prediction, None]
-            The matching prediction or 'None' if it doesn't exist.
-        """
-        return Client(self.conn).get_prediction(
-            dataset=dataset, model=self, datum=datum
-        )
-
-    def finalize_inferences(self, dataset: Union[Dataset, str]) -> None:
-        """
-        Finalizes the model over a dataset such that new predictions cannot be added to it.
-        """
-        return Client(self.conn).finalize_inferences(
-            dataset=dataset, model=self
-        )
-
-    def _format_constraints(
-        self,
-        datasets: Optional[Union[Dataset, List[Dataset]]] = None,
-        filter_by: Optional[FilterType] = None,
-    ) -> Filter:
-        """Formats the 'datum_filter' for any evaluation requests."""
-
-        # get list of dataset names
-        dataset_names_from_obj = []
-        if isinstance(datasets, list):
-            dataset_names_from_obj = [dataset.name for dataset in datasets]
-        elif isinstance(datasets, Dataset):
-            dataset_names_from_obj = [datasets.name]
-
-        # create a 'schemas.Filter' object from the constraints.
-        filter_ = _format_filter(filter_by)
+    def __init__(
+        self,
+        *,
+        name: str,
+        metadata: Optional[dict] = None,
+        connection: Optional[ClientConnection] = None,
+    ):
+        """
+        Creates a local instance of a model.
+
+        Use the 'Model.create' classmethod to create a model with persistence.
+
+        Parameters
+        ----------
+        name : String
+            The name of the model.
+        metadata : Dictionary
+            A dictionary of metadata that describes the model.
+        connection : ClientConnection, optional
+            An initialized client connection.
+        """
+        self.conn = connection
+        super().__init__(name=name, metadata=metadata if metadata else dict())
+
+    @classmethod
+    def create(
+        cls,
+        name: str,
+        metadata: Optional[Dict[str, Any]] = None,
+        connection: Optional[ClientConnection] = None,
+        **_,
+    ) -> Model:
+        """
+        Creates a model that persists in the back end.
+
+        Parameters
+        ----------
+        name : str
+            The name of the model.
+        metadata : dict, optional
+            A dictionary of metadata that describes the model.
+        connection : ClientConnection, optional
+            An initialized client connection.
+        """
+        model = cls(name=name, metadata=metadata, connection=connection)
+        Client(connection).create_model(model)
+        return model
+
+    @classmethod
+    def get(
+        cls,
+        name: str,
+        connection: Optional[ClientConnection] = None,
+    ) -> Union[Model, None]:
+        """
+        Retrieves a model from the back end database.
+
+        Parameters
+        ----------
+        name : str
+            The name of the model.
+        connection : ClientConnnetion, optional
+            An optional Valor client object for interacting with the API.
+
+        Returns
+        -------
+        Union[valor.Model, None]
+            The model or 'None' if it doesn't exist.
+        """
+        return Client(connection).get_model(name)
+
+    def add_prediction(
+        self,
+        dataset: Dataset,
+        prediction: Prediction,
+    ) -> None:
+        """
+        Add a prediction to the model.
+
+        Parameters
+        ----------
+        dataset : valor.Dataset
+            The dataset that is being operated over.
+        prediction : valor.Prediction
+            The prediction to create.
+        """
+        Client(self.conn).create_predictions(
+            dataset=dataset,
+            model=self,
+            predictions=[prediction],
+        )
+
+    def add_predictions(
+        self,
+        dataset: Dataset,
+        predictions: List[Prediction],
+    ) -> None:
+        """
+        Add multiple predictions to the model.
+
+        Parameters
+        ----------
+        dataset : valor.Dataset
+            The dataset that is being operated over.
+        predictions : List[valor.Prediction]
+            The predictions to create.
+        """
+        Client(self.conn).create_predictions(
+            dataset=dataset,
+            model=self,
+            predictions=predictions,
+        )
+
+    def get_prediction(
+        self, dataset: Union[Dataset, str], datum: Union[Datum, str]
+    ) -> Union[Prediction, None]:
+        """
+        Get a particular prediction.
+
+        Parameters
+        ----------
+        dataset: Union[Dataset, str]
+            The dataset the datum belongs to.
+        datum: Union[Datum, str]
+            The desired datum.
+
+        Returns
+        ----------
+        Union[Prediction, None]
+            The matching prediction or 'None' if it doesn't exist.
+        """
+        return Client(self.conn).get_prediction(
+            dataset=dataset, model=self, datum=datum
+        )
+
+    def finalize_inferences(self, dataset: Union[Dataset, str]) -> None:
+        """
+        Finalizes the model over a dataset such that new predictions cannot be added to it.
+        """
+        return Client(self.conn).finalize_inferences(
+            dataset=dataset, model=self
+        )
+
+    def _format_constraints(
+        self,
+        datasets: Optional[Union[Dataset, List[Dataset]]] = None,
+        filter_by: Optional[FilterType] = None,
+    ) -> Filter:
+        """Formats the 'filters' for any evaluation requests."""
 
-        # reset model name
-        filter_.model_names = None
-        filter_.model_metadata = None
-
-        # set dataset names
-        if not filter_.dataset_names:
-            filter_.dataset_names = []
-        filter_.dataset_names.extend(dataset_names_from_obj)  # type: ignore
-        return filter_
+        # get list of dataset names
+        dataset_names_from_obj = []
+        if isinstance(datasets, list):
+            dataset_names_from_obj = [dataset.name for dataset in datasets]
+        elif isinstance(datasets, Dataset):
+            dataset_names_from_obj = [datasets.name]
+
+        # create a 'schemas.Filter' object from the constraints.
+        filters = _format_filter(filter_by)
 
-    def _create_label_map(
-        self,
-        label_map: Optional[Dict[Label, Label]],
-    ) -> Union[List[List[List[str]]], None]:
-        """Convert a dictionary of label maps to a serializable list format."""
-        if not label_map:
-            return None
-
-        if not isinstance(label_map, dict) or not all(
-            [
-                isinstance(key, Label) and isinstance(value, Label)
-                for key, value in label_map.items()
-            ]
-        ):
-            raise TypeError(
-                "label_map should be a dictionary with valid Labels for both the key and value."
-            )
+        # reset model name
+        filters.model_names = None
+        filters.model_metadata = None
+
+        # set dataset names
+        if not filters.dataset_names:
+            filters.dataset_names = []
+        filters.dataset_names.extend(dataset_names_from_obj)  # type: ignore
+        return filters
+
+    def _create_label_map(
+        self,
+        label_map: Optional[Dict[Label, Label]],
+    ) -> Union[List[List[List[str]]], None]:
+        """Convert a dictionary of label maps to a serializable list format."""
+        if not label_map:
+            return None
 
-        return_value = []
-        for key, value in label_map.items():
-            if not all(
-                [
-                    (isinstance(v.key, str) and isinstance(v.value, str))
-                    for v in [key, value]
-                ]
-            ):
-                raise TypeError
-            return_value.append(
-                [
-                    [key.key, key.value],
-                    [value.key, value.value],
-                ]
-            )
-        return return_value
-
-    def evaluate_classification(
-        self,
-        datasets: Optional[Union[Dataset, List[Dataset]]] = None,
-        filter_by: Optional[FilterType] = None,
-        label_map: Optional[Dict[Label, Label]] = None,
-        pr_curve_max_examples: int = 1,
-        metrics_to_return: Optional[List[str]] = None,
-        allow_retries: bool = False,
-    ) -> Evaluation:
-        """
-        Start a classification evaluation job.
-
-        Parameters
-        ----------
-        datasets : Union[Dataset, List[Dataset]], optional
-            The dataset or list of datasets to evaluate against.
-        filter_by : FilterType, optional
-            Optional set of constraints to filter evaluation by.
-        label_map : Dict[Label, Label], optional
-            Optional mapping of individual labels to a grouper label. Useful when you need to evaluate performance using labels that differ across datasets and models.
-        metrics: List[str], optional
-            The list of metrics to compute, store, and return to the user.
-        allow_retries : bool, default = False
-            Option to retry previously failed evaluations.
-
-        Returns
-        -------
-        Evaluation
-            A job object that can be used to track the status of the job and get the metrics of it upon completion.
-        """
-        if not datasets and not filter_by:
-            raise ValueError(
-                "Evaluation requires the definition of either datasets, dataset filters or both."
-            )
+        if not isinstance(label_map, dict) or not all(
+            [
+                isinstance(key, Label) and isinstance(value, Label)
+                for key, value in label_map.items()
+            ]
+        ):
+            raise TypeError(
+                "label_map should be a dictionary with valid Labels for both the key and value."
+            )
+
+        return_value = []
+        for key, value in label_map.items():
+            if not all(
+                [
+                    (isinstance(v.key, str) and isinstance(v.value, str))
+                    for v in [key, value]
+                ]
+            ):
+                raise TypeError
+            return_value.append(
+                [
+                    [key.key, key.value],
+                    [value.key, value.value],
+                ]
+            )
+        return return_value
+
+    def evaluate_classification(
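+    # Editorial note (not part of the original source): with hypothetical labels,
+    # a label_map of {Label(key="dog", value="dog"): Label(key="mammal", value="mammal")}
+    # serializes to [[["dog", "dog"], ["mammal", "mammal"]]].
+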
+        self,
+        datasets: Union[Dataset, List[Dataset]],
+        filter_by: Optional[FilterType] = None,
+        label_map: Optional[Dict[Label, Label]] = None,
+        pr_curve_max_examples: int = 1,
+        metrics_to_return: Optional[List[MetricType]] = None,
+        allow_retries: bool = False,
+    ) -> Evaluation:
+        """
+        Start a classification evaluation job.
+
+        Parameters
+        ----------
+        datasets : Union[Dataset, List[Dataset]]
+            The dataset or list of datasets to evaluate against.
+        filter_by : FilterType, optional
+            Optional set of constraints to filter evaluation by.
+        label_map : Dict[Label, Label], optional
+            Optional mapping of individual labels to a grouper label. Useful when you need to evaluate performance using labels that differ across datasets and models.
+        metrics_to_return: List[MetricType], optional
+            The list of metrics to compute, store, and return to the user.
+        allow_retries : bool, default = False
+            Option to retry previously failed evaluations.
 
-        # format request
-        datum_filter = self._format_constraints(datasets, filter_by)
-        request = EvaluationRequest(
-            model_names=[self.name],  # type: ignore
-            datum_filter=datum_filter,
-            parameters=EvaluationParameters(
-                task_type=TaskType.CLASSIFICATION,
-                label_map=self._create_label_map(label_map=label_map),
-                pr_curve_max_examples=pr_curve_max_examples,
-                metrics_to_return=metrics_to_return,
-            ),
-        )
-
-        # create evaluation
-        evaluation = Client(self.conn).evaluate(
-            request, allow_retries=allow_retries
-        )
-        if len(evaluation) != 1:
-            raise RuntimeError
-        return evaluation[0]
-
-    def evaluate_detection(
-        self,
-        datasets: Optional[Union[Dataset, List[Dataset]]] = None,
-        filter_by: Optional[FilterType] = None,
-        convert_annotations_to_type: Optional[AnnotationType] = None,
-        iou_thresholds_to_compute: Optional[List[float]] = None,
-        iou_thresholds_to_return: Optional[List[float]] = None,
-        label_map: Optional[Dict[Label, Label]] = None,
-        recall_score_threshold: float = 0,
-        metrics_to_return: Optional[List[str]] = None,
-        pr_curve_iou_threshold: float = 0.5,
-        pr_curve_max_examples: int = 1,
-        allow_retries: bool = False,
-    ) -> Evaluation:
-        """
-        Start an object-detection evaluation job.
-
-        Parameters
-        ----------
-        datasets : Union[Dataset, List[Dataset]], optional
-            The dataset or list of datasets to evaluate against.
-        filter_by : FilterType, optional
-            Optional set of constraints to filter evaluation by.
-        convert_annotations_to_type : enums.AnnotationType, optional
-            Forces the object detection evaluation to compute over this type.
-        iou_thresholds_to_compute : List[float], optional
-            Thresholds to compute mAP against.
-        iou_thresholds_to_return : List[float], optional
-            Thresholds to return AP for. Must be subset of `iou_thresholds_to_compute`.
-        label_map : Dict[Label, Label], optional
-            Optional mapping of individual labels to a grouper label. Useful when you need to evaluate performance using labels that differ across datasets and models.
-        recall_score_threshold: float, default=0
-            The confidence score threshold for use when determining whether to count a prediction as a true positive or not while calculating Average Recall.
-        pr_curve_iou_threshold: float, optional
-            The IOU threshold to use when calculating precision-recall curves. Defaults to 0.5.
-        pr_curve_max_examples: int, optional
-            The maximum number of datum examples to store when calculating PR curves.
-        allow_retries : bool, default = False
-            Option to retry previously failed evaluations.
-
-
-        Returns
-        -------
-        Evaluation
-            A job object that can be used to track the status of the job and get the metrics of it upon completion.
-        """
-        if iou_thresholds_to_compute is None:
-            iou_thresholds_to_compute = [
-                round(0.5 + 0.05 * i, 2) for i in range(10)
-            ]
-        if iou_thresholds_to_return is None:
-            iou_thresholds_to_return = [0.5, 0.75]
-
-        # format request
-        parameters = EvaluationParameters(
-            task_type=TaskType.OBJECT_DETECTION,
-            convert_annotations_to_type=convert_annotations_to_type,
-            iou_thresholds_to_compute=iou_thresholds_to_compute,
-            iou_thresholds_to_return=iou_thresholds_to_return,
-            label_map=self._create_label_map(label_map=label_map),
-            recall_score_threshold=recall_score_threshold,
-            metrics_to_return=metrics_to_return,
-            pr_curve_iou_threshold=pr_curve_iou_threshold,
-            pr_curve_max_examples=pr_curve_max_examples,
-        )
-        datum_filter = self._format_constraints(datasets, filter_by)
-        request = EvaluationRequest(
-            model_names=[self.name],  # type: ignore
-            datum_filter=datum_filter,
-            parameters=parameters,
-        )
+        Returns
+        -------
+        Evaluation
+            A job object that can be used to track the status of the job and get the metrics of it upon completion.
+        """
+        if not datasets and not filter_by:
+            raise ValueError(
+                "Evaluation requires the definition of either datasets, dataset filters, or both."
+            )
+        elif metrics_to_return and not set(metrics_to_return).issubset(
+            MetricType.classification()
+        ):
+            raise ValueError(
+                f"The following metrics are not supported for classification: '{set(metrics_to_return) - MetricType.classification()}'"
+            )
+
+        # format request
+        filters = self._format_constraints(datasets, filter_by)
+        datasets = datasets if isinstance(datasets, list) else [datasets]
+        request = EvaluationRequest(
+            dataset_names=[dataset.name for dataset in datasets],  # type: ignore - issue #604
+            model_names=[self.name],  # type: ignore - issue #604
+            filters=filters,
+            parameters=EvaluationParameters(
+                task_type=TaskType.CLASSIFICATION,
+                label_map=self._create_label_map(label_map=label_map),
+                pr_curve_max_examples=pr_curve_max_examples,
+                metrics_to_return=metrics_to_return,
+            ),
+        )
+
+        # create evaluation
+        evaluation = Client(self.conn).evaluate(
+            request, allow_retries=allow_retries
+        )
+        if len(evaluation) != 1:
+            raise RuntimeError
+        return evaluation[0]
+
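+    # Usage sketch (editorial, not part of the original source; `ds` is a
+    # hypothetical, already-finalized Dataset):
+    #   model = Model.create(name="model1")
+    #   evaluation = model.evaluate_classification(datasets=ds)
+    #   evaluation.wait_for_completion()
+    #   print(evaluation.metrics)
+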
+    def evaluate_detection(
+        self,
+        datasets: Union[Dataset, List[Dataset]],
+        filter_by: Optional[FilterType] = None,
+        convert_annotations_to_type: Optional[AnnotationType] = None,
+        iou_thresholds_to_compute: Optional[List[float]] = None,
+        iou_thresholds_to_return: Optional[List[float]] = None,
+        label_map: Optional[Dict[Label, Label]] = None,
+        recall_score_threshold: float = 0,
+        metrics_to_return: Optional[List[MetricType]] = None,
+        pr_curve_iou_threshold: float = 0.5,
+        pr_curve_max_examples: int = 1,
+        allow_retries: bool = False,
+    ) -> Evaluation:
+        """
+        Start an object-detection evaluation job.
+
+        Parameters
+        ----------
+        datasets : Union[Dataset, List[Dataset]]
+            The dataset or list of datasets to evaluate against.
+        filter_by : FilterType, optional
+            Optional set of constraints to filter evaluation by.
+        convert_annotations_to_type : enums.AnnotationType, optional
+            Forces the object detection evaluation to compute over this type.
+        iou_thresholds_to_compute : List[float], optional
+            Thresholds to compute mAP against.
+        iou_thresholds_to_return : List[float], optional
+            Thresholds to return AP for. Must be subset of `iou_thresholds_to_compute`.
+        label_map : Dict[Label, Label], optional
+            Optional mapping of individual labels to a grouper label. Useful when you need to evaluate performance using labels that differ across datasets and models.
+        recall_score_threshold: float, default=0
+            The confidence score threshold for use when determining whether to count a prediction as a true positive or not while calculating Average Recall.
+        metrics_to_return: List[MetricType], optional
+            The list of metrics to compute, store, and return to the user.
+        pr_curve_iou_threshold: float, optional
+            The IOU threshold to use when calculating precision-recall curves. Defaults to 0.5.
+        pr_curve_max_examples: int, optional
+            The maximum number of datum examples to store when calculating PR curves.
+        allow_retries : bool, default = False
+            Option to retry previously failed evaluations.
+
+        Returns
+        -------
+        Evaluation
+            A job object that can be used to track the status of the job and get the metrics of it upon completion.
+        """
+        if metrics_to_return and not set(metrics_to_return).issubset(
+            MetricType.object_detection()
+        ):
+            raise ValueError(
+                f"The following metrics are not supported for object detection: '{set(metrics_to_return) - MetricType.object_detection()}'"
+            )
 
-        # create evaluation
-        evaluation = Client(self.conn).evaluate(
-            request, allow_retries=allow_retries
-        )
-        if len(evaluation) != 1:
-            raise RuntimeError
-        return evaluation[0]
-
-    def evaluate_segmentation(
-        self,
-        datasets: Optional[Union[Dataset, List[Dataset]]] = None,
-        filter_by: Optional[FilterType] = None,
-        label_map: Optional[Dict[Label, Label]] = None,
-        metrics_to_return: Optional[List[str]] = None,
-        allow_retries: bool = False,
-    ) -> Evaluation:
-        """
-        Start a semantic-segmentation evaluation job.
-
-        Parameters
-        ----------
-        datasets : Union[Dataset, List[Dataset]], optional
-            The dataset or list of datasets to evaluate against.
-        filter_by : FilterType, optional
-            Optional set of constraints to filter evaluation by.
-        label_map : Dict[Label, Label], optional
-            Optional mapping of individual labels to a grouper label. Useful when you need to evaluate performance using labels that differ across datasets and models.
-        metrics: List[str], optional
-            The list of metrics to compute, store, and return to the user.
-        allow_retries : bool, default = False
-            Option to retry previously failed evaluations.
-
-        Returns
-        -------
-        Evaluation
-            A job object that can be used to track the status of the job and get the metrics of it upon completion
-        """
-        # format request
-        datum_filter = self._format_constraints(datasets, filter_by)
-        request = EvaluationRequest(
-            model_names=[self.name],  # type: ignore
-            datum_filter=datum_filter,
-            parameters=EvaluationParameters(
-                task_type=TaskType.SEMANTIC_SEGMENTATION,
-                label_map=self._create_label_map(label_map=label_map),
-                metrics_to_return=metrics_to_return,
-            ),
-        )
-
-        # create evaluation
-        evaluation = Client(self.conn).evaluate(
-            request, allow_retries=allow_retries
-        )
-        if len(evaluation) != 1:
-            raise RuntimeError
-        return evaluation[0]
-
-    def delete(self, timeout: int = 0):
-        """
-        Delete the `Model` object from the back end.
-
-        Parameters
-        ----------
-        timeout : int, default=0
-            Sets a timeout in seconds.
-        """
-        Client(self.conn).delete_model(self.name, timeout)  # type: ignore
-
-    def get_labels(
-        self,
-    ) -> List[Label]:
-        """
-        Get all labels associated with a given model.
-
-        Returns
-        ----------
-        List[Label]
-            A list of `Labels` associated with the model.
-        """
-        return Client(self.conn).get_labels_from_model(self)
-
-    def get_evaluations(
-        self,
-        metrics_to_sort_by: Optional[
-            Dict[str, Union[Dict[str, str], str]]
-        ] = None,
-    ) -> List[Evaluation]:
-        """
-        Get all evaluations associated with a given model.
-
-        Parameters
-        ----------
-        metrics_to_sort_by: dict[str, str | dict[str, str]], optional
-            An optional dict of metric types to sort the evaluations by.
-
-
-        Returns
-        ----------
-        List[Evaluation]
-            A list of `Evaluations` associated with the model.
-        """
-        return Client(self.conn).get_evaluations(
-            models=[self], metrics_to_sort_by=metrics_to_sort_by
-        )
+        if iou_thresholds_to_compute is None:
+            iou_thresholds_to_compute = [
+                round(0.5 + 0.05 * i, 2) for i in range(10)
+            ]
+        if iou_thresholds_to_return is None:
+            iou_thresholds_to_return = [0.5, 0.75]
+
+        # format request
+        parameters = EvaluationParameters(
+            task_type=TaskType.OBJECT_DETECTION,
+            convert_annotations_to_type=convert_annotations_to_type,
+            iou_thresholds_to_compute=iou_thresholds_to_compute,
+            iou_thresholds_to_return=iou_thresholds_to_return,
+            label_map=self._create_label_map(label_map=label_map),
+            recall_score_threshold=recall_score_threshold,
+            metrics_to_return=metrics_to_return,
+            pr_curve_iou_threshold=pr_curve_iou_threshold,
+            pr_curve_max_examples=pr_curve_max_examples,
+        )
+        filters = self._format_constraints(datasets, filter_by)
+        datasets = datasets if isinstance(datasets, list) else [datasets]
+        request = EvaluationRequest(
+            dataset_names=[dataset.name for dataset in datasets],  # type: ignore - issue #604
+            model_names=[self.name],  # type: ignore - issue #604
+            filters=filters,
+            parameters=parameters,
+        )
+
+        # create evaluation
+        evaluation = Client(self.conn).evaluate(
+            request, allow_retries=allow_retries
+        )
+        if len(evaluation) != 1:
+            raise RuntimeError
+        return evaluation[0]
+
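+    # Usage sketch (editorial; `ds` is a hypothetical Dataset, and the IOU
+    # thresholds satisfy the documented subset requirement):
+    #   evaluation = model.evaluate_detection(
+    #       datasets=ds,
+    #       iou_thresholds_to_compute=[0.5, 0.75],
+    #       iou_thresholds_to_return=[0.5],
+    #   )
+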
+    def evaluate_segmentation(
+        self,
+        datasets: Union[Dataset, List[Dataset]],
+        filter_by: Optional[FilterType] = None,
+        label_map: Optional[Dict[Label, Label]] = None,
+        metrics_to_return: Optional[List[MetricType]] = None,
+        allow_retries: bool = False,
+    ) -> Evaluation:
+        """
+        Start a semantic-segmentation evaluation job.
+
+        Parameters
+        ----------
+        datasets : Union[Dataset, List[Dataset]]
+            The dataset or list of datasets to evaluate against.
+        filter_by : FilterType, optional
+            Optional set of constraints to filter evaluation by.
+        label_map : Dict[Label, Label], optional
+            Optional mapping of individual labels to a grouper label. Useful when you need to evaluate performance using labels that differ across datasets and models.
+        metrics_to_return: List[MetricType], optional
+            The list of metrics to compute, store, and return to the user.
+        allow_retries : bool, default = False
+            Option to retry previously failed evaluations.
+
+        Returns
+        -------
+        Evaluation
+            A job object that can be used to track the status of the job and get the metrics of it upon completion.
+        """
+        if metrics_to_return and not set(metrics_to_return).issubset(
+            MetricType.semantic_segmentation()
+        ):
+            raise ValueError(
+                f"The following metrics are not supported for semantic segmentation: '{set(metrics_to_return) - MetricType.semantic_segmentation()}'"
+            )
+
+        # format request
+        filters = self._format_constraints(datasets, filter_by)
+        datasets = datasets if isinstance(datasets, list) else [datasets]
+        request = EvaluationRequest(
+            dataset_names=[dataset.name for dataset in datasets],  # type: ignore - issue #604
+            model_names=[self.name],  # type: ignore - issue #604
+            filters=filters,
+            parameters=EvaluationParameters(
+                task_type=TaskType.SEMANTIC_SEGMENTATION,
+                label_map=self._create_label_map(label_map=label_map),
+                metrics_to_return=metrics_to_return,
+            ),
+        )
+
+        # create evaluation
+        evaluation = Client(self.conn).evaluate(
+            request, allow_retries=allow_retries
+        )
+        if len(evaluation) != 1:
+            raise RuntimeError
+        return evaluation[0]
+
+    def delete(self, timeout: int = 0):
+        """
+        Delete the `Model` object from the back end.
+
+        Parameters
+        ----------
+        timeout : int, default=0
+            Sets a timeout in seconds.
+        """
+        Client(self.conn).delete_model(self.name, timeout)  # type: ignore
+
+    def get_labels(
+        self,
+    ) -> List[Label]:
+        """
+        Get all labels associated with a given model.
+
+        Returns
+        ----------
+        List[Label]
+            A list of `Labels` associated with the model.
+        """
+        return Client(self.conn).get_labels_from_model(self)
+
+    def get_evaluations(
+        self,
+        metrics_to_sort_by: Optional[
+            Dict[str, Union[Dict[str, str], str]]
+        ] = None,
+    ) -> List[Evaluation]:
+        """
+        Get all evaluations associated with a given model.
+
+        Parameters
+        ----------
+        metrics_to_sort_by: dict[str, str | dict[str, str]], optional
+            An optional dict of metric types to sort the evaluations by.
+
+
+        Returns
+        ----------
+        List[Evaluation]
+            A list of `Evaluations` associated with the model.
+        """
+        return Client(self.conn).get_evaluations(
+            models=[self], metrics_to_sort_by=metrics_to_sort_by
+        )
 
@@ -1653,17 +1707,7 @@

Source code in valor/coretypes.py -
664
-665
-666
-667
-668
-669
-670
-671
-672
-673
-674
+
674
 675
 676
 677
@@ -1675,29 +1719,39 @@ 

683 684 685 -686

def __init__(
-    self,
-    *,
-    name: str,
-    metadata: Optional[dict] = None,
-    connection: Optional[ClientConnection] = None,
-):
-    """
-    Creates a local instance of a model.
-
-    Use 'Model.create' classmethod to create a model with persistence.
-
-    Parameters
-    ----------
-    name : String
-        The name of the model.
-    metadata : Dictionary
-        A dictionary of metadata that describes the model.
-    connection : ClientConnection, optional
-        An initialized client connection.
-    """
-    self.conn = connection
-    super().__init__(name=name, metadata=metadata if metadata else dict())
+686
+687
+688
+689
+690
+691
+692
+693
+694
+695
+696
def __init__(
+    self,
+    *,
+    name: str,
+    metadata: Optional[dict] = None,
+    connection: Optional[ClientConnection] = None,
+):
+    """
+    Creates a local instance of a model.
+
+    Use 'Model.create' classmethod to create a model with persistence.
+
+    Parameters
+    ----------
+    name : String
+        The name of the model.
+    metadata : Dictionary
+        A dictionary of metadata that describes the model.
+    connection : ClientConnection, optional
+        An initialized client connection.
+    """
+    self.conn = connection
+    super().__init__(name=name, metadata=metadata if metadata else dict())
 
@@ -1751,17 +1805,7 @@

Source code in valor/coretypes.py -
735
-736
-737
-738
-739
-740
-741
-742
-743
-744
-745
+
745
 746
 747
 748
@@ -1770,26 +1814,36 @@ 

751 752 753 -754

def add_prediction(
-    self,
-    dataset: Dataset,
-    prediction: Prediction,
-) -> None:
-    """
-    Add a prediction to the model.
-
-    Parameters
-    ----------
-    dataset : valor.Dataset
-        The dataset that is being operated over.
-    prediction : valor.Prediction
-        The prediction to create.
-    """
-    Client(self.conn).create_predictions(
-        dataset=dataset,
-        model=self,
-        predictions=[prediction],
-    )
+754
+755
+756
+757
+758
+759
+760
+761
+762
+763
+764
def add_prediction(
+    self,
+    dataset: Dataset,
+    prediction: Prediction,
+) -> None:
+    """
+    Add a prediction to the model.
+
+    Parameters
+    ----------
+    dataset : valor.Dataset
+        The dataset that is being operated over.
+    prediction : valor.Prediction
+        The prediction to create.
+    """
+    Client(self.conn).create_predictions(
+        dataset=dataset,
+        model=self,
+        predictions=[prediction],
+    )
 
@@ -1843,17 +1897,7 @@

Source code in valor/coretypes.py -
756
-757
-758
-759
-760
-761
-762
-763
-764
-765
-766
+
766
 767
 768
 769
@@ -1862,26 +1906,36 @@ 

772 773 774 -775

def add_predictions(
-    self,
-    dataset: Dataset,
-    predictions: List[Prediction],
-) -> None:
-    """
-    Add multiple predictions to the model.
-
-    Parameters
-    ----------
-    dataset : valor.Dataset
-        The dataset that is being operated over.
-    predictions : List[valor.Prediction]
-        The predictions to create.
-    """
-    Client(self.conn).create_predictions(
-        dataset=dataset,
-        model=self,
-        predictions=predictions,
-    )
+775
+776
+777
+778
+779
+780
+781
+782
+783
+784
+785
def add_predictions(
+    self,
+    dataset: Dataset,
+    predictions: List[Prediction],
+) -> None:
+    """
+    Add multiple predictions to the model.
+
+    Parameters
+    ----------
+    dataset : valor.Dataset
+        The dataset that is being operated over.
+    predictions : List[valor.Prediction]
+        The predictions to create.
+    """
+    Client(self.conn).create_predictions(
+        dataset=dataset,
+        model=self,
+        predictions=predictions,
+    )
 
@@ -1952,17 +2006,7 @@

Source code in valor/coretypes.py -
688
-689
-690
-691
-692
-693
-694
-695
-696
-697
-698
+
698
 699
 700
 701
@@ -1974,29 +2018,39 @@ 

707 708 709 -710

@classmethod
-def create(
-    cls,
-    name: str,
-    metadata: Optional[Dict[str, Any]] = None,
-    connection: Optional[ClientConnection] = None,
-    **_,
-) -> Model:
-    """
-    Creates a model that persists in the back end.
-
-    Parameters
-    ----------
-    name : str
-        The name of the model.
-    metadata : dict, optional
-        A dictionary of metadata that describes the model.
-    connection : ClientConnection, optional
-        An initialized client connection.
-    """
-    model = cls(name=name, metadata=metadata, connection=connection)
-    Client(connection).create_model(model)
-    return model
+710
+711
+712
+713
+714
+715
+716
+717
+718
+719
+720
@classmethod
+def create(
+    cls,
+    name: str,
+    metadata: Optional[Dict[str, Any]] = None,
+    connection: Optional[ClientConnection] = None,
+    **_,
+) -> Model:
+    """
+    Creates a model that persists in the back end.
+
+    Parameters
+    ----------
+    name : str
+        The name of the model.
+    metadata : dict, optional
+        A dictionary of metadata that describes the model.
+    connection : ClientConnection, optional
+        An initialized client connection.
+    """
+    model = cls(name=name, metadata=metadata, connection=connection)
+    Client(connection).create_model(model)
+    return model
 
@@ -2036,32 +2090,32 @@

Source code in valor/coretypes.py -
@@ -2119,8 +2173,9 @@

- + @@ -2170,17 +2225,7 @@

def delete(self, timeout: int = 0):
-    """
-    Delete the `Model` object from the back end.
-
-    Parameters
-    ----------
-    timeout : int, default=0
-        Sets a timeout in seconds.
-    """
-    Client(self.conn).delete_model(self.name, timeout)  # type: ignore
+
def delete(self, timeout: int = 0):
+    """
+    Delete the `Model` object from the back end.
+
+    Parameters
+    ----------
+    timeout : int, default=0
+        Sets a timeout in seconds.
+    """
+    Client(self.conn).delete_model(self.name, timeout)  # type: ignore
 

-valor.Model.evaluate_classification(datasets=None, filter_by=None, label_map=None, pr_curve_max_examples=1, metrics_to_return=None, allow_retries=False) +valor.Model.evaluate_classification(datasets, filter_by=None, label_map=None, pr_curve_max_examples=1, metrics_to_return=None, allow_retries=False)

Start a classification evaluation job.

@@ -2087,7 +2141,7 @@

-None +required
metricsmetrics_to_return +Optional[List[MetricType]]
@@ -2128,7 +2183,7 @@

-required +None
Source code in valor/coretypes.py -
@@ -2399,6 +2470,20 @@

+ + + + + +
869
-870
-871
-872
-873
-874
-875
-876
-877
-878
-879
+
879
 880
 881
 882
@@ -2224,68 +2269,94 @@ 

920 921 922 -923

def evaluate_classification(
-    self,
-    datasets: Optional[Union[Dataset, List[Dataset]]] = None,
-    filter_by: Optional[FilterType] = None,
-    label_map: Optional[Dict[Label, Label]] = None,
-    pr_curve_max_examples: int = 1,
-    metrics_to_return: Optional[List[str]] = None,
-    allow_retries: bool = False,
-) -> Evaluation:
-    """
-    Start a classification evaluation job.
-
-    Parameters
-    ----------
-    datasets : Union[Dataset, List[Dataset]], optional
-        The dataset or list of datasets to evaluate against.
-    filter_by : FilterType, optional
-        Optional set of constraints to filter evaluation by.
-    label_map : Dict[Label, Label], optional
-        Optional mapping of individual labels to a grouper label. Useful when you need to evaluate performance using labels that differ across datasets and models.
-    metrics: List[str], optional
-        The list of metrics to compute, store, and return to the user.
-    allow_retries : bool, default = False
-        Option to retry previously failed evaluations.
-
-    Returns
-    -------
-    Evaluation
-        A job object that can be used to track the status of the job and get the metrics of it upon completion.
-    """
-    if not datasets and not filter_by:
-        raise ValueError(
-            "Evaluation requires the definition of either datasets, dataset filters or both."
-        )
+923
+924
+925
+926
+927
+928
+929
+930
+931
+932
+933
+934
+935
+936
+937
+938
+939
+940
+941
def evaluate_classification(
+    self,
+    datasets: Union[Dataset, List[Dataset]],
+    filter_by: Optional[FilterType] = None,
+    label_map: Optional[Dict[Label, Label]] = None,
+    pr_curve_max_examples: int = 1,
+    metrics_to_return: Optional[List[MetricType]] = None,
+    allow_retries: bool = False,
+) -> Evaluation:
+    """
+    Start a classification evaluation job.
+
+    Parameters
+    ----------
+    datasets : Union[Dataset, List[Dataset]]
+        The dataset or list of datasets to evaluate against.
+    filter_by : FilterType, optional
+        Optional set of constraints to filter evaluation by.
+    label_map : Dict[Label, Label], optional
+        Optional mapping of individual labels to a grouper label. Useful when you need to evaluate performance using labels that differ across datasets and models.
+    metrics_to_return: List[MetricType], optional
+        The list of metrics to compute, store, and return to the user.
+    allow_retries : bool, default = False
+        Option to retry previously failed evaluations.
 
-    # format request
-    datum_filter = self._format_constraints(datasets, filter_by)
-    request = EvaluationRequest(
-        model_names=[self.name],  # type: ignore
-        datum_filter=datum_filter,
-        parameters=EvaluationParameters(
-            task_type=TaskType.CLASSIFICATION,
-            label_map=self._create_label_map(label_map=label_map),
-            pr_curve_max_examples=pr_curve_max_examples,
-            metrics_to_return=metrics_to_return,
-        ),
-    )
-
-    # create evaluation
-    evaluation = Client(self.conn).evaluate(
-        request, allow_retries=allow_retries
-    )
-    if len(evaluation) != 1:
-        raise RuntimeError
-    return evaluation[0]
+    Returns
+    -------
+    Evaluation
+        A job object that can be used to track the status of the job and get the metrics of it upon completion.
+    """
+    if not datasets and not filter_by:
+        raise ValueError(
+            "Evaluation requires the definition of either datasets, dataset filters or both."
+        )
+    elif metrics_to_return and not set(metrics_to_return).issubset(
+        MetricType.classification()
+    ):
+        raise ValueError(
+            f"The following metrics are not supported for classification: '{set(metrics_to_return) - MetricType.classification()}'"
+        )
+
+    # format request
+    filters = self._format_constraints(datasets, filter_by)
+    datasets = datasets if isinstance(datasets, list) else [datasets]
+    request = EvaluationRequest(
+        dataset_names=[dataset.name for dataset in datasets],  # type: ignore - issue #604
+        model_names=[self.name],  # type: ignore - issue #604
+        filters=filters,
+        parameters=EvaluationParameters(
+            task_type=TaskType.CLASSIFICATION,
+            label_map=self._create_label_map(label_map=label_map),
+            pr_curve_max_examples=pr_curve_max_examples,
+            metrics_to_return=metrics_to_return,
+        ),
+    )
+
+    # create evaluation
+    evaluation = Client(self.conn).evaluate(
+        request, allow_retries=allow_retries
+    )
+    if len(evaluation) != 1:
+        raise RuntimeError
+    return evaluation[0]
 

-valor.Model.evaluate_detection(datasets=None, filter_by=None, convert_annotations_to_type=None, iou_thresholds_to_compute=None, iou_thresholds_to_return=None, label_map=None, recall_score_threshold=0, metrics_to_return=None, pr_curve_iou_threshold=0.5, pr_curve_max_examples=1, allow_retries=False) +valor.Model.evaluate_detection(datasets, filter_by=None, convert_annotations_to_type=None, iou_thresholds_to_compute=None, iou_thresholds_to_return=None, label_map=None, recall_score_threshold=0, metrics_to_return=None, pr_curve_iou_threshold=0.5, pr_curve_max_examples=1, allow_retries=False)

Start an object-detection evaluation job.

@@ -2311,7 +2382,7 @@

-None +required
metrics_to_return +Optional[List[MetricType]] + +
+

The list of metrics to compute, store, and return to the user.

+
+
+None +
pr_curve_iou_threshold float @@ -2465,25 +2550,7 @@

Source code in valor/coretypes.py -
@@ -2686,8 +2791,9 @@

- + @@ -2737,35 +2843,7 @@

 925
- 926
- 927
- 928
- 929
- 930
- 931
- 932
- 933
- 934
- 935
- 936
- 937
- 938
- 939
- 940
- 941
- 942
- 943
+
 943
  944
  945
  946
@@ -2543,92 +2610,130 @@ 

1000 1001 1002 -1003

def evaluate_detection(
-    self,
-    datasets: Optional[Union[Dataset, List[Dataset]]] = None,
-    filter_by: Optional[FilterType] = None,
-    convert_annotations_to_type: Optional[AnnotationType] = None,
-    iou_thresholds_to_compute: Optional[List[float]] = None,
-    iou_thresholds_to_return: Optional[List[float]] = None,
-    label_map: Optional[Dict[Label, Label]] = None,
-    recall_score_threshold: float = 0,
-    metrics_to_return: Optional[List[str]] = None,
-    pr_curve_iou_threshold: float = 0.5,
-    pr_curve_max_examples: int = 1,
-    allow_retries: bool = False,
-) -> Evaluation:
-    """
-    Start an object-detection evaluation job.
-
-    Parameters
-    ----------
-    datasets : Union[Dataset, List[Dataset]], optional
-        The dataset or list of datasets to evaluate against.
-    filter_by : FilterType, optional
-        Optional set of constraints to filter evaluation by.
-    convert_annotations_to_type : enums.AnnotationType, optional
-        Forces the object detection evaluation to compute over this type.
-    iou_thresholds_to_compute : List[float], optional
-        Thresholds to compute mAP against.
-    iou_thresholds_to_return : List[float], optional
-        Thresholds to return AP for. Must be subset of `iou_thresholds_to_compute`.
-    label_map : Dict[Label, Label], optional
-        Optional mapping of individual labels to a grouper label. Useful when you need to evaluate performance using labels that differ across datasets and models.
-    recall_score_threshold: float, default=0
-        The confidence score threshold for use when determining whether to count a prediction as a true positive or not while calculating Average Recall.
-    pr_curve_iou_threshold: float, optional
-        The IOU threshold to use when calculating precision-recall curves. Defaults to 0.5.
-    pr_curve_max_examples: int, optional
-        The maximum number of datum examples to store when calculating PR curves.
-    allow_retries : bool, default = False
-        Option to retry previously failed evaluations.
-
-
-    Returns
-    -------
-    Evaluation
-        A job object that can be used to track the status of the job and get the metrics of it upon completion.
-    """
-    if iou_thresholds_to_compute is None:
-        iou_thresholds_to_compute = [
-            round(0.5 + 0.05 * i, 2) for i in range(10)
-        ]
-    if iou_thresholds_to_return is None:
-        iou_thresholds_to_return = [0.5, 0.75]
-
-    # format request
-    parameters = EvaluationParameters(
-        task_type=TaskType.OBJECT_DETECTION,
-        convert_annotations_to_type=convert_annotations_to_type,
-        iou_thresholds_to_compute=iou_thresholds_to_compute,
-        iou_thresholds_to_return=iou_thresholds_to_return,
-        label_map=self._create_label_map(label_map=label_map),
-        recall_score_threshold=recall_score_threshold,
-        metrics_to_return=metrics_to_return,
-        pr_curve_iou_threshold=pr_curve_iou_threshold,
-        pr_curve_max_examples=pr_curve_max_examples,
-    )
-    datum_filter = self._format_constraints(datasets, filter_by)
-    request = EvaluationRequest(
-        model_names=[self.name],  # type: ignore
-        datum_filter=datum_filter,
-        parameters=parameters,
-    )
+1003
+1004
+1005
+1006
+1007
+1008
+1009
+1010
+1011
+1012
+1013
+1014
+1015
+1016
+1017
+1018
+1019
+1020
+1021
+1022
+1023
+1024
+1025
+1026
+1027
+1028
+1029
+1030
+1031
def evaluate_detection(
+    self,
+    datasets: Union[Dataset, List[Dataset]],
+    filter_by: Optional[FilterType] = None,
+    convert_annotations_to_type: Optional[AnnotationType] = None,
+    iou_thresholds_to_compute: Optional[List[float]] = None,
+    iou_thresholds_to_return: Optional[List[float]] = None,
+    label_map: Optional[Dict[Label, Label]] = None,
+    recall_score_threshold: float = 0,
+    metrics_to_return: Optional[List[MetricType]] = None,
+    pr_curve_iou_threshold: float = 0.5,
+    pr_curve_max_examples: int = 1,
+    allow_retries: bool = False,
+) -> Evaluation:
+    """
+    Start an object-detection evaluation job.
+
+    Parameters
+    ----------
+    datasets : Union[Dataset, List[Dataset]]
+        The dataset or list of datasets to evaluate against.
+    filter_by : FilterType, optional
+        Optional set of constraints to filter evaluation by.
+    convert_annotations_to_type : enums.AnnotationType, optional
+        Forces the object detection evaluation to compute over this type.
+    iou_thresholds_to_compute : List[float], optional
+        Thresholds to compute mAP against.
+    iou_thresholds_to_return : List[float], optional
+        Thresholds to return AP for. Must be subset of `iou_thresholds_to_compute`.
+    label_map : Dict[Label, Label], optional
+        Optional mapping of individual labels to a grouper label. Useful when you need to evaluate performance using labels that differ across datasets and models.
+    recall_score_threshold: float, default=0
+        The confidence score threshold for use when determining whether to count a prediction as a true positive or not while calculating Average Recall.
+    metrics_to_return: List[MetricType], optional
+        The list of metrics to compute, store, and return to the user.
+    pr_curve_iou_threshold: float, optional
+        The IOU threshold to use when calculating precision-recall curves. Defaults to 0.5.
+    pr_curve_max_examples: int, optional
+        The maximum number of datum examples to store when calculating PR curves.
+    allow_retries : bool, default = False
+        Option to retry previously failed evaluations.
+
+    Returns
+    -------
+    Evaluation
+        A job object that can be used to track the status of the job and get the metrics of it upon completion.
+    """
+    if metrics_to_return and not set(metrics_to_return).issubset(
+        MetricType.object_detection()
+    ):
+        raise ValueError(
+            f"The following metrics are not supported for object detection: '{set(metrics_to_return) - MetricType.object_detection()}'"
+        )
 
-    # create evaluation
-    evaluation = Client(self.conn).evaluate(
-        request, allow_retries=allow_retries
-    )
-    if len(evaluation) != 1:
-        raise RuntimeError
-    return evaluation[0]
+    if iou_thresholds_to_compute is None:
+        iou_thresholds_to_compute = [
+            round(0.5 + 0.05 * i, 2) for i in range(10)
+        ]
+    if iou_thresholds_to_return is None:
+        iou_thresholds_to_return = [0.5, 0.75]
+
+    # format request
+    parameters = EvaluationParameters(
+        task_type=TaskType.OBJECT_DETECTION,
+        convert_annotations_to_type=convert_annotations_to_type,
+        iou_thresholds_to_compute=iou_thresholds_to_compute,
+        iou_thresholds_to_return=iou_thresholds_to_return,
+        label_map=self._create_label_map(label_map=label_map),
+        recall_score_threshold=recall_score_threshold,
+        metrics_to_return=metrics_to_return,
+        pr_curve_iou_threshold=pr_curve_iou_threshold,
+        pr_curve_max_examples=pr_curve_max_examples,
+    )
+    filters = self._format_constraints(datasets, filter_by)
+    datasets = datasets if isinstance(datasets, list) else [datasets]
+    request = EvaluationRequest(
+        dataset_names=[dataset.name for dataset in datasets],  # type: ignore - issue #604
+        model_names=[self.name],  # type: ignore - issue #604
+        filters=filters,
+        parameters=parameters,
+    )
+
+    # create evaluation
+    evaluation = Client(self.conn).evaluate(
+        request, allow_retries=allow_retries
+    )
+    if len(evaluation) != 1:
+        raise RuntimeError
+    return evaluation[0]
 

-valor.Model.evaluate_segmentation(datasets=None, filter_by=None, label_map=None, metrics_to_return=None, allow_retries=False) +valor.Model.evaluate_segmentation(datasets, filter_by=None, label_map=None, metrics_to_return=None, allow_retries=False)

Start a semantic-segmentation evaluation job.

@@ -2654,7 +2759,7 @@

-None +required
metricsmetrics_to_return +Optional[List[MetricType]]
@@ -2695,7 +2801,7 @@

-required +None
Source code in valor/coretypes.py -
1005
-1006
-1007
-1008
-1009
-1010
-1011
-1012
-1013
-1014
-1015
-1016
-1017
-1018
-1019
-1020
-1021
-1022
-1023
-1024
-1025
-1026
-1027
-1028
-1029
-1030
-1031
-1032
-1033
+
1033
 1034
 1035
 1036
@@ -2784,54 +2862,100 @@ 

1049 1050 1051 -1052

def evaluate_segmentation(
-    self,
-    datasets: Optional[Union[Dataset, List[Dataset]]] = None,
-    filter_by: Optional[FilterType] = None,
-    label_map: Optional[Dict[Label, Label]] = None,
-    metrics_to_return: Optional[List[str]] = None,
-    allow_retries: bool = False,
-) -> Evaluation:
-    """
-    Start a semantic-segmentation evaluation job.
-
-    Parameters
-    ----------
-    datasets : Union[Dataset, List[Dataset]], optional
-        The dataset or list of datasets to evaluate against.
-    filter_by : FilterType, optional
-        Optional set of constraints to filter evaluation by.
-    label_map : Dict[Label, Label], optional
-        Optional mapping of individual labels to a grouper label. Useful when you need to evaluate performance using labels that differ across datasets and models.
-    metrics: List[str], optional
-        The list of metrics to compute, store, and return to the user.
-    allow_retries : bool, default = False
-        Option to retry previously failed evaluations.
-
-    Returns
-    -------
-    Evaluation
-        A job object that can be used to track the status of the job and get the metrics of it upon completion
-    """
-    # format request
-    datum_filter = self._format_constraints(datasets, filter_by)
-    request = EvaluationRequest(
-        model_names=[self.name],  # type: ignore
-        datum_filter=datum_filter,
-        parameters=EvaluationParameters(
-            task_type=TaskType.SEMANTIC_SEGMENTATION,
-            label_map=self._create_label_map(label_map=label_map),
-            metrics_to_return=metrics_to_return,
-        ),
-    )
-
-    # create evaluation
-    evaluation = Client(self.conn).evaluate(
-        request, allow_retries=allow_retries
-    )
-    if len(evaluation) != 1:
-        raise RuntimeError
-    return evaluation[0]
+1052
+1053
+1054
+1055
+1056
+1057
+1058
+1059
+1060
+1061
+1062
+1063
+1064
+1065
+1066
+1067
+1068
+1069
+1070
+1071
+1072
+1073
+1074
+1075
+1076
+1077
+1078
+1079
+1080
+1081
+1082
+1083
+1084
+1085
+1086
+1087
+1088
+1089
def evaluate_segmentation(
+    self,
+    datasets: Union[Dataset, List[Dataset]],
+    filter_by: Optional[FilterType] = None,
+    label_map: Optional[Dict[Label, Label]] = None,
+    metrics_to_return: Optional[List[MetricType]] = None,
+    allow_retries: bool = False,
+) -> Evaluation:
+    """
+    Start a semantic-segmentation evaluation job.
+
+    Parameters
+    ----------
+    datasets : Union[Dataset, List[Dataset]]
+        The dataset or list of datasets to evaluate against.
+    filter_by : FilterType, optional
+        Optional set of constraints to filter evaluation by.
+    label_map : Dict[Label, Label], optional
+        Optional mapping of individual labels to a grouper label. Useful when you need to evaluate performance using labels that differ across datasets and models.
+    metrics_to_return: List[MetricType], optional
+        The list of metrics to compute, store, and return to the user.
+    allow_retries : bool, default = False
+        Option to retry previously failed evaluations.
+
+    Returns
+    -------
+    Evaluation
+        A job object that can be used to track the status of the job and get the metrics of it upon completion.
+    """
+    if metrics_to_return and not set(metrics_to_return).issubset(
+        MetricType.semantic_segmentation()
+    ):
+        raise ValueError(
+            f"The following metrics are not supported for semantic segmentation: '{set(metrics_to_return) - MetricType.semantic_segmentation()}'"
+        )
+
+    # format request
+    filters = self._format_constraints(datasets, filter_by)
+    datasets = datasets if isinstance(datasets, list) else [datasets]
+    request = EvaluationRequest(
+        dataset_names=[dataset.name for dataset in datasets],  # type: ignore - issue #604
+        model_names=[self.name],  # type: ignore - issue #604
+        filters=filters,
+        parameters=EvaluationParameters(
+            task_type=TaskType.SEMANTIC_SEGMENTATION,
+            label_map=self._create_label_map(label_map=label_map),
+            metrics_to_return=metrics_to_return,
+        ),
+    )
+
+    # create evaluation
+    evaluation = Client(self.conn).evaluate(
+        request, allow_retries=allow_retries
+    )
+    if len(evaluation) != 1:
+        raise RuntimeError
+    return evaluation[0]
 
@@ -2844,19 +2968,19 @@

Finalizes the model over a dataset such that new predictions cannot be added to it.

Source code in valor/coretypes.py -
def finalize_inferences(self, dataset: Union[Dataset, str]) -> None:
-    """
-    Finalizes the model over a dataset such that new predictions cannot be added to it.
-    """
-    return Client(self.conn).finalize_inferences(
-        dataset=dataset, model=self
-    )
+
def finalize_inferences(self, dataset: Union[Dataset, str]) -> None:
+    """
+    Finalizes the model over a dataset such that new predictions cannot be added to it.
+    """
+    return Client(self.conn).finalize_inferences(
+        dataset=dataset, model=self
+    )
 
@@ -2934,17 +3058,7 @@

Source code in valor/coretypes.py -
712
-713
-714
-715
-716
-717
-718
-719
-720
-721
-722
+
722
 723
 724
 725
@@ -2955,28 +3069,38 @@ 

730 731 732 -733

@classmethod
-def get(
-    cls,
-    name: str,
-    connection: Optional[ClientConnection] = None,
-) -> Union[Model, None]:
-    """
-    Retrieves a model from the back end database.
-
-    Parameters
-    ----------
-    name : str
-        The name of the model.
-    connection : ClientConnnetion, optional
-        An optional Valor client object for interacting with the API.
-
-    Returns
-    -------
-    Union[valor.Model, None]
-        The model or 'None' if it doesn't exist.
-    """
-    return Client(connection).get_model(name)
+733
+734
+735
+736
+737
+738
+739
+740
+741
+742
+743
@classmethod
+def get(
+    cls,
+    name: str,
+    connection: Optional[ClientConnection] = None,
+) -> Union[Model, None]:
+    """
+    Retrieves a model from the back end database.
+
+    Parameters
+    ----------
+    name : str
+        The name of the model.
+    connection : ClientConnection, optional
+        An optional Valor client object for interacting with the API.
+
+    Returns
+    -------
+    Union[valor.Model, None]
+        The model or 'None' if it doesn't exist.
+    """
+    return Client(connection).get_model(name)
 
@@ -3037,51 +3161,51 @@

Source code in valor/coretypes.py -
def get_evaluations(
-    self,
-    metrics_to_sort_by: Optional[
-        Dict[str, Union[Dict[str, str], str]]
-    ] = None,
-) -> List[Evaluation]:
-    """
-    Get all evaluations associated with a given model.
-
-    Parameters
-    ----------
-    metrics_to_sort_by: dict[str, str | dict[str, str]], optional
-        An optional dict of metric types to sort the evaluations by.
-
-
-    Returns
-    ----------
-    List[Evaluation]
-        A list of `Evaluations` associated with the model.
-    """
-    return Client(self.conn).get_evaluations(
-        models=[self], metrics_to_sort_by=metrics_to_sort_by
-    )
+
def get_evaluations(
+    self,
+    metrics_to_sort_by: Optional[
+        Dict[str, Union[Dict[str, str], str]]
+    ] = None,
+) -> List[Evaluation]:
+    """
+    Get all evaluations associated with a given model.
+
+    Parameters
+    ----------
+    metrics_to_sort_by: dict[str, str | dict[str, str]], optional
+        An optional dict of metric types to sort the evaluations by.
+
+
+    Returns
+    ----------
+    List[Evaluation]
+        A list of `Evaluations` associated with the model.
+    """
+    return Client(self.conn).get_evaluations(
+        models=[self], metrics_to_sort_by=metrics_to_sort_by
+    )
 
@@ -3115,29 +3239,29 @@

Source code in valor/coretypes.py -
def get_labels(
-    self,
-) -> List[Label]:
-    """
-    Get all labels associated with a given model.
-
-    Returns
-    ----------
-    List[Label]
-        A list of `Labels` associated with the model.
-    """
-    return Client(self.conn).get_labels_from_model(self)
+
def get_labels(
+    self,
+) -> List[Label]:
+    """
+    Get all labels associated with a given model.
+
+    Returns
+    ----------
+    List[Label]
+        A list of `Labels` associated with the model.
+    """
+    return Client(self.conn).get_labels_from_model(self)
 
@@ -3212,17 +3336,7 @@

Source code in valor/coretypes.py -
777
-778
-779
-780
-781
-782
-783
-784
-785
-786
-787
+
787
 788
 789
 790
@@ -3232,27 +3346,37 @@ 

794 795 796 -797

def get_prediction(
-    self, dataset: Union[Dataset, str], datum: Union[Datum, str]
-) -> Union[Prediction, None]:
-    """
-    Get a particular prediction.
-
-    Parameters
-    ----------
-    dataset: Union[Dataset, str]
-        The dataset the datum belongs to.
-    datum: Union[Datum, str]
-        The desired datum.
-
-    Returns
-    ----------
-    Union[Prediction, None]
-        The matching prediction or 'None' if it doesn't exist.
-    """
-    return Client(self.conn).get_prediction(
-        dataset=dataset, model=self, datum=datum
-    )
+797
+798
+799
+800
+801
+802
+803
+804
+805
+806
+807
def get_prediction(
+    self, dataset: Union[Dataset, str], datum: Union[Datum, str]
+) -> Union[Prediction, None]:
+    """
+    Get a particular prediction.
+
+    Parameters
+    ----------
+    dataset: Union[Dataset, str]
+        The dataset the datum belongs to.
+    datum: Union[Datum, str]
+        The desired datum.
+
+    Returns
+    ----------
+    Union[Prediction, None]
+        The matching prediction or 'None' if it doesn't exist.
+    """
+    return Client(self.conn).get_prediction(
+        dataset=dataset, model=self, datum=datum
+    )
 
diff --git a/client_api/Prediction/index.html b/client_api/Prediction/index.html index 3b56b2927..e6986dadc 100644 --- a/client_api/Prediction/index.html +++ b/client_api/Prediction/index.html @@ -510,14 +510,7 @@

Prediction

Source code in valor/coretypes.py -
110
-111
-112
-113
-114
-115
-116
-117
+
117
 118
 119
 120
@@ -557,54 +550,61 @@ 

Prediction

154 155 156 -157
class Prediction(StaticCollection):
-    """
-    An object describing a prediction (e.g., a machine-drawn bounding box on an image).
-
-    Attributes
-    ----------
-    datum : Datum
-        The datum associated with the prediction.
-    annotations : List[Annotation]
-        The list of annotations associated with the prediction.
+157
+158
+159
+160
+161
+162
+163
+164
class Prediction(StaticCollection):
+    """
+    An object describing a prediction (e.g., a machine-drawn bounding box on an image).
 
-    Examples
-    --------
-    >>> Prediction(
-    ...     datum=Datum(uid="uid1"),
-    ...     annotations=[
-    ...         Annotation(
-    ...             labels=[
-    ...                 Label(key="k1", value="v1", score=0.9),
-    ...                 Label(key="k1", value="v1", score=0.1)
-    ...             ],
-    ...         )
-    ...     ]
-    ... )
-    """
-
-    datum: Datum = Datum.symbolic(owner="prediction", name="datum")
-    annotations: SymbolicList[Annotation] = SymbolicList[Annotation].symbolic(
-        owner="prediction", name="annotations"
-    )
-
-    def __init__(
-        self,
-        *,
-        datum: Datum,
-        annotations: List[Annotation],
-    ):
-        """
-        Creates a prediction.
-
-        Parameters
-        ----------
-        datum : Datum
-            The datum that the prediction is operating over.
-        annotations : List[Annotation]
-            The list of predicted annotations.
-        """
-        super().__init__(datum=datum, annotations=annotations)
+    Attributes
+    ----------
+    datum : Datum
+        The datum associated with the prediction.
+    annotations : List[Annotation]
+        The list of annotations associated with the prediction.
+
+    Examples
+    --------
+    >>> Prediction(
+    ...     datum=Datum(uid="uid1"),
+    ...     annotations=[
+    ...         Annotation(
+    ...             labels=[
+    ...                 Label(key="k1", value="v1", score=0.9),
+    ...                 Label(key="k1", value="v1", score=0.1)
+    ...             ],
+    ...         )
+    ...     ]
+    ... )
+    """
+
+    datum: Datum = Datum.symbolic(owner="prediction", name="datum")
+    annotations: SymbolicList[Annotation] = SymbolicList[Annotation].symbolic(
+        owner="prediction", name="annotations"
+    )
+
+    def __init__(
+        self,
+        *,
+        datum: Datum,
+        annotations: List[Annotation],
+    ):
+        """
+        Creates a prediction.
+
+        Parameters
+        ----------
+        datum : Datum
+            The datum that the prediction is operating over.
+        annotations : List[Annotation]
+            The list of predicted annotations.
+        """
+        super().__init__(datum=datum, annotations=annotations)
 
@@ -658,14 +658,7 @@

Source code in valor/coretypes.py -
+ + + + + - + @@ -757,7 +768,10 @@

69 70 71 -72

141
-142
-143
-144
-145
-146
-147
-148
+
148
 149
 150
 151
@@ -674,23 +667,30 @@ 

154 155 156 -157

def __init__(
-    self,
-    *,
-    datum: Datum,
-    annotations: List[Annotation],
-):
-    """
-    Creates a prediction.
-
-    Parameters
-    ----------
-    datum : Datum
-        The datum that the prediction is operating over.
-    annotations : List[Annotation]
-        The list of predicted annotations.
-    """
-    super().__init__(datum=datum, annotations=annotations)
+157
+158
+159
+160
+161
+162
+163
+164
def __init__(
+    self,
+    *,
+    datum: Datum,
+    annotations: List[Annotation],
+):
+    """
+    Creates a prediction.
+
+    Parameters
+    ----------
+    datum : Datum
+        The datum that the prediction is operating over.
+    annotations : List[Annotation]
+        The list of predicted annotations.
+    """
+    super().__init__(datum=datum, annotations=annotations)
 
diff --git a/client_api/Schemas/Evaluation/EvaluationParameters/index.html b/client_api/Schemas/Evaluation/EvaluationParameters/index.html index a89a176fd..2ef70574c 100644 --- a/client_api/Schemas/Evaluation/EvaluationParameters/index.html +++ b/client_api/Schemas/Evaluation/EvaluationParameters/index.html @@ -661,7 +661,7 @@

task_type: TaskType label_map: Optional[List[List[List[str]]]] = None - metrics_to_return: Optional[List[str]] = None + metrics_to_return: Optional[List[MetricType]] = None convert_annotations_to_type: Optional[AnnotationType] = None iou_thresholds_to_compute: Optional[List[float]] = None @@ -696,6 +696,17 @@

dataset_names +List[str] + +
+

The list of datasets we want to evaluate by name.

+
+
model_names List[str] @@ -707,7 +718,7 @@

datum_filterfilters Filter
@dataclass
+72
+73
+74
+75
@dataclass
 class EvaluationRequest:
     """
     An evaluation request.
@@ -766,23 +780,26 @@ 

Attributes ---------- - model_names : List[str] - The list of models we want to evaluate by name. - datum_filter : schemas.Filter - The filter object used to define what the model(s) is evaluating against. - parameters : EvaluationParameters - Any parameters that are used to modify an evaluation method. - """ - - model_names: Union[str, List[str]] - datum_filter: Filter - parameters: EvaluationParameters - - def __post_init__(self): - if isinstance(self.datum_filter, dict): - self.datum_filter = Filter(**self.datum_filter) - if isinstance(self.parameters, dict): - self.parameters = EvaluationParameters(**self.parameters) + dataset_names : List[str] + The list of datasets we want to evaluate by name. + model_names : List[str] + The list of models we want to evaluate by name. + filters : schemas.Filter + The filter object used to define what the model(s) is evaluating against. + parameters : EvaluationParameters + Any parameters that are used to modify an evaluation method. + """ + + dataset_names: Union[str, List[str]] + model_names: Union[str, List[str]] + parameters: EvaluationParameters + filters: Optional[Filter] = field(default=None) + + def __post_init__(self): + if isinstance(self.filters, dict): + self.filters = Filter(**self.filters) + if isinstance(self.parameters, dict): + self.parameters = EvaluationParameters(**self.parameters)

diff --git a/search/search_index.json b/search/search_index.json index c048d2ec2..999e429f8 100644 --- a/search/search_index.json +++ b/search/search_index.json @@ -1 +1 @@ -{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Introduction","text":"

Valor is a centralized evaluation store that makes it easy to measure, explore, and rank model performance. Valor empowers data scientists and engineers to evaluate the performance of their machine learning pipelines and use those evaluations to make better modeling decisions in the future. To skip this textual introduction and dive right in, first go here for instructions to set up the Valor service, and then check out the sample notebooks.

Valor is maintained by Striveworks, a cutting-edge machine learning operations (MLOps) company based out of Austin, Texas. We'd love to learn more about your interest in Valor and answer any questions you may have; please don't hesitate to reach out to us on Slack or GitHub.

These docs are organized as follows:

  • Overview (this page): Provides an overview of what Valor is, why it's important, and how it works.
  • Installation: Explains how to install Valor.
  • Getting Started Notebook: Details everything you need to get up and running with Valor.
  • All Sample Notebooks: Collection of descriptive Jupyter notebooks giving examples of how to evaluate model performance using Valor.
  • Metadata and Filtering: Describes Valor's robust support for adding metadata to data, along with how to filter evaluations and Valor objects based on metadata and other attributes.
  • Metrics: Describes all of the metrics that you can calculate using Valor.
  • Endpoints: Documents Valor's various API endpoints.
  • Technical Concepts: Describes the technical concepts that underpin Valor.
  • Contributing and Development: Explains how you can build on and contribute to Valor.
  • Python Client API: Shares reference documentation for our Python client.
"},{"location":"#overview","title":"Overview","text":"

In this section, we'll explore what Valor is and why it's important, and we'll give a high-level description of how it works. This overview is also available in the following five-minute video:

"},{"location":"#use-cases-for-a-containerized-evaluation-store","title":"Use Cases for a Containerized Evaluation Store","text":"

As we've worked with dozens of data scientists and engineers on their MLOps pipelines, we have identified three important questions that an effective evaluation store could help them answer. First, they wanted to understand: "Of the various models I tested for a given dataset, which one performs best?" This is a very common and important use case, and one that is often solved on a model-to-model basis in a local Jupyter notebook. This focus on bespoke implementations limits traceability and makes it difficult to create apples-to-apples comparisons between new model runs and prior model runs.

Second, our users wanted to understand: "How does the performance of a particular model vary across datasets?" We found that many practitioners use the same computer vision model (e.g., YOLOv8) for a variety of supervised learning tasks, and they needed a way to identify patterns where that particular model didn't meet expectations.

Finally, our users wanted to understand: "How can I use my prior evaluations to pick the best model for a future ML pipeline?" This last question requires the ability to filter previous evaluations on granular metadata (e.g., time of day, geospatial coordinates, etc.) in order to provide tailored recommendations regarding which model to pick in the future.

With these three use cases in mind, we set out to build a centralized evaluation store that we later named Valor.

"},{"location":"#introducing-valor","title":"Introducing Valor","text":"

Valor is a centralized evaluation store that makes it easy to measure, explore, and rank model performance. Our ultimate goal with Valor is to help data scientists and engineers pick the right ML model for their specific needs. To that end, we built Valor with three design principles in mind:

  • Valor works with any dataset or model: We believe Valor should be able to handle any supervised learning task that you want to throw at it. Just pass in your ground truth annotations and predictions, describe your learning task (i.e., object detection), and Valor will do the rest. (Note: At launch, Valor will only support classification and computer vision (i.e., image segmentation and object detection) tasks. We're confident this framework will abstract well to other supervised learning tasks and plan to support them in later releases).
  • Valor can handle any type of image, model, or dataset metadata you throw at it: Metadata is a critical component of any evaluation store as it enables the system to offer tailored model recommendations based on a user's specific needs. To that end, we built Valor to handle any metadata under the sun. Dates, geospatial coordinates, and even JSONs filled with configuration details are all on the table. This means you can slice and dice your evaluations any way you want: just pass in the right labels for your use case and define your filter (say a geographic bounding box), and you'll get back results for your specific needs.
  • Valor standardizes the evaluation process: The trickiest part of comparing two different model runs is avoiding apples-to-oranges comparisons. Valor helps you audit your metrics and avoid false comparisons by versioning your uploads, storing them in a centralized location, and ensuring that you only compare runs that used the exact same filters and metrics.
"},{"location":"#how-it-works-an-illustrative-example","title":"How It Works: An Illustrative Example","text":"

Let's walk through a quick example to bring Valor to life.

Say that you're interested in using computer vision models to detect forest fires around the world using satellite imagery. You've just been tasked with building a new ML pipeline to detect fires in an unfamiliar region of interest. How might you leverage your evaluation metrics from prior ML pipelines to understand which model will perform best for this particular use case?

To answer this question, we'll start by passing in three pieces of information from each of our prior modeling runs:

  • GroundTruths: First, we'll pass in human-annotated bounding boxes to tell Valor exactly where forest fires can be found across all of the satellite images used in prior runs.
  • Predictions: Next, we'll pass machine-generated predictions for each image (also in the form of bounding boxes) so that Valor can evaluate how well each model did at predicting forest fires.
  • Labels: Finally, we'll pass metadata to Valor describing each of our various images (e.g., the time of day the photo was taken, the geospatial coordinates of the forest in the photo, etc.). We'll use this metadata later on in order to identify the right model for our new use case.

Once we pass in these three ingredients, Valor will compare all of our GroundTruths and Predictions in order to calculate various evaluation metrics (e.g., mean average precision, or mAP). These metrics, Labels, GroundTruths, and Predictions will all be stored in Postgres, with PostGIS support for fast geospatial lookups and geometric comparisons at a later date.
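
To make this concrete, here is a minimal sketch of that hand-off using the Python client documented on this page. add_prediction, finalize, finalize_inferences, and evaluate_detection appear in the API reference above; Dataset.create, add_groundtruth, the metadata keyword, and the label names are assumptions made for illustration, and bounding-box geometry is omitted for brevity.

from valor import (
    Annotation, Client, Dataset, Datum, GroundTruth, Label, Model, Prediction,
)

client = Client(...)

# GroundTruths: human-annotated forest-fire boxes (geometry omitted here).
dataset = Dataset.create("satellite-fires")  # assumed to mirror Model.create
dataset.add_groundtruth(                     # assumed helper on Dataset
    GroundTruth(
        datum=Datum(uid="img1", metadata={"time_of_day": "night"}),
        annotations=[Annotation(labels=[Label(key="class", value="fire")])],
    )
)
dataset.finalize()

# Predictions: machine-generated boxes from a prior modeling run.
model = Model.create("fire-detector-v1")
model.add_prediction(
    dataset,
    Prediction(
        datum=Datum(uid="img1", metadata={"time_of_day": "night"}),
        annotations=[
            Annotation(labels=[Label(key="class", value="fire", score=0.88)])
        ],
    ),
)
model.finalize_inferences(dataset)

# Evaluation: Valor compares ground truths against predictions to compute mAP.
evaluation = model.evaluate_detection(datasets=dataset)
# evaluation.metrics becomes available once the job completes.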

Finally, once all of our previous pipeline runs and evaluations are stored in Valor, we can use Valor's API to specify our exact filter criteria and get back its model rankings. In this case, we can ask Valor to find us the best model for detecting forest fires at night in a 50-mile radius around (42.36, -71.03), sorted by mAP. Valor will then filter all of our stored evaluation metrics, rank each model with evaluations that meet our criteria, and send back all relevant evaluation metrics to help us determine which model to use for our new modeling pipeline.
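
A sketch of that last step, assuming evaluations are retrieved through Client.get_evaluations (documented below). The geospatial filtering itself is not shown here, and the shape of metrics_to_sort_by and the metrics attribute are illustrative assumptions:

from valor import Client, Model

client = Client(...)
model = Model.get("fire-detector-v1")  # hypothetical model name

# Rank this model's stored evaluations by mAP; the dict shape is an assumed example.
evaluations = client.get_evaluations(
    models=[model],
    metrics_to_sort_by={"mAP": "class"},
)
for evaluation in evaluations:
    print(evaluation.metrics)  # metrics are available once each job completes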

"},{"location":"#next-steps","title":"Next Steps","text":"

We'd recommend reviewing our "Getting Started" sample notebook to become further acquainted with Valor. For more detailed explanations of Valor's technical underpinnings, see our technical concepts guide.

"},{"location":"#faq","title":"FAQ","text":"

Q. What is Valor?

A. Valor is a centralized evaluation store that makes it easy to measure, explore, and rank model performance. For an overview of what Valor is and why it's important, please refer to our high-level overview.

Q. What evaluation methods are supported?

A. Valor currently supports generic classification as well as object-detection and semantic-segmentation for images. The long-term goal for Valor is to support the most popular supervised learning methods.

Q. Does Valor store data?

A. Valor only stores ground truth annotations, model predictions, and user-defined metadata.

Q. What is a Datum?

A. A valor.Datum object is a generic type that represents a datum in the context of a machine learning workflow. The object stores a UID and related metadata in a dictionary. This metadata allows the user to construct their own abstraction layer by mapping a real-world type (e.g., an image) into a valor.Datum type.

from valor.metatypes import ImageMetadata
image = ImageMetadata.create(
  uid = "1234",
  height = 100,
  width = 100,
)

# access the datum
datum = image.datum

Q. What is a GroundTruth?

A. valor.GroundTruth objects in Valor each represent a singular datum and its associated annotations that provide a reference standard or the 'truth' against which predictions are compared. There cannot be multiple ground truths per datum.

Q. What is a Prediction?

A. valor.Prediction objects are similar to valor.GroundTruth objects in that they also contain a list of annotations over a datum. However, these annotations are generated by a model as inferences, and the object also includes the name of the model that was used for creating these inferences. There cannot be multiple predictions by the same model over a single datum.
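
As a side-by-side sketch: the Prediction pattern below is taken from the class docstring shown elsewhere in this reference, while the GroundTruth constructor is assumed to mirror it (labels on ground truths carry no scores).

from valor import Annotation, Datum, GroundTruth, Label, Prediction

datum = Datum(uid="uid1")

# The single ground truth for this datum; its labels carry no confidence scores.
groundtruth = GroundTruth(  # constructor assumed to mirror Prediction
    datum=datum,
    annotations=[Annotation(labels=[Label(key="k1", value="v1")])],
)

# This model's single prediction over the same datum; labels carry scores.
prediction = Prediction(
    datum=datum,
    annotations=[
        Annotation(
            labels=[
                Label(key="k1", value="v1", score=0.9),
                Label(key="k1", value="v2", score=0.1),
            ]
        )
    ],
)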

Q. Can Valor handle multiple data types?

A. Valor abstracts data types through metadata. An example of this can be seen in valor.metatypes.ImageMetadata which describes the mapping of an image to a valor.Datum.

Q. Does Valor support geospatial queries?

A. Valor follows the GeoJSON specification (RFC 7946) in its implementation of Point, MultiPoint, LineString, MultiLineString, Polygon, and MultiPolygon geometries. These objects are used to define annotations and facilitate the creation of geospatial metadata.
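
For example, an RFC 7946 Point is a small JSON object; the metadata key used to attach it to a Datum below is an assumption for illustration.

from valor import Datum

# A GeoJSON Point per RFC 7946: coordinates are [longitude, latitude].
geojson_point = {"type": "Point", "coordinates": [-71.03, 42.36]}

datum = Datum(uid="img1", metadata={"geospatial": geojson_point})  # key name illustrative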

"},{"location":"#troubleshooting","title":"Troubleshooting","text":"

Q. Why am I getting NotFinalizedError when trying to run an evaluation?

A. Valor requires both dataset and model representations to be finalized before evaluation can take place. Finalization is crucial for auditability as it ensures that data finalized at a certain date is immutable.

Dataset finalization is accomplished through the valor.Dataset.finalize member function.

from valor import Client, Dataset
client = Client(...)
dataset = Dataset(name="test_dataset")
...
dataset.finalize()

Models are finalized automatically given two conditions.

  1. The working dataset is finalized.
  2. There is a 1:1 mapping of predictions to ground truths.

Models and their predictions can also be finalized prematurely using the valor.Model.finalize_inferences member function. This will generate empty predictions with task type enums.TaskType.SKIP to achieve the 1:1 ground truth mapping.

from valor import Client, Dataset, Model
client = Client(...)
dataset = Dataset(name="test_dataset")
model = Model(name="test_model")
...
dataset.finalize()
model.finalize_inferences(dataset)

Q. Why am I getting GDAL driver errors?

A. For some computations (mostly involving rasters), Valor requires the PostGIS database to have all GDAL drivers enabled. The Valor back end attempts to enable these drivers, but it might not have permission depending on your specific setup. If you encounter this error, see here for ways to enable the drivers directly in the PostGIS instance.

"},{"location":"contributing/","title":"Contributing to Valor","text":"

We welcome all contributions, bug reports, bug fixes, documentation improvements, enhancements, and ideas aimed at improving Valor. This doc describes the high-level process for how to contribute to this repository. If you have any questions or comments about this process, please feel free to reach out to us on Slack.

"},{"location":"contributing/#on-github","title":"On GitHub","text":"

We use Git on GitHub to manage this repo, which means you will need to sign up for a free GitHub account to submit issues, ideas, and pull requests. Git's version control allows contributors from all over the world to work together on this project.

If you are new to Git, these official resources can help bring you up to speed:

  • GitHub documentation for forking a repo
  • GitHub documentation for collaborating with pull requests
  • GitHub documentation for working with forks
"},{"location":"contributing/#contribution-workflow","title":"Contribution Workflow","text":"

Generally, the high-level workflow for contributing to this repo includes:

  1. Submitting an issue or enhancement request using the appropriate template on GitHub Issues.
  2. Gathering feedback from devs and the broader community in your issue before starting to code.
  3. Forking the Valor repo, making your proposed changes, and submitting a pull request (PR). When submitting a PR, please be sure to:
    1. Update the README.md and/or any relevant docstrings with details of your change.
    2. Add tests where necessary.
    3. Run pre-commit install on your local repo before your last commit to ensure your changes follow our formatting guidelines.
    4. Double-check that your code passes all of the tests that are automated via GitHub Actions.
    5. Ping us on Slack to ensure timely review.
  4. Working with repo maintainers to review and improve your PR before it is merged into the official repo.

For questions or comments on this process, please reach out to us at any time on Slack.

"},{"location":"contributing/#development-tips-and-tricks","title":"Development Tips and Tricks","text":""},{"location":"contributing/#deploying-the-back-end-for-development","title":"Deploying the Back End for Development","text":""},{"location":"contributing/#docker-compose","title":"Docker Compose","text":"

The fastest way to test the API and Python client is via Docker Compose. Start by setting the environment variable POSTGRES_PASSWORD to your liking, and then start Docker and build the container:

export POSTGRES_PASSWORD="my_password"
docker compose up
"},{"location":"contributing/#makefile-requires-docker","title":"Makefile (requires Docker)","text":"

Alternatively, you may want to run the API service from a terminal to enable faster debugging. To start the service, you can run:

pip install api # Install the API in your python environment

export POSTGRES_PASSWORD=password
export POSTGRES_HOST=localhost
make start-postgres-docker # Start the custom postgres service in Docker
make run-migrations # Instantiate the table schemas in Postgres
make start-server # Start the API service locally
"},{"location":"contributing/#setting-up-your-environment","title":"Setting Up Your Environment","text":"

Creating a Valor-specific Python environment at the start of development can help you avoid dependency and versioning issues later on. To start, we'd recommend activating a new Python environment:

# venv
python3 -m venv .env-valor
source .env-valor/bin/activate

# conda
conda create --name valor python=3.11
conda activate valor

Next, install pre-commit to ensure formatting consistency throughout your repo:

pip install pre-commit\npre-commit install\n

Finally, you're ready to install your client and API modules:

# Install the Client module\npython -m pip install -e client/.\n\n# Install the API module\npython -m pip install -e api/.\n
"},{"location":"contributing/#use-pgadmin-to-debug-postgis","title":"Use pgAdmin to Debug PostGIS","text":"

You can use the pgAdmin utility to debug your PostGIS tables as you code. Start by installing pgAdmin, and then select Object > Register > Server to connect to your PostGIS container. The default connection details are listed below for convenience:

- *Host name/address*: 0.0.0.0\n- *Port*: 5432\n- *Maintenance database*: postgres\n- *Username*: postgres\n
"},{"location":"contributing/#running-tests","title":"Running Tests","text":"

All of our tests are run automatically via GitHub Actions on every push, so it's important to double-check that your code passes all local tests before committing your code. All of the tests below require pytest:

pip install pytest\n
"},{"location":"contributing/#running-integration-tests","title":"Running integration tests","text":"
pytest integration_tests\n
"},{"location":"contributing/#running-back-end-unit-tests","title":"Running back end unit tests","text":"
pytest api/tests/unit-tests\n
"},{"location":"contributing/#running-back-end-functional-tests","title":"Running back end functional tests","text":"

Note: Functional tests require a running instance of PostgreSQL, which you can start using make start-postgres-docker.

POSTGRES_PASSWORD=password \\\nPOSTGRES_HOST=localhost \\\npytest api/tests/functional-tests/\n
"},{"location":"endpoints/","title":"Endpoints","text":""},{"location":"installation/","title":"Installation","text":"

Valor comprises two services: a back-end service (which consists of a REST API and a Postgres database with the PostGIS extension), and a Python client for interacting with the back-end service.

"},{"location":"installation/#setting-up-the-back-end-service","title":"Setting up the back-end service","text":""},{"location":"installation/#using-docker-compose","title":"Using Docker Compose","text":"

The easiest way to get up and running with Valor is to use Docker Compose with the docker-compose.yml file in the repository root:

git clone https://github.com/striveworks/valor\ncd valor\ndocker compose --env-file ./api/.env.testing up\n

This will set up the necessary environment variables, start both the API and database services, and run the database migration job. The endpoint localhost:8000/health should return {\"status\":\"ok\"} if all of Valor's services were started correctly.
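You can also run this health check from Python. A minimal sketch, assuming the requests package is installed in your environment:

import requests\n\n# Query the health endpoint exposed by the API service\nresp = requests.get(\"http://localhost:8000/health\")\nprint(resp.json())  # expected output: {\"status\": \"ok\"}\n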

Note: running Valor this way is not intended for production and scalable use and is only recommended for development and testing purposes.

"},{"location":"installation/#deploying-via-docker-and-a-hosted-database","title":"Deploying via Docker and a hosted database","text":"

For a more production-grade deployment, we publish the images ghcr.io/striveworks/valor/valor-service (used for the REST API) and ghcr.io/striveworks/valor/migrations (used for setting up the database and migrations). These can be paired with any Postgres database with the PostGIS extension.

The following environment variables are required for running these images:

  • POSTGRES_HOST: The host of the Postgres database (used by valor-service, migrations)
  • POSTGRES_PORT: The port of the Postgres database (used by valor-service, migrations)
  • POSTGRES_DB: The name of the Postgres database (used by valor-service, migrations)
  • POSTGRES_USERNAME: The user of the Postgres database (used by valor-service, migrations)
  • POSTGRES_PASSWORD: The password of the Postgres database (used by valor-service, migrations)
  • POSTGRES_SSLMODE: Sets the Postgres instance SSL mode; typically needs to be \"require\" (used by migrations)
  • API_ROOT_PATH: The root path of the API, if serving behind a proxy (used by valor-service)

Additionally, the Valor REST API has an optional single username/password/bearer token authentication. To enable this feature, the valor-service image requires the following environment variables:

  • VALOR_USERNAME: The username to use
  • VALOR_PASSWORD: The password to use
  • VALOR_SECRET_KEY: A random, secret string used for signing JWT tokens
"},{"location":"installation/#manual-deployment","title":"Manual deployment","text":"

If you would prefer to build your own image or if you want a debug console for the back-end, please see the deployment instructions in Contributing to Valor.

"},{"location":"installation/#setting-up-the-python-client","title":"Setting up the Python client","text":"

The Python client can be installed via pip:

pip install valor-client\n
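Once installed, you can connect to a running Valor instance. A minimal sketch, assuming Client is exposed at the package root and the back-end service is reachable at localhost:8000:

from valor import Client\n\n# Establish a connection to the Valor API\nclient = Client.connect(\"http://localhost:8000\")\n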
"},{"location":"metadata_and_filtering/","title":"Metadata and Filtering","text":""},{"location":"metadata_and_filtering/#metadata","title":"Metadata","text":"

Valor offers rich support for attaching metadata to almost any object, which can then be used to filter, group, and organize objects in Valor.

The metadata types supported are:

  • simple data types (strings, numerics, boolean)
  • datetimes (via datetime.datetime, datetime.date, datetime.time, and datetime.timedelta in the Valor client)
  • geometries and geographies (via GeoJSON)

Metadata is added on object creation. For example, if you want to use metadata to organize models that come from training run checkpoints, this may look like:

run_name: str\nckpt: int\n\nModel.create(name=f\"{run_name}-ckpt{ckpt}\", metadata={\"run_name\": run_name, \"ckpt\": ckpt})\n

or if a datum has an associated datetime of capture, that can be added in the creation stage:

import datetime\n\nDatum(uid=fname, metadata={\"capture_day\": datetime.datetime(day=1, month=1, year=2021)})\n
"},{"location":"metadata_and_filtering/#filtering","title":"Filtering","text":"

Valor supports filtering objects based on metadata or other attributes (such as labels or bounding boxes). One of the most important use cases of filtering is to define a subset of a dataset to evaluate a model on.

"},{"location":"metadata_and_filtering/#filtering-by-metadata","title":"Filtering by metadata","text":"

For example, using the above example where capture_day was added as metadata, one way to test model drift could be to evaluate the model over different time periods. Such a workflow may look like:

import datetime\n\nimport valor\n\nmodel: valor.Model # classification model\ndset: valor.Dataset # dataset to evaluate on\n\n# compare performance on data captured before and after 2020\nd = datetime.datetime(day=5, month=10, year=2020)\neval1 = model.evaluate_classification(dset, filter_by=[valor.Datum.metadata[\"capture_day\"] < d])\neval2 = model.evaluate_classification(dset, filter_by=[valor.Datum.metadata[\"capture_day\"] > d])\n
"},{"location":"metadata_and_filtering/#filtering-by-geometric-attributes","title":"Filtering by geometric attributes","text":"

As an example for filtering by geometric attributes, consider evaluating an object detection model's performance on small objects, where we define small as being less than 500 square pixels in area. This can be achieved via:

import valor\n\nmodel: valor.Model # object detection model\ndset: valor.Dataset # dataset to evaluate on\n\nmodel.evaluate_detection(dset, filter_by=[valor.Annotation.bounding_box.area < 500])\n
"},{"location":"metadata_and_filtering/#filtering-in-queries","title":"Filtering in queries","text":"

Filtering can also be used when querying for different objects. For example, taking the model checkpoint example from above, we could query for a training run's model checkpoints with checkpoint numbers greater than 100:

from valor import Model, client\n\nrun_name: str # run name to query for\n\nclient.get_models([Model.metadata[\"run_name\"] == run_name, Model.metadata[\"ckpt\"] > 100])\n
"},{"location":"metrics/","title":"Metrics","text":"

Let's look at the various metrics you can calculate using Valor.

If we're missing an important metric for your particular use case, please open a GitHub Issue. We love hearing your suggestions.

"},{"location":"metrics/#classification-metrics","title":"Classification Metrics","text":"Name Description Equation Precision The number of true positives divided by the total number of positive predictions (i.e., the number of true positives plus the number of false positives). \\(\\dfrac{\\|TP\\|}{\\|TP\\|+\\|FP\\|}\\) Recall The number of true positives divided by the total count of the class of interest (i.e., the number of true positives plus the number of true negatives). \\(\\dfrac{\\|TP\\|}{\\|TP\\|+\\|FN\\|}\\) F1 A weighted average of precision and recall. \\(\\frac{2 * Precision * Recall}{Precision + Recall}\\) Accuracy The number of true predictions divided by the total number of predictions. \\(\\dfrac{\\|TP\\|+\\|TN\\|}{\\|TP\\|+\\|TN\\|+\\|FP\\|+\\|FN\\|}\\) ROC AUC The area under the Receiver Operating Characteristic (ROC) curve for the predictions generated by a given model. See ROCAUC methods. Precision-Recall Curves Outputs a nested dictionary containing the true positives, false positives, true negatives, false negatives, precision, recall, and F1 score for each (label key, label value, confidence threshold) combination. See precision-recall curve methods Detailed Precision-Recall Curves Similar to PrecisionRecallCurve, except this metric a) classifies false positives as hallucinations or misclassifications, b) classifies false negatives as misclassifications or missed_detections, and c) gives example datums for each observation, up to a maximum of pr_curve_max_examples. See detailed precision-recall curve methods"},{"location":"metrics/#object-detection-and-instance-segmentation-metrics","title":"Object Detection and Instance Segmentation Metrics**","text":"Name Description Equation Average Precision (AP) The weighted mean of precisions achieved at several different recall thresholds for a single Intersection over Union (IOU), grouped by class. See AP methods. AP Averaged Over IOUs The average of several AP metrics across IOU thresholds, grouped by class labels. \\(\\dfrac{1}{\\text{number of thresholds}} \\sum\\limits_{iou \\in thresholds} AP_{iou}\\) Mean Average Precision (mAP) The average of several AP metrics, grouped by label keys and IOU thresholds. \\(\\dfrac{1}{\\text{number of labels}} \\sum\\limits_{label \\in labels} AP_{c}\\) mAP Averaged Over IOUs The average of several mAP metrics grouped by label keys. \\(\\dfrac{1}{\\text{number of thresholds}} \\sum\\limits_{iou \\in thresholds} mAP_{iou}\\) Average Recall (AR) The average of several recall metrics across IOU thresholds, grouped by class labels. See AR methods. Mean Average Recall (mAR) The average of several AR metrics, grouped by label keys. \\(\\dfrac{1}{\\text{number of labels}} \\sum\\limits_{label \\in labels} AR_{class}\\) Precision-Recall Curves Outputs a nested dictionary containing the true positives, false positives, true negatives, false negatives, precision, recall, and F1 score for each (label key, label value, confidence threshold) combination. These curves are calculated using a default IOU threshold of 0.5; you can set your own threshold by passing a float between 0 and 1 to the pr_curve_iou_threshold parameter at evaluation time. 
See precision-recall curve methods Detailed Precision-Recall Curves Similar to PrecisionRecallCurve, except this metric a) classifies false positives as hallucinations or misclassifications, b) classifies false negatives as misclassifications or missed_detections, and c) gives example datums and bounding boxes for each observation, up to a maximum of pr_curve_max_examples. See detailed precision-recall curve methods

**When calculating IOUs for object detection metrics, Valor handles the necessary conversion between different types of geometric annotations. For example, if your model prediction is a polygon and your groundtruth is a raster, then the raster will be converted to a polygon prior to calculating the IOU.

"},{"location":"metrics/#semantic-segmentation-metrics","title":"Semantic Segmentation Metrics","text":"Name Description Equation Intersection Over Union (IOU) A ratio between the groundtruth and predicted regions of an image, measured as a percentage, grouped by class. \\(\\dfrac{area( prediction \\cap groundtruth )}{area( prediction \\cup groundtruth )}\\) Mean IOU The average of IOU across labels, grouped by label key. \\(\\dfrac{1}{\\text{number of labels}} \\sum\\limits_{label \\in labels} IOU_{c}\\)"},{"location":"metrics/#appendix-metric-calculations","title":"Appendix: Metric Calculations","text":""},{"location":"metrics/#binary-roc-auc","title":"Binary ROC AUC","text":""},{"location":"metrics/#receiver-operating-characteristic-roc","title":"Receiver Operating Characteristic (ROC)","text":"

An ROC curve plots the True Positive Rate (TPR) vs. the False Positive Rate (FPR) at different confidence thresholds.

In Valor, we use the confidence scores sorted in decreasing order as our thresholds. Using these thresholds, we can calculate our TPR and FPR as follows:

"},{"location":"metrics/#determining-the-rate-of-correct-predictions","title":"Determining the Rate of Correct Predictions","text":"Element Description True Positive (TP) Prediction confidence score >= threshold and is correct. False Positive (FP) Prediction confidence score >= threshold and is incorrect. True Negative (TN) Prediction confidence score < threshold and is correct. False Negative (FN) Prediction confidence score < threshold and is incorrect.
  • \\(\\text{True Positive Rate (TPR)} = \\dfrac{|TP|}{|TP| + |FN|} = \\dfrac{|TP(threshold)|}{|TP(threshold)| + |FN(threshold)|}\\)

  • \\(\\text{False Positive Rate (FPR)} = \\dfrac{|FP|}{|FP| + |TN|} = \\dfrac{|FP(threshold)|}{|FP(threshold)| + |TN(threshold)|}\\)

We now use the confidence scores, sorted in decreasing order, as our thresholds in order to generate points on a curve.

\\(Point(score) = (FPR(score), \\ TPR(score))\\)

"},{"location":"metrics/#area-under-the-roc-curve-roc-auc","title":"Area Under the ROC Curve (ROC AUC)","text":"

After calculating the ROC curve, we find the ROC AUC metric by approximating the integral using the trapezoidal rule formula.

\(ROC \ AUC = \sum_{i=1}^{|scores|} \frac{ (FPR(score_i) - FPR(score_{i-1})) \cdot (TPR(score_i) + TPR(score_{i-1})) }{2}\)

See Classification: ROC Curve and AUC for more information.
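As a worked sketch of this calculation (a hypothetical helper, not Valor's implementation), given (FPR, TPR) points ordered by decreasing confidence threshold:

def roc_auc(points: list[tuple[float, float]]) -> float:\n    \"\"\"Approximates the area under an ROC curve with the trapezoidal rule.\n\n    `points` holds (FPR, TPR) pairs ordered by decreasing threshold,\n    so FPR is non-decreasing from one point to the next.\n    \"\"\"\n    area = 0.0\n    for (x0, y0), (x1, y1) in zip(points[:-1], points[1:]):\n        area += (x1 - x0) * (y1 + y0) / 2  # trapezoid between consecutive points\n    return area\n\n# A perfect classifier's curve: (0, 0) -> (0, 1) -> (1, 1)\nprint(roc_auc([(0.0, 0.0), (0.0, 1.0), (1.0, 1.0)]))  # 1.0\n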

"},{"location":"metrics/#average-precision-ap","title":"Average Precision (AP)","text":"

For object detection and instance segmentation tasks, average precision is calculated from the intersection-over-union (IOU) of geometric predictions and ground truths.

"},{"location":"metrics/#multiclass-precision-and-recall","title":"Multiclass Precision and Recall","text":"

Tasks that predict geometries (such as object detection or instance segmentation) use the ratio intersection-over-union (IOU) to calculate precision and recall. IOU is the ratio of the intersecting area over the joint area spanned by the two geometries, and is defined in the following equation.

\\(Intersection \\ over \\ Union \\ (IOU) = \\dfrac{Area( prediction \\cap groundtruth )}{Area( prediction \\cup groundtruth )}\\)

Using different IOU thresholds, we can determine whether to count a prediction-ground truth pairing based on their overlap.

  • True Positive (TP): Prediction-GroundTruth pair exists with IOU >= threshold.
  • False Positive (FP): Prediction-GroundTruth pair exists with IOU < threshold.
  • True Negative (TN): Unused in multi-class evaluation.
  • False Negative (FN): No Prediction with a matching label exists for the GroundTruth.
  • \\(Precision = \\dfrac{|TP|}{|TP| + |FP|} = \\dfrac{\\text{Number of True Predictions}}{|\\text{Predictions}|}\\)

  • \\(Recall = \\dfrac{|TP|}{|TP| + |FN|} = \\dfrac{\\text{Number of True Predictions}}{|\\text{Groundtruths}|}\\)
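As a quick worked sketch of these two formulas (a hypothetical helper, included for illustration):

def precision_recall(tp: int, fp: int, fn: int) -> tuple[float, float]:\n    \"\"\"Computes precision and recall from true/false positive and false negative counts.\"\"\"\n    precision = tp / (tp + fp) if (tp + fp) else 0.0\n    recall = tp / (tp + fn) if (tp + fn) else 0.0\n    return precision, recall\n\n# e.g., 3 true positives, 1 false positive, and 1 false negative\nprint(precision_recall(3, 1, 1))  # (0.75, 0.75)\n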

"},{"location":"metrics/#matching-ground-truths-with-predictions","title":"Matching Ground Truths with Predictions","text":"

To properly evaluate a detection, we must first find the best pairings of predictions to ground truths. We start by iterating over our predictions, ordering them by highest scores first. We pair each prediction with the ground truth that has the highest calculated IOU. Both the prediction and ground truth are now considered paired and removed from the pool of choices.

def rank_ious(\n    groundtruths: list,\n    predictions: list,\n) -> list[float]:\n    \"\"\"Ranks IOUs by unique pairings.\"\"\"\n\n    retval = []\n    groundtruths = set(groundtruths)\n    # iterate over predictions from highest to lowest score\n    for prediction in sorted(predictions, key=lambda x: -x.score):\n        if not groundtruths:\n            break\n        # pair this prediction with the unmatched ground truth that overlaps it most\n        groundtruth = max(groundtruths, key=lambda x: calculate_iou(x, prediction))\n        groundtruths.remove(groundtruth)\n        retval.append(calculate_iou(groundtruth, prediction))\n    return retval\n
"},{"location":"metrics/#precision-recall-curve","title":"Precision-Recall Curve","text":"

We can now compute the precision-recall curve using our previously ranked IOUs. We do this by iterating through the ranked IOUs and creating points cumulatively using recall and precision.

def create_precision_recall_curve(\n    number_of_groundtruths: int,\n    ranked_ious: list[float],\n    threshold: float,\n) -> list[tuple[float, float]]:\n    \"\"\"Creates the precision-recall curve from a list of IOUs and a threshold.\"\"\"\n\n    retval = []\n    count_tp = 0\n    for i in range(len(ranked_ious)):\n        if ranked_ious[i] >= threshold:\n            count_tp += 1\n        precision = count_tp / (i + 1)\n        recall = count_tp / number_of_groundtruths\n        retval.append((recall, precision))\n    return retval\n
"},{"location":"metrics/#calculating-average-precision","title":"Calculating Average Precision","text":"

Average precision is defined as the area under the precision-recall curve.

We will use a 101-point interpolation of the curve to be consistent with the COCO evaluator. The intent behind interpolation is to reduce the fuzziness that results from ranking pairs.

\\(AP = \\frac{1}{101} \\sum\\limits_{r\\in\\{ 0, 0.01, \\ldots , 1 \\}}\\rho_{interp}(r)\\)

\(\rho_{interp}(r) = \underset{\tilde{r}: \tilde{r} \ge r}{\max} \ \rho(\tilde{r})\)
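A sketch of the 101-point interpolation (a hypothetical helper, not Valor's implementation), given the (recall, precision) points produced by create_precision_recall_curve above:

def average_precision(curve: list[tuple[float, float]]) -> float:\n    \"\"\"Computes AP via 101-point interpolation of a (recall, precision) curve.\"\"\"\n    total = 0.0\n    for i in range(101):\n        r = i / 100\n        # interpolated precision: the maximum precision at any recall >= r\n        candidates = [precision for recall, precision in curve if recall >= r]\n        total += max(candidates) if candidates else 0.0\n    return total / 101\n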

"},{"location":"metrics/#references","title":"References","text":"
  • MS COCO Detection Evaluation
  • The PASCAL Visual Object Classes (VOC) Challenge
  • Mean Average Precision (mAP) Using the COCO Evaluator
"},{"location":"metrics/#average-recall-ar","title":"Average Recall (AR)","text":"

To calculate Average Recall (AR), we:

  1. Find the count of true positives above specified IOU and confidence thresholds for all images containing a ground truth of a particular class.
  2. Divide that count of true positives by the total number of ground truths to get the recall value per class and IOU threshold. Append that recall value to a list.
  3. Repeat steps 1 & 2 for multiple IOU thresholds (e.g., [.5, .75])
  4. Take the average of our list of recalls to arrive at the AR value per class.
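This procedure might be sketched as follows (a hypothetical helper; the ranked IOUs stand in for the matching and confidence-thresholding logic described earlier):

def average_recall(\n    number_of_groundtruths: int,\n    ranked_ious: list[float],\n    iou_thresholds: tuple[float, ...] = (0.5, 0.75),\n) -> float:\n    \"\"\"Averages per-IOU-threshold recalls for a single class.\"\"\"\n    recalls = []\n    for threshold in iou_thresholds:\n        count_tp = sum(1 for iou in ranked_ious if iou >= threshold)  # step 1\n        recalls.append(count_tp / number_of_groundtruths)  # step 2\n    return sum(recalls) / len(recalls)  # steps 3 & 4\n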

Note that this metric differs from COCO's calculation in two ways:

  • COCO averages across classes while calculating AR, whereas we calculate AR separately for each class. Our AR calculation matches the original FAIR definition of AR, while our mAR calculation matches what COCO calls AR.
  • COCO calculates three different AR metrics (AR@1, AR@5, AR@100) by considering only the top 1/5/100 most confident predictions during the matching process. Valor, on the other hand, allows users to input a recall_score_threshold value that will prevent low-confidence predictions from being counted as true positives when calculating AR.
"},{"location":"metrics/#precision-recall-curves","title":"Precision-Recall Curves","text":"

Precision-recall curves offer insight into which confidence threshold you should pick for your production pipeline. The PrecisionRecallCurve metric includes the true positives, false positives, true negatives, false negatives, precision, recall, and F1 score for each (label key, label value, confidence threshold) combination. When using the Valor Python client, the output will be formatted as follows:

pr_evaluation = evaluate_detection(\n    data=dataset,\n)\nprint(pr_evaluation)\n\n[...,\n{\n    \"type\": \"PrecisionRecallCurve\",\n    \"parameters\": {\n        \"label_key\": \"class\", # The key of the label.\n        \"pr_curve_iou_threshold\": 0.5, # Note that this value will be None for classification tasks. For detection tasks, we use 0.5 as the default threshold, but allow users to pass an optional `pr_curve_iou_threshold` parameter in their evaluation call.\n    },\n    \"value\": {\n        \"cat\": { # The value of the label.\n            \"0.05\": { # The confidence score threshold, ranging from 0.05 to 0.95 in increments of 0.05.\n                \"fn\": 0,\n                \"fp\": 1,\n                \"tp\": 3,\n                \"recall\": 1,\n                \"precision\": 0.75,\n                \"f1_score\": .857,\n            },\n            ...\n        },\n    }\n}]\n

It's important to note that these curves are computed slightly differently from our other aggregate metrics above:

"},{"location":"metrics/#classification-tasks","title":"Classification Tasks","text":"

Valor calculates its aggregate precision, recall, and F1 metrics by matching the highest confidence prediction with each groundtruth. One issue with this approach is that we may throw away useful information in cases where prediction labels all have similarly strong confidence scores. For example: if our top two predictions for a given ground truth are {\"label\": cat, \"score\": .91} and {\"label\": dog, \"score\": .90}, then our aggregated precision and recall metrics would penalize the dog label even though its confidence score was nearly equal to the cat label.

We think the approach above makes sense when calculating aggregate precision and recall metrics, but, when calculating the PrecisionRecallCurve value for each label, we consider all ground truth-prediction matches in order to treat each label as its own, separate binary classification problem.

"},{"location":"metrics/#detection-tasks","title":"Detection Tasks","text":"

The PrecisionRecallCurve values differ from the precision-recall curves used to calculate Average Precision in two subtle ways:

  • The PrecisionRecallCurve values visualize how precision and recall change as confidence thresholds vary from 0.05 to 0.95 in increments of 0.05. In contrast, the precision-recall curves used to calculate Average Precision are non-uniform; they vary over the actual confidence scores for each ground truth-prediction match.
  • If your pipeline predicts a label on an image, but that label doesn't exist on any ground truths in that particular image, then the PrecisionRecallCurve values will consider that prediction to be a false positive, whereas the other detection metrics will ignore that particular prediction.
"},{"location":"metrics/#detailedprecisionrecallcurve","title":"DetailedPrecisionRecallCurve","text":"

Valor also includes a more detailed version of PrecisionRecallCurve which can be useful for debugging your model's false positives and false negatives. When calculating DetailedPrecisionRecallCurve, Valor will classify false positives as either hallucinations or misclassifications and false negatives as either missed_detections or misclassifications using the following logic:

"},{"location":"metrics/#classification-tasks_1","title":"Classification Tasks","text":"
  • A false positive is a misclassification if there is a qualified prediction (with score >= score_threshold) with the same Label.key as the groundtruth on the datum, but the Label.value is incorrect. For example: if there's a photo with one groundtruth label on it (e.g., Label(key='animal', value='dog')), and we predicted another label value (e.g., Label(key='animal', value='cat')) on that datum, we'd say it's a misclassification since the key was correct but the value was not. Any false positives that do not meet this criterion are considered to be hallucinations.
  • Similarly, a false negative is a misclassification if there is a prediction with the same Label.key as the groundtruth on the datum, but the Label.value is incorrect. Any false negatives that do not meet this criterion are considered to be missed_detections.
"},{"location":"metrics/#object-detection-tasks","title":"Object Detection Tasks","text":"
  • A false positive is a misclassification if a) there is a qualified prediction with the same Label.key as the groundtruth on the datum, but the Label.value is incorrect, and b) the qualified prediction and groundtruth have an IOU >= pr_curve_iou_threshold. For example: if there's a photo with one groundtruth label on it (e.g., Label(key='animal', value='dog')), and we predicted another bounding box directly over that same object (e.g., Label(key='animal', value='cat')), we'd say it's a misclassification. Any false positives that do not meet these criteria are considered to be hallucinations.
  • A false negative is determined to be a misclassification if the two criteria above are met: a) there is a qualified prediction with the same Label.key as the groundtruth on the datum, but the Label.value is incorrect, and b) the qualified prediction and groundtruth have an IOU >= pr_curve_iou_threshold. Any false negatives that do not meet these criteria are considered to be missed_detections.

The DetailedPrecisionRecallCurve output also includes up to n examples of each type of error, where n is set using pr_curve_max_examples. An example output is as follows:

# To retrieve more detailed examples for each `fn`, `fp`, and `tp`, look at the `DetailedPrecisionRecallCurve` metric\ndetailed_evaluation = evaluate_detection(\n    data=dataset,\n    pr_curve_max_examples=1,  # The maximum number of examples to return for each observation type (e.g., hallucinations, misclassifications, etc.)\n    metrics_to_return=[..., 'DetailedPrecisionRecallCurve'],  # DetailedPrecisionRecallCurve isn't returned by default; the user must ask for it explicitly\n)\nprint(detailed_evaluation)\n\n[...,\n{\n    \"type\": \"DetailedPrecisionRecallCurve\",\n    \"parameters\": {\n        \"label_key\": \"class\", # The key of the label.\n        \"pr_curve_iou_threshold\": 0.5,\n    },\n    \"value\": {\n        \"cat\": { # The value of the label.\n            \"0.05\": { # The confidence score threshold, ranging from 0.05 to 0.95 in increments of 0.05.\n                \"fp\": {\n                    \"total\": 1,\n                    \"observations\": {\n                        'hallucinations': {\n                            \"count\": 1,\n                            \"examples\": [\n                                (\n                                    'test_dataset',\n                                     1,\n                                    '{\"type\":\"Polygon\",\"coordinates\":[[[464.08,105.09],[495.74,105.09],[495.74,146.99],[464.08,146.99],[464.08,105.91]]]}'\n                               ) # There's one false positive for this (key, value, confidence threshold) combination as indicated by the one tuple shown here. This tuple contains that observation's dataset name, datum ID, and coordinates in the form of a GeoJSON string. For classification tasks, this tuple will only contain the given observation's dataset name and datum ID.\n                            ],\n                        }\n                    },\n                },\n                \"tp\": {\n                    \"total\": 3,\n                    \"observations\": {\n                        'all': {\n                            \"count\": 3,\n                            \"examples\": [\n                                (\n                                    'test_dataset',\n                                     2,\n                                    '{\"type\":\"Polygon\",\"coordinates\":[[[464.08,105.09],[495.74,105.09],[495.74,146.99],[464.08,146.99],[464.08,105.91]]]}'\n                               ) # We only return one example since `pr_curve_max_examples` is set to 1 by default; update this argument at evaluation time to store and retrieve an arbitrary number of examples.\n                            ],\n                        },\n                    }\n                },\n                \"fn\": {...},\n            },\n        },\n    }\n}]\n
"},{"location":"technical_concepts/","title":"Technical Concepts","text":"

On this page, we'll describe many of the technical concepts underpinning Valor.

"},{"location":"technical_concepts/#high-level-workflow","title":"High-Level Workflow","text":"

The typical Valor workflow involves POSTing ground truth annotations (e.g., class labels, bounding boxes, segmentation masks, etc.) and model predictions to our API service. The service leverages these ground truths and predictions to compute evaluation metrics, and then stores the ground truths, predictions, and evaluation metrics centrally in Postgres. Users can also attach metadata to their Datasets, Models, GroundTruths, and Annotations; this metadata makes it easy to query for specific subsets of evaluations at a later date. Once an evaluation is stored in Valor, users can query those evaluations from Postgres via GET requests to the Valor API.

Note that Valor does not store raw data (such as underlying images) or facilitate model inference. Only the following items are stored in Postgres:

  • Ground truth annotations
  • Predictions outputted from a model
  • Metadata from any of Valor's various classes
  • Evaluation metrics computed by Valor
  • State related to any of the above
"},{"location":"technical_concepts/#supported-task-types","title":"Supported Task Types","text":"

As of May 2024, Valor supports the following types of supervised learning tasks and associated metrics:

  • Classification (including multi-label classification)
    • F1
    • ROC AUC
    • Accuracy
    • Precision
    • Recall
    • Precision Recall Curve
    • Detailed Precision Recall Curve
  • Object detection
    • AP
    • mAP
    • AP Averaged Over IOUs
    • mAP Averaged Over IOUs
    • Precision Recall Curve
    • Detailed Precision Recall Curve
  • Segmentation (including both instance and semantic segmentation)
    • IOU
    • mIOU

For descriptions of each of these metrics, see our Metrics page.

We expect the Valor framework to extend well to other types of supervised learning tasks and plan to expand our supported task types in future releases.

"},{"location":"technical_concepts/#components","title":"Components","text":"

We can think of Valor in terms of four orthogonal components:

"},{"location":"technical_concepts/#api","title":"API","text":"

The core of Valor is a back end REST API service. Users can call the API's endpoints directly (e.g., POST /datasets), or they can use our Python client to handle the API calls in their Python environment. All of Valor's state is stored in Postgres; the API itself is completely stateless.

Note that, after you start the API service in Docker, you'll be able to view FastAPI's automatically generated API documentation at https://<your host>/docs.

"},{"location":"technical_concepts/#postgresql","title":"PostgreSQL","text":"

PostgreSQL (a.k.a. Postgres or psql) is an open-source relational database management system. We use Postgres to store all of Valor's various objects and states.

One of the most important reasons we chose Postgres was its PostGIS extension, which adds support for storing, indexing, and querying geographic data. PostGIS enables Valor to quickly filter prior evaluations using geographic coordinates, which is a critically important feature for any computer vision task involving satellite data.

"},{"location":"technical_concepts/#python-client","title":"Python Client","text":"

Finally, we created a client to make it easier for our users to play with Valor from their Python environment. All of Valor's validations and computations are handled by our API; the Python client simply provides convenient methods to call the API's endpoints.

"},{"location":"technical_concepts/#classes","title":"Classes","text":"

The Valor API and Python client both make use of six core classes:

"},{"location":"technical_concepts/#dataset","title":"Dataset","text":"

The highest-level class is a Dataset, which stores metadata and annotations associated with a particular set of data. Note that Dataset is an abstraction: You can have multiple Datasets that reference the exact same input data, which is useful if you need to update or version your data over time.

Datasets require a name at instantiation and can optionally take in various types of metadata that you want to associate with your data.
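For example (a sketch that follows the Model.create pattern shown on the Metadata and Filtering page; the exact keyword names are assumptions):

from valor import Dataset\n\ndataset = Dataset.create(\n    name=\"my-dataset\",\n    metadata={\"source\": \"camera-1\"},  # optional, user-defined metadata\n)\n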

"},{"location":"technical_concepts/#model","title":"Model","text":"

Models describe a particular instantiation of a machine learning model. We use the Model object to delineate between different model runs or between the same model run over time. Note that Models aren't children of Datasets; you can have one Model contain predictions for multiple Datasets.

Models require a name at instantiation and can optionally take in various types of metadata that you want to associate with your model.

"},{"location":"technical_concepts/#groundtruth","title":"GroundTruth","text":"

A GroundTruth object clarifies what the correct prediction should be for a given piece of data (e.g., an image). For an object detection task, for example, the GroundTruth would store a human-drawn bounding box that, when overlaid on an object, would correctly enclose the object that we're trying to predict.

GroundTruths take one Datum and a list of Annotations as arguments.
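A minimal sketch (keyword names and the example UID/label are illustrative assumptions based on the description above):

from valor import Annotation, Datum, GroundTruth, Label\n\ngroundtruth = GroundTruth(\n    datum=Datum(uid=\"img_001\"),\n    annotations=[\n        Annotation.create(labels=[Label(key=\"class\", value=\"dog\")]),\n    ],\n)\n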

"},{"location":"technical_concepts/#prediction","title":"Prediction","text":"

A Prediction object describes the output of a machine learning model. For an object detection task, for example, the Prediction would describe a machine-generated bounding box enclosing the area where a computer vision model believes a certain class of object can be found.

Predictions take one Datum and a list of Annotations as arguments.
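A minimal sketch mirroring the GroundTruth example above (the score keyword on Label is an assumption for prediction labels):

from valor import Annotation, Datum, Label, Prediction\n\nprediction = Prediction(\n    datum=Datum(uid=\"img_001\"),\n    annotations=[\n        Annotation.create(labels=[Label(key=\"class\", value=\"dog\", score=0.9)]),\n    ],\n)\n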

"},{"location":"technical_concepts/#datum","title":"Datum","text":"

Datums are used to store metadata about GroundTruths or Predictions. This metadata can include user-supplied metadata (e.g., JSONs filled with configuration details) or geospatial coordinates (via the geospatial argument). Datums provide the vital link between GroundTruths / Predictions and Datasets, and they are useful when filtering your evaluations on specific conditions.

A Datum requires a universal ID (UID) and dataset name at instantiation, along with any metadata or geospatial dictionaries that you want to associate with your GroundTruth or Prediction.

"},{"location":"technical_concepts/#annotation","title":"Annotation","text":"

Annotations attach to both GroundTruths and Predictions, enabling users to add textual labels to these objects. If a GroundTruth depicts a bounding box around a cat, for example, the Annotation would be passed into the GroundTruth to clarify the correct label for the GroundTruth (e.g., class=cat) and any other labels the user wants to specify for that bounding box (e.g., breed=tabby).

Annotations require the user to specify their task type, labels, and metadata at instantiation. Users can also pass in various visual representations tailored to their specific task, such as bounding boxes, segmentations, or image rasters.

"},{"location":"technical_concepts/#authentication","title":"Authentication","text":"

The API can be run without authentication (the default) or with authentication using a single global username and password. To set this up, configure the following environment variables when running the back end in one of two ways:

  • Set the environment variables VALOR_SECRET_KEY, VALOR_USERNAME, and VALOR_PASSWORD manually (e.g., export VALOR_SECRET_KEY=<secret key>)
  • Set these environment variables in a file named .env.auth, and place that file in the api directory. An example of such a file would look like:
VALOR_SECRET_KEY=\"secret key\"\nVALOR_USERNAME=\"username\"\nVALOR_PASSWORD=\"password\"\n

VALOR_SECRET_KEY is the key used for encoding and decoding tokens, and should be a random string. VALOR_USERNAME and VALOR_PASSWORD are the username and password that will be used to authenticate requests.

You can use the tests in integration_tests/test_client_auth.py to check whether your authenticator is running correctly.

"},{"location":"technical_concepts/#deployment-settings","title":"Deployment Settings","text":"

When deploying behind a proxy or with external routing, the API_ROOT_PATH environment variable should be used to set the root_path argument to fastapi.FastAPI (see https://fastapi.tiangolo.com/advanced/behind-a-proxy/#setting-the-root_path-in-the-fastapi-app).
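For reference, the underlying pattern in a generic FastAPI app looks like this (a sketch, not Valor's actual code):

import os\n\nfrom fastapi import FastAPI\n\n# Serving behind a proxy at, e.g., /valor requires API_ROOT_PATH=/valor\napp = FastAPI(root_path=os.environ.get(\"API_ROOT_PATH\", \"\"))\n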

"},{"location":"technical_concepts/#release-process","title":"Release Process","text":"

A release is made by publishing a tag of the form vX.Y.Z (e.g., v0.1.0). This will trigger a GitHub action that will build and publish the Python client to PyPI. These releases should be created using the GitHub UI.

"},{"location":"client_api/Annotation/","title":"Annotation","text":"

Bases: StaticCollection

A class used to annotate GroundTruths and Predictions.

Attributes:

Name Type Description metadata Dictionary

A dictionary of metadata that describes the Annotation.

labels (List[Label], optional)

A list of labels to use for the Annotation.

bounding_box Box

A bounding box to assign to the Annotation.

polygon BoundingPolygon

A polygon to assign to the Annotation.

raster Raster

A raster to assign to the Annotation.

embedding List[float]

An embedding, described by a list of values with type float and a maximum length of 16,000.

is_instance (bool, optional)

A boolean describing whether we should treat the Raster attached to an annotation as an instance segmentation or not. If set to true, then the Annotation will be validated for use in object detection tasks. If set to false, then the Annotation will be validated for use in semantic segmentation tasks.

implied_task_types (list[str], optional)

The validated task types that are applicable to each Annotation. Doesn't need to be set by the user.

Examples:

Classification

>>> Annotation.create(\n...     labels=[\n...         Label(key=\"class\", value=\"dog\"),\n...         Label(key=\"category\", value=\"animal\"),\n...     ]\n... )\n

Object-Detection Box

>>> annotation = Annotation.create(\n...     labels=[Label(key=\"k1\", value=\"v1\")],\n...     bounding_box=Box(...),\n... )\n

Object-Detection Polygon

>>> annotation = Annotation.create(\n...     labels=[Label(key=\"k1\", value=\"v1\")],\n...     polygon=BoundingPolygon(...),\n... )\n

Object-Detection Raster

>>> annotation = Annotation.create(\n...     labels=[Label(key=\"k1\", value=\"v1\")],\n...     raster=Raster(...),\n...     is_instance=True\n... )\n

Semantic-Segmentation Raster

>>> annotation = Annotation.create(\n...     labels=[Label(key=\"k1\", value=\"v1\")],\n...     raster=Raster(...),\n...     is_instance=False # or None\n... )\n

Defining all supported annotation types is allowed!

>>> Annotation.create(\n...     labels=[Label(key=\"k1\", value=\"v1\")],\n...     bounding_box=Box(...),\n...     polygon=BoundingPolygon(...),\n...     raster=Raster(...),\n... )\n
Source code in valor/schemas/symbolic/collections.py
class Annotation(StaticCollection):\n    \"\"\"\n    A class used to annotate `GroundTruths` and `Predictions`.\n\n    Attributes\n    ----------\n    metadata: Dictionary\n        A dictionary of metadata that describes the `Annotation`.\n    labels: List[Label], optional\n        A list of labels to use for the `Annotation`.\n    bounding_box: Box\n        A bounding box to assign to the `Annotation`.\n    polygon: BoundingPolygon\n        A polygon to assign to the `Annotation`.\n    raster: Raster\n        A raster to assign to the `Annotation`.\n    embedding: List[float]\n        An embedding, described by a list of values with type float and a maximum length of 16,000.\n    is_instance: bool, optional\n        A boolean describing whether we should treat the Raster attached to an annotation as an instance segmentation or not. If set to true, then the Annotation will be validated for use in object detection tasks. If set to false, then the Annotation will be validated for use in semantic segmentation tasks.\n    implied_task_types: list[str], optional\n        The validated task types that are applicable to each Annotation. Doesn't need to bet set by the user.\n\n    Examples\n    --------\n\n    Classification\n    >>> Annotation.create(\n    ...     labels=[\n    ...         Label(key=\"class\", value=\"dog\"),\n    ...         Label(key=\"category\", value=\"animal\"),\n    ...     ]\n    ... )\n\n    Object-Detection Box\n    >>> annotation = Annotation.create(\n    ...     labels=[Label(key=\"k1\", value=\"v1\")],\n    ...    bounding_box=box2,\n    ... )\n\n    Object-Detection Polygon\n    >>> annotation = Annotation.create(\n    ...     labels=[Label(key=\"k1\", value=\"v1\")],\n    ...     polygon=BoundingPolygon(...),\n    ... )\n\n    Object-Detection Raster\n    >>> annotation = Annotation.create(\n    ...     labels=[Label(key=\"k1\", value=\"v1\")],\n    ...     raster=Raster(...),\n    ...     is_instance=True\n    ... )\n\n    Semantic-Segmentation Raster\n    >>> annotation = Annotation.create(\n    ...     labels=[Label(key=\"k1\", value=\"v1\")],\n    ...     raster=Raster(...),\n    ...     is_instance=False # or None\n    ... )\n\n    Defining all supported annotation types is allowed!\n    >>> Annotation.create(\n    ...     labels=[Label(key=\"k1\", value=\"v1\")],\n    ...     bounding_box=Box(...),\n    ...     polygon=BoundingPolygon(...),\n    ...     raster=Raster(...),\n    ... 
)\n    \"\"\"\n\n    metadata: Dictionary = Dictionary.symbolic(\n        owner=\"annotation\", name=\"metadata\"\n    )\n    labels: SymbolicList[Label] = SymbolicList[Label].symbolic(\n        owner=\"annotation\", name=\"labels\"\n    )\n    bounding_box: Box = Box.symbolic(owner=\"annotation\", name=\"bounding_box\")\n    polygon: Polygon = Polygon.symbolic(owner=\"annotation\", name=\"polygon\")\n    raster: Raster = Raster.symbolic(owner=\"annotation\", name=\"raster\")\n    embedding: Embedding = Embedding.symbolic(\n        owner=\"annotation\", name=\"embedding\"\n    )\n    is_instance: Bool = Bool.symbolic(owner=\"annotation\", name=\"is_instance\")\n    implied_task_types: SymbolicList[String] = SymbolicList[String].symbolic(\n        owner=\"annotation\", name=\"implied_task_types\"\n    )\n\n    def __init__(\n        self,\n        *,\n        metadata: Optional[dict] = None,\n        labels: Optional[List[Label]] = None,\n        bounding_box: Optional[Box] = None,\n        polygon: Optional[Polygon] = None,\n        raster: Optional[Raster] = None,\n        embedding: Optional[Embedding] = None,\n        is_instance: Optional[bool] = None,\n        implied_task_types: Optional[List[String]] = None,\n    ):\n        \"\"\"\n        Constructs an annotation.\n\n        Parameters\n        ----------\n        metadata: Dict[str, Union[int, float, str, bool, datetime.datetime, datetime.date, datetime.time]]\n            A dictionary of metadata that describes the `Annotation`.\n        labels: List[Label]\n            A list of labels to use for the `Annotation`.\n        bounding_box: Box, optional\n            A bounding box annotation.\n        polygon: Polygon, optional\n            A polygon annotation.\n        raster: Raster, optional\n            A raster annotation.\n        embedding: List[float], optional\n            An embedding, described by a list of values with type float and a maximum length of 16,000.\n        is_instance: bool, optional\n            A boolean describing whether we should treat the Raster attached to an annotation as an instance segmentation or not. If set to true, then the Annotation will be validated for use in object detection tasks. If set to false, then the Annotation will be validated for use in semantic segmentation tasks.\n        implied_task_types: list[str], optional\n            The validated task types that are applicable to each Annotation. Doesn't need to bet set by the user.\n\n        \"\"\"\n        super().__init__(\n            metadata=metadata if metadata else dict(),\n            labels=labels if labels else list(),\n            bounding_box=bounding_box,\n            polygon=polygon,\n            raster=raster,\n            embedding=embedding,\n            is_instance=is_instance,\n            implied_task_types=implied_task_types,\n        )\n\n    @staticmethod\n    def formatting() -> Dict[str, Any]:\n        \"\"\"Attribute format mapping.\"\"\"\n        return {\n            \"bounding_box\": Box.nullable,\n            \"polygon\": Polygon.nullable,\n            \"raster\": Raster.nullable,\n            \"embedding\": Embedding.nullable,\n            \"is_instance\": Bool.nullable,\n            \"implied_task_types\": SymbolicList,\n        }\n
"},{"location":"client_api/Annotation/#valor.Annotation-functions","title":"Functions","text":""},{"location":"client_api/Annotation/#valor.Annotation.__init__","title":"valor.Annotation.__init__(*, metadata=None, labels=None, bounding_box=None, polygon=None, raster=None, embedding=None, is_instance=None, implied_task_types=None)","text":"

Constructs an annotation.

Parameters:

Name Type Description Default metadata Optional[dict]

A dictionary of metadata that describes the Annotation.

None labels Optional[List[Label]]

A list of labels to use for the Annotation.

None bounding_box Optional[Box]

A bounding box annotation.

None polygon Optional[Polygon]

A polygon annotation.

None raster Optional[Raster]

A raster annotation.

None embedding Optional[Embedding]

An embedding, described by a list of values with type float and a maximum length of 16,000.

None is_instance Optional[bool]

A boolean describing whether we should treat the Raster attached to an annotation as an instance segmentation or not. If set to true, then the Annotation will be validated for use in object detection tasks. If set to false, then the Annotation will be validated for use in semantic segmentation tasks.

None implied_task_types Optional[List[String]]

The validated task types that are applicable to each Annotation. Doesn't need to be set by the user.

None Source code in valor/schemas/symbolic/collections.py
def __init__(\n    self,\n    *,\n    metadata: Optional[dict] = None,\n    labels: Optional[List[Label]] = None,\n    bounding_box: Optional[Box] = None,\n    polygon: Optional[Polygon] = None,\n    raster: Optional[Raster] = None,\n    embedding: Optional[Embedding] = None,\n    is_instance: Optional[bool] = None,\n    implied_task_types: Optional[List[String]] = None,\n):\n    \"\"\"\n    Constructs an annotation.\n\n    Parameters\n    ----------\n    metadata: Dict[str, Union[int, float, str, bool, datetime.datetime, datetime.date, datetime.time]]\n        A dictionary of metadata that describes the `Annotation`.\n    labels: List[Label]\n        A list of labels to use for the `Annotation`.\n    bounding_box: Box, optional\n        A bounding box annotation.\n    polygon: Polygon, optional\n        A polygon annotation.\n    raster: Raster, optional\n        A raster annotation.\n    embedding: List[float], optional\n        An embedding, described by a list of values with type float and a maximum length of 16,000.\n    is_instance: bool, optional\n        A boolean describing whether we should treat the Raster attached to an annotation as an instance segmentation or not. If set to true, then the Annotation will be validated for use in object detection tasks. If set to false, then the Annotation will be validated for use in semantic segmentation tasks.\n    implied_task_types: list[str], optional\n        The validated task types that are applicable to each Annotation. Doesn't need to bet set by the user.\n\n    \"\"\"\n    super().__init__(\n        metadata=metadata if metadata else dict(),\n        labels=labels if labels else list(),\n        bounding_box=bounding_box,\n        polygon=polygon,\n        raster=raster,\n        embedding=embedding,\n        is_instance=is_instance,\n        implied_task_types=implied_task_types,\n    )\n
"},{"location":"client_api/Annotation/#valor.Annotation.formatting","title":"valor.Annotation.formatting() staticmethod","text":"

Attribute format mapping.

Source code in valor/schemas/symbolic/collections.py
@staticmethod\ndef formatting() -> Dict[str, Any]:\n    \"\"\"Attribute format mapping.\"\"\"\n    return {\n        \"bounding_box\": Box.nullable,\n        \"polygon\": Polygon.nullable,\n        \"raster\": Raster.nullable,\n        \"embedding\": Embedding.nullable,\n        \"is_instance\": Bool.nullable,\n        \"implied_task_types\": SymbolicList,\n    }\n
"},{"location":"client_api/Client/","title":"Client","text":"

Valor client object for interacting with the api.

Parameters:

Name Type Description Default connection ClientConnection

Option to use an existing connection object.

None Source code in valor/coretypes.py
class Client:\n    \"\"\"\n    Valor client object for interacting with the api.\n\n    Parameters\n    ----------\n    connection : ClientConnection, optional\n        Option to use an existing connection object.\n    \"\"\"\n\n    def __init__(self, connection: Optional[ClientConnection] = None):\n        if not connection:\n            connection = get_connection()\n        self.conn = connection\n\n    @classmethod\n    def connect(\n        cls,\n        host: str,\n        access_token: Optional[str] = None,\n        reconnect: bool = False,\n    ) -> Client:\n        \"\"\"\n        Establishes a connection to the Valor API.\n\n        Parameters\n        ----------\n        host : str\n            The host to connect to. Should start with \"http://\" or \"https://\".\n        access_token : str\n            The access token for the host (if the host requires authentication).\n        \"\"\"\n        connect(host=host, access_token=access_token, reconnect=reconnect)\n        return cls(get_connection())\n\n    def get_labels(\n        self,\n        filter_by: Optional[FilterType] = None,\n    ) -> List[Label]:\n        \"\"\"\n        Gets all labels using an optional filter.\n\n        Parameters\n        ----------\n        filter_by : FilterType, optional\n            Optional constraints to filter by.\n\n        Returns\n        ------\n        List[valor.Label]\n            A list of labels.\n        \"\"\"\n        filter_ = _format_filter(filter_by)\n        filter_ = asdict(filter_)\n        return [Label(**label) for label in self.conn.get_labels(filter_)]\n\n    def get_labels_from_dataset(\n        self, dataset: Union[Dataset, str]\n    ) -> List[Label]:\n        \"\"\"\n        Get all labels associated with a dataset's ground truths.\n\n        Parameters\n        ----------\n        dataset : valor.Dataset\n            The dataset to search by.\n\n        Returns\n        ------\n        List[valor.Label]\n            A list of labels.\n        \"\"\"\n        dataset_name = (\n            dataset.name if isinstance(dataset, Dataset) else dataset\n        )\n        return [\n            Label(**label)\n            for label in self.conn.get_labels_from_dataset(dataset_name)  # type: ignore\n        ]\n\n    def get_labels_from_model(self, model: Union[Model, str]) -> List[Label]:\n        \"\"\"\n        Get all labels associated with a model's ground truths.\n\n        Parameters\n        ----------\n        model : valor.Model\n            The model to search by.\n\n        Returns\n        ------\n        List[valor.Label]\n            A list of labels.\n        \"\"\"\n        model_name = model.name if isinstance(model, Model) else model\n        return [\n            Label(**label)\n            for label in self.conn.get_labels_from_model(model_name)  # type: ignore\n        ]\n\n    def create_dataset(\n        self,\n        dataset: Union[Dataset, dict],\n    ) -> None:\n        \"\"\"\n        Creates a dataset.\n\n        Parameters\n        ----------\n        dataset : valor.Dataset\n            The dataset to create.\n        \"\"\"\n        if isinstance(dataset, Dataset):\n            dataset = dataset.encode_value()\n        self.conn.create_dataset(dataset)\n\n    def create_groundtruths(\n        self,\n        dataset: Dataset,\n        groundtruths: List[GroundTruth],\n        ignore_existing_datums: bool = False,\n    ):\n        \"\"\"\n        Creates ground truths.\n\n        Parameters\n        ----------\n\n        dataset : 
valor.Dataset\n            The dataset to create the ground truth for.\n        groundtruths : List[valor.GroundTruth]\n            The ground truths to create.\n        ignore_existing_datums : bool, default=False\n            If True, will ignore datums that already exist in the backend.\n            If False, will raise an error if any datums already exist.\n            Default is False.\n        \"\"\"\n        groundtruths_json = []\n        for groundtruth in groundtruths:\n            if not isinstance(groundtruth, GroundTruth):\n                raise TypeError(\n                    f\"Expected ground truth to be of type 'valor.GroundTruth' not '{type(groundtruth)}'.\"\n                )\n            if not isinstance(groundtruth.annotations._value, list):\n                raise TypeError\n            groundtruth_dict = groundtruth.encode_value()\n            groundtruth_dict[\"dataset_name\"] = dataset.name\n            groundtruths_json.append(groundtruth_dict)\n        self.conn.create_groundtruths(\n            groundtruths_json, ignore_existing_datums=ignore_existing_datums\n        )\n\n    def get_groundtruth(\n        self,\n        dataset: Union[Dataset, str],\n        datum: Union[Datum, str],\n    ) -> Union[GroundTruth, None]:\n        \"\"\"\n        Get a particular ground truth.\n\n        Parameters\n        ----------\n        dataset: Union[Dataset, str]\n            The dataset the datum belongs to.\n        datum: Union[Datum, str]\n            The desired datum.\n\n        Returns\n        ----------\n        Union[GroundTruth, None]\n            The matching ground truth or 'None' if it doesn't exist.\n        \"\"\"\n        dataset_name = (\n            dataset.name if isinstance(dataset, Dataset) else dataset\n        )\n        datum_uid = datum.uid if isinstance(datum, Datum) else datum\n        try:\n            resp = self.conn.get_groundtruth(\n                dataset_name=dataset_name, datum_uid=datum_uid  # type: ignore\n            )\n            resp.pop(\"dataset_name\")\n            return GroundTruth.decode_value(resp)\n        except ClientException as e:\n            if e.status_code == 404:\n                return None\n            raise e\n\n    def finalize_dataset(self, dataset: Union[Dataset, str]) -> None:\n        \"\"\"\n        Finalizes a dataset such that new ground truths cannot be added to it.\n\n        Parameters\n        ----------\n        dataset : str\n            The dataset to be finalized.\n        \"\"\"\n        dataset_name = (\n            dataset.name if isinstance(dataset, Dataset) else dataset\n        )\n        return self.conn.finalize_dataset(name=dataset_name)  # type: ignore\n\n    def get_dataset(\n        self,\n        name: str,\n    ) -> Union[Dataset, None]:\n        \"\"\"\n        Gets a dataset by name.\n\n        Parameters\n        ----------\n        name : str\n            The name of the dataset to fetch.\n\n        Returns\n        -------\n        Union[Dataset, None]\n            A Dataset with a matching name, or 'None' if one doesn't exist.\n        \"\"\"\n        dataset = Dataset.decode_value(\n            {\n                **self.conn.get_dataset(name),\n                \"connection\": self.conn,\n            }\n        )\n        return dataset\n\n    def get_datasets(\n        self,\n        filter_by: Optional[FilterType] = None,\n    ) -> List[Dataset]:\n        \"\"\"\n        Get all datasets, with an option to filter results according to some user-defined parameters.\n\n        
Parameters\n        ----------\n        filter_by : FilterType, optional\n            Optional constraints to filter by.\n\n        Returns\n        ------\n        List[valor.Dataset]\n            A list of datasets.\n        \"\"\"\n        filter_ = _format_filter(filter_by)\n        if isinstance(filter_, Filter):\n            filter_ = asdict(filter_)\n        dataset_list = []\n        for kwargs in self.conn.get_datasets(filter_):\n            dataset = Dataset.decode_value({**kwargs, \"connection\": self.conn})\n            dataset_list.append(dataset)\n        return dataset_list\n\n    def get_datums(\n        self,\n        filter_by: Optional[FilterType] = None,\n    ) -> List[Datum]:\n        \"\"\"\n        Get all datums using an optional filter.\n\n        Parameters\n        ----------\n        filter_by : FilterType, optional\n            Optional constraints to filter by.\n\n        Returns\n        -------\n        List[valor.Datum]\n            A list datums.\n        \"\"\"\n        filter_ = _format_filter(filter_by)\n        if isinstance(filter_, Filter):\n            filter_ = asdict(filter_)\n        return [\n            Datum.decode_value(datum)\n            for datum in self.conn.get_datums(filter_)\n        ]\n\n    def get_datum(\n        self,\n        dataset: Union[Dataset, str],\n        uid: str,\n    ) -> Union[Datum, None]:\n        \"\"\"\n        Get datum.\n        `GET` endpoint.\n        Parameters\n        ----------\n        dataset : valor.Dataset\n            The dataset the datum belongs to.\n        uid : str\n            The UID of the datum.\n        Returns\n        -------\n        valor.Datum\n            The requested datum or 'None' if it doesn't exist.\n        \"\"\"\n        dataset_name = (\n            dataset.name if isinstance(dataset, Dataset) else dataset\n        )\n        resp = self.conn.get_datum(dataset_name=dataset_name, uid=uid)  # type: ignore\n        return Datum.decode_value(resp)\n\n    def get_dataset_status(\n        self,\n        name: str,\n    ) -> Union[TableStatus, None]:\n        \"\"\"\n        Get the state of a given dataset.\n\n        Parameters\n        ----------\n        name : str\n            The name of the dataset we want to fetch the state of.\n\n        Returns\n        ------\n        TableStatus | None\n            The state of the dataset, or 'None' if the dataset does not exist.\n        \"\"\"\n        try:\n            return self.conn.get_dataset_status(name)\n        except ClientException as e:\n            if e.status_code == 404:\n                return None\n            raise e\n\n    def get_dataset_summary(self, name: str) -> DatasetSummary:\n        \"\"\"\n        Gets the summary of a dataset.\n\n        Parameters\n        ----------\n        name : str\n            The name of the dataset to create a summary for.\n\n        Returns\n        -------\n        DatasetSummary\n            A dataclass containing the dataset summary.\n        \"\"\"\n        return DatasetSummary(**self.conn.get_dataset_summary(name))\n\n    def delete_dataset(self, name: str, timeout: int = 0) -> None:\n        \"\"\"\n        Deletes a dataset.\n\n        Parameters\n        ----------\n        name : str\n            The name of the dataset to be deleted.\n        timeout : int\n            The number of seconds to wait in order to confirm that the dataset was deleted.\n        \"\"\"\n        self.conn.delete_dataset(name)\n        if timeout:\n            for _ in range(timeout):\n        
        try:\n                    self.get_dataset(name)\n                except DatasetDoesNotExistError:\n                    break\n                time.sleep(1)\n            else:\n                raise TimeoutError(\n                    \"Dataset wasn't deleted within timeout interval\"\n                )\n\n    def create_model(\n        self,\n        model: Union[Model, dict],\n    ):\n        \"\"\"\n        Creates a model.\n\n        Parameters\n        ----------\n        model : valor.Model\n            The model to create.\n        \"\"\"\n        if isinstance(model, Model):\n            model = model.encode_value()\n        self.conn.create_model(model)\n\n    def create_predictions(\n        self,\n        dataset: Dataset,\n        model: Model,\n        predictions: List[Prediction],\n    ) -> None:\n        \"\"\"\n        Creates predictions.\n\n        Parameters\n        ----------\n        dataset : valor.Dataset\n            The dataset that is being operated over.\n        model : valor.Model\n            The model making the prediction.\n        predictions : List[valor.Prediction]\n            The predictions to create.\n        \"\"\"\n        predictions_json = []\n        for prediction in predictions:\n            if not isinstance(prediction, Prediction):\n                raise TypeError(\n                    f\"Expected prediction to be of type 'valor.Prediction' not '{type(prediction)}'.\"\n                )\n            if not isinstance(prediction.annotations._value, list):\n                raise TypeError\n            prediction_dict = prediction.encode_value()\n            prediction_dict[\"dataset_name\"] = dataset.name\n            prediction_dict[\"model_name\"] = model.name\n            predictions_json.append(prediction_dict)\n        self.conn.create_predictions(predictions_json)\n\n    def get_prediction(\n        self,\n        dataset: Union[Dataset, str],\n        model: Union[Model, str],\n        datum: Union[Datum, str],\n    ) -> Union[Prediction, None]:\n        \"\"\"\n        Get a particular prediction.\n\n        Parameters\n        ----------\n        dataset: Union[Dataset, str]\n            The dataset the datum belongs to.\n        model: Union[Model, str]\n            The model that made the prediction.\n        datum: Union[Datum, str]\n            The desired datum.\n\n        Returns\n        ----------\n        Union[Prediction, None]\n            The matching prediction or 'None' if it doesn't exist.\n        \"\"\"\n        dataset_name = (\n            dataset.name if isinstance(dataset, Dataset) else dataset\n        )\n        model_name = model.name if isinstance(model, Model) else model\n        datum_uid = datum.uid if isinstance(datum, Datum) else datum\n\n        resp = self.conn.get_prediction(\n            dataset_name=dataset_name,  # type: ignore\n            model_name=model_name,  # type: ignore\n            datum_uid=datum_uid,  # type: ignore\n        )\n        resp.pop(\"dataset_name\")\n        resp.pop(\"model_name\")\n        return Prediction.decode_value(resp)\n\n    def finalize_inferences(\n        self, dataset: Union[Dataset, str], model: Union[Model, str]\n    ) -> None:\n        \"\"\"\n        Finalizes a model-dataset pairing such that new predictions cannot be added to it.\n        \"\"\"\n        dataset_name = (\n            dataset.name if isinstance(dataset, Dataset) else dataset\n        )\n        model_name = model.name if isinstance(model, Model) else model\n        return 
self.conn.finalize_inferences(\n            dataset_name=dataset_name,  # type: ignore\n            model_name=model_name,  # type: ignore\n        )\n\n    def get_model(\n        self,\n        name: str,\n    ) -> Union[Model, None]:\n        \"\"\"\n        Gets a model by name.\n\n        Parameters\n        ----------\n        name : str\n            The name of the model to fetch.\n\n        Returns\n        -------\n        Union[valor.Model, None]\n            A Model with matching name or 'None' if one doesn't exist.\n        \"\"\"\n        return Model.decode_value(\n            {\n                **self.conn.get_model(name),\n                \"connection\": self.conn,\n            }\n        )\n\n    def get_models(\n        self,\n        filter_by: Optional[FilterType] = None,\n    ) -> List[Model]:\n        \"\"\"\n        Get all models using an optional filter.\n\n        Parameters\n        ----------\n        filter_by : FilterType, optional\n            Optional constraints to filter by.\n\n        Returns\n        ------\n        List[valor.Model]\n            A list of models.\n        \"\"\"\n        filter_ = _format_filter(filter_by)\n        if isinstance(filter_, Filter):\n            filter_ = asdict(filter_)\n        model_list = []\n        for kwargs in self.conn.get_models(filter_):\n            model = Model.decode_value({**kwargs, \"connection\": self.conn})\n            model_list.append(model)\n        return model_list\n\n    def get_model_status(\n        self,\n        dataset_name: str,\n        model_name: str,\n    ) -> Optional[TableStatus]:\n        \"\"\"\n        Get the state of a given model over a dataset.\n\n        Parameters\n        ----------\n        dataset_name : str\n            The name of the dataset that the model is operating over.\n        model_name : str\n            The name of the model we want to fetch the state of.\n\n        Returns\n        ------\n        Union[TableStatus, None]\n            The state of the model or 'None' if the model doesn't exist.\n        \"\"\"\n        try:\n            return self.conn.get_model_status(dataset_name, model_name)\n        except ClientException as e:\n            if e.status_code == 404:\n                return None\n            raise e\n\n    def get_model_eval_requests(\n        self, model: Union[Model, str]\n    ) -> List[Evaluation]:\n        \"\"\"\n        Get all evaluations that have been created for a model.\n\n        This does not return evaluation results.\n\n        `GET` endpoint.\n\n        Parameters\n        ----------\n        model : str\n            The model to search by.\n\n        Returns\n        -------\n        List[Evaluation]\n            A list of evaluations.\n        \"\"\"\n        model_name = model.name if isinstance(model, Model) else model\n        return [\n            Evaluation(**evaluation, connection=self.conn)\n            for evaluation in self.conn.get_model_eval_requests(model_name)  # type: ignore\n        ]\n\n    def delete_model(self, name: str, timeout: int = 0) -> None:\n        \"\"\"\n        Deletes a model.\n\n        Parameters\n        ----------\n        name : str\n            The name of the model to be deleted.\n        timeout : int\n            The number of seconds to wait in order to confirm that the model was deleted.\n        \"\"\"\n        self.conn.delete_model(name)\n        if timeout:\n            for _ in range(timeout):\n                try:\n                    self.get_model(name)\n                
except ModelDoesNotExistError:\n                    break\n                time.sleep(1)\n            else:\n                raise TimeoutError(\n                    \"Model wasn't deleted within timeout interval\"\n                )\n\n    def get_evaluations(\n        self,\n        *,\n        evaluation_ids: Optional[List[int]] = None,\n        models: Union[List[Model], List[str], None] = None,\n        datasets: Union[List[Dataset], List[str], None] = None,\n        metrics_to_sort_by: Optional[\n            Dict[str, Union[Dict[str, str], str]]\n        ] = None,\n    ) -> List[Evaluation]:\n        \"\"\"\n        Returns all evaluations associated with user-supplied dataset and/or model names.\n\n        Parameters\n        ----------\n        evaluation_ids : List[int], optional.\n            A list of job IDs to return metrics for.\n        models : Union[List[valor.Model], List[str]], optional\n            A list of model names that we want to return metrics for.\n        datasets : Union[List[valor.Dataset], List[str]], optional\n            A list of dataset names that we want to return metrics for.\n        metrics_to_sort_by: dict[str, str | dict[str, str]], optional\n            An optional dict of metric types to sort the evaluations by.\n\n        Returns\n        -------\n        List[valor.Evaluation]\n            A list of evaluations.\n        \"\"\"\n        if isinstance(datasets, list):\n            datasets = [  # type: ignore\n                element.name if isinstance(element, Dataset) else element\n                for element in datasets\n            ]\n        if isinstance(models, list):\n            models = [  # type: ignore\n                element.name if isinstance(element, Model) else element\n                for element in models\n            ]\n        return [\n            Evaluation(connection=self.conn, **evaluation)\n            for evaluation in self.conn.get_evaluations(\n                evaluation_ids=evaluation_ids,\n                models=models,  # type: ignore\n                datasets=datasets,  # type: ignore\n                metrics_to_sort_by=metrics_to_sort_by,\n            )\n        ]\n\n    def evaluate(\n        self, request: EvaluationRequest, allow_retries: bool = False\n    ) -> List[Evaluation]:\n        \"\"\"\n        Creates as many evaluations as necessary to fulfill the request.\n\n        Parameters\n        ----------\n        request : schemas.EvaluationRequest\n            The requested evaluation parameters.\n        allow_retries : bool, default = False\n            Option to retry previously failed evaluations.\n\n        Returns\n        -------\n        List[Evaluation]\n            A list of evaluations that meet the parameters.\n        \"\"\"\n        return [\n            Evaluation(**evaluation)\n            for evaluation in self.conn.evaluate(\n                request, allow_retries=allow_retries\n            )\n        ]\n
"},{"location":"client_api/Client/#valor.Client-functions","title":"Functions","text":""},{"location":"client_api/Client/#valor.Client.connect","title":"valor.Client.connect(host, access_token=None, reconnect=False) classmethod","text":"

Establishes a connection to the Valor API.

Parameters:

host : str
    The host to connect to. Should start with \"http://\" or \"https://\". (required)
access_token : str, optional
    The access token for the host (if the host requires authentication). (default: None)

Source code in valor/coretypes.py
@classmethod\ndef connect(\n    cls,\n    host: str,\n    access_token: Optional[str] = None,\n    reconnect: bool = False,\n) -> Client:\n    \"\"\"\n    Establishes a connection to the Valor API.\n\n    Parameters\n    ----------\n    host : str\n        The host to connect to. Should start with \"http://\" or \"https://\".\n    access_token : str\n        The access token for the host (if the host requires authentication).\n    \"\"\"\n    connect(host=host, access_token=access_token, reconnect=reconnect)\n    return cls(get_connection())\n
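A minimal usage sketch (the host URL is a placeholder for your own deployment, and the import assumes the standard `valor` package layout):

>>> from valor import Client
>>> client = Client.connect(\"http://localhost:8000\")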
"},{"location":"client_api/Client/#valor.Client.create_dataset","title":"valor.Client.create_dataset(dataset)","text":"

Creates a dataset.

Parameters:

dataset : Dataset
    The dataset to create. (required)

Source code in valor/coretypes.py
def create_dataset(\n    self,\n    dataset: Union[Dataset, dict],\n) -> None:\n    \"\"\"\n    Creates a dataset.\n\n    Parameters\n    ----------\n    dataset : valor.Dataset\n        The dataset to create.\n    \"\"\"\n    if isinstance(dataset, Dataset):\n        dataset = dataset.encode_value()\n    self.conn.create_dataset(dataset)\n
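For example, assuming a connected `client` as above:

>>> from valor import Dataset
>>> client.create_dataset(Dataset(name=\"dataset1\"))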
"},{"location":"client_api/Client/#valor.Client.create_groundtruths","title":"valor.Client.create_groundtruths(dataset, groundtruths, ignore_existing_datums=False)","text":"

Creates ground truths.

Parameters:

dataset : Dataset
    The dataset to create the ground truth for. (required)
groundtruths : List[GroundTruth]
    The ground truths to create. (required)
ignore_existing_datums : bool, optional
    If True, will ignore datums that already exist in the backend. If False, will raise an error if any datums already exist. Default is False.

Source code in valor/coretypes.py
def create_groundtruths(\n    self,\n    dataset: Dataset,\n    groundtruths: List[GroundTruth],\n    ignore_existing_datums: bool = False,\n):\n    \"\"\"\n    Creates ground truths.\n\n    Parameters\n    ----------\n\n    dataset : valor.Dataset\n        The dataset to create the ground truth for.\n    groundtruths : List[valor.GroundTruth]\n        The ground truths to create.\n    ignore_existing_datums : bool, default=False\n        If True, will ignore datums that already exist in the backend.\n        If False, will raise an error if any datums already exist.\n        Default is False.\n    \"\"\"\n    groundtruths_json = []\n    for groundtruth in groundtruths:\n        if not isinstance(groundtruth, GroundTruth):\n            raise TypeError(\n                f\"Expected ground truth to be of type 'valor.GroundTruth' not '{type(groundtruth)}'.\"\n            )\n        if not isinstance(groundtruth.annotations._value, list):\n            raise TypeError\n        groundtruth_dict = groundtruth.encode_value()\n        groundtruth_dict[\"dataset_name\"] = dataset.name\n        groundtruths_json.append(groundtruth_dict)\n    self.conn.create_groundtruths(\n        groundtruths_json, ignore_existing_datums=ignore_existing_datums\n    )\n
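A usage sketch, assuming `dataset` is an existing valor.Dataset and `groundtruths` is a pre-built list of valor.GroundTruth objects:

>>> client.create_groundtruths(dataset, groundtruths, ignore_existing_datums=True)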
"},{"location":"client_api/Client/#valor.Client.create_model","title":"valor.Client.create_model(model)","text":"

Creates a model.

Parameters:

model : Model
    The model to create. (required)

Source code in valor/coretypes.py
def create_model(\n    self,\n    model: Union[Model, dict],\n):\n    \"\"\"\n    Creates a model.\n\n    Parameters\n    ----------\n    model : valor.Model\n        The model to create.\n    \"\"\"\n    if isinstance(model, Model):\n        model = model.encode_value()\n    self.conn.create_model(model)\n
"},{"location":"client_api/Client/#valor.Client.create_predictions","title":"valor.Client.create_predictions(dataset, model, predictions)","text":"

Creates predictions.

Parameters:

dataset : Dataset
    The dataset that is being operated over. (required)
model : Model
    The model making the prediction. (required)
predictions : List[Prediction]
    The predictions to create. (required)

Source code in valor/coretypes.py
def create_predictions(\n    self,\n    dataset: Dataset,\n    model: Model,\n    predictions: List[Prediction],\n) -> None:\n    \"\"\"\n    Creates predictions.\n\n    Parameters\n    ----------\n    dataset : valor.Dataset\n        The dataset that is being operated over.\n    model : valor.Model\n        The model making the prediction.\n    predictions : List[valor.Prediction]\n        The predictions to create.\n    \"\"\"\n    predictions_json = []\n    for prediction in predictions:\n        if not isinstance(prediction, Prediction):\n            raise TypeError(\n                f\"Expected prediction to be of type 'valor.Prediction' not '{type(prediction)}'.\"\n            )\n        if not isinstance(prediction.annotations._value, list):\n            raise TypeError\n        prediction_dict = prediction.encode_value()\n        prediction_dict[\"dataset_name\"] = dataset.name\n        prediction_dict[\"model_name\"] = model.name\n        predictions_json.append(prediction_dict)\n    self.conn.create_predictions(predictions_json)\n
"},{"location":"client_api/Client/#valor.Client.delete_dataset","title":"valor.Client.delete_dataset(name, timeout=0)","text":"

Deletes a dataset.

Parameters:

name : str
    The name of the dataset to be deleted. (required)
timeout : int, optional
    The number of seconds to wait in order to confirm that the dataset was deleted. (default: 0)

Source code in valor/coretypes.py
def delete_dataset(self, name: str, timeout: int = 0) -> None:\n    \"\"\"\n    Deletes a dataset.\n\n    Parameters\n    ----------\n    name : str\n        The name of the dataset to be deleted.\n    timeout : int\n        The number of seconds to wait in order to confirm that the dataset was deleted.\n    \"\"\"\n    self.conn.delete_dataset(name)\n    if timeout:\n        for _ in range(timeout):\n            try:\n                self.get_dataset(name)\n            except DatasetDoesNotExistError:\n                break\n            time.sleep(1)\n        else:\n            raise TimeoutError(\n                \"Dataset wasn't deleted within timeout interval\"\n            )\n
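For example, to wait up to 30 seconds for confirmation that the dataset was deleted (the dataset name is a placeholder):

>>> client.delete_dataset(\"dataset1\", timeout=30)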
"},{"location":"client_api/Client/#valor.Client.delete_model","title":"valor.Client.delete_model(name, timeout=0)","text":"

Deletes a model.

Parameters:

name : str
    The name of the model to be deleted. (required)
timeout : int, optional
    The number of seconds to wait in order to confirm that the model was deleted. (default: 0)

Source code in valor/coretypes.py
def delete_model(self, name: str, timeout: int = 0) -> None:\n    \"\"\"\n    Deletes a model.\n\n    Parameters\n    ----------\n    name : str\n        The name of the model to be deleted.\n    timeout : int\n        The number of seconds to wait in order to confirm that the model was deleted.\n    \"\"\"\n    self.conn.delete_model(name)\n    if timeout:\n        for _ in range(timeout):\n            try:\n                self.get_model(name)\n            except ModelDoesNotExistError:\n                break\n            time.sleep(1)\n        else:\n            raise TimeoutError(\n                \"Model wasn't deleted within timeout interval\"\n            )\n
"},{"location":"client_api/Client/#valor.Client.evaluate","title":"valor.Client.evaluate(request, allow_retries=False)","text":"

Creates as many evaluations as necessary to fulfill the request.

Parameters:

request : EvaluationRequest
    The requested evaluation parameters. (required)
allow_retries : bool, optional
    Option to retry previously failed evaluations. (default: False)

Returns:

List[Evaluation]
    A list of evaluations that meet the parameters.

Source code in valor/coretypes.py
def evaluate(\n    self, request: EvaluationRequest, allow_retries: bool = False\n) -> List[Evaluation]:\n    \"\"\"\n    Creates as many evaluations as necessary to fulfill the request.\n\n    Parameters\n    ----------\n    request : schemas.EvaluationRequest\n        The requested evaluation parameters.\n    allow_retries : bool, default = False\n        Option to retry previously failed evaluations.\n\n    Returns\n    -------\n    List[Evaluation]\n        A list of evaluations that meet the parameters.\n    \"\"\"\n    return [\n        Evaluation(**evaluation)\n        for evaluation in self.conn.evaluate(\n            request, allow_retries=allow_retries\n        )\n    ]\n
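A usage sketch, assuming `request` is an EvaluationRequest that has already been constructed:

>>> evaluations = client.evaluate(request, allow_retries=True)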
"},{"location":"client_api/Client/#valor.Client.finalize_dataset","title":"valor.Client.finalize_dataset(dataset)","text":"

Finalizes a dataset such that new ground truths cannot be added to it.

Parameters:

dataset : str
    The dataset to be finalized. (required)

Source code in valor/coretypes.py
def finalize_dataset(self, dataset: Union[Dataset, str]) -> None:\n    \"\"\"\n    Finalizes a dataset such that new ground truths cannot be added to it.\n\n    Parameters\n    ----------\n    dataset : str\n        The dataset to be finalized.\n    \"\"\"\n    dataset_name = (\n        dataset.name if isinstance(dataset, Dataset) else dataset\n    )\n    return self.conn.finalize_dataset(name=dataset_name)  # type: ignore\n
"},{"location":"client_api/Client/#valor.Client.finalize_inferences","title":"valor.Client.finalize_inferences(dataset, model)","text":"

Finalizes a model-dataset pairing such that new predictions cannot be added to it.

Source code in valor/coretypes.py
def finalize_inferences(\n    self, dataset: Union[Dataset, str], model: Union[Model, str]\n) -> None:\n    \"\"\"\n    Finalizes a model-dataset pairing such that new predictions cannot be added to it.\n    \"\"\"\n    dataset_name = (\n        dataset.name if isinstance(dataset, Dataset) else dataset\n    )\n    model_name = model.name if isinstance(model, Model) else model\n    return self.conn.finalize_inferences(\n        dataset_name=dataset_name,  # type: ignore\n        model_name=model_name,  # type: ignore\n    )\n
"},{"location":"client_api/Client/#valor.Client.get_dataset","title":"valor.Client.get_dataset(name)","text":"

Gets a dataset by name.

Parameters:

name : str
    The name of the dataset to fetch. (required)

Returns:

Union[Dataset, None]
    A Dataset with a matching name, or 'None' if one doesn't exist.

Source code in valor/coretypes.py
def get_dataset(\n    self,\n    name: str,\n) -> Union[Dataset, None]:\n    \"\"\"\n    Gets a dataset by name.\n\n    Parameters\n    ----------\n    name : str\n        The name of the dataset to fetch.\n\n    Returns\n    -------\n    Union[Dataset, None]\n        A Dataset with a matching name, or 'None' if one doesn't exist.\n    \"\"\"\n    dataset = Dataset.decode_value(\n        {\n            **self.conn.get_dataset(name),\n            \"connection\": self.conn,\n        }\n    )\n    return dataset\n
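For example, handling the case where the dataset does not exist (the name is a placeholder):

>>> dataset = client.get_dataset(\"dataset1\")
>>> if dataset is None:
...     print(\"dataset1 does not exist\")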
"},{"location":"client_api/Client/#valor.Client.get_dataset_status","title":"valor.Client.get_dataset_status(name)","text":"

Get the state of a given dataset.

Parameters:

name : str
    The name of the dataset we want to fetch the state of. (required)

Returns:

TableStatus | None
    The state of the dataset, or 'None' if the dataset does not exist.

Source code in valor/coretypes.py
def get_dataset_status(\n    self,\n    name: str,\n) -> Union[TableStatus, None]:\n    \"\"\"\n    Get the state of a given dataset.\n\n    Parameters\n    ----------\n    name : str\n        The name of the dataset we want to fetch the state of.\n\n    Returns\n    ------\n    TableStatus | None\n        The state of the dataset, or 'None' if the dataset does not exist.\n    \"\"\"\n    try:\n        return self.conn.get_dataset_status(name)\n    except ClientException as e:\n        if e.status_code == 404:\n            return None\n        raise e\n
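For example:

>>> status = client.get_dataset_status(\"dataset1\")  # returns None if \"dataset1\" does not exist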
"},{"location":"client_api/Client/#valor.Client.get_dataset_summary","title":"valor.Client.get_dataset_summary(name)","text":"

Gets the summary of a dataset.

Parameters:

name : str
    The name of the dataset to create a summary for. (required)

Returns:

DatasetSummary
    A dataclass containing the dataset summary.

Source code in valor/coretypes.py
def get_dataset_summary(self, name: str) -> DatasetSummary:\n    \"\"\"\n    Gets the summary of a dataset.\n\n    Parameters\n    ----------\n    name : str\n        The name of the dataset to create a summary for.\n\n    Returns\n    -------\n    DatasetSummary\n        A dataclass containing the dataset summary.\n    \"\"\"\n    return DatasetSummary(**self.conn.get_dataset_summary(name))\n
"},{"location":"client_api/Client/#valor.Client.get_datasets","title":"valor.Client.get_datasets(filter_by=None)","text":"

Get all datasets, with an option to filter results according to some user-defined parameters.

Parameters:

filter_by : FilterType, optional
    Optional constraints to filter by. (default: None)

Returns:

List[Dataset]
    A list of datasets.

Source code in valor/coretypes.py
def get_datasets(\n    self,\n    filter_by: Optional[FilterType] = None,\n) -> List[Dataset]:\n    \"\"\"\n    Get all datasets, with an option to filter results according to some user-defined parameters.\n\n    Parameters\n    ----------\n    filter_by : FilterType, optional\n        Optional constraints to filter by.\n\n    Returns\n    ------\n    List[valor.Dataset]\n        A list of datasets.\n    \"\"\"\n    filter_ = _format_filter(filter_by)\n    if isinstance(filter_, Filter):\n        filter_ = asdict(filter_)\n    dataset_list = []\n    for kwargs in self.conn.get_datasets(filter_):\n        dataset = Dataset.decode_value({**kwargs, \"connection\": self.conn})\n        dataset_list.append(dataset)\n    return dataset_list\n
"},{"location":"client_api/Client/#valor.Client.get_datum","title":"valor.Client.get_datum(dataset, uid)","text":"

Get datum. GET endpoint.

Parameters:

dataset : Dataset
    The dataset the datum belongs to. (required)
uid : str
    The UID of the datum. (required)

Returns:

Datum
    The requested datum or 'None' if it doesn't exist.

Source code in valor/coretypes.py
def get_datum(\n    self,\n    dataset: Union[Dataset, str],\n    uid: str,\n) -> Union[Datum, None]:\n    \"\"\"\n    Get datum.\n\n    `GET` endpoint.\n\n    Parameters\n    ----------\n    dataset : valor.Dataset\n        The dataset the datum belongs to.\n    uid : str\n        The UID of the datum.\n\n    Returns\n    -------\n    valor.Datum\n        The requested datum or 'None' if it doesn't exist.\n    \"\"\"\n    dataset_name = (\n        dataset.name if isinstance(dataset, Dataset) else dataset\n    )\n    resp = self.conn.get_datum(dataset_name=dataset_name, uid=uid)  # type: ignore\n    return Datum.decode_value(resp)\n
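For example (dataset and datum names are placeholders):

>>> datum = client.get_datum(\"dataset1\", uid=\"uid1\")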
"},{"location":"client_api/Client/#valor.Client.get_datums","title":"valor.Client.get_datums(filter_by=None)","text":"

Get all datums using an optional filter.

Parameters:

filter_by : FilterType, optional
    Optional constraints to filter by. (default: None)

Returns:

List[Datum]
    A list of datums.

Source code in valor/coretypes.py
def get_datums(\n    self,\n    filter_by: Optional[FilterType] = None,\n) -> List[Datum]:\n    \"\"\"\n    Get all datums using an optional filter.\n\n    Parameters\n    ----------\n    filter_by : FilterType, optional\n        Optional constraints to filter by.\n\n    Returns\n    -------\n    List[valor.Datum]\n        A list of datums.\n    \"\"\"\n    filter_ = _format_filter(filter_by)\n    if isinstance(filter_, Filter):\n        filter_ = asdict(filter_)\n    return [\n        Datum.decode_value(datum)\n        for datum in self.conn.get_datums(filter_)\n    ]\n
"},{"location":"client_api/Client/#valor.Client.get_evaluations","title":"valor.Client.get_evaluations(*, evaluation_ids=None, models=None, datasets=None, metrics_to_sort_by=None)","text":"

Returns all evaluations associated with user-supplied dataset and/or model names.

Parameters:

evaluation_ids : List[int], optional
    A list of job IDs to return metrics for. (default: None)
models : Union[List[Model], List[str]], optional
    A list of model names that we want to return metrics for. (default: None)
datasets : Union[List[Dataset], List[str]], optional
    A list of dataset names that we want to return metrics for. (default: None)
metrics_to_sort_by : Dict[str, Union[Dict[str, str], str]], optional
    An optional dict of metric types to sort the evaluations by. (default: None)

Returns:

List[Evaluation]
    A list of evaluations.

Source code in valor/coretypes.py
def get_evaluations(\n    self,\n    *,\n    evaluation_ids: Optional[List[int]] = None,\n    models: Union[List[Model], List[str], None] = None,\n    datasets: Union[List[Dataset], List[str], None] = None,\n    metrics_to_sort_by: Optional[\n        Dict[str, Union[Dict[str, str], str]]\n    ] = None,\n) -> List[Evaluation]:\n    \"\"\"\n    Returns all evaluations associated with user-supplied dataset and/or model names.\n\n    Parameters\n    ----------\n    evaluation_ids : List[int], optional.\n        A list of job IDs to return metrics for.\n    models : Union[List[valor.Model], List[str]], optional\n        A list of model names that we want to return metrics for.\n    datasets : Union[List[valor.Dataset], List[str]], optional\n        A list of dataset names that we want to return metrics for.\n    metrics_to_sort_by: dict[str, str | dict[str, str]], optional\n        An optional dict of metric types to sort the evaluations by.\n\n    Returns\n    -------\n    List[valor.Evaluation]\n        A list of evaluations.\n    \"\"\"\n    if isinstance(datasets, list):\n        datasets = [  # type: ignore\n            element.name if isinstance(element, Dataset) else element\n            for element in datasets\n        ]\n    if isinstance(models, list):\n        models = [  # type: ignore\n            element.name if isinstance(element, Model) else element\n            for element in models\n        ]\n    return [\n        Evaluation(connection=self.conn, **evaluation)\n        for evaluation in self.conn.get_evaluations(\n            evaluation_ids=evaluation_ids,\n            models=models,  # type: ignore\n            datasets=datasets,  # type: ignore\n            metrics_to_sort_by=metrics_to_sort_by,\n        )\n    ]\n
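For example, fetching every evaluation of \"model1\" on \"dataset1\" (both names are placeholders):

>>> evaluations = client.get_evaluations(models=[\"model1\"], datasets=[\"dataset1\"])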
"},{"location":"client_api/Client/#valor.Client.get_groundtruth","title":"valor.Client.get_groundtruth(dataset, datum)","text":"

Get a particular ground truth.

Parameters:

dataset : Union[Dataset, str]
    The dataset the datum belongs to. (required)
datum : Union[Datum, str]
    The desired datum. (required)

Returns:

Union[GroundTruth, None]
    The matching ground truth or 'None' if it doesn't exist.

Source code in valor/coretypes.py
def get_groundtruth(\n    self,\n    dataset: Union[Dataset, str],\n    datum: Union[Datum, str],\n) -> Union[GroundTruth, None]:\n    \"\"\"\n    Get a particular ground truth.\n\n    Parameters\n    ----------\n    dataset: Union[Dataset, str]\n        The dataset the datum belongs to.\n    datum: Union[Datum, str]\n        The desired datum.\n\n    Returns\n    ----------\n    Union[GroundTruth, None]\n        The matching ground truth or 'None' if it doesn't exist.\n    \"\"\"\n    dataset_name = (\n        dataset.name if isinstance(dataset, Dataset) else dataset\n    )\n    datum_uid = datum.uid if isinstance(datum, Datum) else datum\n    try:\n        resp = self.conn.get_groundtruth(\n            dataset_name=dataset_name, datum_uid=datum_uid  # type: ignore\n        )\n        resp.pop(\"dataset_name\")\n        return GroundTruth.decode_value(resp)\n    except ClientException as e:\n        if e.status_code == 404:\n            return None\n        raise e\n
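For example (names are placeholders):

>>> groundtruth = client.get_groundtruth(\"dataset1\", \"uid1\")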
"},{"location":"client_api/Client/#valor.Client.get_labels","title":"valor.Client.get_labels(filter_by=None)","text":"

Gets all labels using an optional filter.

Parameters:

filter_by : FilterType, optional
    Optional constraints to filter by. (default: None)

Returns:

List[Label]
    A list of labels.

Source code in valor/coretypes.py
def get_labels(\n    self,\n    filter_by: Optional[FilterType] = None,\n) -> List[Label]:\n    \"\"\"\n    Gets all labels using an optional filter.\n\n    Parameters\n    ----------\n    filter_by : FilterType, optional\n        Optional constraints to filter by.\n\n    Returns\n    -------\n    List[valor.Label]\n        A list of labels.\n    \"\"\"\n    filter_ = _format_filter(filter_by)\n    if isinstance(filter_, Filter):\n        filter_ = asdict(filter_)\n    return [Label(**label) for label in self.conn.get_labels(filter_)]\n
"},{"location":"client_api/Client/#valor.Client.get_labels_from_dataset","title":"valor.Client.get_labels_from_dataset(dataset)","text":"

Get all labels associated with a dataset's ground truths.

Parameters:

dataset : Dataset
    The dataset to search by. (required)

Returns:

List[Label]
    A list of labels.

Source code in valor/coretypes.py
def get_labels_from_dataset(\n    self, dataset: Union[Dataset, str]\n) -> List[Label]:\n    \"\"\"\n    Get all labels associated with a dataset's ground truths.\n\n    Parameters\n    ----------\n    dataset : valor.Dataset\n        The dataset to search by.\n\n    Returns\n    -------\n    List[valor.Label]\n        A list of labels.\n    \"\"\"\n    dataset_name = (\n        dataset.name if isinstance(dataset, Dataset) else dataset\n    )\n    return [\n        Label(**label)\n        for label in self.conn.get_labels_from_dataset(dataset_name)  # type: ignore\n    ]\n
"},{"location":"client_api/Client/#valor.Client.get_labels_from_model","title":"valor.Client.get_labels_from_model(model)","text":"

Get all labels associated with a model's predictions.

Parameters:

model : Model
    The model to search by. (required)

Returns:

List[Label]
    A list of labels.

Source code in valor/coretypes.py
def get_labels_from_model(self, model: Union[Model, str]) -> List[Label]:\n    \"\"\"\n    Get all labels associated with a model's predictions.\n\n    Parameters\n    ----------\n    model : valor.Model\n        The model to search by.\n\n    Returns\n    -------\n    List[valor.Label]\n        A list of labels.\n    \"\"\"\n    model_name = model.name if isinstance(model, Model) else model\n    return [\n        Label(**label)\n        for label in self.conn.get_labels_from_model(model_name)  # type: ignore\n    ]\n
"},{"location":"client_api/Client/#valor.Client.get_model","title":"valor.Client.get_model(name)","text":"

Gets a model by name.

Parameters:

name : str
    The name of the model to fetch. (required)

Returns:

Union[Model, None]
    A Model with matching name or 'None' if one doesn't exist.

Source code in valor/coretypes.py
def get_model(\n    self,\n    name: str,\n) -> Union[Model, None]:\n    \"\"\"\n    Gets a model by name.\n\n    Parameters\n    ----------\n    name : str\n        The name of the model to fetch.\n\n    Returns\n    -------\n    Union[valor.Model, None]\n        A Model with matching name or 'None' if one doesn't exist.\n    \"\"\"\n    return Model.decode_value(\n        {\n            **self.conn.get_model(name),\n            \"connection\": self.conn,\n        }\n    )\n
"},{"location":"client_api/Client/#valor.Client.get_model_eval_requests","title":"valor.Client.get_model_eval_requests(model)","text":"

Get all evaluations that have been created for a model.

This does not return evaluation results.

GET endpoint.

Parameters:

model : str
    The model to search by. (required)

Returns:

List[Evaluation]
    A list of evaluations.

Source code in valor/coretypes.py
def get_model_eval_requests(\n    self, model: Union[Model, str]\n) -> List[Evaluation]:\n    \"\"\"\n    Get all evaluations that have been created for a model.\n\n    This does not return evaluation results.\n\n    `GET` endpoint.\n\n    Parameters\n    ----------\n    model : str\n        The model to search by.\n\n    Returns\n    -------\n    List[Evaluation]\n        A list of evaluations.\n    \"\"\"\n    model_name = model.name if isinstance(model, Model) else model\n    return [\n        Evaluation(**evaluation, connection=self.conn)\n        for evaluation in self.conn.get_model_eval_requests(model_name)  # type: ignore\n    ]\n
"},{"location":"client_api/Client/#valor.Client.get_model_status","title":"valor.Client.get_model_status(dataset_name, model_name)","text":"

Get the state of a given model over a dataset.

Parameters:

dataset_name : str
    The name of the dataset that the model is operating over. (required)
model_name : str
    The name of the model we want to fetch the state of. (required)

Returns:

Union[TableStatus, None]
    The state of the model or 'None' if the model doesn't exist.

Source code in valor/coretypes.py
def get_model_status(\n    self,\n    dataset_name: str,\n    model_name: str,\n) -> Optional[TableStatus]:\n    \"\"\"\n    Get the state of a given model over a dataset.\n\n    Parameters\n    ----------\n    dataset_name : str\n        The name of the dataset that the model is operating over.\n    model_name : str\n        The name of the model we want to fetch the state of.\n\n    Returns\n    ------\n    Union[TableStatus, None]\n        The state of the model or 'None' if the model doesn't exist.\n    \"\"\"\n    try:\n        return self.conn.get_model_status(dataset_name, model_name)\n    except ClientException as e:\n        if e.status_code == 404:\n            return None\n        raise e\n
"},{"location":"client_api/Client/#valor.Client.get_models","title":"valor.Client.get_models(filter_by=None)","text":"

Get all models using an optional filter.

Parameters:

filter_by : FilterType, optional
    Optional constraints to filter by. (default: None)

Returns:

List[Model]
    A list of models.

Source code in valor/coretypes.py
def get_models(\n    self,\n    filter_by: Optional[FilterType] = None,\n) -> List[Model]:\n    \"\"\"\n    Get all models using an optional filter.\n\n    Parameters\n    ----------\n    filter_by : FilterType, optional\n        Optional constraints to filter by.\n\n    Returns\n    ------\n    List[valor.Model]\n        A list of models.\n    \"\"\"\n    filter_ = _format_filter(filter_by)\n    if isinstance(filter_, Filter):\n        filter_ = asdict(filter_)\n    model_list = []\n    for kwargs in self.conn.get_models(filter_):\n        model = Model.decode_value({**kwargs, \"connection\": self.conn})\n        model_list.append(model)\n    return model_list\n
"},{"location":"client_api/Client/#valor.Client.get_prediction","title":"valor.Client.get_prediction(dataset, model, datum)","text":"

Get a particular prediction.

Parameters:

dataset : Union[Dataset, str]
    The dataset the datum belongs to. (required)
model : Union[Model, str]
    The model that made the prediction. (required)
datum : Union[Datum, str]
    The desired datum. (required)

Returns:

Union[Prediction, None]
    The matching prediction or 'None' if it doesn't exist.

Source code in valor/coretypes.py
def get_prediction(\n    self,\n    dataset: Union[Dataset, str],\n    model: Union[Model, str],\n    datum: Union[Datum, str],\n) -> Union[Prediction, None]:\n    \"\"\"\n    Get a particular prediction.\n\n    Parameters\n    ----------\n    dataset: Union[Dataset, str]\n        The dataset the datum belongs to.\n    model: Union[Model, str]\n        The model that made the prediction.\n    datum: Union[Datum, str]\n        The desired datum.\n\n    Returns\n    ----------\n    Union[Prediction, None]\n        The matching prediction or 'None' if it doesn't exist.\n    \"\"\"\n    dataset_name = (\n        dataset.name if isinstance(dataset, Dataset) else dataset\n    )\n    model_name = model.name if isinstance(model, Model) else model\n    datum_uid = datum.uid if isinstance(datum, Datum) else datum\n\n    resp = self.conn.get_prediction(\n        dataset_name=dataset_name,  # type: ignore\n        model_name=model_name,  # type: ignore\n        datum_uid=datum_uid,  # type: ignore\n    )\n    resp.pop(\"dataset_name\")\n    resp.pop(\"model_name\")\n    return Prediction.decode_value(resp)\n
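For example (names are placeholders):

>>> prediction = client.get_prediction(dataset=\"dataset1\", model=\"model1\", datum=\"uid1\")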
"},{"location":"client_api/Dataset/","title":"Dataset","text":"

Bases: StaticCollection

A class describing a given dataset.

Attributes:

name : String
    The name of the dataset.
metadata : Dictionary
    A dictionary of metadata that describes the dataset.

Examples:

>>> Dataset.create(name=\"dataset1\")\n>>> Dataset.create(name=\"dataset1\", metadata={})\n>>> Dataset.create(name=\"dataset1\", metadata={\"foo\": \"bar\", \"pi\": 3.14})\n
Source code in valor/coretypes.py
class Dataset(StaticCollection):\n    \"\"\"\n    A class describing a given dataset.\n\n    Attributes\n    ----------\n    name : String\n        The name of the dataset.\n    metadata : Dictionary\n        A dictionary of metadata that describes the dataset.\n\n    Examples\n    --------\n    >>> Dataset.create(name=\"dataset1\")\n    >>> Dataset.create(name=\"dataset1\", metadata={})\n    >>> Dataset.create(name=\"dataset1\", metadata={\"foo\": \"bar\", \"pi\": 3.14})\n    \"\"\"\n\n    name: String = String.symbolic(owner=\"dataset\", name=\"name\")\n    metadata: Dictionary = Dictionary.symbolic(\n        owner=\"dataset\", name=\"metadata\"\n    )\n\n    def __init__(\n        self,\n        *,\n        name: str,\n        metadata: Optional[dict] = None,\n        connection: Optional[ClientConnection] = None,\n    ):\n        \"\"\"\n        Creates a local instance of a dataset.\n\n        Use 'Dataset.create' classmethod to create a dataset with persistence.\n\n        Parameters\n        ----------\n        name : str\n            The name of the dataset.\n        metadata : dict, optional\n            A dictionary of metadata that describes the dataset.\n        connection : ClientConnection, optional\n            An initialized client connection.\n        \"\"\"\n        self.conn = connection\n        super().__init__(name=name, metadata=metadata if metadata else dict())\n\n    @classmethod\n    def create(\n        cls,\n        name: str,\n        metadata: Optional[Dict[str, Any]] = None,\n        connection: Optional[ClientConnection] = None,\n    ) -> Dataset:\n        \"\"\"\n        Creates a dataset that persists in the back end.\n\n        Parameters\n        ----------\n        name : str\n            The name of the dataset.\n        metadata : dict, optional\n            A dictionary of metadata that describes the dataset.\n        connection : ClientConnection, optional\n            An initialized client connection.\n        \"\"\"\n        dataset = cls(name=name, metadata=metadata, connection=connection)\n        Client(dataset.conn).create_dataset(dataset)\n        return dataset\n\n    @classmethod\n    def get(\n        cls,\n        name: str,\n        connection: Optional[ClientConnection] = None,\n    ) -> Union[Dataset, None]:\n        \"\"\"\n        Retrieves a dataset from the back end database.\n\n        Parameters\n        ----------\n        name : str\n            The name of the dataset.\n\n        Returns\n        -------\n        Union[valor.Dataset, None]\n            The dataset or 'None' if it doesn't exist.\n        \"\"\"\n        return Client(connection).get_dataset(name)\n\n    def add_groundtruth(\n        self,\n        groundtruth: GroundTruth,\n    ) -> None:\n        \"\"\"\n        Add a ground truth to the dataset.\n\n        Parameters\n        ----------\n        groundtruth : GroundTruth\n            The ground truth to create.\n        \"\"\"\n        Client(self.conn).create_groundtruths(\n            dataset=self,\n            groundtruths=[groundtruth],\n        )\n\n    def add_groundtruths(\n        self,\n        groundtruths: List[GroundTruth],\n        ignore_existing_datums: bool = False,\n    ) -> None:\n        \"\"\"\n        Add multiple ground truths to the dataset.\n\n        Parameters\n        ----------\n        groundtruths : List[GroundTruth]\n            The ground truths to create.\n        ignore_existing_datums : bool, default=False\n            If True, will ignore datums that already exist in the 
backend.\n            If False, will raise an error if any datums already exist.\n            Default is False.\n        \"\"\"\n        Client(self.conn).create_groundtruths(\n            dataset=self,\n            groundtruths=groundtruths,\n            ignore_existing_datums=ignore_existing_datums,\n        )\n\n    def get_groundtruth(\n        self,\n        datum: Union[Datum, str],\n    ) -> Union[GroundTruth, None]:\n        \"\"\"\n        Get a particular ground truth.\n\n        Parameters\n        ----------\n        datum: Union[Datum, str]\n            The desired datum.\n\n        Returns\n        ----------\n        Union[GroundTruth, None]\n            The matching ground truth or 'None' if it doesn't exist.\n        \"\"\"\n        return Client(self.conn).get_groundtruth(dataset=self, datum=datum)\n\n    def get_labels(\n        self,\n    ) -> List[Label]:\n        \"\"\"\n        Get all labels associated with a given dataset.\n\n        Returns\n        ----------\n        List[Label]\n            A list of `Labels` associated with the dataset.\n        \"\"\"\n        return Client(self.conn).get_labels_from_dataset(self)\n\n    def get_datums(\n        self, filter_by: Optional[FilterType] = None\n    ) -> List[Datum]:\n        \"\"\"\n        Get all datums associated with a given dataset.\n\n        Parameters\n        ----------\n        filter_by\n            Optional constraints to filter by.\n\n        Returns\n        ----------\n        List[Datum]\n            A list of `Datums` associated with the dataset.\n        \"\"\"\n        filter_ = _format_filter(filter_by)\n        if isinstance(filter_, Filter):\n            filter_ = asdict(filter_)\n\n        if filter_.get(\"dataset_names\"):\n            raise ValueError(\n                \"Cannot filter by dataset_names when calling `Dataset.get_datums`.\"\n            )\n        filter_[\"dataset_names\"] = [self.name]  # type: ignore\n        return Client(self.conn).get_datums(filter_by=filter_)\n\n    def get_evaluations(\n        self,\n        metrics_to_sort_by: Optional[\n            Dict[str, Union[Dict[str, str], str]]\n        ] = None,\n    ) -> List[Evaluation]:\n        \"\"\"\n        Get all evaluations associated with a given dataset.\n\n        Parameters\n        ----------\n        metrics_to_sort_by: dict[str, str | dict[str, str]], optional\n            An optional dict of metric types to sort the evaluations by.\n\n        Returns\n        ----------\n        List[Evaluation]\n            A list of `Evaluations` associated with the dataset.\n        \"\"\"\n        return Client(self.conn).get_evaluations(\n            datasets=[self], metrics_to_sort_by=metrics_to_sort_by\n        )\n\n    def get_summary(self) -> DatasetSummary:\n        \"\"\"\n        Get the summary of a given dataset.\n\n        Returns\n        -------\n        DatasetSummary\n            The summary of the dataset. 
This class has the following fields:\n\n            name: name of the dataset\n\n            num_datums: total number of datums in the dataset\n\n            num_annotations: total number of labeled annotations in the dataset; if an\n            object (such as a bounding box) has multiple labels, then each label is counted separately\n\n            num_bounding_boxes: total number of bounding boxes in the dataset\n\n            num_polygons: total number of polygons in the dataset\n\n            num_rasters: total number of rasters in the dataset\n\n            labels: list of the unique labels in the dataset\n\n            datum_metadata: list of the unique metadata dictionaries in the dataset that are associated\n            to datums\n\n            groundtruth_annotation_metadata: list of the unique metadata dictionaries in the dataset that are\n            associated to annotations\n        \"\"\"\n        return Client(self.conn).get_dataset_summary(self.name)  # type: ignore\n\n    def finalize(\n        self,\n    ):\n        \"\"\"\n        Finalizes the dataset such that new ground truths cannot be added to it.\n        \"\"\"\n        return Client(self.conn).finalize_dataset(self)\n\n    def delete(\n        self,\n        timeout: int = 0,\n    ):\n        \"\"\"\n        Delete the dataset from the back end.\n\n        Parameters\n        ----------\n        timeout : int, default=0\n            Sets a timeout in seconds.\n        \"\"\"\n        Client(self.conn).delete_dataset(self.name, timeout)  # type: ignore\n
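A typical end-to-end sketch, assuming a connection has already been established and `groundtruths` is a pre-built list of valor.GroundTruth objects:

>>> dataset = Dataset.create(name=\"dataset1\", metadata={\"foo\": \"bar\"})
>>> dataset.add_groundtruths(groundtruths)
>>> dataset.finalize()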
"},{"location":"client_api/Dataset/#valor.Dataset-functions","title":"Functions","text":""},{"location":"client_api/Dataset/#valor.Dataset.__init__","title":"valor.Dataset.__init__(*, name, metadata=None, connection=None)","text":"

Creates a local instance of a dataset.

Use 'Dataset.create' classmethod to create a dataset with persistence.

Parameters:

name : str
    The name of the dataset. (required)
metadata : dict, optional
    A dictionary of metadata that describes the dataset. (default: None)
connection : ClientConnection, optional
    An initialized client connection. (default: None)

Source code in valor/coretypes.py
def __init__(\n    self,\n    *,\n    name: str,\n    metadata: Optional[dict] = None,\n    connection: Optional[ClientConnection] = None,\n):\n    \"\"\"\n    Creates a local instance of a dataset.\n\n    Use 'Dataset.create' classmethod to create a dataset with persistence.\n\n    Parameters\n    ----------\n    name : str\n        The name of the dataset.\n    metadata : dict, optional\n        A dictionary of metadata that describes the dataset.\n    connection : ClientConnection, optional\n        An initialized client connection.\n    \"\"\"\n    self.conn = connection\n    super().__init__(name=name, metadata=metadata if metadata else dict())\n
"},{"location":"client_api/Dataset/#valor.Dataset.add_groundtruth","title":"valor.Dataset.add_groundtruth(groundtruth)","text":"

Add a ground truth to the dataset.

Parameters:

groundtruth : GroundTruth
    The ground truth to create. (required)

Source code in valor/coretypes.py
def add_groundtruth(\n    self,\n    groundtruth: GroundTruth,\n) -> None:\n    \"\"\"\n    Add a ground truth to the dataset.\n\n    Parameters\n    ----------\n    groundtruth : GroundTruth\n        The ground truth to create.\n    \"\"\"\n    Client(self.conn).create_groundtruths(\n        dataset=self,\n        groundtruths=[groundtruth],\n    )\n
"},{"location":"client_api/Dataset/#valor.Dataset.add_groundtruths","title":"valor.Dataset.add_groundtruths(groundtruths, ignore_existing_datums=False)","text":"

Add multiple ground truths to the dataset.

Parameters:

groundtruths : List[GroundTruth]
    The ground truths to create. (required)
ignore_existing_datums : bool, optional
    If True, will ignore datums that already exist in the backend. If False, will raise an error if any datums already exist. Default is False.

Source code in valor/coretypes.py
def add_groundtruths(\n    self,\n    groundtruths: List[GroundTruth],\n    ignore_existing_datums: bool = False,\n) -> None:\n    \"\"\"\n    Add multiple ground truths to the dataset.\n\n    Parameters\n    ----------\n    groundtruths : List[GroundTruth]\n        The ground truths to create.\n    ignore_existing_datums : bool, default=False\n        If True, will ignore datums that already exist in the backend.\n        If False, will raise an error if any datums already exist.\n        Default is False.\n    \"\"\"\n    Client(self.conn).create_groundtruths(\n        dataset=self,\n        groundtruths=groundtruths,\n        ignore_existing_datums=ignore_existing_datums,\n    )\n
"},{"location":"client_api/Dataset/#valor.Dataset.create","title":"valor.Dataset.create(name, metadata=None, connection=None) classmethod","text":"

Creates a dataset that persists in the back end.

Parameters:

name : str
    The name of the dataset. (required)
metadata : dict, optional
    A dictionary of metadata that describes the dataset. (default: None)
connection : ClientConnection, optional
    An initialized client connection. (default: None)

Source code in valor/coretypes.py
@classmethod\ndef create(\n    cls,\n    name: str,\n    metadata: Optional[Dict[str, Any]] = None,\n    connection: Optional[ClientConnection] = None,\n) -> Dataset:\n    \"\"\"\n    Creates a dataset that persists in the back end.\n\n    Parameters\n    ----------\n    name : str\n        The name of the dataset.\n    metadata : dict, optional\n        A dictionary of metadata that describes the dataset.\n    connection : ClientConnection, optional\n        An initialized client connection.\n    \"\"\"\n    dataset = cls(name=name, metadata=metadata, connection=connection)\n    Client(dataset.conn).create_dataset(dataset)\n    return dataset\n
"},{"location":"client_api/Dataset/#valor.Dataset.delete","title":"valor.Dataset.delete(timeout=0)","text":"

Delete the dataset from the back end.

Parameters:

timeout : int, optional
    Sets a timeout in seconds. (default: 0)

Source code in valor/coretypes.py
def delete(\n    self,\n    timeout: int = 0,\n):\n    \"\"\"\n    Delete the dataset from the back end.\n\n    Parameters\n    ----------\n    timeout : int, default=0\n        Sets a timeout in seconds.\n    \"\"\"\n    Client(self.conn).delete_dataset(self.name, timeout)  # type: ignore\n
"},{"location":"client_api/Dataset/#valor.Dataset.finalize","title":"valor.Dataset.finalize()","text":"

Finalizes the dataset such that new ground truths cannot be added to it.

Source code in valor/coretypes.py
def finalize(\n    self,\n):\n    \"\"\"\n    Finalizes the dataset such that new ground truths cannot be added to it.\n    \"\"\"\n    return Client(self.conn).finalize_dataset(self)\n
"},{"location":"client_api/Dataset/#valor.Dataset.get","title":"valor.Dataset.get(name, connection=None) classmethod","text":"

Retrieves a dataset from the back end database.

Parameters:

name : str
    The name of the dataset. (required)

Returns:

Union[Dataset, None]
    The dataset or 'None' if it doesn't exist.

Source code in valor/coretypes.py
@classmethod\ndef get(\n    cls,\n    name: str,\n    connection: Optional[ClientConnection] = None,\n) -> Union[Dataset, None]:\n    \"\"\"\n    Retrieves a dataset from the back end database.\n\n    Parameters\n    ----------\n    name : str\n        The name of the dataset.\n\n    Returns\n    -------\n    Union[valor.Dataset, None]\n        The dataset or 'None' if it doesn't exist.\n    \"\"\"\n    return Client(connection).get_dataset(name)\n
"},{"location":"client_api/Dataset/#valor.Dataset.get_datums","title":"valor.Dataset.get_datums(filter_by=None)","text":"

Get all datums associated with a given dataset.

Parameters:

filter_by : FilterType, optional
    Optional constraints to filter by. (default: None)

Returns:

List[Datum]
    A list of Datums associated with the dataset.

Source code in valor/coretypes.py
def get_datums(\n    self, filter_by: Optional[FilterType] = None\n) -> List[Datum]:\n    \"\"\"\n    Get all datums associated with a given dataset.\n\n    Parameters\n    ----------\n    filter_by\n        Optional constraints to filter by.\n\n    Returns\n    ----------\n    List[Datum]\n        A list of `Datums` associated with the dataset.\n    \"\"\"\n    filter_ = _format_filter(filter_by)\n    if isinstance(filter_, Filter):\n        filter_ = asdict(filter_)\n\n    if filter_.get(\"dataset_names\"):\n        raise ValueError(\n            \"Cannot filter by dataset_names when calling `Dataset.get_datums`.\"\n        )\n    filter_[\"dataset_names\"] = [self.name]  # type: ignore\n    return Client(self.conn).get_datums(filter_by=filter_)\n
"},{"location":"client_api/Dataset/#valor.Dataset.get_evaluations","title":"valor.Dataset.get_evaluations(metrics_to_sort_by=None)","text":"

Get all evaluations associated with a given dataset.

Parameters:

metrics_to_sort_by : Dict[str, Union[Dict[str, str], str]], optional
    An optional dict of metric types to sort the evaluations by. (default: None)

Returns:

List[Evaluation]
    A list of Evaluations associated with the dataset.

Source code in valor/coretypes.py
def get_evaluations(\n    self,\n    metrics_to_sort_by: Optional[\n        Dict[str, Union[Dict[str, str], str]]\n    ] = None,\n) -> List[Evaluation]:\n    \"\"\"\n    Get all evaluations associated with a given dataset.\n\n    Parameters\n    ----------\n    metrics_to_sort_by: dict[str, str | dict[str, str]], optional\n        An optional dict of metric types to sort the evaluations by.\n\n    Returns\n    ----------\n    List[Evaluation]\n        A list of `Evaluations` associated with the dataset.\n    \"\"\"\n    return Client(self.conn).get_evaluations(\n        datasets=[self], metrics_to_sort_by=metrics_to_sort_by\n    )\n
"},{"location":"client_api/Dataset/#valor.Dataset.get_groundtruth","title":"valor.Dataset.get_groundtruth(datum)","text":"

Get a particular ground truth.

Parameters:

datum : Union[Datum, str]
    The desired datum. (required)

Returns:

Union[GroundTruth, None]
    The matching ground truth or 'None' if it doesn't exist.

Source code in valor/coretypes.py
def get_groundtruth(\n    self,\n    datum: Union[Datum, str],\n) -> Union[GroundTruth, None]:\n    \"\"\"\n    Get a particular ground truth.\n\n    Parameters\n    ----------\n    datum: Union[Datum, str]\n        The desired datum.\n\n    Returns\n    ----------\n    Union[GroundTruth, None]\n        The matching ground truth or 'None' if it doesn't exist.\n    \"\"\"\n    return Client(self.conn).get_groundtruth(dataset=self, datum=datum)\n
"},{"location":"client_api/Dataset/#valor.Dataset.get_labels","title":"valor.Dataset.get_labels()","text":"

Get all labels associated with a given dataset.

Returns:

Type Description List[Label]

A list of Labels associated with the dataset.

Source code in valor/coretypes.py
def get_labels(\n    self,\n) -> List[Label]:\n    \"\"\"\n    Get all labels associated with a given dataset.\n\n    Returns\n    ----------\n    List[Label]\n        A list of `Labels` associated with the dataset.\n    \"\"\"\n    return Client(self.conn).get_labels_from_dataset(self)\n
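A usage sketch:

>>> for label in dataset.get_labels():\n...     print(label.key, label.value)\n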
"},{"location":"client_api/Dataset/#valor.Dataset.get_summary","title":"valor.Dataset.get_summary()","text":"

Get the summary of a given dataset.

Returns:

Type Description DatasetSummary

The summary of the dataset. This class has the following fields:

name: name of the dataset

num_datums: total number of datums in the dataset

num_annotations: total number of labeled annotations in the dataset; if an object (such as a bounding box) has multiple labels, then each label is counted separately

num_bounding_boxes: total number of bounding boxes in the dataset

num_polygons: total number of polygons in the dataset

num_rasters: total number of rasters in the dataset

labels: list of the unique labels in the dataset

datum_metadata: list of the unique metadata dictionaries in the dataset that are associated to datums

groundtruth_annotation_metadata: list of the unique metadata dictionaries in the dataset that are associated to annotations

Source code in valor/coretypes.py
def get_summary(self) -> DatasetSummary:\n    \"\"\"\n    Get the summary of a given dataset.\n\n    Returns\n    -------\n    DatasetSummary\n        The summary of the dataset. This class has the following fields:\n\n        name: name of the dataset\n\n        num_datums: total number of datums in the dataset\n\n        num_annotations: total number of labeled annotations in the dataset; if an\n        object (such as a bounding box) has multiple labels, then each label is counted separately\n\n        num_bounding_boxes: total number of bounding boxes in the dataset\n\n        num_polygons: total number of polygons in the dataset\n\n        num_rasters: total number of rasters in the dataset\n\n        labels: list of the unique labels in the dataset\n\n        datum_metadata: list of the unique metadata dictionaries in the dataset that are associated\n        to datums\n\n        groundtruth_annotation_metadata: list of the unique metadata dictionaries in the dataset that are\n        associated to annotations\n    \"\"\"\n    return Client(self.conn).get_dataset_summary(self.name)  # type: ignore\n
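A usage sketch based on the fields listed above:

>>> summary = dataset.get_summary()\n>>> print(summary.name, summary.num_datums, summary.num_annotations)\n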
"},{"location":"client_api/Datum/","title":"Datum","text":"

Bases: StaticCollection

A class used to store information about a datum for either a 'GroundTruth' or a 'Prediction'.

Attributes:

Name Type Description uid String

The UID of the datum.

metadata Dictionary

A dictionary of metadata that describes the datum.

Examples:

>>> Datum(uid=\"uid1\")\n>>> Datum(uid=\"uid1\", metadata={})\n>>> Datum(uid=\"uid1\", metadata={\"foo\": \"bar\", \"pi\": 3.14})\n
Source code in valor/schemas/symbolic/collections.py
class Datum(StaticCollection):\n    \"\"\"\n    A class used to store information about a datum for either a 'GroundTruth' or a 'Prediction'.\n\n    Attributes\n    ----------\n    uid : String\n        The UID of the datum.\n    metadata : Dictionary\n        A dictionary of metadata that describes the datum.\n\n    Examples\n    --------\n    >>> Datum(uid=\"uid1\")\n    >>> Datum(uid=\"uid1\", metadata={})\n    >>> Datum(uid=\"uid1\", metadata={\"foo\": \"bar\", \"pi\": 3.14})\n    \"\"\"\n\n    uid: String = String.symbolic(owner=\"datum\", name=\"uid\")\n    metadata: Dictionary = Dictionary.symbolic(owner=\"datum\", name=\"metadata\")\n\n    def __init__(\n        self,\n        *,\n        uid: str,\n        metadata: Optional[dict] = None,\n    ):\n        \"\"\"\n        Constructs a datum.\n\n        Parameters\n        ----------\n        uid : str\n            The UID of the datum.\n        metadata : dict, optional\n            A dictionary of metadata that describes the datum.\n        \"\"\"\n        super().__init__(uid=uid, metadata=metadata if metadata else dict())\n
"},{"location":"client_api/Datum/#valor.Datum-functions","title":"Functions","text":""},{"location":"client_api/Datum/#valor.Datum.__init__","title":"valor.Datum.__init__(*, uid, metadata=None)","text":"

Constructs a datum.

Parameters:

Name Type Description Default uid str

The UID of the datum.

required metadata dict

A dictionary of metadata that describes the datum.

None Source code in valor/schemas/symbolic/collections.py
def __init__(\n    self,\n    *,\n    uid: str,\n    metadata: Optional[dict] = None,\n):\n    \"\"\"\n    Constructs a datum.\n\n    Parameters\n    ----------\n    uid : str\n        The UID of the datum.\n    metadata : dict, optional\n        A dictionary of metadata that describes the datum.\n    \"\"\"\n    super().__init__(uid=uid, metadata=metadata if metadata else dict())\n
"},{"location":"client_api/Evaluation/","title":"Evaluation","text":"

Wraps valor.client.Job to provide evaluation-specific members.

Source code in valor/coretypes.py
class Evaluation:\n    \"\"\"\n    Wraps `valor.client.Job` to provide evaluation-specifc members.\n    \"\"\"\n\n    def __init__(\n        self, connection: Optional[ClientConnection] = None, **kwargs\n    ):\n        \"\"\"\n        Defines important attributes of the API's `EvaluationResult`.\n\n        Attributes\n        ----------\n        id : int\n            The ID of the evaluation.\n        model_name : str\n            The name of the evaluated model.\n        datum_filter : schemas.Filter\n            The filter used to select the datums for evaluation.\n        status : EvaluationStatus\n            The status of the evaluation.\n        metrics : List[dict]\n            A list of metric dictionaries returned by the job.\n        confusion_matrices : List[dict]\n            A list of confusion matrix dictionaries returned by the job.\n        meta: dict[str, str | float | dict], optional\n            A dictionary of metadata describing the evaluation run.\n        \"\"\"\n        if not connection:\n            connection = get_connection()\n        self.conn = connection\n        self.update(**kwargs)\n\n    def update(\n        self,\n        *_,\n        id: int,\n        model_name: str,\n        datum_filter: Filter,\n        parameters: EvaluationParameters,\n        status: EvaluationStatus,\n        metrics: List[Dict],\n        confusion_matrices: List[Dict],\n        created_at: str,\n        meta: dict[str, str | float | dict] | None,\n        **kwargs,\n    ):\n        self.id = id\n        self.model_name = model_name\n        self.datum_filter = (\n            Filter(**datum_filter)\n            if isinstance(datum_filter, dict)\n            else datum_filter\n        )\n        self.parameters = (\n            EvaluationParameters(**parameters)\n            if isinstance(parameters, dict)\n            else parameters\n        )\n        self.status = EvaluationStatus(status)\n        self.metrics = metrics\n        self.meta = meta\n        self.confusion_matrices = confusion_matrices\n        self.kwargs = kwargs\n        self.ignored_pred_labels: Optional[List[Label]] = None\n        self.missing_pred_labels: Optional[List[Label]] = None\n        self.created_at = datetime.datetime.strptime(\n            created_at, \"%Y-%m-%dT%H:%M:%S.%fZ\"\n        ).replace(tzinfo=datetime.timezone.utc)\n\n        for k, v in kwargs.items():\n            setattr(self, k, v)\n\n    def poll(self) -> EvaluationStatus:\n        \"\"\"\n        Poll the back end.\n\n        Updates the evaluation with the latest state from the back end.\n\n        Returns\n        -------\n        enums.EvaluationStatus\n            The status of the evaluation.\n\n        Raises\n        ----------\n        ClientException\n            If an Evaluation with the given `evaluation_id` is not found.\n        \"\"\"\n        response = self.conn.get_evaluations(evaluation_ids=[self.id])\n        if not response:\n            raise ClientException(\"Not Found\")\n        self.update(**response[0])\n        return self.status\n\n    def wait_for_completion(\n        self,\n        *,\n        timeout: Optional[int] = None,\n        interval: float = 1.0,\n    ) -> EvaluationStatus:\n        \"\"\"\n        Blocking function that waits for evaluation to finish.\n\n        Parameters\n        ----------\n        timeout : int, optional\n            Length of timeout in seconds.\n        interval : float, default=1.0\n            Polling interval in seconds.\n        \"\"\"\n        t_start = 
time.time()\n        while self.poll() not in [\n            EvaluationStatus.DONE,\n            EvaluationStatus.FAILED,\n        ]:\n            time.sleep(interval)\n            if timeout and time.time() - t_start > timeout:\n                raise TimeoutError\n        return self.status\n\n    def __str__(self) -> str:\n        \"\"\"Dumps the object into a JSON formatted string.\"\"\"\n        return json.dumps(self.to_dict(), indent=4)\n\n    def to_dict(self) -> dict:\n        \"\"\"\n        Defines how a `valor.Evaluation` object is serialized into a dictionary.\n\n        Returns\n        ----------\n        dict\n            A dictionary describing an evaluation.\n        \"\"\"\n        return {\n            \"id\": self.id,\n            \"model_name\": self.model_name,\n            \"datum_filter\": asdict(self.datum_filter),\n            \"parameters\": asdict(self.parameters),\n            \"status\": self.status.value,\n            \"metrics\": self.metrics,\n            \"confusion_matrices\": self.confusion_matrices,\n            \"meta\": self.meta,\n            **self.kwargs,\n        }\n\n    def to_dataframe(\n        self,\n        stratify_by: Optional[Tuple[str, str]] = None,\n    ):\n        \"\"\"\n        Get all metrics associated with a Model and return them in a `pd.DataFrame`.\n\n        Returns\n        ----------\n        pd.DataFrame\n            Evaluation metrics being displayed in a `pd.DataFrame`.\n\n        Raises\n        ------\n        ModuleNotFoundError\n            This function requires the use of `pandas.DataFrame`.\n\n        \"\"\"\n        try:\n            import pandas as pd\n        except ModuleNotFoundError:\n            raise ModuleNotFoundError(\n                \"Must have pandas installed to use `get_metric_dataframes`.\"\n            )\n\n        if not stratify_by:\n            column_type = \"evaluation\"\n            column_name = self.id\n        else:\n            column_type = stratify_by[0]\n            column_name = stratify_by[1]\n\n        metrics = [\n            {**metric, column_type: column_name} for metric in self.metrics\n        ]\n        df = pd.DataFrame(metrics)\n        for k in [\"label\", \"parameters\"]:\n            df[k] = df[k].fillna(\"n/a\")\n        df[\"parameters\"] = df[\"parameters\"].apply(json.dumps)\n        df[\"label\"] = df[\"label\"].apply(\n            lambda x: f\"{x['key']}: {x['value']}\" if x != \"n/a\" else x\n        )\n        df = df.pivot(\n            index=[\"type\", \"parameters\", \"label\"], columns=[column_type]\n        )\n        return df\n
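Evaluations are normally created by the Model.evaluate_* methods rather than constructed directly. A hedged end-to-end sketch, assuming model and dataset objects that already exist:

>>> evaluation = model.evaluate_classification(datasets=[dataset])\n>>> evaluation.wait_for_completion(timeout=30)\n>>> evaluation.metrics\n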
"},{"location":"client_api/Evaluation/#valor.Evaluation-functions","title":"Functions","text":""},{"location":"client_api/Evaluation/#valor.Evaluation.__init__","title":"valor.Evaluation.__init__(connection=None, **kwargs)","text":"

Defines important attributes of the API's EvaluationResult.

Attributes:

Name Type Description id int

The ID of the evaluation.

model_name str

The name of the evaluated model.

datum_filter Filter

The filter used to select the datums for evaluation.

status EvaluationStatus

The status of the evaluation.

metrics List[dict]

A list of metric dictionaries returned by the job.

confusion_matrices List[dict]

A list of confusion matrix dictionaries returned by the job.

meta (dict[str, str | float | dict], optional)

A dictionary of metadata describing the evaluation run.

Source code in valor/coretypes.py
def __init__(\n    self, connection: Optional[ClientConnection] = None, **kwargs\n):\n    \"\"\"\n    Defines important attributes of the API's `EvaluationResult`.\n\n    Attributes\n    ----------\n    id : int\n        The ID of the evaluation.\n    model_name : str\n        The name of the evaluated model.\n    datum_filter : schemas.Filter\n        The filter used to select the datums for evaluation.\n    status : EvaluationStatus\n        The status of the evaluation.\n    metrics : List[dict]\n        A list of metric dictionaries returned by the job.\n    confusion_matrices : List[dict]\n        A list of confusion matrix dictionaries returned by the job.\n    meta: dict[str, str | float | dict], optional\n        A dictionary of metadata describing the evaluation run.\n    \"\"\"\n    if not connection:\n        connection = get_connection()\n    self.conn = connection\n    self.update(**kwargs)\n
"},{"location":"client_api/Evaluation/#valor.Evaluation.__str__","title":"valor.Evaluation.__str__()","text":"

Dumps the object into a JSON formatted string.

Source code in valor/coretypes.py
def __str__(self) -> str:\n    \"\"\"Dumps the object into a JSON formatted string.\"\"\"\n    return json.dumps(self.to_dict(), indent=4)\n
"},{"location":"client_api/Evaluation/#valor.Evaluation.poll","title":"valor.Evaluation.poll()","text":"

Poll the back end.

Updates the evaluation with the latest state from the back end.

Returns:

Type Description EvaluationStatus

The status of the evaluation.

Raises:

Type Description ClientException

If an Evaluation with the given evaluation_id is not found.

Source code in valor/coretypes.py
def poll(self) -> EvaluationStatus:\n    \"\"\"\n    Poll the back end.\n\n    Updates the evaluation with the latest state from the back end.\n\n    Returns\n    -------\n    enums.EvaluationStatus\n        The status of the evaluation.\n\n    Raises\n    ----------\n    ClientException\n        If an Evaluation with the given `evaluation_id` is not found.\n    \"\"\"\n    response = self.conn.get_evaluations(evaluation_ids=[self.id])\n    if not response:\n        raise ClientException(\"Not Found\")\n    self.update(**response[0])\n    return self.status\n
"},{"location":"client_api/Evaluation/#valor.Evaluation.to_dataframe","title":"valor.Evaluation.to_dataframe(stratify_by=None)","text":"

Get all metrics associated with a Model and return them in a pd.DataFrame.

Returns:

Type Description DataFrame

Evaluation metrics being displayed in a pd.DataFrame.

Raises:

Type Description ModuleNotFoundError

Raised if pandas is not installed; this function requires pandas.DataFrame.

Source code in valor/coretypes.py
def to_dataframe(\n    self,\n    stratify_by: Optional[Tuple[str, str]] = None,\n):\n    \"\"\"\n    Get all metrics associated with a Model and return them in a `pd.DataFrame`.\n\n    Returns\n    ----------\n    pd.DataFrame\n        Evaluation metrics being displayed in a `pd.DataFrame`.\n\n    Raises\n    ------\n    ModuleNotFoundError\n        This function requires the use of `pandas.DataFrame`.\n\n    \"\"\"\n    try:\n        import pandas as pd\n    except ModuleNotFoundError:\n        raise ModuleNotFoundError(\n            \"Must have pandas installed to use `get_metric_dataframes`.\"\n        )\n\n    if not stratify_by:\n        column_type = \"evaluation\"\n        column_name = self.id\n    else:\n        column_type = stratify_by[0]\n        column_name = stratify_by[1]\n\n    metrics = [\n        {**metric, column_type: column_name} for metric in self.metrics\n    ]\n    df = pd.DataFrame(metrics)\n    for k in [\"label\", \"parameters\"]:\n        df[k] = df[k].fillna(\"n/a\")\n    df[\"parameters\"] = df[\"parameters\"].apply(json.dumps)\n    df[\"label\"] = df[\"label\"].apply(\n        lambda x: f\"{x['key']}: {x['value']}\" if x != \"n/a\" else x\n    )\n    df = df.pivot(\n        index=[\"type\", \"parameters\", \"label\"], columns=[column_type]\n    )\n    return df\n
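A usage sketch; pandas must be installed, and the stratify_by tuple of (column type, column name) shown here is illustrative:

>>> df = evaluation.to_dataframe()\n>>> df = evaluation.to_dataframe(stratify_by=(\"model\", \"model1\"))\n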
"},{"location":"client_api/Evaluation/#valor.Evaluation.to_dict","title":"valor.Evaluation.to_dict()","text":"

Defines how a valor.Evaluation object is serialized into a dictionary.

Returns:

Type Description dict

A dictionary describing an evaluation.

Source code in valor/coretypes.py
def to_dict(self) -> dict:\n    \"\"\"\n    Defines how a `valor.Evaluation` object is serialized into a dictionary.\n\n    Returns\n    ----------\n    dict\n        A dictionary describing an evaluation.\n    \"\"\"\n    return {\n        \"id\": self.id,\n        \"model_name\": self.model_name,\n        \"datum_filter\": asdict(self.datum_filter),\n        \"parameters\": asdict(self.parameters),\n        \"status\": self.status.value,\n        \"metrics\": self.metrics,\n        \"confusion_matrices\": self.confusion_matrices,\n        \"meta\": self.meta,\n        **self.kwargs,\n    }\n
"},{"location":"client_api/Evaluation/#valor.Evaluation.wait_for_completion","title":"valor.Evaluation.wait_for_completion(*, timeout=None, interval=1.0)","text":"

Blocking function that waits for evaluation to finish.

Parameters:

Name Type Description Default timeout int

Length of timeout in seconds.

None interval float

Polling interval in seconds.

1.0 Source code in valor/coretypes.py
def wait_for_completion(\n    self,\n    *,\n    timeout: Optional[int] = None,\n    interval: float = 1.0,\n) -> EvaluationStatus:\n    \"\"\"\n    Blocking function that waits for evaluation to finish.\n\n    Parameters\n    ----------\n    timeout : int, optional\n        Length of timeout in seconds.\n    interval : float, default=1.0\n        Polling interval in seconds.\n    \"\"\"\n    t_start = time.time()\n    while self.poll() not in [\n        EvaluationStatus.DONE,\n        EvaluationStatus.FAILED,\n    ]:\n        time.sleep(interval)\n        if timeout and time.time() - t_start > timeout:\n            raise TimeoutError\n    return self.status\n
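A usage sketch showing the timeout behavior; per the source above, TimeoutError is raised when the evaluation is still running after the timeout elapses:

>>> try:\n...     evaluation.wait_for_completion(timeout=60, interval=2.0)\n... except TimeoutError:\n...     print(\"evaluation still running\")\n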
"},{"location":"client_api/Groundtruth/","title":"Groundtruth","text":"

Bases: StaticCollection

An object describing a ground truth (e.g., a human-drawn bounding box on an image).

Attributes:

Name Type Description datum Datum

The datum associated with the groundtruth.

annotations List[Annotation]

The list of annotations associated with the groundtruth.

Examples:

>>> GroundTruth(\n...     datum=Datum(uid=\"uid1\"),\n...     annotations=[\n...         Annotation(\n...             labels=[Label(key=\"k1\", value=\"v1\")],\n...         )\n...     ]\n... )\n
Source code in valor/coretypes.py
class GroundTruth(StaticCollection):\n    \"\"\"\n    An object describing a ground truth (e.g., a human-drawn bounding box on an image).\n\n    Attributes\n    ----------\n    datum : Datum\n        The datum associated with the groundtruth.\n    annotations : List[Annotation]\n        The list of annotations associated with the groundtruth.\n\n    Examples\n    --------\n    >>> GroundTruth(\n    ...     datum=Datum(uid=\"uid1\"),\n    ...     annotations=[\n    ...         Annotation(\n    ...             labels=[Label(key=\"k1\", value=\"v1\")],\n    ...         )\n    ...     ]\n    ... )\n    \"\"\"\n\n    datum: Datum = Datum.symbolic(owner=\"groundtruth\", name=\"datum\")\n    annotations: SymbolicList[Annotation] = SymbolicList[Annotation].symbolic(\n        owner=\"groundtruth\", name=\"annotations\"\n    )\n\n    def __init__(\n        self,\n        *,\n        datum: Datum,\n        annotations: List[Annotation],\n    ):\n        \"\"\"\n        Creates a ground truth.\n\n        Parameters\n        ----------\n        datum : Datum\n            The datum that the ground truth is operating over.\n        annotations : List[Annotation]\n            The list of ground truth annotations.\n        \"\"\"\n        super().__init__(datum=datum, annotations=annotations)\n\n        for annotation in self.annotations:\n            for label in annotation.labels:\n                if label.score is not None:\n                    raise ValueError(\n                        \"GroundTruth labels should not have scores.\"\n                    )\n
"},{"location":"client_api/Groundtruth/#valor.GroundTruth-functions","title":"Functions","text":""},{"location":"client_api/Groundtruth/#valor.GroundTruth.__init__","title":"valor.GroundTruth.__init__(*, datum, annotations)","text":"

Creates a ground truth.

Parameters:

Name Type Description Default datum Datum

The datum that the ground truth is operating over.

required annotations List[Annotation]

The list of ground truth annotations.

required Source code in valor/coretypes.py
def __init__(\n    self,\n    *,\n    datum: Datum,\n    annotations: List[Annotation],\n):\n    \"\"\"\n    Creates a ground truth.\n\n    Parameters\n    ----------\n    datum : Datum\n        The datum that the ground truth is operating over.\n    annotations : List[Annotation]\n        The list of ground truth annotations.\n    \"\"\"\n    super().__init__(datum=datum, annotations=annotations)\n\n    for annotation in self.annotations:\n        for label in annotation.labels:\n            if label.score is not None:\n                raise ValueError(\n                    \"GroundTruth labels should not have scores.\"\n                )\n
"},{"location":"client_api/Label/","title":"Label","text":"

Bases: StaticCollection

An object for labeling datasets, models, and annotations.

Attributes:

Name Type Description key String

The class label key.

value String

The class label value.

score Float

The label score.

Examples:

>>> Label(key=\"k1\", value=\"v1\")\n>>> Label(key=\"k1\", value=\"v1\", score=None)\n>>> Label(key=\"k1\", value=\"v1\", score=0.9)\n
Source code in valor/schemas/symbolic/collections.py
class Label(StaticCollection):\n    \"\"\"\n    An object for labeling datasets, models, and annotations.\n\n    Attributes\n    ----------\n    key : String\n        The class label key.\n    value : String\n        The class label value.\n    score : Score\n        The label score.\n\n    Examples\n    --------\n    >>> Label(key=\"k1\", value=\"v1\")\n    >>> Label(key=\"k1\", value=\"v1\", score=None)\n    >>> Label(key=\"k1\", value=\"v1\", score=0.9)\n    \"\"\"\n\n    key: String = String.symbolic(owner=\"label\", name=\"key\")\n    value: String = String.symbolic(owner=\"label\", name=\"value\")\n    score: Float = Float.symbolic(owner=\"label\", name=\"score\")\n\n    def __init__(\n        self,\n        *,\n        key: str,\n        value: str,\n        score: Union[float, np.floating, None] = None,\n    ):\n        \"\"\"\n        Initializes an instance of a label.\n\n        Attributes\n        ----------\n        key : str\n            The class label key.\n        value : str\n            The class label value.\n        score : float, optional\n            The label score.\n        \"\"\"\n        super().__init__(key=key, value=value, score=score)\n\n    @staticmethod\n    def formatting() -> Dict[str, Any]:\n        \"\"\"Attribute format mapping.\"\"\"\n        return {\n            \"score\": Float.nullable,\n        }\n\n    def tuple(self):\n        \"\"\"\n        Defines how the `Label` is turned into a tuple.\n\n        Returns\n        ----------\n        tuple\n            A tuple of the `Label's` arguments.\n        \"\"\"\n        return (self.key, self.value, self.score)\n
"},{"location":"client_api/Label/#valor.Label-functions","title":"Functions","text":""},{"location":"client_api/Label/#valor.Label.__init__","title":"valor.Label.__init__(*, key, value, score=None)","text":"

Initializes an instance of a label.

Attributes:

Name Type Description key str

The class label key.

value str

The class label value.

score (float, optional)

The label score.

Source code in valor/schemas/symbolic/collections.py
def __init__(\n    self,\n    *,\n    key: str,\n    value: str,\n    score: Union[float, np.floating, None] = None,\n):\n    \"\"\"\n    Initializes an instance of a label.\n\n    Attributes\n    ----------\n    key : str\n        The class label key.\n    value : str\n        The class label value.\n    score : float, optional\n        The label score.\n    \"\"\"\n    super().__init__(key=key, value=value, score=score)\n
"},{"location":"client_api/Label/#valor.Label.formatting","title":"valor.Label.formatting() staticmethod","text":"

Attribute format mapping.

Source code in valor/schemas/symbolic/collections.py
@staticmethod\ndef formatting() -> Dict[str, Any]:\n    \"\"\"Attribute format mapping.\"\"\"\n    return {\n        \"score\": Float.nullable,\n    }\n
"},{"location":"client_api/Label/#valor.Label.tuple","title":"valor.Label.tuple()","text":"

Defines how the Label is turned into a tuple.

Returns:

Type Description tuple

A tuple of the Label's arguments.

Source code in valor/schemas/symbolic/collections.py
def tuple(self):\n    \"\"\"\n    Defines how the `Label` is turned into a tuple.\n\n    Returns\n    ----------\n    tuple\n        A tuple of the `Label's` arguments.\n    \"\"\"\n    return (self.key, self.value, self.score)\n
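A usage sketch:

>>> Label(key=\"k1\", value=\"v1\", score=0.9).tuple()  # returns (key, value, score)\n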
"},{"location":"client_api/Model/","title":"Model","text":"

Bases: StaticCollection

A class describing a model that was trained on a particular dataset.

Attributes:

Name Type Description name String

The name of the model.

metadata Dictionary

A dictionary of metadata that describes the model.

Examples:

>>> Model.create(name=\"model1\")\n>>> Model.create(name=\"model1\", metadata={})\n>>> Model.create(name=\"model1\", metadata={\"foo\": \"bar\", \"pi\": 3.14})\n
Source code in valor/coretypes.py
class Model(StaticCollection):\n    \"\"\"\n    A class describing a model that was trained on a particular dataset.\n\n    Attributes\n    ----------\n    name : String\n        The name of the model.\n    metadata : Dictionary\n        A dictionary of metadata that describes the model.\n\n    Examples\n    --------\n    >>> Model.create(name=\"model1\")\n    >>> Model.create(name=\"model1\", metadata={})\n    >>> Model.create(name=\"model1\", metadata={\"foo\": \"bar\", \"pi\": 3.14})\n    \"\"\"\n\n    name: String = String.symbolic(owner=\"model\", name=\"name\")\n    metadata: Dictionary = Dictionary.symbolic(owner=\"model\", name=\"metadata\")\n\n    def __init__(\n        self,\n        *,\n        name: str,\n        metadata: Optional[dict] = None,\n        connection: Optional[ClientConnection] = None,\n    ):\n        \"\"\"\n        Creates a local instance of a model.\n\n        Use 'Model.create' classmethod to create a model with persistence.\n\n        Parameters\n        ----------\n        name : String\n            The name of the model.\n        metadata : Dictionary\n            A dictionary of metadata that describes the model.\n        connection : ClientConnection, optional\n            An initialized client connection.\n        \"\"\"\n        self.conn = connection\n        super().__init__(name=name, metadata=metadata if metadata else dict())\n\n    @classmethod\n    def create(\n        cls,\n        name: str,\n        metadata: Optional[Dict[str, Any]] = None,\n        connection: Optional[ClientConnection] = None,\n        **_,\n    ) -> Model:\n        \"\"\"\n        Creates a model that persists in the back end.\n\n        Parameters\n        ----------\n        name : str\n            The name of the model.\n        metadata : dict, optional\n            A dictionary of metadata that describes the model.\n        connection : ClientConnection, optional\n            An initialized client connection.\n        \"\"\"\n        model = cls(name=name, metadata=metadata, connection=connection)\n        Client(connection).create_model(model)\n        return model\n\n    @classmethod\n    def get(\n        cls,\n        name: str,\n        connection: Optional[ClientConnection] = None,\n    ) -> Union[Model, None]:\n        \"\"\"\n        Retrieves a model from the back end database.\n\n        Parameters\n        ----------\n        name : str\n            The name of the model.\n        connection : ClientConnnetion, optional\n            An optional Valor client object for interacting with the API.\n\n        Returns\n        -------\n        Union[valor.Model, None]\n            The model or 'None' if it doesn't exist.\n        \"\"\"\n        return Client(connection).get_model(name)\n\n    def add_prediction(\n        self,\n        dataset: Dataset,\n        prediction: Prediction,\n    ) -> None:\n        \"\"\"\n        Add a prediction to the model.\n\n        Parameters\n        ----------\n        dataset : valor.Dataset\n            The dataset that is being operated over.\n        prediction : valor.Prediction\n            The prediction to create.\n        \"\"\"\n        Client(self.conn).create_predictions(\n            dataset=dataset,\n            model=self,\n            predictions=[prediction],\n        )\n\n    def add_predictions(\n        self,\n        dataset: Dataset,\n        predictions: List[Prediction],\n    ) -> None:\n        \"\"\"\n        Add multiple predictions to the model.\n\n        Parameters\n        ----------\n        
dataset : valor.Dataset\n            The dataset that is being operated over.\n        predictions : List[valor.Prediction]\n            The predictions to create.\n        \"\"\"\n        Client(self.conn).create_predictions(\n            dataset=dataset,\n            model=self,\n            predictions=predictions,\n        )\n\n    def get_prediction(\n        self, dataset: Union[Dataset, str], datum: Union[Datum, str]\n    ) -> Union[Prediction, None]:\n        \"\"\"\n        Get a particular prediction.\n\n        Parameters\n        ----------\n        dataset: Union[Dataset, str]\n            The dataset the datum belongs to.\n        datum: Union[Datum, str]\n            The desired datum.\n\n        Returns\n        ----------\n        Union[Prediction, None]\n            The matching prediction or 'None' if it doesn't exist.\n        \"\"\"\n        return Client(self.conn).get_prediction(\n            dataset=dataset, model=self, datum=datum\n        )\n\n    def finalize_inferences(self, dataset: Union[Dataset, str]) -> None:\n        \"\"\"\n        Finalizes the model over a dataset such that new predictions cannot be added to it.\n        \"\"\"\n        return Client(self.conn).finalize_inferences(\n            dataset=dataset, model=self\n        )\n\n    def _format_constraints(\n        self,\n        datasets: Optional[Union[Dataset, List[Dataset]]] = None,\n        filter_by: Optional[FilterType] = None,\n    ) -> Filter:\n        \"\"\"Formats the 'datum_filter' for any evaluation requests.\"\"\"\n\n        # get list of dataset names\n        dataset_names_from_obj = []\n        if isinstance(datasets, list):\n            dataset_names_from_obj = [dataset.name for dataset in datasets]\n        elif isinstance(datasets, Dataset):\n            dataset_names_from_obj = [datasets.name]\n\n        # create a 'schemas.Filter' object from the constraints.\n        filter_ = _format_filter(filter_by)\n\n        # reset model name\n        filter_.model_names = None\n        filter_.model_metadata = None\n\n        # set dataset names\n        if not filter_.dataset_names:\n            filter_.dataset_names = []\n        filter_.dataset_names.extend(dataset_names_from_obj)  # type: ignore\n        return filter_\n\n    def _create_label_map(\n        self,\n        label_map: Optional[Dict[Label, Label]],\n    ) -> Union[List[List[List[str]]], None]:\n        \"\"\"Convert a dictionary of label maps to a serializable list format.\"\"\"\n        if not label_map:\n            return None\n\n        if not isinstance(label_map, dict) or not all(\n            [\n                isinstance(key, Label) and isinstance(value, Label)\n                for key, value in label_map.items()\n            ]\n        ):\n            raise TypeError(\n                \"label_map should be a dictionary with valid Labels for both the key and value.\"\n            )\n\n        return_value = []\n        for key, value in label_map.items():\n            if not all(\n                [\n                    (isinstance(v.key, str) and isinstance(v.value, str))\n                    for v in [key, value]\n                ]\n            ):\n                raise TypeError\n            return_value.append(\n                [\n                    [key.key, key.value],\n                    [value.key, value.value],\n                ]\n            )\n        return return_value\n\n    def evaluate_classification(\n        self,\n        datasets: Optional[Union[Dataset, List[Dataset]]] = None,\n        
filter_by: Optional[FilterType] = None,\n        label_map: Optional[Dict[Label, Label]] = None,\n        pr_curve_max_examples: int = 1,\n        metrics_to_return: Optional[List[str]] = None,\n        allow_retries: bool = False,\n    ) -> Evaluation:\n        \"\"\"\n        Start a classification evaluation job.\n\n        Parameters\n        ----------\n        datasets : Union[Dataset, List[Dataset]], optional\n            The dataset or list of datasets to evaluate against.\n        filter_by : FilterType, optional\n            Optional set of constraints to filter evaluation by.\n        label_map : Dict[Label, Label], optional\n            Optional mapping of individual labels to a grouper label. Useful when you need to evaluate performance using labels that differ across datasets and models.\n        metrics: List[str], optional\n            The list of metrics to compute, store, and return to the user.\n        allow_retries : bool, default = False\n            Option to retry previously failed evaluations.\n\n        Returns\n        -------\n        Evaluation\n            A job object that can be used to track the status of the job and get the metrics of it upon completion.\n        \"\"\"\n        if not datasets and not filter_by:\n            raise ValueError(\n                \"Evaluation requires the definition of either datasets, dataset filters or both.\"\n            )\n\n        # format request\n        datum_filter = self._format_constraints(datasets, filter_by)\n        request = EvaluationRequest(\n            model_names=[self.name],  # type: ignore\n            datum_filter=datum_filter,\n            parameters=EvaluationParameters(\n                task_type=TaskType.CLASSIFICATION,\n                label_map=self._create_label_map(label_map=label_map),\n                pr_curve_max_examples=pr_curve_max_examples,\n                metrics_to_return=metrics_to_return,\n            ),\n        )\n\n        # create evaluation\n        evaluation = Client(self.conn).evaluate(\n            request, allow_retries=allow_retries\n        )\n        if len(evaluation) != 1:\n            raise RuntimeError\n        return evaluation[0]\n\n    def evaluate_detection(\n        self,\n        datasets: Optional[Union[Dataset, List[Dataset]]] = None,\n        filter_by: Optional[FilterType] = None,\n        convert_annotations_to_type: Optional[AnnotationType] = None,\n        iou_thresholds_to_compute: Optional[List[float]] = None,\n        iou_thresholds_to_return: Optional[List[float]] = None,\n        label_map: Optional[Dict[Label, Label]] = None,\n        recall_score_threshold: float = 0,\n        metrics_to_return: Optional[List[str]] = None,\n        pr_curve_iou_threshold: float = 0.5,\n        pr_curve_max_examples: int = 1,\n        allow_retries: bool = False,\n    ) -> Evaluation:\n        \"\"\"\n        Start an object-detection evaluation job.\n\n        Parameters\n        ----------\n        datasets : Union[Dataset, List[Dataset]], optional\n            The dataset or list of datasets to evaluate against.\n        filter_by : FilterType, optional\n            Optional set of constraints to filter evaluation by.\n        convert_annotations_to_type : enums.AnnotationType, optional\n            Forces the object detection evaluation to compute over this type.\n        iou_thresholds_to_compute : List[float], optional\n            Thresholds to compute mAP against.\n        iou_thresholds_to_return : List[float], optional\n            Thresholds to return 
AP for. Must be subset of `iou_thresholds_to_compute`.\n        label_map : Dict[Label, Label], optional\n            Optional mapping of individual labels to a grouper label. Useful when you need to evaluate performance using labels that differ across datasets and models.\n        recall_score_threshold: float, default=0\n            The confidence score threshold for use when determining whether to count a prediction as a true positive or not while calculating Average Recall.\n        pr_curve_iou_threshold: float, optional\n            The IOU threshold to use when calculating precision-recall curves. Defaults to 0.5.\n        pr_curve_max_examples: int, optional\n            The maximum number of datum examples to store when calculating PR curves.\n        allow_retries : bool, default = False\n            Option to retry previously failed evaluations.\n\n\n        Returns\n        -------\n        Evaluation\n            A job object that can be used to track the status of the job and get the metrics of it upon completion.\n        \"\"\"\n        if iou_thresholds_to_compute is None:\n            iou_thresholds_to_compute = [\n                round(0.5 + 0.05 * i, 2) for i in range(10)\n            ]\n        if iou_thresholds_to_return is None:\n            iou_thresholds_to_return = [0.5, 0.75]\n\n        # format request\n        parameters = EvaluationParameters(\n            task_type=TaskType.OBJECT_DETECTION,\n            convert_annotations_to_type=convert_annotations_to_type,\n            iou_thresholds_to_compute=iou_thresholds_to_compute,\n            iou_thresholds_to_return=iou_thresholds_to_return,\n            label_map=self._create_label_map(label_map=label_map),\n            recall_score_threshold=recall_score_threshold,\n            metrics_to_return=metrics_to_return,\n            pr_curve_iou_threshold=pr_curve_iou_threshold,\n            pr_curve_max_examples=pr_curve_max_examples,\n        )\n        datum_filter = self._format_constraints(datasets, filter_by)\n        request = EvaluationRequest(\n            model_names=[self.name],  # type: ignore\n            datum_filter=datum_filter,\n            parameters=parameters,\n        )\n\n        # create evaluation\n        evaluation = Client(self.conn).evaluate(\n            request, allow_retries=allow_retries\n        )\n        if len(evaluation) != 1:\n            raise RuntimeError\n        return evaluation[0]\n\n    def evaluate_segmentation(\n        self,\n        datasets: Optional[Union[Dataset, List[Dataset]]] = None,\n        filter_by: Optional[FilterType] = None,\n        label_map: Optional[Dict[Label, Label]] = None,\n        metrics_to_return: Optional[List[str]] = None,\n        allow_retries: bool = False,\n    ) -> Evaluation:\n        \"\"\"\n        Start a semantic-segmentation evaluation job.\n\n        Parameters\n        ----------\n        datasets : Union[Dataset, List[Dataset]], optional\n            The dataset or list of datasets to evaluate against.\n        filter_by : FilterType, optional\n            Optional set of constraints to filter evaluation by.\n        label_map : Dict[Label, Label], optional\n            Optional mapping of individual labels to a grouper label. 
Useful when you need to evaluate performance using labels that differ across datasets and models.\n        metrics: List[str], optional\n            The list of metrics to compute, store, and return to the user.\n        allow_retries : bool, default = False\n            Option to retry previously failed evaluations.\n\n        Returns\n        -------\n        Evaluation\n            A job object that can be used to track the status of the job and get the metrics of it upon completion\n        \"\"\"\n        # format request\n        datum_filter = self._format_constraints(datasets, filter_by)\n        request = EvaluationRequest(\n            model_names=[self.name],  # type: ignore\n            datum_filter=datum_filter,\n            parameters=EvaluationParameters(\n                task_type=TaskType.SEMANTIC_SEGMENTATION,\n                label_map=self._create_label_map(label_map=label_map),\n                metrics_to_return=metrics_to_return,\n            ),\n        )\n\n        # create evaluation\n        evaluation = Client(self.conn).evaluate(\n            request, allow_retries=allow_retries\n        )\n        if len(evaluation) != 1:\n            raise RuntimeError\n        return evaluation[0]\n\n    def delete(self, timeout: int = 0):\n        \"\"\"\n        Delete the `Model` object from the back end.\n\n        Parameters\n        ----------\n        timeout : int, default=0\n            Sets a timeout in seconds.\n        \"\"\"\n        Client(self.conn).delete_model(self.name, timeout)  # type: ignore\n\n    def get_labels(\n        self,\n    ) -> List[Label]:\n        \"\"\"\n        Get all labels associated with a given model.\n\n        Returns\n        ----------\n        List[Label]\n            A list of `Labels` associated with the model.\n        \"\"\"\n        return Client(self.conn).get_labels_from_model(self)\n\n    def get_evaluations(\n        self,\n        metrics_to_sort_by: Optional[\n            Dict[str, Union[Dict[str, str], str]]\n        ] = None,\n    ) -> List[Evaluation]:\n        \"\"\"\n        Get all evaluations associated with a given model.\n\n        Parameters\n        ----------\n        metrics_to_sort_by: dict[str, str | dict[str, str]], optional\n            An optional dict of metric types to sort the evaluations by.\n\n\n        Returns\n        ----------\n        List[Evaluation]\n            A list of `Evaluations` associated with the model.\n        \"\"\"\n        return Client(self.conn).get_evaluations(\n            models=[self], metrics_to_sort_by=metrics_to_sort_by\n        )\n
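A hedged lifecycle sketch tying the methods above together; the names and the predictions list are illustrative and assumed to exist:

>>> model = Model.create(name=\"model1\")\n>>> model.add_predictions(dataset, predictions)\n>>> model.finalize_inferences(dataset)\n>>> evaluation = model.evaluate_classification(datasets=[dataset])\n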
"},{"location":"client_api/Model/#valor.Model-functions","title":"Functions","text":""},{"location":"client_api/Model/#valor.Model.__init__","title":"valor.Model.__init__(*, name, metadata=None, connection=None)","text":"

Creates a local instance of a model.

Use the 'Model.create' classmethod to create a model with persistence.

Parameters:

Name Type Description Default name String

The name of the model.

required metadata Dictionary

A dictionary of metadata that describes the model.

None connection ClientConnection

An initialized client connection.

None Source code in valor/coretypes.py
def __init__(\n    self,\n    *,\n    name: str,\n    metadata: Optional[dict] = None,\n    connection: Optional[ClientConnection] = None,\n):\n    \"\"\"\n    Creates a local instance of a model.\n\n    Use 'Model.create' classmethod to create a model with persistence.\n\n    Parameters\n    ----------\n    name : String\n        The name of the model.\n    metadata : Dictionary\n        A dictionary of metadata that describes the model.\n    connection : ClientConnection, optional\n        An initialized client connection.\n    \"\"\"\n    self.conn = connection\n    super().__init__(name=name, metadata=metadata if metadata else dict())\n
"},{"location":"client_api/Model/#valor.Model.add_prediction","title":"valor.Model.add_prediction(dataset, prediction)","text":"

Add a prediction to the model.

Parameters:

Name Type Description Default dataset Dataset

The dataset that is being operated over.

required prediction Prediction

The prediction to create.

required Source code in valor/coretypes.py
def add_prediction(\n    self,\n    dataset: Dataset,\n    prediction: Prediction,\n) -> None:\n    \"\"\"\n    Add a prediction to the model.\n\n    Parameters\n    ----------\n    dataset : valor.Dataset\n        The dataset that is being operated over.\n    prediction : valor.Prediction\n        The prediction to create.\n    \"\"\"\n    Client(self.conn).create_predictions(\n        dataset=dataset,\n        model=self,\n        predictions=[prediction],\n    )\n
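An illustrative sketch; constructing a Prediction from a datum and scored annotations mirrors the GroundTruth example elsewhere on this page and is an assumption, not something documented here:

>>> prediction = Prediction(\n...     datum=Datum(uid=\"uid1\"),\n...     annotations=[\n...         Annotation(labels=[Label(key=\"k1\", value=\"v1\", score=0.9)])\n...     ],\n... )\n>>> model.add_prediction(dataset, prediction)\n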
"},{"location":"client_api/Model/#valor.Model.add_predictions","title":"valor.Model.add_predictions(dataset, predictions)","text":"

Add multiple predictions to the model.

Parameters:

Name Type Description Default dataset Dataset

The dataset that is being operated over.

required predictions List[Prediction]

The predictions to create.

required Source code in valor/coretypes.py
def add_predictions(\n    self,\n    dataset: Dataset,\n    predictions: List[Prediction],\n) -> None:\n    \"\"\"\n    Add multiple predictions to the model.\n\n    Parameters\n    ----------\n    dataset : valor.Dataset\n        The dataset that is being operated over.\n    predictions : List[valor.Prediction]\n        The predictions to create.\n    \"\"\"\n    Client(self.conn).create_predictions(\n        dataset=dataset,\n        model=self,\n        predictions=predictions,\n    )\n
"},{"location":"client_api/Model/#valor.Model.create","title":"valor.Model.create(name, metadata=None, connection=None, **_) classmethod","text":"

Creates a model that persists in the back end.

Parameters:

Name Type Description Default name str

The name of the model.

required metadata dict

A dictionary of metadata that describes the model.

None connection ClientConnection

An initialized client connection.

None Source code in valor/coretypes.py
@classmethod\ndef create(\n    cls,\n    name: str,\n    metadata: Optional[Dict[str, Any]] = None,\n    connection: Optional[ClientConnection] = None,\n    **_,\n) -> Model:\n    \"\"\"\n    Creates a model that persists in the back end.\n\n    Parameters\n    ----------\n    name : str\n        The name of the model.\n    metadata : dict, optional\n        A dictionary of metadata that describes the model.\n    connection : ClientConnection, optional\n        An initialized client connection.\n    \"\"\"\n    model = cls(name=name, metadata=metadata, connection=connection)\n    Client(connection).create_model(model)\n    return model\n
"},{"location":"client_api/Model/#valor.Model.delete","title":"valor.Model.delete(timeout=0)","text":"

Delete the Model object from the back end.

Parameters:

Name Type Description Default timeout int

Sets a timeout in seconds.

0 Source code in valor/coretypes.py
def delete(self, timeout: int = 0):\n    \"\"\"\n    Delete the `Model` object from the back end.\n\n    Parameters\n    ----------\n    timeout : int, default=0\n        Sets a timeout in seconds.\n    \"\"\"\n    Client(self.conn).delete_model(self.name, timeout)  # type: ignore\n
"},{"location":"client_api/Model/#valor.Model.evaluate_classification","title":"valor.Model.evaluate_classification(datasets=None, filter_by=None, label_map=None, pr_curve_max_examples=1, metrics_to_return=None, allow_retries=False)","text":"

Start a classification evaluation job.

Parameters:

Name Type Description Default datasets Union[Dataset, List[Dataset]]

The dataset or list of datasets to evaluate against.

None filter_by FilterType

Optional set of constraints to filter evaluation by.

None label_map Dict[Label, Label]

Optional mapping of individual labels to a grouper label. Useful when you need to evaluate performance using labels that differ across datasets and models.

None pr_curve_max_examples int

The maximum number of datum examples to store when calculating PR curves.

1 metrics_to_return Optional[List[str]]

The list of metrics to compute, store, and return to the user.

None allow_retries bool

Option to retry previously failed evaluations.

False

Returns:

Type Description Evaluation

A job object that can be used to track the status of the job and retrieve its metrics upon completion.

Source code in valor/coretypes.py
def evaluate_classification(\n    self,\n    datasets: Optional[Union[Dataset, List[Dataset]]] = None,\n    filter_by: Optional[FilterType] = None,\n    label_map: Optional[Dict[Label, Label]] = None,\n    pr_curve_max_examples: int = 1,\n    metrics_to_return: Optional[List[str]] = None,\n    allow_retries: bool = False,\n) -> Evaluation:\n    \"\"\"\n    Start a classification evaluation job.\n\n    Parameters\n    ----------\n    datasets : Union[Dataset, List[Dataset]], optional\n        The dataset or list of datasets to evaluate against.\n    filter_by : FilterType, optional\n        Optional set of constraints to filter evaluation by.\n    label_map : Dict[Label, Label], optional\n        Optional mapping of individual labels to a grouper label. Useful when you need to evaluate performance using labels that differ across datasets and models.\n    metrics: List[str], optional\n        The list of metrics to compute, store, and return to the user.\n    allow_retries : bool, default = False\n        Option to retry previously failed evaluations.\n\n    Returns\n    -------\n    Evaluation\n        A job object that can be used to track the status of the job and get the metrics of it upon completion.\n    \"\"\"\n    if not datasets and not filter_by:\n        raise ValueError(\n            \"Evaluation requires the definition of either datasets, dataset filters or both.\"\n        )\n\n    # format request\n    datum_filter = self._format_constraints(datasets, filter_by)\n    request = EvaluationRequest(\n        model_names=[self.name],  # type: ignore\n        datum_filter=datum_filter,\n        parameters=EvaluationParameters(\n            task_type=TaskType.CLASSIFICATION,\n            label_map=self._create_label_map(label_map=label_map),\n            pr_curve_max_examples=pr_curve_max_examples,\n            metrics_to_return=metrics_to_return,\n        ),\n    )\n\n    # create evaluation\n    evaluation = Client(self.conn).evaluate(\n        request, allow_retries=allow_retries\n    )\n    if len(evaluation) != 1:\n        raise RuntimeError\n    return evaluation[0]\n
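A usage sketch; the metric names passed to metrics_to_return are illustrative:

>>> evaluation = model.evaluate_classification(\n...     datasets=[dataset],\n...     metrics_to_return=[\"Accuracy\", \"F1\"],\n... )\n>>> evaluation.wait_for_completion()\n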
"},{"location":"client_api/Model/#valor.Model.evaluate_detection","title":"valor.Model.evaluate_detection(datasets=None, filter_by=None, convert_annotations_to_type=None, iou_thresholds_to_compute=None, iou_thresholds_to_return=None, label_map=None, recall_score_threshold=0, metrics_to_return=None, pr_curve_iou_threshold=0.5, pr_curve_max_examples=1, allow_retries=False)","text":"

Start an object-detection evaluation job.

Parameters:

Name Type Description Default datasets Union[Dataset, List[Dataset]]

The dataset or list of datasets to evaluate against.

None filter_by FilterType

Optional set of constraints to filter evaluation by.

None convert_annotations_to_type AnnotationType

Forces the object detection evaluation to compute over this type.

None iou_thresholds_to_compute List[float]

Thresholds to compute mAP against.

None iou_thresholds_to_return List[float]

Thresholds to return AP for. Must be a subset of iou_thresholds_to_compute.

None label_map Dict[Label, Label]

Optional mapping of individual labels to a grouper label. Useful when you need to evaluate performance using labels that differ across datasets and models.

None recall_score_threshold float

The confidence score threshold for use when determining whether to count a prediction as a true positive or not while calculating Average Recall.

0 metrics_to_return Optional[List[str]]

The list of metrics to compute, store, and return to the user.

None pr_curve_iou_threshold float

The IOU threshold to use when calculating precision-recall curves. Defaults to 0.5.

0.5 pr_curve_max_examples int

The maximum number of datum examples to store when calculating PR curves.

1 allow_retries bool

Option to retry previously failed evaluations.

False

Returns:

Type Description Evaluation

A job object that can be used to track the status of the job and retrieve its metrics upon completion.

Source code in valor/coretypes.py
def evaluate_detection(\n    self,\n    datasets: Optional[Union[Dataset, List[Dataset]]] = None,\n    filter_by: Optional[FilterType] = None,\n    convert_annotations_to_type: Optional[AnnotationType] = None,\n    iou_thresholds_to_compute: Optional[List[float]] = None,\n    iou_thresholds_to_return: Optional[List[float]] = None,\n    label_map: Optional[Dict[Label, Label]] = None,\n    recall_score_threshold: float = 0,\n    metrics_to_return: Optional[List[str]] = None,\n    pr_curve_iou_threshold: float = 0.5,\n    pr_curve_max_examples: int = 1,\n    allow_retries: bool = False,\n) -> Evaluation:\n    \"\"\"\n    Start an object-detection evaluation job.\n\n    Parameters\n    ----------\n    datasets : Union[Dataset, List[Dataset]], optional\n        The dataset or list of datasets to evaluate against.\n    filter_by : FilterType, optional\n        Optional set of constraints to filter evaluation by.\n    convert_annotations_to_type : enums.AnnotationType, optional\n        Forces the object detection evaluation to compute over this type.\n    iou_thresholds_to_compute : List[float], optional\n        Thresholds to compute mAP against.\n    iou_thresholds_to_return : List[float], optional\n        Thresholds to return AP for. Must be subset of `iou_thresholds_to_compute`.\n    label_map : Dict[Label, Label], optional\n        Optional mapping of individual labels to a grouper label. Useful when you need to evaluate performance using labels that differ across datasets and models.\n    recall_score_threshold: float, default=0\n        The confidence score threshold for use when determining whether to count a prediction as a true positive or not while calculating Average Recall.\n    pr_curve_iou_threshold: float, optional\n        The IOU threshold to use when calculating precision-recall curves. Defaults to 0.5.\n    pr_curve_max_examples: int, optional\n        The maximum number of datum examples to store when calculating PR curves.\n    allow_retries : bool, default = False\n        Option to retry previously failed evaluations.\n\n\n    Returns\n    -------\n    Evaluation\n        A job object that can be used to track the status of the job and get the metrics of it upon completion.\n    \"\"\"\n    if iou_thresholds_to_compute is None:\n        iou_thresholds_to_compute = [\n            round(0.5 + 0.05 * i, 2) for i in range(10)\n        ]\n    if iou_thresholds_to_return is None:\n        iou_thresholds_to_return = [0.5, 0.75]\n\n    # format request\n    parameters = EvaluationParameters(\n        task_type=TaskType.OBJECT_DETECTION,\n        convert_annotations_to_type=convert_annotations_to_type,\n        iou_thresholds_to_compute=iou_thresholds_to_compute,\n        iou_thresholds_to_return=iou_thresholds_to_return,\n        label_map=self._create_label_map(label_map=label_map),\n        recall_score_threshold=recall_score_threshold,\n        metrics_to_return=metrics_to_return,\n        pr_curve_iou_threshold=pr_curve_iou_threshold,\n        pr_curve_max_examples=pr_curve_max_examples,\n    )\n    datum_filter = self._format_constraints(datasets, filter_by)\n    request = EvaluationRequest(\n        model_names=[self.name],  # type: ignore\n        datum_filter=datum_filter,\n        parameters=parameters,\n    )\n\n    # create evaluation\n    evaluation = Client(self.conn).evaluate(\n        request, allow_retries=allow_retries\n    )\n    if len(evaluation) != 1:\n        raise RuntimeError\n    return evaluation[0]\n
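A usage sketch; note that iou_thresholds_to_return must be a subset of iou_thresholds_to_compute:

>>> evaluation = model.evaluate_detection(\n...     datasets=[dataset],\n...     iou_thresholds_to_compute=[0.5, 0.75],\n...     iou_thresholds_to_return=[0.5],\n... )\n>>> evaluation.wait_for_completion()\n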
"},{"location":"client_api/Model/#valor.Model.evaluate_segmentation","title":"valor.Model.evaluate_segmentation(datasets=None, filter_by=None, label_map=None, metrics_to_return=None, allow_retries=False)","text":"

Start a semantic-segmentation evaluation job.

Parameters:

Name Type Description Default datasets Union[Dataset, List[Dataset]]

The dataset or list of datasets to evaluate against.

None filter_by FilterType

Optional set of constraints to filter evaluation by.

None label_map Dict[Label, Label]

Optional mapping of individual labels to a grouper label. Useful when you need to evaluate performance using labels that differ across datasets and models.

None metrics_to_return Optional[List[str]]

The list of metrics to compute, store, and return to the user.

None allow_retries bool

Option to retry previously failed evaluations.

False

Returns:

Type Description Evaluation

A job object that can be used to track the status of the job and retrieve its metrics upon completion.

Source code in valor/coretypes.py
def evaluate_segmentation(\n    self,\n    datasets: Optional[Union[Dataset, List[Dataset]]] = None,\n    filter_by: Optional[FilterType] = None,\n    label_map: Optional[Dict[Label, Label]] = None,\n    metrics_to_return: Optional[List[str]] = None,\n    allow_retries: bool = False,\n) -> Evaluation:\n    \"\"\"\n    Start a semantic-segmentation evaluation job.\n\n    Parameters\n    ----------\n    datasets : Union[Dataset, List[Dataset]], optional\n        The dataset or list of datasets to evaluate against.\n    filter_by : FilterType, optional\n        Optional set of constraints to filter evaluation by.\n    label_map : Dict[Label, Label], optional\n        Optional mapping of individual labels to a grouper label. Useful when you need to evaluate performance using labels that differ across datasets and models.\n    metrics: List[str], optional\n        The list of metrics to compute, store, and return to the user.\n    allow_retries : bool, default = False\n        Option to retry previously failed evaluations.\n\n    Returns\n    -------\n    Evaluation\n        A job object that can be used to track the status of the job and get the metrics of it upon completion\n    \"\"\"\n    # format request\n    datum_filter = self._format_constraints(datasets, filter_by)\n    request = EvaluationRequest(\n        model_names=[self.name],  # type: ignore\n        datum_filter=datum_filter,\n        parameters=EvaluationParameters(\n            task_type=TaskType.SEMANTIC_SEGMENTATION,\n            label_map=self._create_label_map(label_map=label_map),\n            metrics_to_return=metrics_to_return,\n        ),\n    )\n\n    # create evaluation\n    evaluation = Client(self.conn).evaluate(\n        request, allow_retries=allow_retries\n    )\n    if len(evaluation) != 1:\n        raise RuntimeError\n    return evaluation[0]\n
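A usage sketch, assuming existing model and dataset objects:

>>> evaluation = model.evaluate_segmentation(datasets=[dataset])\n>>> evaluation.wait_for_completion()\n>>> evaluation.metrics\n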
"},{"location":"client_api/Model/#valor.Model.finalize_inferences","title":"valor.Model.finalize_inferences(dataset)","text":"

Finalizes the model over a dataset such that new predictions cannot be added to it.

Source code in valor/coretypes.py
def finalize_inferences(self, dataset: Union[Dataset, str]) -> None:\n    \"\"\"\n    Finalizes the model over a dataset such that new predictions cannot be added to it.\n    \"\"\"\n    return Client(self.conn).finalize_inferences(\n        dataset=dataset, model=self\n    )\n
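For example, once every prediction has been uploaded (names are illustrative; `add_prediction` is the client's standard upload method):

>>> model.add_prediction(dataset, prediction)
>>> model.finalize_inferences(dataset)  # no further predictions can be added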
"},{"location":"client_api/Model/#valor.Model.get","title":"valor.Model.get(name, connection=None) classmethod","text":"

Retrieves a model from the back end database.

Parameters:

Name Type Description Default name str

The name of the model.

required connection ClientConnection

An optional Valor client object for interacting with the API.

None

Returns:

Type Description Union[Model, None]

The model or 'None' if it doesn't exist.

Source code in valor/coretypes.py
@classmethod\ndef get(\n    cls,\n    name: str,\n    connection: Optional[ClientConnection] = None,\n) -> Union[Model, None]:\n    \"\"\"\n    Retrieves a model from the back end database.\n\n    Parameters\n    ----------\n    name : str\n        The name of the model.\n    connection : ClientConnection, optional\n        An optional Valor client object for interacting with the API.\n\n    Returns\n    -------\n    Union[valor.Model, None]\n        The model or 'None' if it doesn't exist.\n    \"\"\"\n    return Client(connection).get_model(name)\n
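Since `get` returns 'None' for a missing model, a get-or-create pattern is natural (the model name is illustrative, and `create` is assumed to be the companion classmethod):

>>> model = Model.get("yolo")
>>> if model is None:
...     model = Model.create("yolo")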
"},{"location":"client_api/Model/#valor.Model.get_evaluations","title":"valor.Model.get_evaluations(metrics_to_sort_by=None)","text":"

Get all evaluations associated with a given model.

Parameters:

Name Type Description Default metrics_to_sort_by Optional[Dict[str, Union[Dict[str, str], str]]]

An optional dict of metric types to sort the evaluations by.

None

Returns:

Type Description List[Evaluation]

A list of Evaluations associated with the model.

Source code in valor/coretypes.py
def get_evaluations(\n    self,\n    metrics_to_sort_by: Optional[\n        Dict[str, Union[Dict[str, str], str]]\n    ] = None,\n) -> List[Evaluation]:\n    \"\"\"\n    Get all evaluations associated with a given model.\n\n    Parameters\n    ----------\n    metrics_to_sort_by: dict[str, str | dict[str, str]], optional\n        An optional dict of metric types to sort the evaluations by.\n\n\n    Returns\n    ----------\n    List[Evaluation]\n        A list of `Evaluations` associated with the model.\n    \"\"\"\n    return Client(self.conn).get_evaluations(\n        models=[self], metrics_to_sort_by=metrics_to_sort_by\n    )\n
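A sketch of retrieving and inspecting evaluations; the exact shape of `metrics_to_sort_by` is assumed from its type hint:

>>> evaluations = model.get_evaluations(
...     metrics_to_sort_by={"mAP": "value"}  # hypothetical sort specification
... )
>>> [evaluation.status for evaluation in evaluations]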
"},{"location":"client_api/Model/#valor.Model.get_labels","title":"valor.Model.get_labels()","text":"

Get all labels associated with a given model.

Returns:

Type Description List[Label]

A list of Labels associated with the model.

Source code in valor/coretypes.py
def get_labels(\n    self,\n) -> List[Label]:\n    \"\"\"\n    Get all labels associated with a given model.\n\n    Returns\n    ----------\n    List[Label]\n        A list of `Labels` associated with the model.\n    \"\"\"\n    return Client(self.conn).get_labels_from_model(self)\n
"},{"location":"client_api/Model/#valor.Model.get_prediction","title":"valor.Model.get_prediction(dataset, datum)","text":"

Get a particular prediction.

Parameters:

Name Type Description Default dataset Union[Dataset, str]

The dataset the datum belongs to.

required datum Union[Datum, str]

The desired datum.

required

Returns:

Type Description Union[Prediction, None]

The matching prediction or 'None' if it doesn't exist.

Source code in valor/coretypes.py
def get_prediction(\n    self, dataset: Union[Dataset, str], datum: Union[Datum, str]\n) -> Union[Prediction, None]:\n    \"\"\"\n    Get a particular prediction.\n\n    Parameters\n    ----------\n    dataset: Union[Dataset, str]\n        The dataset the datum belongs to.\n    datum: Union[Datum, str]\n        The desired datum.\n\n    Returns\n    ----------\n    Union[Prediction, None]\n        The matching prediction or 'None' if it doesn't exist.\n    \"\"\"\n    return Client(self.conn).get_prediction(\n        dataset=dataset, model=self, datum=datum\n    )\n
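For example (dataset and datum identifiers are illustrative):

>>> prediction = model.get_prediction(dataset="coco-val", datum="uid1")
>>> if prediction is not None:
...     print(len(prediction.annotations))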
"},{"location":"client_api/Prediction/","title":"Prediction","text":"

Bases: StaticCollection

An object describing a prediction (e.g., a machine-drawn bounding box on an image).

Attributes:

Name Type Description datum Datum

The datum associated with the prediction.

annotations List[Annotation]

The list of annotations associated with the prediction.

Examples:

>>> Prediction(\n...     datum=Datum(uid=\"uid1\"),\n...     annotations=[\n...         Annotation(\n...             labels=[\n...                 Label(key=\"k1\", value=\"v1\", score=0.9),\n...                 Label(key=\"k1\", value=\"v1\", score=0.1)\n...             ],\n...         )\n...     ]\n... )\n
Source code in valor/coretypes.py
class Prediction(StaticCollection):\n    \"\"\"\n    An object describing a prediction (e.g., a machine-drawn bounding box on an image).\n\n    Attributes\n    ----------\n    datum : Datum\n        The datum associated with the prediction.\n    annotations : List[Annotation]\n        The list of annotations associated with the prediction.\n\n    Examples\n    --------\n    >>> Prediction(\n    ...     datum=Datum(uid=\"uid1\"),\n    ...     annotations=[\n    ...         Annotation(\n    ...             labels=[\n    ...                 Label(key=\"k1\", value=\"v1\", score=0.9),\n    ...                 Label(key=\"k1\", value=\"v1\", score=0.1)\n    ...             ],\n    ...         )\n    ...     ]\n    ... )\n    \"\"\"\n\n    datum: Datum = Datum.symbolic(owner=\"prediction\", name=\"datum\")\n    annotations: SymbolicList[Annotation] = SymbolicList[Annotation].symbolic(\n        owner=\"prediction\", name=\"annotations\"\n    )\n\n    def __init__(\n        self,\n        *,\n        datum: Datum,\n        annotations: List[Annotation],\n    ):\n        \"\"\"\n        Creates a prediction.\n\n        Parameters\n        ----------\n        datum : Datum\n            The datum that the prediction is operating over.\n        annotations : List[Annotation]\n            The list of predicted annotations.\n        \"\"\"\n        super().__init__(datum=datum, annotations=annotations)\n
"},{"location":"client_api/Prediction/#valor.Prediction-functions","title":"Functions","text":""},{"location":"client_api/Prediction/#valor.Prediction.__init__","title":"valor.Prediction.__init__(*, datum, annotations)","text":"

Creates a prediction.

Parameters:

Name Type Description Default datum Datum

The datum that the prediction is operating over.

required annotations List[Annotation]

The list of predicted annotations.

required Source code in valor/coretypes.py
def __init__(\n    self,\n    *,\n    datum: Datum,\n    annotations: List[Annotation],\n):\n    \"\"\"\n    Creates a prediction.\n\n    Parameters\n    ----------\n    datum : Datum\n        The datum that the prediction is operating over.\n    annotations : List[Annotation]\n        The list of predicted annotations.\n    \"\"\"\n    super().__init__(datum=datum, annotations=annotations)\n
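An object-detection flavoured sketch; the `bounding_box` keyword on `Annotation` is an assumption based on the schema types documented further below:

>>> Prediction(
...     datum=Datum(uid="uid1"),
...     annotations=[
...         Annotation(
...             labels=[Label(key="class", value="dog", score=0.9)],
...             bounding_box=Box.from_extrema(xmin=10, xmax=60, ymin=10, ymax=40),  # assumed keyword
...         )
...     ],
... )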
"},{"location":"client_api/Viz/","title":"Viz","text":""},{"location":"client_api/Viz/#valor.viz-classes","title":"Classes","text":""},{"location":"client_api/Viz/#valor.viz-functions","title":"Functions","text":""},{"location":"client_api/Viz/#valor.viz.create_combined_segmentation_mask","title":"valor.viz.create_combined_segmentation_mask(annotated_datum, label_key, filter_on_instance_segmentations=False)","text":"

Creates a combined segmentation mask from a list of segmentations.

Parameters:

Name Type Description Default annotated_datum Union[GroundTruth, Prediction]

A GroundTruth or Prediction containing segmentation annotations. These all must have the same image attribute.

required label_key str

The label key to use.

required filter_on_instance_segmentations bool

Whether to filter on instance segmentations or not.

False

Returns:

Type Description tuple

The first element of the tuple is the combined mask, as an RGB PIL image. The second element is a color legend: it's a dict with the unique labels as keys and the PIL image swatches as values.

Raises:

Type Description RuntimeError

If the segmentations don't all belong to the same image, or if a segmentation doesn't have label_key as the key of one of its labels.

ValueError

If there aren't any segmentations.

Source code in valor/viz.py
def create_combined_segmentation_mask(\n    annotated_datum: Union[GroundTruth, Prediction],\n    label_key: str,\n    filter_on_instance_segmentations: bool = False,\n) -> Tuple[Image.Image, Dict[str, Image.Image]]:\n    \"\"\"\n    Creates a combined segmentation mask from a list of segmentations.\n\n    Parameters\n    -------\n    annotated_datum : Union[GroundTruth, Prediction]\n        A list of segmentations. These all must have the same `image` attribute.\n    label_key : str\n        The label key to use.\n    filter_on_instance_segmentations : bool, optional\n        Whether to filter on instance segmentations or not.\n\n    Returns\n    -------\n    tuple\n        The first element of the tuple is the combined mask, as an RGB PIL image. The second\n        element is a color legend: it's a dict with the unique labels as keys and the\n        PIL image swatches as values.\n\n    Raises\n    ------\n    RuntimeError\n        If all segmentations don't belong to the same image or there is a\n        segmentation that doesn't have `label_key` as the key of one of its labels.\n    ValueError\n        If there aren't any segmentations.\n    \"\"\"\n\n    # validate input type\n    if not isinstance(annotated_datum, (GroundTruth, Prediction)):\n        raise ValueError(\"Expected either a 'GroundTruth' or 'Prediction'\")\n\n    # verify there are a nonzero number of annotations\n    if len(annotated_datum.annotations) == 0:\n        raise ValueError(\"annotations cannot be empty.\")\n\n    # validate raster size\n    img_h = None\n    img_w = None\n    for annotation in annotated_datum.annotations:\n        raster = annotation.raster\n        if raster.get_value() is None:\n            raise ValueError(\"No raster exists.\")\n        if img_h is None:\n            img_h = raster.height\n        if img_w is None:\n            img_w = raster.width\n        if (img_h != raster.height) or (img_w != raster.width):\n            raise ValueError(\n                f\"Size mismatch between rasters. {(img_h, img_w)} != {(raster.height, raster.width)}\"\n            )\n    if img_h is None or img_w is None:\n        raise ValueError(\n            f\"Segmentation bounds not properly defined. 
{(img_h, img_w)}\"\n        )\n\n    # unpack raster annotations\n    annotations: List[Annotation] = []\n    for annotation in annotated_datum.annotations:\n        if (\n            annotation.is_instance or False\n        ) == filter_on_instance_segmentations:\n            annotations.append(annotation)\n\n    # unpack label values\n    label_values = []\n    for annotation in annotations:\n        for label in annotation.labels:\n            if label.key == label_key:\n                label_values.append(label.value)\n    if not label_values:\n        raise RuntimeError(\n            f\"Annotation doesn't have a label with key `{label_key}`\"\n        )\n\n    # assign label coloring\n    unique_label_values = list(set(label_values))\n    label_value_to_color = {\n        v: COLOR_MAP[i] for i, v in enumerate(unique_label_values)\n    }\n    seg_colors = [label_value_to_color[v] for v in label_values]\n\n    # create mask\n    combined_mask = np.zeros((img_h, img_w, 3), dtype=np.uint8)\n    for annotation, color in zip(annotations, seg_colors):\n        raster = annotation.raster\n        if raster.get_value() is None:\n            raise ValueError(\"No raster exists.\")\n        if raster.array is not None:\n            if raster.geometry is None:\n                mask = raster.array\n            elif isinstance(raster.geometry, schemas.MultiPolygon):\n                mask = _polygons_to_binary_mask(\n                    raster.geometry.to_polygons(),\n                    img_w=img_w,\n                    img_h=img_h,\n                )\n            elif isinstance(raster.geometry, (schemas.Box, schemas.Polygon)):\n                mask = _polygons_to_binary_mask(\n                    [raster.geometry],\n                    img_w=img_w,\n                    img_h=img_h,\n                )\n            else:\n                continue\n            combined_mask[np.where(mask)] = color\n        else:\n            continue\n\n    legend = {\n        v: Image.new(\"RGB\", (20, 20), color)\n        for v, color in label_value_to_color.items()\n    }\n\n    return Image.fromarray(combined_mask), legend\n
"},{"location":"client_api/Viz/#valor.viz.draw_bounding_box_on_image","title":"valor.viz.draw_bounding_box_on_image(bounding_box, img, color=(255, 0, 0))","text":"

Draws a bounding polygon on an image. This operation is not done in place.

Parameters:

Name Type Description Default bounding_box Box

Bounding box to draw on the image.

required img Image

Pillow image to draw on.

required color Tuple[int, int, int]

RGB tuple of the color to use.

(255, 0, 0)

Returns:

Type Description img

Pillow image with bounding box drawn on it.

Source code in valor/viz.py
def draw_bounding_box_on_image(\n    bounding_box: schemas.Box,\n    img: Image.Image,\n    color: Tuple[int, int, int] = (255, 0, 0),\n) -> Image.Image:\n    \"\"\"Draws a bounding polygon on an image. This operation is not done in place.\n\n    Parameters\n    ----------\n    bounding_box\n        Bounding box to draw on the image.\n    img\n        Pillow image to draw on.\n    color\n        RGB tuple of the color to use.\n\n    Returns\n    -------\n    img\n        Pillow image with bounding box drawn on it.\n    \"\"\"\n    coords = bounding_box.get_value()\n    return _draw_bounding_polygon_on_image(\n        schemas.Polygon(coords), img, color=color, inplace=False\n    )\n
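For example:

>>> from PIL import Image
>>> img = Image.new("RGB", (100, 100))
>>> box = Box.from_extrema(xmin=10, xmax=60, ymin=10, ymax=40)
>>> img = draw_bounding_box_on_image(box, img, color=(0, 255, 0))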
"},{"location":"client_api/Viz/#valor.viz.draw_detections_on_image","title":"valor.viz.draw_detections_on_image(detections, img)","text":"

Draws detections (bounding boxes and labels) on an image.

Parameters:

Name Type Description Default detections List[Union[GroundTruth, Prediction]]

A list of GroundTruths or Predictions to draw on the image.

required img Image

The image to draw the detections on.

required

Returns:

Name Type Description img Image

An image with the detections drawn on.

Source code in valor/viz.py
def draw_detections_on_image(\n    detections: Sequence[Union[GroundTruth, Prediction]],\n    img: Image.Image,\n) -> Image.Image:\n    \"\"\"\n    Draws detections (bounding boxes and labels) on an image.\n\n    Parameters\n    ----------\n    detections : List[Union[GroundTruth, Prediction]]\n        A list of `GroundTruths` or `Predictions` to draw on the image.\n    img : Image.Image\n        The image to draw the detections on.\n\n    Returns\n    -------\n    img : Image.Image\n        An image with the detections drawn on.\n    \"\"\"\n\n    annotations = []\n    for datum in detections:\n        annotations.extend(datum.annotations)\n\n    for i, detection in enumerate(annotations):\n        if detection.raster and detection.is_instance is True:\n            img = _draw_detection_on_image(detection, img, inplace=i != 0)\n    return img\n
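Note that only annotations with a raster and `is_instance=True` are drawn, so a sketch might look like (names are illustrative):

>>> from PIL import Image
>>> img = Image.open("example.jpg")
>>> img = draw_detections_on_image([groundtruth, prediction], img)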
"},{"location":"client_api/Viz/#valor.viz.draw_raster_on_image","title":"valor.viz.draw_raster_on_image(raster, img, color=(255, 0, 0), alpha=0.4)","text":"

Draws the raster on top of an image. This operation is not done in place.

Parameters:

Name Type Description Default raster Raster

Raster to draw on the image.

required img Image

Pillow image to draw on.

required color Tuple[int, int, int]

RGB tuple of the color to use.

(255, 0, 0) alpha float

Alpha (transparency) value of the mask. 0 is fully transparent, 1 is fully opaque.

0.4 Source code in valor/viz.py
def draw_raster_on_image(\n    raster: schemas.Raster,\n    img: Image.Image,\n    color: Tuple[int, int, int] = (255, 0, 0),\n    alpha: float = 0.4,\n) -> Image.Image:\n    \"\"\"Draws the raster on top of an image. This operation is not done in place.\n\n    Parameters\n    ----------\n    raster\n        Raster to draw on the image.\n    img\n        Pillow image to draw on.\n    color\n        RGB tuple of the color to use.\n    alpha\n        Alpha (transparency) value of the mask. 0 is fully transparent, 1 is fully opaque.\n\n    Returns\n    -------\n    Image.Image\n        The image with the raster overlaid.\n    \"\"\"\n    img = img.copy()\n    binary_mask = raster.array\n    mask_arr = np.zeros(\n        (binary_mask.shape[0], binary_mask.shape[1], 3), dtype=np.uint8\n    )\n    mask_arr[binary_mask] = color\n    mask_img = Image.fromarray(mask_arr)\n\n    if mask_img.size != img.size:\n        raise ValueError(\"Input image and raster must be the same size.\")\n    blend = Image.blend(img, mask_img, alpha=alpha)\n    img.paste(blend, (0, 0), mask=Image.fromarray(binary_mask))\n\n    return img\n
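For example, overlaying one annotation's raster (names are illustrative; the mask must match the image size):

>>> raster = annotation.raster
>>> overlay = draw_raster_on_image(raster, img, color=(255, 0, 0), alpha=0.4)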
"},{"location":"client_api/Schemas/Filters/","title":"Filters","text":""},{"location":"client_api/Schemas/Filters/#valor.schemas.filters-classes","title":"Classes","text":""},{"location":"client_api/Schemas/Filters/#valor.schemas.filters.Constraint","title":"valor.schemas.filters.Constraint dataclass","text":"

Represents a constraint with a value and an operator.

Attributes: value : Any The value associated with the constraint. operator : str The operator used to define the constraint.

Source code in valor/schemas/filters.py
@dataclass\nclass Constraint:\n    \"\"\"\n    Represents a constraint with a value and an operator.\n\n    Attributes:\n        value : Any\n            The value associated with the constraint.\n        operator : str\n            The operator used to define the constraint.\n    \"\"\"\n\n    value: Any\n    operator: str\n
"},{"location":"client_api/Schemas/Filters/#valor.schemas.filters.Filter","title":"valor.schemas.filters.Filter dataclass","text":"

Used to filter Evaluations according to specific, user-defined criteria.

Attributes:

Name Type Description dataset_names (List[str], optional)

A list of Dataset names to filter on.

dataset_metadata (Dict[str, List[Constraint]], optional)

A dictionary of Dataset metadata to filter on.

model_names (List[str], optional)

A list of Model names to filter on.

model_metadata (Dict[str, List[Constraint]], optional)

A dictionary of Model metadata to filter on.

datum_uids (List[str], optional)

A list of Datum UIDs to filter on.

datum_metadata (Dict[str, List[Constraint]], optional)

A dictionary of Datum metadata to filter on.

task_types (List[TaskType], optional)

A list of task types to filter on.

annotation_metadata (Dict[str, List[Constraint]], optional)

A dictionary of Annotation metadata to filter on.

require_bounding_box (bool, optional)

A toggle for filtering by bounding boxes.

bounding_box_area (List[Constraint], optional)

An optional constraint to filter by bounding box area.

require_polygon (bool, optional)

A toggle for filtering by polygons.

polygon_area (List[Constraint], optional)

An optional constraint to filter by polygon area.

require_raster (bool, optional)

A toggle for filtering by rasters.

raster_area (List[Constraint], optional)

An optional constraint to filter by raster area.

labels (List[Label], optional)

A list of `Label`s to filter on.

label_ids (List[int], optional)

A list of label row IDs.

label_keys (List[str], optional)

A list of Label keys to filter on.

label_scores (List[Constraint], optional)

A list of Constraints which are used to filter Evaluations according to the Model's prediction scores.

Raises:

Type Description TypeError

If value isn't of the correct type.

ValueError

If the operator doesn't match one of the allowed patterns.

Source code in valor/schemas/filters.py
@dataclass\nclass Filter:\n    \"\"\"\n    Used to filter Evaluations according to specific, user-defined criteria.\n\n    Attributes\n    ----------\n    dataset_names : List[str], optional\n        A list of `Dataset` names to filter on.\n    dataset_metadata : Dict[str, List[Constraint]], optional\n        A dictionary of `Dataset` metadata to filter on.\n    model_names : List[str], optional\n        A list of `Model` names to filter on.\n    model_metadata : Dict[str, List[Constraint]], optional\n        A dictionary of `Model` metadata to filter on.\n    datum_uids : List[str], optional\n        A list of `Datum` UIDs to filter on.\n    datum_metadata : Dict[str, List[Constraint]], optional\n        A dictionary of `Datum` metadata to filter on.\n    task_types : List[TaskType], optional\n        A list of task types to filter on.\n    annotation_metadata : Dict[str, List[Constraint]], optional\n        A dictionary of `Annotation` metadata to filter on.\n    require_bounding_box : bool, optional\n        A toggle for filtering by bounding boxes.\n    bounding_box_area : bool, optional\n        An optional constraint to filter by bounding box area.\n    require_polygon : bool, optional\n        A toggle for filtering by polygons.\n    polygon_area : bool, optional\n        An optional constraint to filter by polygon area.\n    require_raster : bool, optional\n        A toggle for filtering by rasters.\n    raster_area : bool, optional\n        An optional constraint to filter by raster area.\n    labels : List[Label], optional\n        A list of `Labels' to filter on.\n    label_ids : List[int], optional\n        A list of label row id's.\n    label_keys : List[str], optional\n        A list of `Label` keys to filter on.\n    label_scores : List[Constraint], optional\n        A list of `Constraints` which are used to filter `Evaluations` according to the `Model`'s prediction scores.\n\n    Raises\n    ------\n    TypeError\n        If `value` isn't of the correct type.\n    ValueError\n        If the `operator` doesn't match one of the allowed patterns.\n    \"\"\"\n\n    # datasets\n    dataset_names: Optional[List[str]] = None\n    dataset_metadata: Optional[Dict[str, List[Constraint]]] = None\n\n    # models\n    model_names: Optional[List[str]] = None\n    model_metadata: Optional[Dict[str, List[Constraint]]] = None\n\n    # datums\n    datum_uids: Optional[List[str]] = None\n    datum_metadata: Optional[Dict[str, List[Constraint]]] = None\n\n    # annotations\n    task_types: Optional[List[TaskType]] = None\n    annotation_metadata: Optional[Dict[str, List[Constraint]]] = None\n\n    # geometries\n    require_bounding_box: Optional[bool] = None\n    bounding_box_area: Optional[List[Constraint]] = None\n    require_polygon: Optional[bool] = None\n    polygon_area: Optional[List[Constraint]] = None\n    require_raster: Optional[bool] = None\n    raster_area: Optional[List[Constraint]] = None\n\n    # labels\n    labels: Optional[List[Dict[str, str]]] = None\n    label_ids: Optional[List[int]] = None\n    label_keys: Optional[List[str]] = None\n    label_scores: Optional[List[Constraint]] = None\n\n    @staticmethod\n    def _supports_and():\n        return {\n            \"area\",\n            \"score\",\n            \"metadata\",\n        }\n\n    @staticmethod\n    def _supports_or():\n        return {\n            \"name\",\n            \"uid\",\n            \"task_type\",\n            \"labels\",\n            \"keys\",\n        }\n\n    def __post_init__(self):\n        def 
_unpack_metadata(metadata: Optional[dict]) -> Union[dict, None]:\n            if metadata is None:\n                return None\n            for k, vlist in metadata.items():\n                metadata[k] = [\n                    v if isinstance(v, Constraint) else Constraint(**v)\n                    for v in vlist\n                ]\n            return metadata\n\n        # unpack metadata\n        self.dataset_metadata = _unpack_metadata(self.dataset_metadata)\n        self.model_metadata = _unpack_metadata(self.model_metadata)\n        self.datum_metadata = _unpack_metadata(self.datum_metadata)\n        self.annotation_metadata = _unpack_metadata(self.annotation_metadata)\n\n        def _unpack_list(\n            vlist: Optional[list], object_type: type\n        ) -> Optional[list]:\n            def _handle_conversion(v, object_type):\n                if object_type is Constraint:\n                    return object_type(**v)\n                else:\n                    return object_type(v)\n\n            if vlist is None:\n                return None\n\n            return [\n                (\n                    v\n                    if isinstance(v, object_type)\n                    else _handle_conversion(v=v, object_type=object_type)\n                )\n                for v in vlist\n            ]\n\n        # unpack tasktypes\n        self.task_types = _unpack_list(self.task_types, TaskType)\n\n        # unpack area\n        self.bounding_box_area = _unpack_list(\n            self.bounding_box_area, Constraint\n        )\n        self.polygon_area = _unpack_list(self.polygon_area, Constraint)\n        self.raster_area = _unpack_list(self.raster_area, Constraint)\n\n        # scores\n        self.label_scores = _unpack_list(self.label_scores, Constraint)\n\n    @classmethod\n    def create(cls, expressions: List[Any]):\n        \"\"\"\n        Parses a list of `BinaryExpression` to create a `schemas.Filter` object.\n\n        Parameters\n        ----------\n        expressions: Sequence[Union[BinaryExpression, Sequence[BinaryExpression]]]\n            A list of (lists of) `BinaryExpressions' to parse into a `Filter` object.\n        \"\"\"\n\n        constraints = _parse_listed_expressions(expressions)\n\n        # create filter\n        filter_request = cls()\n\n        # metadata constraints\n        for attr in [\n            \"dataset_metadata\",\n            \"model_metadata\",\n            \"datum_metadata\",\n            \"annotation_metadata\",\n            \"bounding_box_area\",\n            \"polygon_area\",\n            \"raster_area\",\n            \"label_scores\",\n        ]:\n            if attr in constraints:\n                setattr(filter_request, attr, constraints[attr])\n\n        # boolean constraints\n        for attr in [\n            \"require_bounding_box\",\n            \"require_polygon\",\n            \"require_raster\",\n        ]:\n            if attr in constraints:\n                for constraint in constraints[attr]:\n                    if constraint.operator == \"exists\":\n                        setattr(filter_request, attr, True)\n                    elif constraint.operator == \"is_none\":\n                        setattr(filter_request, attr, False)\n\n        # equality constraints\n        for attr in [\n            \"dataset_names\",\n            \"model_names\",\n            \"datum_uids\",\n            \"task_types\",\n            \"label_keys\",\n        ]:\n            if attr in constraints:\n                setattr(\n               
     filter_request,\n                    attr,\n                    [expr.value for expr in constraints[attr]],\n                )\n\n        # edge case - label list\n        if \"labels\" in constraints:\n            setattr(\n                filter_request,\n                \"labels\",\n                [\n                    {label[\"key\"]: label[\"value\"]}\n                    for labels in constraints[\"labels\"]\n                    for label in labels.value\n                ],\n            )\n\n        return filter_request\n
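A direct-construction sketch combining a few of the fields above (the values are illustrative):

>>> f = Filter(
...     dataset_names=["coco-val"],
...     label_keys=["class"],
...     label_scores=[Constraint(value=0.5, operator=">=")],
...     require_bounding_box=True,
... )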
"},{"location":"client_api/Schemas/Filters/#valor.schemas.filters.Filter-functions","title":"Functions","text":""},{"location":"client_api/Schemas/Filters/#valor.schemas.filters.Filter.create","title":"valor.schemas.filters.Filter.create(expressions) classmethod","text":"

Parses a list of BinaryExpression to create a schemas.Filter object.

Parameters:

Name Type Description Default expressions List[Any]

A list of (lists of) `BinaryExpression`s to parse into a `Filter` object.

required Source code in valor/schemas/filters.py
@classmethod\ndef create(cls, expressions: List[Any]):\n    \"\"\"\n    Parses a list of `BinaryExpression` to create a `schemas.Filter` object.\n\n    Parameters\n    ----------\n    expressions: Sequence[Union[BinaryExpression, Sequence[BinaryExpression]]]\n        A list of (lists of) `BinaryExpressions' to parse into a `Filter` object.\n    \"\"\"\n\n    constraints = _parse_listed_expressions(expressions)\n\n    # create filter\n    filter_request = cls()\n\n    # metadata constraints\n    for attr in [\n        \"dataset_metadata\",\n        \"model_metadata\",\n        \"datum_metadata\",\n        \"annotation_metadata\",\n        \"bounding_box_area\",\n        \"polygon_area\",\n        \"raster_area\",\n        \"label_scores\",\n    ]:\n        if attr in constraints:\n            setattr(filter_request, attr, constraints[attr])\n\n    # boolean constraints\n    for attr in [\n        \"require_bounding_box\",\n        \"require_polygon\",\n        \"require_raster\",\n    ]:\n        if attr in constraints:\n            for constraint in constraints[attr]:\n                if constraint.operator == \"exists\":\n                    setattr(filter_request, attr, True)\n                elif constraint.operator == \"is_none\":\n                    setattr(filter_request, attr, False)\n\n    # equality constraints\n    for attr in [\n        \"dataset_names\",\n        \"model_names\",\n        \"datum_uids\",\n        \"task_types\",\n        \"label_keys\",\n    ]:\n        if attr in constraints:\n            setattr(\n                filter_request,\n                attr,\n                [expr.value for expr in constraints[attr]],\n            )\n\n    # edge case - label list\n    if \"labels\" in constraints:\n        setattr(\n            filter_request,\n            \"labels\",\n            [\n                {label[\"key\"]: label[\"value\"]}\n                for labels in constraints[\"labels\"]\n                for label in labels.value\n            ],\n        )\n\n    return filter_request\n
"},{"location":"client_api/Schemas/Evaluation/EvaluationParameters/","title":"EvaluationParameters","text":""},{"location":"client_api/Schemas/Evaluation/EvaluationParameters/#valor.schemas.evaluation-classes","title":"Classes","text":""},{"location":"client_api/Schemas/Evaluation/EvaluationParameters/#valor.schemas.evaluation.EvaluationParameters","title":"valor.schemas.evaluation.EvaluationParameters dataclass","text":"

Defines parameters for evaluation methods.

Attributes:

Name Type Description task_type TaskType

The task type of a given evaluation.

label_map Optional[List[List[List[str]]]]

Optional mapping of individual labels to a grouper label. Useful when you need to evaluate performance using labels that differ across datasets and models.

metrics (List[str], optional)

The list of metrics to compute, store, and return to the user.

convert_annotations_to_type AnnotationType | None = None

The type to convert all annotations to.

iou_thresholds_to_compute (List[float], optional)

A list of floats describing which Intersection over Union (IoU) thresholds to use when calculating metrics (e.g., mAP).

iou_thresholds_to_return (List[float], optional)

A list of floats describing which Intersection over Union (IoU) thresholds to calculate a metric for. Must be a subset of iou_thresholds_to_compute.

recall_score_threshold float, default=0

The confidence score threshold for use when determining whether to count a prediction as a true positive or not while calculating Average Recall.

pr_curve_iou_threshold (float, optional)

The IOU threshold to use when calculating precision-recall curves for object detection tasks. Defaults to 0.5.

pr_curve_max_examples int

The maximum number of datum examples to store when calculating PR curves.

Source code in valor/schemas/evaluation.py
@dataclass\nclass EvaluationParameters:\n    \"\"\"\n    Defines parameters for evaluation methods.\n\n    Attributes\n    ----------\n    task_type: TaskType\n        The task type of a given evaluation.\n    label_map: Optional[List[List[List[str]]]]\n        Optional mapping of individual labels to a grouper label. Useful when you need to evaluate performance using labels that differ across datasets and models.\n    metrics: List[str], optional\n        The list of metrics to compute, store, and return to the user.\n    convert_annotations_to_type: AnnotationType | None = None\n        The type to convert all annotations to.\n    iou_thresholds_to_compute: List[float], optional\n        A list of floats describing which Intersection over Unions (IoUs) to use when calculating metrics (i.e., mAP).\n    iou_thresholds_to_return: List[float], optional\n        A list of floats describing which Intersection over Union (IoUs) thresholds to calculate a metric for. Must be a subset of `iou_thresholds_to_compute`.\n    recall_score_threshold: float, default=0\n        The confidence score threshold for use when determining whether to count a prediction as a true positive or not while calculating Average Recall.\n    pr_curve_iou_threshold: float, optional\n            The IOU threshold to use when calculating precision-recall curves for object detection tasks. Defaults to 0.5.\n    pr_curve_max_examples: int\n        The maximum number of datum examples to store when calculating PR curves.\n    \"\"\"\n\n    task_type: TaskType\n    label_map: Optional[List[List[List[str]]]] = None\n    metrics_to_return: Optional[List[str]] = None\n\n    convert_annotations_to_type: Optional[AnnotationType] = None\n    iou_thresholds_to_compute: Optional[List[float]] = None\n    iou_thresholds_to_return: Optional[List[float]] = None\n    recall_score_threshold: float = 0\n    pr_curve_iou_threshold: float = 0.5\n    pr_curve_max_examples: int = 1\n
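For example, parameters for an object-detection evaluation (the threshold values are illustrative):

>>> params = EvaluationParameters(
...     task_type=TaskType.OBJECT_DETECTION,
...     iou_thresholds_to_compute=[0.5, 0.75],
...     iou_thresholds_to_return=[0.5],
...     pr_curve_iou_threshold=0.5,
... )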
"},{"location":"client_api/Schemas/Evaluation/EvaluationParameters/#valor.schemas.evaluation.EvaluationRequest","title":"valor.schemas.evaluation.EvaluationRequest dataclass","text":"

An evaluation request.

Defines important attributes of the API's EvaluationRequest.

Attributes:

Name Type Description model_names List[str]

The list of models we want to evaluate by name.

datum_filter Filter

The filter object used to define what the model(s) is evaluating against.

parameters EvaluationParameters

Any parameters that are used to modify an evaluation method.

Source code in valor/schemas/evaluation.py
@dataclass\nclass EvaluationRequest:\n    \"\"\"\n    An evaluation request.\n\n    Defines important attributes of the API's `EvaluationRequest`.\n\n    Attributes\n    ----------\n    model_names : List[str]\n        The list of models we want to evaluate by name.\n    datum_filter : schemas.Filter\n        The filter object used to define what the model(s) is evaluating against.\n    parameters : EvaluationParameters\n        Any parameters that are used to modify an evaluation method.\n    \"\"\"\n\n    model_names: Union[str, List[str]]\n    datum_filter: Filter\n    parameters: EvaluationParameters\n\n    def __post_init__(self):\n        if isinstance(self.datum_filter, dict):\n            self.datum_filter = Filter(**self.datum_filter)\n        if isinstance(self.parameters, dict):\n            self.parameters = EvaluationParameters(**self.parameters)\n
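A request then wraps model names, a filter, and the parameters (names are illustrative; `params` is the EvaluationParameters instance sketched above):

>>> request = EvaluationRequest(
...     model_names=["yolo"],
...     datum_filter=Filter(dataset_names=["coco-val"]),
...     parameters=params,
... )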
"},{"location":"client_api/Schemas/Spatial/Box/","title":"Box","text":"

Bases: Polygon

A Box is a polygon that is constrained to 4 unique points.

Note that this does not need to be axis-aligned.

Parameters:

Name Type Description Default value List[List[Tuple[float, float]]]

A polygon value representing a box.

required

Attributes:

Name Type Description area polygon boundary holes xmin xmax ymin ymax

Examples:

>>> Box([[(0,0), (0,1), (1,1), (1,0), (0,0)]])\n

Create a Box using extrema.

>>> Box.from_extrema(\n...     xmin=0, xmax=1,\n...     ymin=0, ymax=1,\n... )\n
Source code in valor/schemas/symbolic/types.py
class Box(Polygon):\n    \"\"\"\n    A Box is a polygon that is constrained to 4 unique points.\n\n    Note that this does not need to be axis-aligned.\n\n    Parameters\n    ----------\n    value : List[List[Tuple[float, float]]], optional\n        An polygon value representing a box.\n\n    Attributes\n    ----------\n    area\n    polygon\n    boundary\n    holes\n    xmin\n    xmax\n    ymin\n    ymax\n\n    Examples\n    --------\n    >>> Box([[(0,0), (0,1), (1,1), (1,0), (0,0)]])\n\n    Create a Box using extrema.\n    >>> Box.from_extrema(\n    ...     xmin=0, xmax=1,\n    ...     ymin=0, ymax=1,\n    ... )\n    \"\"\"\n\n    def __init__(\n        self, value: typing.List[typing.List[typing.Tuple[float, float]]]\n    ):\n        super().__init__(value=value)\n\n    @classmethod\n    def __validate__(cls, value: typing.Any):\n        \"\"\"\n        Validates typing.\n\n        Parameters\n        ----------\n        value : typing.Any\n            The value to validate.\n\n        Raises\n        ------\n        TypeError\n            If the value type is not supported.\n        \"\"\"\n        Polygon.__validate__(value)\n        if len(value) != 1:\n            raise ValueError(\"Box should not contain holes.\")\n        elif len(value[0]) != 5:\n            raise ValueError(\"Box should consist of four unique points.\")\n\n    @classmethod\n    def decode_value(\n        cls,\n        value: typing.Optional[typing.List[typing.List[typing.List[float]]]],\n    ):\n        \"\"\"Decode object from JSON compatible dictionary.\"\"\"\n        if value is None:\n            return None\n        return super().decode_value(value)\n\n    @classmethod\n    def from_extrema(\n        cls,\n        xmin: float,\n        xmax: float,\n        ymin: float,\n        ymax: float,\n    ):\n        \"\"\"\n        Create a Box from extrema values.\n\n        Parameters\n        ----------\n        xmin : float\n            Minimum x-coordinate of the bounding box.\n        xmax : float\n            Maximum x-coordinate of the bounding box.\n        ymin : float\n            Minimum y-coordinate of the bounding box.\n        ymax : float\n            Maximum y-coordinate of the bounding box.\n\n        Returns\n        -------\n        Box\n            A Box created from the provided extrema values.\n        \"\"\"\n        points = [\n            [\n                (xmin, ymin),\n                (xmax, ymin),\n                (xmax, ymax),\n                (xmin, ymax),\n                (xmin, ymin),\n            ]\n        ]\n        return cls(value=points)\n\n    def to_polygon(self) -> Polygon:\n        \"\"\"\n        Converts box to a generic polygon.\n\n        Returns\n        -------\n        Polygon\n            The box as a Polygon.\n        \"\"\"\n        return Polygon(self.get_value())\n
"},{"location":"client_api/Schemas/Spatial/Box/#valor.schemas.Box-functions","title":"Functions","text":""},{"location":"client_api/Schemas/Spatial/Box/#valor.schemas.Box.__validate__","title":"valor.schemas.Box.__validate__(value) classmethod","text":"

Validates typing.

Parameters:

Name Type Description Default value Any

The value to validate.

required

Raises:

Type Description TypeError

If the value type is not supported.

Source code in valor/schemas/symbolic/types.py
@classmethod\ndef __validate__(cls, value: typing.Any):\n    \"\"\"\n    Validates typing.\n\n    Parameters\n    ----------\n    value : typing.Any\n        The value to validate.\n\n    Raises\n    ------\n    TypeError\n        If the value type is not supported.\n    \"\"\"\n    Polygon.__validate__(value)\n    if len(value) != 1:\n        raise ValueError(\"Box should not contain holes.\")\n    elif len(value[0]) != 5:\n        raise ValueError(\"Box should consist of four unique points.\")\n
"},{"location":"client_api/Schemas/Spatial/Box/#valor.schemas.Box.decode_value","title":"valor.schemas.Box.decode_value(value) classmethod","text":"

Decode object from JSON compatible dictionary.

Source code in valor/schemas/symbolic/types.py
@classmethod\ndef decode_value(\n    cls,\n    value: typing.Optional[typing.List[typing.List[typing.List[float]]]],\n):\n    \"\"\"Decode object from JSON compatible dictionary.\"\"\"\n    if value is None:\n        return None\n    return super().decode_value(value)\n
"},{"location":"client_api/Schemas/Spatial/Box/#valor.schemas.Box.from_extrema","title":"valor.schemas.Box.from_extrema(xmin, xmax, ymin, ymax) classmethod","text":"

Create a Box from extrema values.

Parameters:

Name Type Description Default xmin float

Minimum x-coordinate of the bounding box.

required xmax float

Maximum x-coordinate of the bounding box.

required ymin float

Minimum y-coordinate of the bounding box.

required ymax float

Maximum y-coordinate of the bounding box.

required

Returns:

Type Description Box

A Box created from the provided extrema values.

Source code in valor/schemas/symbolic/types.py
@classmethod\ndef from_extrema(\n    cls,\n    xmin: float,\n    xmax: float,\n    ymin: float,\n    ymax: float,\n):\n    \"\"\"\n    Create a Box from extrema values.\n\n    Parameters\n    ----------\n    xmin : float\n        Minimum x-coordinate of the bounding box.\n    xmax : float\n        Maximum x-coordinate of the bounding box.\n    ymin : float\n        Minimum y-coordinate of the bounding box.\n    ymax : float\n        Maximum y-coordinate of the bounding box.\n\n    Returns\n    -------\n    Box\n        A Box created from the provided extrema values.\n    \"\"\"\n    points = [\n        [\n            (xmin, ymin),\n            (xmax, ymin),\n            (xmax, ymax),\n            (xmin, ymax),\n            (xmin, ymin),\n        ]\n    ]\n    return cls(value=points)\n
"},{"location":"client_api/Schemas/Spatial/Box/#valor.schemas.Box.to_polygon","title":"valor.schemas.Box.to_polygon()","text":"

Converts box to a generic polygon.

Returns:

Type Description Polygon

The box as a Polygon.

Source code in valor/schemas/symbolic/types.py
def to_polygon(self) -> Polygon:\n    \"\"\"\n    Converts box to a generic polygon.\n\n    Returns\n    -------\n    Polygon\n        The box as a Polygon.\n    \"\"\"\n    return Polygon(self.get_value())\n
"},{"location":"client_api/Schemas/Spatial/LineString/","title":"LineString","text":"

Bases: Spatial

Represents a line.

Follows the GeoJSON specification (RFC 7946).

Parameters:

Name Type Description Default value List[Tuple[float, float]]

A linestring.

required

Examples:

Create a line.

>>> LineString([(0,0), (0,1), (1,1)])\n
Source code in valor/schemas/symbolic/types.py
class LineString(Spatial):\n    \"\"\"\n    Represents a line.\n\n    Follows the GeoJSON specification (RFC 7946).\n\n    Parameters\n    ----------\n    value : List[Tuple[float, float]], optional\n        A linestring.\n\n    Examples\n    --------\n    Create a line.\n    >>> LineString([(0,0), (0,1), (1,1)])\n    \"\"\"\n\n    def __init__(self, value: typing.List[typing.Tuple[float, float]]):\n        super().__init__(value=value)\n\n    @classmethod\n    def __validate__(cls, value: typing.Any):\n        MultiPoint.__validate__(value)\n        if len(value) < 2:\n            raise ValueError(\n                \"At least two points are required to make a line.\"\n            )\n\n    @classmethod\n    def decode_value(\n        cls, value: typing.Optional[typing.List[typing.List[float]]]\n    ):\n        \"\"\"Decode object from JSON compatible dictionary.\"\"\"\n        if value is None:\n            return None\n        return cls([(point[0], point[1]) for point in value])\n
"},{"location":"client_api/Schemas/Spatial/LineString/#valor.schemas.LineString-functions","title":"Functions","text":""},{"location":"client_api/Schemas/Spatial/LineString/#valor.schemas.LineString.decode_value","title":"valor.schemas.LineString.decode_value(value) classmethod","text":"

Decode object from JSON compatible dictionary.

Source code in valor/schemas/symbolic/types.py
@classmethod\ndef decode_value(\n    cls, value: typing.Optional[typing.List[typing.List[float]]]\n):\n    \"\"\"Decode object from JSON compatible dictionary.\"\"\"\n    if value is None:\n        return None\n    return cls([(point[0], point[1]) for point in value])\n
"},{"location":"client_api/Schemas/Spatial/MultiLineString/","title":"MultiLineString","text":"

Bases: Spatial

Represents a list of lines.

Follows the GeoJSON specification (RFC 7946).

Parameters:

Name Type Description Default value List[List[Tuple[float, float]]]

A multilinestring.

required

Examples:

Create a single line.

>>> MultiLineString([[(0,0), (0,1), (1,1), (0,0)]])\n

Create 3 lines.

>>> MultiLineString(\n...     [\n...         [(0,0), (0,1), (1,1)],\n...         [(0.1, 0.1), (0.1, 0.2), (0.2, 0.2)],\n...         [(0.6, 0.6), (0.6, 0.7), (0.7, 0.7)],\n...     ]\n... )\n
Source code in valor/schemas/symbolic/types.py
class MultiLineString(Spatial):\n    \"\"\"\n    Represents a list of lines.\n\n    Follows the GeoJSON specification (RFC 7946).\n\n    Parameters\n    ----------\n    value : List[List[Tuple[float, float]]], optional\n        A multilinestring.\n\n    Examples\n    --------\n    Create a single line.\n    >>> MultiLineString([[(0,0), (0,1), (1,1), (0,0)]])\n\n    Create 3 lines.\n    >>> MultiLineString(\n    ...     [\n    ...         [(0,0), (0,1), (1,1)],\n    ...         [(0.1, 0.1), (0.1, 0.2), (0.2, 0.2)],\n    ...         [(0.6, 0.6), (0.6, 0.7), (0.7, 0.7)],\n    ...     ]\n    ... )\n    \"\"\"\n\n    def __init__(\n        self,\n        value: typing.List[typing.List[typing.Tuple[float, float]]],\n    ):\n        super().__init__(value=value)\n\n    @classmethod\n    def __validate__(cls, value: typing.Any):\n        if not isinstance(value, list):\n            raise TypeError(\n                f\"Expected type 'List[List[Tuple[float, float]]]' received type '{type(value).__name__}'\"\n            )\n        for line in value:\n            LineString.__validate__(line)\n\n    @classmethod\n    def decode_value(\n        cls,\n        value: typing.Optional[typing.List[typing.List[typing.List[float]]]],\n    ):\n        \"\"\"Decode object from JSON compatible dictionary.\"\"\"\n        if value is None:\n            return None\n        return cls(\n            [[(point[0], point[1]) for point in line] for line in value]\n        )\n
"},{"location":"client_api/Schemas/Spatial/MultiLineString/#valor.schemas.MultiLineString-functions","title":"Functions","text":""},{"location":"client_api/Schemas/Spatial/MultiLineString/#valor.schemas.MultiLineString.decode_value","title":"valor.schemas.MultiLineString.decode_value(value) classmethod","text":"

Decode object from JSON compatible dictionary.

Source code in valor/schemas/symbolic/types.py
@classmethod\ndef decode_value(\n    cls,\n    value: typing.Optional[typing.List[typing.List[typing.List[float]]]],\n):\n    \"\"\"Decode object from JSON compatible dictionary.\"\"\"\n    if value is None:\n        return None\n    return cls(\n        [[(point[0], point[1]) for point in line] for line in value]\n    )\n
"},{"location":"client_api/Schemas/Spatial/MultiPoint/","title":"MultiPoint","text":"

Bases: Spatial

Represents a list of points.

Follows the GeoJSON specification (RFC 7946).

Parameters:

Name Type Description Default value List[Tuple[float, float]]

A multipoint.

required

Examples:

>>> MultiPoint([(0,0), (0,1), (1,1)])\n
Source code in valor/schemas/symbolic/types.py
class MultiPoint(Spatial):\n    \"\"\"\n    Represents a list of points.\n\n    Follows the GeoJSON specification (RFC 7946).\n\n    Parameters\n    ----------\n    value : List[Tuple[float, float]], optional\n        A multipoint.\n\n    Examples\n    --------\n    >>> MultiPoint([(0,0), (0,1), (1,1)])\n    \"\"\"\n\n    def __init__(\n        self,\n        value: typing.List[typing.Tuple[float, float]],\n    ):\n        super().__init__(value=value)\n\n    @classmethod\n    def __validate__(cls, value: typing.Any):\n        if not isinstance(value, list):\n            raise TypeError(\n                f\"Expected 'typing.List[typing.Tuple[float, float]]' received type '{type(value).__name__}'\"\n            )\n        for point in value:\n            Point.__validate__(point)\n\n    @classmethod\n    def decode_value(\n        cls, value: typing.Optional[typing.List[typing.List[float]]]\n    ):\n        \"\"\"Decode object from JSON compatible dictionary.\"\"\"\n        if value is None:\n            return None\n        return cls([(point[0], point[1]) for point in value])\n
"},{"location":"client_api/Schemas/Spatial/MultiPoint/#valor.schemas.MultiPoint-functions","title":"Functions","text":""},{"location":"client_api/Schemas/Spatial/MultiPoint/#valor.schemas.MultiPoint.decode_value","title":"valor.schemas.MultiPoint.decode_value(value) classmethod","text":"

Decode object from JSON compatible dictionary.

Source code in valor/schemas/symbolic/types.py
@classmethod\ndef decode_value(\n    cls, value: typing.Optional[typing.List[typing.List[float]]]\n):\n    \"\"\"Decode object from JSON compatible dictionary.\"\"\"\n    if value is None:\n        return None\n    return cls([(point[0], point[1]) for point in value])\n
"},{"location":"client_api/Schemas/Spatial/Point/","title":"Point","text":"

Bases: Spatial, Equatable

Represents a point in 2D space.

Follows the GeoJSON specification (RFC 7946).

Parameters:

Name Type Description Default value Tuple[float, float]

A point.

required

Examples:

>>> Point((1,2))\n
Source code in valor/schemas/symbolic/types.py
class Point(Spatial, Equatable):\n    \"\"\"\n    Represents a point in 2D space.\n\n    Follows the GeoJSON specification (RFC 7946).\n\n    Parameters\n    ----------\n    value : Tuple[float, float], optional\n        A point.\n\n    Examples\n    --------\n    >>> Point((1,2))\n    \"\"\"\n\n    def __init__(\n        self,\n        value: typing.Tuple[float, float],\n    ):\n        super().__init__(value=value)\n\n    @classmethod\n    def __validate__(cls, value: typing.Any):\n        if not isinstance(value, tuple):\n            raise TypeError(\n                f\"Expected type 'typing.Tuple[float, float]' received type '{type(value).__name__}'\"\n            )\n        elif len(value) != 2:\n            raise ValueError(\"\")\n        for item in value:\n            if not isinstance(item, (int, float, np.floating)):\n                raise TypeError(\n                    f\"Expected type '{float.__name__}' received type '{type(item).__name__}'\"\n                )\n\n    @classmethod\n    def decode_value(cls, value: typing.Optional[typing.List[float]]):\n        \"\"\"Decode object from JSON compatible dictionary.\"\"\"\n        if value is None:\n            return None\n        return cls((value[0], value[1]))\n\n    def encode_value(self) -> typing.Any:\n        \"\"\"Encode object to JSON compatible dictionary.\"\"\"\n        value = self.get_value()\n        if value is None:\n            return None\n        return (float(value[0]), float(value[1]))\n\n    def tuple(self):\n        return self.get_value()\n\n    def resize(\n        self,\n        og_img_h=10,\n        og_img_w=10,\n        new_img_h=100,\n        new_img_w=100,\n    ):\n        value = self.get_value()\n        h_ratio = new_img_h / og_img_h\n        w_ratio = new_img_w / og_img_w\n        return Point((value[0] * h_ratio, value[1] * w_ratio))\n\n    @property\n    def x(self):\n        return self.get_value()[0]\n\n    @property\n    def y(self):\n        return self.get_value()[1]\n
"},{"location":"client_api/Schemas/Spatial/Point/#valor.schemas.Point-functions","title":"Functions","text":""},{"location":"client_api/Schemas/Spatial/Point/#valor.schemas.Point.decode_value","title":"valor.schemas.Point.decode_value(value) classmethod","text":"

Decode object from JSON compatible dictionary.

Source code in valor/schemas/symbolic/types.py
@classmethod\ndef decode_value(cls, value: typing.Optional[typing.List[float]]):\n    \"\"\"Decode object from JSON compatible dictionary.\"\"\"\n    if value is None:\n        return None\n    return cls((value[0], value[1]))\n
"},{"location":"client_api/Schemas/Spatial/Point/#valor.schemas.Point.encode_value","title":"valor.schemas.Point.encode_value()","text":"

Encode object to JSON compatible dictionary.

Source code in valor/schemas/symbolic/types.py
def encode_value(self) -> typing.Any:\n    \"\"\"Encode object to JSON compatible dictionary.\"\"\"\n    value = self.get_value()\n    if value is None:\n        return None\n    return (float(value[0]), float(value[1]))\n
"},{"location":"client_api/Schemas/Spatial/Polygon/","title":"Polygon","text":"

Bases: Spatial

Represents a polygon with a boundary and optional holes.

Follows the GeoJSON specification (RFC 7946).

Parameters:

Name Type Description Default value List[List[Tuple[float, float]]]

A polygon.

required

Attributes:

Name Type Description area Float boundary List[Tuple[float, float]] holes List[List[Tuple[float, float]]] xmin float xmax float ymin float ymax float

Examples:

Create a polygon without any holes.

>>> Polygon([[(0,0), (0,1), (1,1), (0,0)]])\n

Create a polygon with 2 holes.

>>> Polygon(\n...     [\n...         [(0,0), (0,1), (1,1), (0,0)],\n...         [(0.1, 0.1), (0.1, 0.2), (0.2, 0.2), (0.1, 0.1)],\n...         [(0.6, 0.6), (0.6, 0.7), (0.7, 0.7), (0.6, 0.6)],\n...     ]\n... )\n
Source code in valor/schemas/symbolic/types.py
class Polygon(Spatial):\n    \"\"\"\n    Represents a polygon with a boundary and optional holes.\n\n    Follows the GeoJSON specification (RFC 7946).\n\n    Parameters\n    ----------\n    value : List[List[Tuple[float, float]]], optional\n        A polygon.\n\n    Attributes\n    ----------\n    area\n    boundary\n    holes\n    xmin\n    xmax\n    ymin\n    ymax\n\n    Examples\n    --------\n    Create a polygon without any holes.\n    >>> Polygon([[(0,0), (0,1), (1,1), (0,0)]])\n\n    Create a polygon with 2 holes.\n    >>> Polygon(\n    ...     [\n    ...         [(0,0), (0,1), (1,1), (0,0)],\n    ...         [(0.1, 0.1), (0.1, 0.2), (0.2, 0.2), (0.1, 0.1)],\n    ...         [(0.6, 0.6), (0.6, 0.7), (0.7, 0.7), (0.6, 0.6)],\n    ...     ]\n    ... )\n    \"\"\"\n\n    def __init__(\n        self, value: typing.List[typing.List[typing.Tuple[float, float]]]\n    ):\n        super().__init__(value=value)\n\n    @classmethod\n    def __validate__(cls, value: typing.Any):\n        MultiLineString.__validate__(value)\n        for line in value:\n            if not (len(line) >= 4 and line[0] == line[-1]):\n                raise ValueError(\n                    \"Polygons are defined by at least 4 points with the first point being repeated at the end.\"\n                )\n\n    @classmethod\n    def decode_value(\n        cls,\n        value: typing.Optional[typing.List[typing.List[typing.List[float]]]],\n    ):\n        \"\"\"Decode object from JSON compatible dictionary.\"\"\"\n        if value is None:\n            return None\n        return cls(\n            [\n                [(point[0], point[1]) for point in subpolygon]\n                for subpolygon in value\n            ]\n        )\n\n    @property\n    def area(self) -> Float:\n        \"\"\"\n        Symbolic representation of area.\n        \"\"\"\n        if not isinstance(self._value, Symbol):\n            raise ValueError\n        return Float.symbolic(\n            owner=self._value._owner,\n            name=self._value._name,\n            key=self._value._key,\n            attribute=\"area\",\n        )\n\n    @property\n    def boundary(self) -> typing.List[typing.Tuple[float, float]]:\n        \"\"\"\n        The boundary of the polygon.\n\n        Returns\n        -------\n        List[Tuple(float, float)]\n            A list of points.\n        \"\"\"\n        value = self.get_value()\n        if value is None:\n            raise ValueError(\"Polygon is 'None'\")\n        return value[0]\n\n    @property\n    def holes(self) -> typing.List[typing.List[typing.Tuple[float, float]]]:\n        \"\"\"\n        typing.Any holes in the polygon.\n\n        Returns\n        -------\n        List[List[Tuple(float, float)]]\n            A list of holes.\n        \"\"\"\n        value = self.get_value()\n        if value is None:\n            raise ValueError(\"Polygon is 'None'\")\n        return value[1:]\n\n    @property\n    def xmin(self) -> float:\n        \"\"\"\n        Minimum x-value.\n\n        Returns\n        -------\n        float\n        \"\"\"\n        return min([p[0] for p in self.boundary])\n\n    @property\n    def xmax(self) -> float:\n        \"\"\"\n        Maximum x-value.\n\n        Returns\n        -------\n        float\n        \"\"\"\n        return max([p[0] for p in self.boundary])\n\n    @property\n    def ymin(self) -> float:\n        \"\"\"\n        Minimum y-value.\n\n        Returns\n        -------\n        float\n        \"\"\"\n        return min([p[1] for p in self.boundary])\n\n    
@property\n    def ymax(self) -> float:\n        \"\"\"\n        Maximum y-value.\n\n        Returns\n        -------\n        float\n        \"\"\"\n        return max([p[1] for p in self.boundary])\n
"},{"location":"client_api/Schemas/Spatial/Polygon/#valor.schemas.Polygon-attributes","title":"Attributes","text":""},{"location":"client_api/Schemas/Spatial/Polygon/#valor.schemas.Polygon.area","title":"valor.schemas.Polygon.area: Float property","text":"

Symbolic representation of area.

"},{"location":"client_api/Schemas/Spatial/Polygon/#valor.schemas.Polygon.boundary","title":"valor.schemas.Polygon.boundary: typing.List[typing.Tuple[float, float]] property","text":"

The boundary of the polygon.

Returns:

Type Description List[Tuple(float, float)]

A list of points.

"},{"location":"client_api/Schemas/Spatial/Polygon/#valor.schemas.Polygon.holes","title":"valor.schemas.Polygon.holes: typing.List[typing.List[typing.Tuple[float, float]]] property","text":"

Any holes in the polygon.

Returns:

Type Description List[List[Tuple(float, float)]]

A list of holes.

"},{"location":"client_api/Schemas/Spatial/Polygon/#valor.schemas.Polygon.xmax","title":"valor.schemas.Polygon.xmax: float property","text":"

Maximum x-value.

Returns:

Type Description float"},{"location":"client_api/Schemas/Spatial/Polygon/#valor.schemas.Polygon.xmin","title":"valor.schemas.Polygon.xmin: float property","text":"

Minimum x-value.

Returns:

Type Description float"},{"location":"client_api/Schemas/Spatial/Polygon/#valor.schemas.Polygon.ymax","title":"valor.schemas.Polygon.ymax: float property","text":"

Maximum y-value.

Returns:

Type Description float"},{"location":"client_api/Schemas/Spatial/Polygon/#valor.schemas.Polygon.ymin","title":"valor.schemas.Polygon.ymin: float property","text":"

Minimum y-value.

Returns:

Type Description float"},{"location":"client_api/Schemas/Spatial/Polygon/#valor.schemas.Polygon-functions","title":"Functions","text":""},{"location":"client_api/Schemas/Spatial/Polygon/#valor.schemas.Polygon.decode_value","title":"valor.schemas.Polygon.decode_value(value) classmethod","text":"

Decode object from JSON compatible dictionary.

Source code in valor/schemas/symbolic/types.py
@classmethod\ndef decode_value(\n    cls,\n    value: typing.Optional[typing.List[typing.List[typing.List[float]]]],\n):\n    \"\"\"Decode object from JSON compatible dictionary.\"\"\"\n    if value is None:\n        return None\n    return cls(\n        [\n            [(point[0], point[1]) for point in subpolygon]\n            for subpolygon in value\n        ]\n    )\n
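
For a quick illustration of the decoder (a sketch; it assumes the valor.schemas import path used in the headings above), the snippet below decodes a JSON-style coordinate list and reads the boundary and holes back:

from valor.schemas import Polygon\n\n# decode_value expects a list of rings, each a list of [x, y] pairs\npolygon = Polygon.decode_value([[[0, 0], [0, 1], [1, 1], [0, 0]]])\nassert polygon.boundary == [(0, 0), (0, 1), (1, 1), (0, 0)]\nassert polygon.holes == []  # no interior rings were supplied\n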
"},{"location":"client_api/Schemas/Spatial/Raster/","title":"Raster","text":"

Bases: Spatial

Represents a binary mask.

Parameters:

Name Type Description Default value Dict[str, Union[ndarray, str, None]]

A raster value.

required

Attributes:

Name Type Description area Float array ndarray geometry Union[Box, Polygon, MultiPolygon] height int width int

Raises:

Type Description TypeError

If encoding is not a string.

Examples:

Generate a random mask.

>>> import numpy.random\n>>> height = 640\n>>> width = 480\n>>> array = numpy.random.rand(height, width)\n

Convert to binary mask.

>>> mask = (array > 0.5)\n

Create Raster.

>>> Raster.from_numpy(mask)\n
Source code in valor/schemas/symbolic/types.py
class Raster(Spatial):\n    \"\"\"\n    Represents a binary mask.\n\n    Parameters\n    ----------\n    value : Dict[str, typing.Union[np.ndarray, str, None]], optional\n        A raster value.\n\n    Attributes\n    ----------\n    area\n    array\n    geometry\n    height\n    width\n\n    Raises\n    ------\n    TypeError\n        If `encoding` is not a string.\n\n    Examples\n    --------\n    Generate a random mask.\n    >>> import numpy.random\n    >>> height = 640\n    >>> width = 480\n    >>> array = numpy.random.rand(height, width)\n\n    Convert to binary mask.\n    >>> mask = (array > 0.5)\n\n    Create Raster.\n    >>> Raster.from_numpy(mask)\n    \"\"\"\n\n    def __init__(\n        self,\n        value: typing.Dict[\n            str, typing.Union[np.ndarray, Box, Polygon, MultiPolygon, None]\n        ],\n    ):\n        \"\"\"\n        Initialize an instance of a raster.\n\n        Parameters\n        ----------\n        value : Dict[str, Union[np.ndarray, Box, Polygon, MultiPolygon, None]]\n            The raster in dictionary format {\"mask\": <np.ndarray>, \"geometry\": <geometry | None>}.\n        \"\"\"\n        super().__init__(value)\n\n    @classmethod\n    def __validate__(cls, value: typing.Any):\n        \"\"\"\n        Validates typing.\n\n        Parameters\n        ----------\n        value : Any\n            The value to validate.\n\n        Raises\n        ------\n        TypeError\n            If the value type is not supported.\n        \"\"\"\n        if not isinstance(value, dict):\n            raise TypeError(\n                \"Raster should contain a dictionary describing a mask and optionally a geometry.\"\n            )\n        elif set(value.keys()) != {\"mask\", \"geometry\"}:\n            raise ValueError(\n                \"Raster should be described by a dictionary with keys 'mask' and 'geometry'\"\n            )\n        elif not isinstance(value[\"mask\"], np.ndarray):\n            raise TypeError(\n                f\"Expected mask to have type '{np.ndarray}' received type '{value['mask']}'\"\n            )\n        elif len(value[\"mask\"].shape) != 2:\n            raise ValueError(\"raster only supports 2d arrays\")\n        elif value[\"mask\"].dtype != bool:\n            raise ValueError(\n                f\"Expecting a binary mask (i.e. 
of dtype bool) but got dtype {value['mask'].dtype}\"\n            )\n        elif (\n            value[\"geometry\"] is not None\n            and not Polygon.supports(value[\"geometry\"])\n            and not MultiPolygon.supports(value[\"geometry\"])\n        ):\n            raise TypeError(\n                \"Expected geometry to conform to either Polygon or MultiPolygon or be 'None'\"\n            )\n\n    def encode_value(self) -> typing.Any:\n        \"\"\"Encode object to JSON compatible dictionary.\"\"\"\n        value = self.get_value()\n        if value is None:\n            return None\n        f = io.BytesIO()\n        PIL.Image.fromarray(value[\"mask\"]).save(f, format=\"PNG\")\n        f.seek(0)\n        mask_bytes = f.read()\n        f.close()\n        return {\n            \"mask\": b64encode(mask_bytes).decode(),\n            \"geometry\": value[\"geometry\"],\n        }\n\n    @classmethod\n    def decode_value(cls, value: typing.Any):\n        \"\"\"Decode object from JSON compatible dictionary.\"\"\"\n        if value is None:\n            return None\n        if not (\n            isinstance(value, dict)\n            and set(value.keys()) == {\"mask\", \"geometry\"}\n        ):\n            raise ValueError(\n                f\"Improperly formatted raster encoding. Received '{value}'\"\n            )\n        mask_bytes = b64decode(value[\"mask\"])\n        with io.BytesIO(mask_bytes) as f:\n            img = PIL.Image.open(f)\n            value = {\n                \"mask\": np.array(img),\n                \"geometry\": value[\"geometry\"],\n            }\n        return cls(value=value)\n\n    @classmethod\n    def from_numpy(cls, mask: np.ndarray):\n        \"\"\"\n        Create a Raster object from a NumPy array.\n\n        Parameters\n        ----------\n        mask : np.ndarray\n            The 2D binary array representing the mask.\n\n        Returns\n        -------\n        Raster\n\n        Raises\n        ------\n        ValueError\n            If the input array is not 2D or not of dtype bool.\n        \"\"\"\n        return cls(value={\"mask\": mask, \"geometry\": None})\n\n    @classmethod\n    def from_geometry(\n        cls,\n        geometry: typing.Union[Box, Polygon, MultiPolygon],\n        height: int,\n        width: int,\n    ):\n        \"\"\"\n        Create a Raster object from a geometric mask.\n\n        Parameters\n        ----------\n        geometry : Union[Box, Polygon, MultiPolygon]\n            Defines the bitmask as a geometry. 
Overrides any existing mask.\n        height : int\n            The intended height of the binary mask.\n        width : int\n            The intended width of the binary mask.\n\n        Returns\n        -------\n        Raster\n        \"\"\"\n        bitmask = np.full((int(height), int(width)), False)\n        return cls(value={\"mask\": bitmask, \"geometry\": geometry.get_value()})\n\n    @property\n    def area(self) -> Float:\n        \"\"\"\n        Symbolic representation of area.\n        \"\"\"\n        if not isinstance(self._value, Symbol):\n            raise ValueError\n        return Float.symbolic(\n            owner=self._value._owner,\n            name=self._value._name,\n            key=self._value._key,\n            attribute=\"area\",\n        )\n\n    @property\n    def array(self) -> np.ndarray:\n        \"\"\"\n        The bitmask as a numpy array.\n\n        Returns\n        -------\n        Optional[np.ndarray]\n            A 2D binary array representing the mask if it exists.\n        \"\"\"\n        value = self.get_value()\n        if value[\"geometry\"] is not None:\n            warnings.warn(\n                \"Raster array does not contain bitmask as this is a geometry-defined raster.\",\n                RuntimeWarning,\n            )\n        return value[\"mask\"]\n\n    @property\n    def geometry(self) -> typing.Union[Box, Polygon, MultiPolygon]:\n        \"\"\"\n        The geometric mask if it exists.\n\n        Returns\n        -------\n        Box | Polygon | MultiPolygon | None\n            The geometry if it exists.\n        \"\"\"\n        return self.get_value()[\"geometry\"]\n\n    @property\n    def height(self) -> int:\n        \"\"\"Returns the height of the raster if it exists.\"\"\"\n        return self.array.shape[0]\n\n    @property\n    def width(self) -> int:\n        \"\"\"Returns the width of the raster if it exists.\"\"\"\n        return self.array.shape[1]\n
"},{"location":"client_api/Schemas/Spatial/Raster/#valor.schemas.Raster-attributes","title":"Attributes","text":""},{"location":"client_api/Schemas/Spatial/Raster/#valor.schemas.Raster.area","title":"valor.schemas.Raster.area: Float property","text":"

Symbolic representation of area.

"},{"location":"client_api/Schemas/Spatial/Raster/#valor.schemas.Raster.array","title":"valor.schemas.Raster.array: np.ndarray property","text":"

The bitmask as a numpy array.

Returns:

Type Description Optional[ndarray]

A 2D binary array representing the mask if it exists.

"},{"location":"client_api/Schemas/Spatial/Raster/#valor.schemas.Raster.geometry","title":"valor.schemas.Raster.geometry: typing.Union[Box, Polygon, MultiPolygon] property","text":"

The geometric mask if it exists.

Returns:

Type Description Box | Polygon | MultiPolygon | None

The geometry if it exists.

"},{"location":"client_api/Schemas/Spatial/Raster/#valor.schemas.Raster.height","title":"valor.schemas.Raster.height: int property","text":"

Returns the height of the raster if it exists.

"},{"location":"client_api/Schemas/Spatial/Raster/#valor.schemas.Raster.width","title":"valor.schemas.Raster.width: int property","text":"

Returns the width of the raster if it exists.

"},{"location":"client_api/Schemas/Spatial/Raster/#valor.schemas.Raster-functions","title":"Functions","text":""},{"location":"client_api/Schemas/Spatial/Raster/#valor.schemas.Raster.__init__","title":"valor.schemas.Raster.__init__(value)","text":"

Initialize an instance of a raster.

Parameters:

Name Type Description Default value Dict[str, Union[ndarray, Box, Polygon, MultiPolygon, None]]

The raster in dictionary format {\"mask\": <np.ndarray>, \"geometry\": <geometry | None>}. required Source code in valor/schemas/symbolic/types.py

def __init__(\n    self,\n    value: typing.Dict[\n        str, typing.Union[np.ndarray, Box, Polygon, MultiPolygon, None]\n    ],\n):\n    \"\"\"\n    Initialize an instance of a raster.\n\n    Parameters\n    ----------\n    value : Dict[str, Union[np.ndarray, Box, Polygon, MultiPolygon, None]]\n        The raster in dictionary format {\"mask\": <np.ndarray>, \"geometry\": <geometry | None>}.\n    \"\"\"\n    super().__init__(value)\n
"},{"location":"client_api/Schemas/Spatial/Raster/#valor.schemas.Raster.__validate__","title":"valor.schemas.Raster.__validate__(value) classmethod","text":"

Validates typing.

Parameters:

Name Type Description Default value Any

The value to validate.

required

Raises:

Type Description TypeError

If the value type is not supported.

Source code in valor/schemas/symbolic/types.py
@classmethod\ndef __validate__(cls, value: typing.Any):\n    \"\"\"\n    Validates typing.\n\n    Parameters\n    ----------\n    value : Any\n        The value to validate.\n\n    Raises\n    ------\n    TypeError\n        If the value type is not supported.\n    \"\"\"\n    if not isinstance(value, dict):\n        raise TypeError(\n            \"Raster should contain a dictionary describing a mask and optionally a geometry.\"\n        )\n    elif set(value.keys()) != {\"mask\", \"geometry\"}:\n        raise ValueError(\n            \"Raster should be described by a dictionary with keys 'mask' and 'geometry'\"\n        )\n    elif not isinstance(value[\"mask\"], np.ndarray):\n        raise TypeError(\n            f\"Expected mask to have type '{np.ndarray}' received type '{value['mask']}'\"\n        )\n    elif len(value[\"mask\"].shape) != 2:\n        raise ValueError(\"raster only supports 2d arrays\")\n    elif value[\"mask\"].dtype != bool:\n        raise ValueError(\n            f\"Expecting a binary mask (i.e. of dtype bool) but got dtype {value['mask'].dtype}\"\n        )\n    elif (\n        value[\"geometry\"] is not None\n        and not Polygon.supports(value[\"geometry\"])\n        and not MultiPolygon.supports(value[\"geometry\"])\n    ):\n        raise TypeError(\n            \"Expected geometry to conform to either Polygon or MultiPolygon or be 'None'\"\n        )\n
"},{"location":"client_api/Schemas/Spatial/Raster/#valor.schemas.Raster.decode_value","title":"valor.schemas.Raster.decode_value(value) classmethod","text":"

Decode object from JSON compatible dictionary.

Source code in valor/schemas/symbolic/types.py
@classmethod\ndef decode_value(cls, value: typing.Any):\n    \"\"\"Decode object from JSON compatible dictionary.\"\"\"\n    if value is None:\n        return None\n    if not (\n        isinstance(value, dict)\n        and set(value.keys()) == {\"mask\", \"geometry\"}\n    ):\n        raise ValueError(\n            f\"Improperly formatted raster encoding. Received '{value}'\"\n        )\n    mask_bytes = b64decode(value[\"mask\"])\n    with io.BytesIO(mask_bytes) as f:\n        img = PIL.Image.open(f)\n        value = {\n            \"mask\": np.array(img),\n            \"geometry\": value[\"geometry\"],\n        }\n    return cls(value=value)\n
"},{"location":"client_api/Schemas/Spatial/Raster/#valor.schemas.Raster.encode_value","title":"valor.schemas.Raster.encode_value()","text":"

Encode object to JSON compatible dictionary.

Source code in valor/schemas/symbolic/types.py
def encode_value(self) -> typing.Any:\n    \"\"\"Encode object to JSON compatible dictionary.\"\"\"\n    value = self.get_value()\n    if value is None:\n        return None\n    f = io.BytesIO()\n    PIL.Image.fromarray(value[\"mask\"]).save(f, format=\"PNG\")\n    f.seek(0)\n    mask_bytes = f.read()\n    f.close()\n    return {\n        \"mask\": b64encode(mask_bytes).decode(),\n        \"geometry\": value[\"geometry\"],\n    }\n
"},{"location":"client_api/Schemas/Spatial/Raster/#valor.schemas.Raster.from_geometry","title":"valor.schemas.Raster.from_geometry(geometry, height, width) classmethod","text":"

Create a Raster object from a geometric mask.

Parameters:

Name Type Description Default geometry Union[Box, Polygon, MultiPolygon]

Defines the bitmask as a geometry. Overrides any existing mask.

required height int

The intended height of the binary mask.

required width int

The intended width of the binary mask.

required

Returns:

Type Description Raster Source code in valor/schemas/symbolic/types.py
@classmethod\ndef from_geometry(\n    cls,\n    geometry: typing.Union[Box, Polygon, MultiPolygon],\n    height: int,\n    width: int,\n):\n    \"\"\"\n    Create a Raster object from a geometric mask.\n\n    Parameters\n    ----------\n    geometry : Union[Box, Polygon, MultiPolygon]\n        Defines the bitmask as a geometry. Overrides any existing mask.\n    height : int\n        The intended height of the binary mask.\n    width : int\n        The intended width of the binary mask.\n\n    Returns\n    -------\n    Raster\n    \"\"\"\n    bitmask = np.full((int(height), int(width)), False)\n    return cls(value={\"mask\": bitmask, \"geometry\": geometry.get_value()})\n
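
As a usage sketch (the polygon coordinates and the 640x480 size here are arbitrary), a geometry-defined raster can be built from an existing Polygon:

polygon = Polygon([[(10, 10), (10, 100), (100, 100), (10, 10)]])\nraster = Raster.from_geometry(polygon, height=480, width=640)\nassert raster.geometry is not None  # reading raster.array would warn, as noted above\n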
"},{"location":"client_api/Schemas/Spatial/Raster/#valor.schemas.Raster.from_numpy","title":"valor.schemas.Raster.from_numpy(mask) classmethod","text":"

Create a Raster object from a NumPy array.

Parameters:

Name Type Description Default mask ndarray

The 2D binary array representing the mask.

required

Returns:

Type Description Raster

Raises:

Type Description ValueError

If the input array is not 2D or not of dtype bool.

Source code in valor/schemas/symbolic/types.py
@classmethod\ndef from_numpy(cls, mask: np.ndarray):\n    \"\"\"\n    Create a Raster object from a NumPy array.\n\n    Parameters\n    ----------\n    mask : np.ndarray\n        The 2D binary array representing the mask.\n\n    Returns\n    -------\n    Raster\n\n    Raises\n    ------\n    ValueError\n        If the input array is not 2D or not of dtype bool.\n    \"\"\"\n    return cls(value={\"mask\": mask, \"geometry\": None})\n
"}]} \ No newline at end of file +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Introduction","text":"

Valor is a centralized evaluation store that makes it easy to measure, explore, and rank model performance. Valor empowers data scientists and engineers to evaluate the performance of their machine learning pipelines and use those evaluations to make better modeling decisions in the future. To skip this textual introduction and dive right in, first go here for instructions to set up the Valor service, and then check out the sample notebooks.

Valor is maintained by Striveworks, a cutting-edge machine learning operations (MLOps) company based out of Austin, Texas. We'd love to learn more about your interest in Valor and answer any questions you may have; please don't hesitate to reach out to us on Slack or GitHub.

These docs are organized as follows:

  • Overview (this page): Provides an overview of what Valor is, why it's important, and how it works.
  • Installation: Explains how to install Valor.
  • Getting Started Notebook: Details everything you need to get up and running with Valor.
  • All Sample Notebooks: Collection of descriptive Jupyter notebooks giving examples of how to evaluate model performance using Valor.
  • Metadata and Filtering: Describes Valor's robust support for adding metadata to data, along with how to filter evaluations and Valor objects based on metadata and other attributes.
  • Metrics: Describes all of the metrics that you can calculate using Valor.
  • Endpoints: Documents Valor's various API endpoints.
  • Technical Concepts: Describes the technical concepts that underpin Valor.
  • Contributing and Development: Explains how you can build on and contribute to Valor.
  • Python Client API: Shares reference documentation for our Python client.
"},{"location":"#overview","title":"Overview","text":"

In this section, we'll explore what Valor is, why it's important, and provide a high-level description of how it works. This overview is also available in the following five-minute video:

"},{"location":"#use-cases-for-a-containerized-evaluation-store","title":"Use Cases for a Containerized Evaluation Store","text":"

As we've worked with dozens of data scientists and engineers on their MLOps pipelines, we have identified three important questions that an effective evaluation store could help them answer. First, they wanted to understand: \"Of the various models I tested for a given dataset, which one performs best?\". This is a very common and important use case\u2014and one that is often solved on a model-to-model basis in a local Jupyter notebook. This focus on bespoke implementations limits traceability and makes it difficult to create apples-to-apples comparisons between new model runs and prior model runs.

Second, our users wanted to understand: \"How does the performance of a particular model vary across datasets?\". We found that many practitioners use the same computer vision model (e.g., YOLOv8) for a variety of supervised learning tasks, and they needed a way to identify patterns where that particular model didn't meet expectations.

Finally, our users wanted to understand: \"How can I use my prior evaluations to pick the best model for a future ML pipeline?\". This last question requires the ability to filter previous evaluations on granular metadata (e.g., time of day, geospatial coordinates, etc.) in order to provide tailored recommendations regarding which model to pick in the future.

With these three use cases in mind, we set out to build a centralized evaluation store that we later named Valor.

"},{"location":"#introducing-valor","title":"Introducing Valor","text":"

Valor is a centralized evaluation store that makes it easy to measure, explore, and rank model performance. Our ultimate goal with Valor is to help data scientists and engineers pick the right ML model for their specific needs. To that end, we built Valor with three design principles in mind:

  • Valor works with any dataset or model: We believe Valor should be able to handle any supervised learning task that you want to throw at it. Just pass in your ground truth annotations and predictions, describe your learning task (e.g., object detection), and Valor will do the rest. (Note: At launch, Valor will only support classification and computer vision (i.e., image segmentation and object detection) tasks. We're confident this framework will abstract well to other supervised learning tasks and plan to support them in later releases).
  • Valor can handle any type of image, model, or dataset metadata you throw at it: Metadata is a critical component of any evaluation store as it enables the system to offer tailored model recommendations based on a user's specific needs. To that end, we built Valor to handle any metadata under the sun. Dates, geospatial coordinates, and even JSONs filled with configuration details are all on the table. This means you can slice and dice your evaluations any way you want: just pass in the right labels for your use case and define your filter (say a geographic bounding box), and you\u2019ll get back results for your specific needs.
  • Valor standardizes the evaluation process: The trickiest part of comparing two different model runs is avoiding apples-to-oranges comparisons. Valor helps you audit your metrics and avoid false comparisons by versioning your uploads, storing them in a centralized location, and ensuring that you only compare runs that used the exact same filters and metrics.
"},{"location":"#how-it-works-an-illustrative-example","title":"How It Works: An Illustrative Example","text":"

Let\u2019s walk through a quick example to bring Valor to life.

Say that you're interested in using computer vision models to detect forest fires around the world using satellite imagery. You've just been tasked with building a new ML pipeline to detect fires in an unfamiliar region of interest. How might you leverage your evaluation metrics from prior ML pipelines to understand which model will perform best for this particular use case?

To answer this question, we'll start by passing in three pieces of information from each of our prior modeling runs:

  • GroundTruths: First, we'll pass in human-annotated bounding boxes to tell Valor exactly where forest fires can be found across all of the satellite images used in prior runs.
  • Predictions: Next, we'll pass machine-generated predictions for each image (also in the form of bounding boxes) so that Valor can evaluate how well each model did at predicting forest fires.
  • Labels: Finally, we'll pass metadata to Valor describing each of our various images (e.g., the time of day the photo was taken, the geospatial coordinates of the forest in the photo, etc.). We'll use this metadata later on in order to identify the right model for our new use case.

Once we pass in these three ingredients, Valor will compare all of our GroundTruths and Predictions in order to calculate various evaluation metrics (e.g., mean average precision, or mAP). These metrics, along with our Labels, GroundTruths, and Predictions, will all be stored in Postgres, with PostGIS support enabling fast geospatial lookups and geometric comparisons at a later date.

Finally, once all of our previous pipeline runs and evaluations are stored in Valor, we can use Valor\u2019s API to specify our exact filter criteria and get back its model rankings. In this case, we can ask Valor to find us the best model for detecting forest fires at night in a 50 mile radius around (42.36, -71.03), sorted by mAP. Valor will then filter all of our stored evaluation metrics, rank each model with evaluations that meet our criteria, and send back all relevant evaluation metrics to help us determine which model to use for our new modeling pipeline.

"},{"location":"#next-steps","title":"Next Steps","text":"

We'd recommend reviewing our \"Getting Started\" sample notebook to become further acquainted with Valor. For more detailed explanations of Valor's technical underpinnings, see our technical concepts guide.

"},{"location":"#faq","title":"FAQ","text":"

Q. What is Valor?

A. Valor is a centralized evaluation store that makes it easy to measure, explore, and rank model performance. For an overview of what Valor is and why it's important, please refer to our high-level overview.

Q. What evaluation methods are supported?

A. Valor currently supports generic classification as well as object-detection and semantic-segmentation for images. The long-term goal for Valor is to support the most popular supervised learning methods.

Q. Does Valor store data?

A. Valor only stores ground truth annotations, model predictions, and user-defined metadata.

Q. What is a Datum?

A. A valor.Datum object is a generic type that represents a datum in the context of a machine learning workflow. The object stores a UID and related metadata in a dictionary. This metadata allows the user to construct their own abstraction layer by mapping a real-world type (e.g., an image) into a valor.Datum type.

from valor.metatypes import ImageMetadata\nimage = ImageMetadata.create(\n  uid = \"1234\",\n  height = 100,\n  width = 100,\n)\n\n# access the datum\ndatum = image.datum\n

Q. What is a GroundTruth?

A. valor.GroundTruth objects in Valor each represent a singular datum and its associated annotations that provide a reference standard or the 'truth' against which predictions are compared. There cannot be multiple ground truths per datum.

Q. What is a Prediction?

A. valor.Prediction objects are similar to valor.GroundTruth objects in that they also contain a list of annotations over a datum. However, these annotations are generated by a model as inferences, and the object also includes the name of the model that was used for creating these inferences. There cannot be multiple predictions by the same model over a single datum.
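
As an illustrative sketch of the two objects side by side (the exact Annotation fields, such as task_type, may differ between client versions, so treat the names below as assumptions rather than a canonical example):

from valor import Annotation, Datum, GroundTruth, Label, Prediction\nfrom valor.enums import TaskType\n\ndatum = Datum(uid=\"img_001\")\n\n# one ground truth per datum\ngroundtruth = GroundTruth(\n    datum=datum,\n    annotations=[\n        Annotation(\n            task_type=TaskType.CLASSIFICATION,\n            labels=[Label(key=\"animal\", value=\"dog\")],\n        )\n    ],\n)\n\n# one prediction per model per datum; scores carry the model's confidence\nprediction = Prediction(\n    datum=datum,\n    annotations=[\n        Annotation(\n            task_type=TaskType.CLASSIFICATION,\n            labels=[\n                Label(key=\"animal\", value=\"dog\", score=0.9),\n                Label(key=\"animal\", value=\"cat\", score=0.1),\n            ],\n        )\n    ],\n)\n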

Q. Can Valor handle multiple data types?

A. Valor abstracts data types through metadata. An example of this can be seen in valor.metatypes.ImageMetadata which describes the mapping of an image to a valor.Datum.

Q. Does Valor support geospatial queries?

A. Valor follows the GeoJSON specification (RFC 7946) in the implementation of Point, MultiPoint, LineString, MultiLineString, Polygon, and MultiPolygon geometries. These objects are used to define annotations and facilitate the creation of geospatial metadata.

"},{"location":"#troubleshooting","title":"Troubleshooting","text":"

Q. Why am I getting NotFinalizedError when trying to run an evaluation?

A. Valor requires both dataset and model representations to be finalized before evaluation can take place. Finalization is crucial for auditability as it ensures that data finalized at a certain date is immutable.

Dataset finalization is accomplished through the valor.Dataset.finalize member function.

from valor import Client, Dataset\nclient = Client(...)\ndataset = Dataset(name=\"test_dataset\")\n...\ndataset.finalize()\n

Models are finalized automatically given two conditions.

  1. The working dataset is finalized.
  2. There is a 1:1 mapping of predictions to ground truths.

Models and their predictions can also be finalized prematurely using the valor.Model.finalize_inferences member function. This will generate empty predictions with task type enums.TaskType.SKIP to achieve the 1:1 ground truth mapping.

from valor import Client, Dataset, Model\nclient = Client(...)\ndataset = Dataset(name=\"test_dataset\")\nmodel = Model(name=\"test_model\")\n...\ndataset.finalize()\nmodel.finalize_inferences(dataset)\n

Q. Why am I getting GDAL driver errors?

A. For some computations (mostly involving rasters), Valor requires the PostGIS database to have all GDAL drivers enabled. The Valor back end attempts to enable these drivers, but it might not have permission depending on your specific setup. If you encounter this error, see here for ways to enable the drivers directly in the PostGIS instance.

"},{"location":"contributing/","title":"Contributing to Valor","text":"

We welcome all contributions, bug reports, bug fixes, documentation improvements, enhancements, and ideas aimed at improving Valor. This doc describes the high-level process for how to contribute to this repository. If you have any questions or comments about this process, please feel free to reach out to us on Slack.

"},{"location":"contributing/#on-github","title":"On GitHub","text":"

We use Git on GitHub to manage this repo, which means you will need to sign up for a free GitHub account to submit issues, ideas, and pull requests. We use Git for version control to allow contributors from all over the world to work together on this project.

If you are new to Git, these official resources can help bring you up to speed:

  • GitHub documentation for forking a repo
  • GitHub documentation for collaborating with pull requests
  • GitHub documentation for working with forks
"},{"location":"contributing/#contribution-workflow","title":"Contribution Workflow","text":"

Generally, the high-level workflow for contributing to this repo includes:

  1. Submitting an issue or enhancement request using the appropriate template on GitHub Issues.
  2. Gathering feedback from devs and the broader community in your issue before starting to code.
  3. Forking the Valor repo, making your proposed changes, and submitting a pull request (PR). When submitting a PR, please be sure to:
    1. Update the README.md and/or any relevant docstrings with details of your change.
    2. Add tests where necessary.
    3. Run pre-commit install on your local repo before your last commit to ensure your changes follow our formatting guidelines.
    4. Double-check that your code passes all of the tests that are automated via GitHub Actions.
    5. Ping us on Slack to ensure timely review.
  4. Working with repo maintainers to review and improve your PR before it is merged into the official repo.

For questions or comments on this process, please reach out to us at any time on Slack.

"},{"location":"contributing/#development-tips-and-tricks","title":"Development Tips and Tricks","text":""},{"location":"contributing/#deploying-the-back-end-for-development","title":"Deploying the Back End for Development","text":""},{"location":"contributing/#docker-compose","title":"Docker Compose","text":"

The fastest way to test the API and Python client is via Docker Compose. Start by setting the environment variable POSTGRES_PASSWORD to your liking, and then start Docker and build the container:

export POSTGRES_PASSWORD=\"my_password\"\ndocker compose up\n
"},{"location":"contributing/#makefile-requires-docker","title":"Makefile (requires Docker)","text":"

Alternatively, you may want to run the API service from a terminal to enable faster debugging. To start the service, you can run:

pip install api # Install the API in your python environment\n\nexport POSTGRES_PASSWORD=password\nexport POSTGRES_HOST=localhost\nmake start-postgres-docker # Start the custom postgres service in Docker\nmake run-migrations # Instantiate the table schemas in Postgres\nmake start-server # Start the API service locally\n
"},{"location":"contributing/#setting-up-your-environment","title":"Setting Up Your Environment","text":"

Creating a Valor-specific Python environment at the start of development can help you avoid dependency and versioning issues later on. To start, we'd recommend activating a new Python environment:

# venv\npython3 -m venv .env-valor\nsource .env-valor/bin/activate\n\n# conda\nconda create --name valor python=3.11\nconda activate valor\n

Next, install pre-commit to ensure formatting consistency throughout your repo:

pip install pre-commit\npre-commit install\n

Finally, you're ready to install your client and API modules:

# Install the Client module\npython -m pip install -e client/.\n\n# Install the API module\npython -m pip install -e api/.\n
"},{"location":"contributing/#use-pgadmin-to-debug-postgis","title":"Use pgAdmin to Debug PostGIS","text":"

You can use the pgAdmin utility to debug your PostGIS tables as you code. Start by installing pgAdmin, and then select Object > Register > Server to connect to your PostGIS container. The default connection details are listed below for convenience:

- *Host name/address*: 0.0.0.0\n- *Port*: 5432\n- *Maintenance database*: postgres\n- *Username*: postgres\n
"},{"location":"contributing/#running-tests","title":"Running Tests","text":"

All of our tests are run automatically via GitHub Actions on every push, so it's important to double-check that your code passes all local tests before committing your code. All of the tests below require pytest:

pip install pytest\n
"},{"location":"contributing/#running-integration-tests","title":"Running integration tests","text":"
pytest integration_tests\n
"},{"location":"contributing/#running-back-end-unit-tests","title":"Running back end unit tests","text":"
pytest api/tests/unit-tests\n
"},{"location":"contributing/#running-back-end-functional-tests","title":"Running back end functional tests","text":"

Note: Functional tests require a running instance of PostgreSQL, which you can start using make start-postgres-docker.

POSTGRES_PASSWORD=password \\\nPOSTGRES_HOST=localhost \\\npytest api/tests/functional-tests/\n
"},{"location":"endpoints/","title":"Endpoints","text":""},{"location":"installation/","title":"Installation","text":"

Valor comprises two services: a back-end service (which consists of a REST API and a Postgres database with the PostGIS extension), and a Python client for interacting with the back-end service.

"},{"location":"installation/#setting-up-the-back-end-service","title":"Setting up the back-end service","text":""},{"location":"installation/#using-docker-compose","title":"Using Docker Compose","text":"

The easiest way to get up and running with Valor is to use Docker Compose with the docker-compose.yml file in the repository root:

git clone https://github.com/striveworks/valor\ncd valor\ndocker compose --env-file ./api/.env.testing up\n

This will set up the necessary environment variables, start both the API and database services, and run the database migration job. The endpoint localhost:8000/health should return {\"status\":\"ok\"} if all of Valor's services were started correctly.

Note: running Valor this way is not intended for production and scalable use and is only recommended for development and testing purposes.

"},{"location":"installation/#deploying-via-docker-and-a-hosted-database","title":"Deploying via Docker and a hosted database","text":"

For a more production-grade deployment, we publish the images ghcr.io/striveworks/valor/valor-service (used for the REST API) and ghcr.io/striveworks/valor/migrations (used for setting up the database and migrations). These can be paired with any Postgres database with the PostGIS extension.

The following environment variables are required for running these images:

  • POSTGRES_HOST: The host of the Postgres database (needed by valor-service, migrations)
  • POSTGRES_PORT: The port of the Postgres database (needed by valor-service, migrations)
  • POSTGRES_DB: The name of the Postgres database (needed by valor-service, migrations)
  • POSTGRES_USERNAME: The user of the Postgres database (needed by valor-service, migrations)
  • POSTGRES_PASSWORD: The password of the Postgres database (needed by valor-service, migrations)
  • POSTGRES_SSLMODE: Sets the Postgres instance SSL mode; typically needs to be \"require\" (needed by migrations)
  • API_ROOT_PATH: The root path of the API, if serving behind a proxy (needed by valor-service)

Additionally, the Valor REST API has an optional single username/password/bearer token authentication. To enable this feature, the valor-service image requires the following environment variables:

  • VALOR_USERNAME: The username to use
  • VALOR_PASSWORD: The password to use
  • VALOR_SECRET_KEY: A random, secret string used for signing JWT tokens
"},{"location":"installation/#manual-deployment","title":"Manual deployment","text":"

If you would prefer to build your own image or if you want a debug console for the back-end, please see the deployment instructions in Contributing to Valor.

"},{"location":"installation/#setting-up-the-python-client","title":"Setting up the Python client","text":"

The Python client can be installed via pip:

pip install valor-client\n
"},{"location":"metadata_and_filtering/","title":"Metadata and Filtering","text":""},{"location":"metadata_and_filtering/#metadata","title":"Metadata","text":"

Valor offers rich support for attaching metadata to almost any object, which can then be used to filter, group, and organize objects in Valor.

The metadata types supported are:

  • simple data types (strings, numerics, boolean)
  • datetimes (via datetime.datetime, datetime.date, datetime.time, and datetime.timedelta in the Valor client)
  • geometries and geographies (via GeoJSON)

Metadata is added on object creation. For example, if you want to use metadata to organize models that come from training run checkpoints, this may look like:

run_name: str\nckpt: int\n\nModel.create(name=f\"{run_name}-ckpt{ckpt}\", metadata={\"run_name\": run_name, \"ckpt\": ckpt})\n

or if a datum has an associated datetime of capture, that can be added in the creation stage:

from datetime import datetime\n\nDatum(uid=fname, metadata={\"capture_day\": datetime(day=1, month=1, year=2021)})\n
"},{"location":"metadata_and_filtering/#filtering","title":"Filtering","text":"

Valor supports filtering objects based on metadata or other attributes (such as labels or bounding boxes). One of the most important use cases of filtering is to define a subset of a dataset to evaluate a model on.

"},{"location":"metadata_and_filtering/#filtering-by-metadata","title":"Filtering by metadata","text":"

For example, using the above example where capture_day was added as metadata, one way to test model drift could be to evaluate the model over different time periods. Such a workflow may look like:

import datetime\n\nimport valor\n\nmodel: valor.Model # classification model\ndset: valor.Dataset # dataset to evaluate on\n\n# compare performance on data captured before and after 2020\nd = datetime.datetime(day=5, month=10, year=2020)\neval1 = model.evaluate_classification(dset, filter_by=[Datum.metadata[\"capture_day\"] < d])\neval2 = model.evaluate_classification(dset, filter_by=[Datum.metadata[\"capture_day\"] > d])\n
"},{"location":"metadata_and_filtering/#filtering-by-geometric-attributes","title":"Filtering by geometric attributes","text":"

As an example for filtering by geometric attributes, consider evaluating an object detection model's performance on small objects, where we define small as being less than 500 square pixels in area. This can be achieved via:

import valor\n\nmodel: valor.Model # object detection model\ndset: valor.Dataset # dataset to evaluate on\n\nmodel.evaluate_detection(dset, filter_by=[valor.Annotation.bounding_box.area < 500])\n
"},{"location":"metadata_and_filtering/#filtering-in-queries","title":"Filtering in queries","text":"

Filtering can also be used when querying for different objects. For example, taking the model checkpoint example from the metadata section above, we could query the model checkpoints from a training run whose checkpoint number is greater than 100:

from valor import client\n\nrun_name: str # run name to query for\n\nclient.get_models([Model.metadata[\"run_name\"] == run_name, Model.metadata[\"ckpt\"] > 100])\n
"},{"location":"metrics/","title":"Metrics","text":"

Let's look at the various metrics you can calculate using Valor.

If we're missing an important metric for your particular use case, please write us a GitHub Issue ticket. We love hearing your suggestions.

"},{"location":"metrics/#classification-metrics","title":"Classification Metrics","text":"Name Description Equation Precision The number of true positives divided by the total number of positive predictions (i.e., the number of true positives plus the number of false positives). \\(\\dfrac{\\|TP\\|}{\\|TP\\|+\\|FP\\|}\\) Recall The number of true positives divided by the total count of the class of interest (i.e., the number of true positives plus the number of true negatives). \\(\\dfrac{\\|TP\\|}{\\|TP\\|+\\|FN\\|}\\) F1 A weighted average of precision and recall. \\(\\frac{2 * Precision * Recall}{Precision + Recall}\\) Accuracy The number of true predictions divided by the total number of predictions. \\(\\dfrac{\\|TP\\|+\\|TN\\|}{\\|TP\\|+\\|TN\\|+\\|FP\\|+\\|FN\\|}\\) ROC AUC The area under the Receiver Operating Characteristic (ROC) curve for the predictions generated by a given model. See ROCAUC methods. Precision-Recall Curves Outputs a nested dictionary containing the true positives, false positives, true negatives, false negatives, precision, recall, and F1 score for each (label key, label value, confidence threshold) combination. See precision-recall curve methods Detailed Precision-Recall Curves Similar to PrecisionRecallCurve, except this metric a) classifies false positives as hallucinations or misclassifications, b) classifies false negatives as misclassifications or missed_detections, and c) gives example datums for each observation, up to a maximum of pr_curve_max_examples. See detailed precision-recall curve methods"},{"location":"metrics/#object-detection-and-instance-segmentation-metrics","title":"Object Detection and Instance Segmentation Metrics**","text":"Name Description Equation Average Precision (AP) The weighted mean of precisions achieved at several different recall thresholds for a single Intersection over Union (IOU), grouped by class. See AP methods. AP Averaged Over IOUs The average of several AP metrics across IOU thresholds, grouped by class labels. \\(\\dfrac{1}{\\text{number of thresholds}} \\sum\\limits_{iou \\in thresholds} AP_{iou}\\) Mean Average Precision (mAP) The average of several AP metrics, grouped by label keys and IOU thresholds. \\(\\dfrac{1}{\\text{number of labels}} \\sum\\limits_{label \\in labels} AP_{c}\\) mAP Averaged Over IOUs The average of several mAP metrics grouped by label keys. \\(\\dfrac{1}{\\text{number of thresholds}} \\sum\\limits_{iou \\in thresholds} mAP_{iou}\\) Average Recall (AR) The average of several recall metrics across IOU thresholds, grouped by class labels. See AR methods. Mean Average Recall (mAR) The average of several AR metrics, grouped by label keys. \\(\\dfrac{1}{\\text{number of labels}} \\sum\\limits_{label \\in labels} AR_{class}\\) Precision-Recall Curves Outputs a nested dictionary containing the true positives, false positives, true negatives, false negatives, precision, recall, and F1 score for each (label key, label value, confidence threshold) combination. These curves are calculated using a default IOU threshold of 0.5; you can set your own threshold by passing a float between 0 and 1 to the pr_curve_iou_threshold parameter at evaluation time. 
See precision-recall curve methods Detailed Precision-Recall Curves Similar to PrecisionRecallCurve, except this metric a) classifies false positives as hallucinations or misclassifications, b) classifies false negatives as misclassifications or missed_detections, and c) gives example datums and bounding boxes for each observation, up to a maximum of pr_curve_max_examples. See detailed precision-recall curve methods

**When calculating IOUs for object detection metrics, Valor handles the necessary conversion between different types of geometric annotations. For example, if your model prediction is a polygon and your groundtruth is a raster, then the raster will be converted to a polygon prior to calculating the IOU.

"},{"location":"metrics/#semantic-segmentation-metrics","title":"Semantic Segmentation Metrics","text":"Name Description Equation Intersection Over Union (IOU) A ratio between the groundtruth and predicted regions of an image, measured as a percentage, grouped by class. \\(\\dfrac{area( prediction \\cap groundtruth )}{area( prediction \\cup groundtruth )}\\) Mean IOU The average of IOU across labels, grouped by label key. \\(\\dfrac{1}{\\text{number of labels}} \\sum\\limits_{label \\in labels} IOU_{c}\\)"},{"location":"metrics/#appendix-metric-calculations","title":"Appendix: Metric Calculations","text":""},{"location":"metrics/#binary-roc-auc","title":"Binary ROC AUC","text":""},{"location":"metrics/#receiver-operating-characteristic-roc","title":"Receiver Operating Characteristic (ROC)","text":"

An ROC curve plots the True Positive Rate (TPR) vs. the False Positive Rate (FPR) at different confidence thresholds.

In Valor, we use the confidence scores sorted in decreasing order as our thresholds. Using these thresholds, we can calculate our TPR and FPR as follows:

"},{"location":"metrics/#determining-the-rate-of-correct-predictions","title":"Determining the Rate of Correct Predictions","text":"Element Description True Positive (TP) Prediction confidence score >= threshold and is correct. False Positive (FP) Prediction confidence score >= threshold and is incorrect. True Negative (TN) Prediction confidence score < threshold and is correct. False Negative (FN) Prediction confidence score < threshold and is incorrect.
  • \\(\\text{True Positive Rate (TPR)} = \\dfrac{|TP|}{|TP| + |FN|} = \\dfrac{|TP(threshold)|}{|TP(threshold)| + |FN(threshold)|}\\)

  • \\(\\text{False Positive Rate (FPR)} = \\dfrac{|FP|}{|FP| + |TN|} = \\dfrac{|FP(threshold)|}{|FP(threshold)| + |TN(threshold)|}\\)

We now use the confidence scores, sorted in decreasing order, as our thresholds in order to generate points on a curve.

\\(Point(score) = (FPR(score), \\ TPR(score))\\)

"},{"location":"metrics/#area-under-the-roc-curve-roc-auc","title":"Area Under the ROC Curve (ROC AUC)","text":"

After calculating the ROC curve, we find the ROC AUC metric by approximating the integral using the trapezoidal rule formula.

\\(ROC AUC = \\sum_{i=1}^{|scores|} \\frac{ \\lVert Point(score_{i-1}) - Point(score_i) \\rVert }{2}\\)

See Classification: ROC Curve and AUC for more information.
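
For illustration only (this is a standard trapezoidal computation, not Valor's internal implementation), the ROC AUC of a set of binary predictions can be sketched as:

def binary_roc_auc(scores: list[float], is_positive: list[bool]) -> float:\n    \"\"\"Trapezoidal ROC AUC; each confidence score serves as a threshold, in decreasing order.\"\"\"\n    pairs = sorted(zip(scores, is_positive), key=lambda pair: -pair[0])\n    n_pos = sum(1 for _, positive in pairs if positive)\n    n_neg = len(pairs) - n_pos\n    auc = tp = fp = prev_tpr = prev_fpr = 0.0\n    for _, positive in pairs:\n        if positive:\n            tp += 1\n        else:\n            fp += 1\n        tpr, fpr = tp / n_pos, fp / n_neg\n        auc += (fpr - prev_fpr) * (tpr + prev_tpr) / 2  # trapezoid between consecutive points\n        prev_tpr, prev_fpr = tpr, fpr\n    return auc\n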

"},{"location":"metrics/#average-precision-ap","title":"Average Precision (AP)","text":"

For object detection and instance segmentation tasks, average precision is calculated from the intersection-over-union (IOU) of geometric predictions and ground truths.

"},{"location":"metrics/#multiclass-precision-and-recall","title":"Multiclass Precision and Recall","text":"

Tasks that predict geometries (such as object detection or instance segmentation) use the ratio intersection-over-union (IOU) to calculate precision and recall. IOU is the ratio of the intersecting area over the joint area spanned by the two geometries, and is defined in the following equation.

\\(Intersection \\ over \\ Union \\ (IOU) = \\dfrac{Area( prediction \\cap groundtruth )}{Area( prediction \\cup groundtruth )}\\)

Using different IOU thresholds, we can determine whether we count a pairing between a prediction and a ground truth based on their overlap.

  • True Positive (TP): Prediction-GroundTruth pair exists with IOU >= threshold.
  • False Positive (FP): Prediction-GroundTruth pair exists with IOU < threshold.
  • True Negative (TN): Unused in multi-class evaluation.
  • False Negative (FN): No Prediction with a matching label exists for the GroundTruth.
  • \\(Precision = \\dfrac{|TP|}{|TP| + |FP|} = \\dfrac{\\text{Number of True Predictions}}{|\\text{Predictions}|}\\)

  • \\(Recall = \\dfrac{|TP|}{|TP| + |FN|} = \\dfrac{\\text{Number of True Predictions}}{|\\text{Groundtruths}|}\\)
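
To make the IOU computation concrete, here is a minimal sketch for axis-aligned bounding boxes given in (xmin, ymin, xmax, ymax) form:

def box_iou(box1: tuple, box2: tuple) -> float:\n    \"\"\"IOU of two axis-aligned boxes in (xmin, ymin, xmax, ymax) form.\"\"\"\n    x1, y1 = max(box1[0], box2[0]), max(box1[1], box2[1])\n    x2, y2 = min(box1[2], box2[2]), min(box1[3], box2[3])\n    intersection = max(0.0, x2 - x1) * max(0.0, y2 - y1)\n    area1 = (box1[2] - box1[0]) * (box1[3] - box1[1])\n    area2 = (box2[2] - box2[0]) * (box2[3] - box2[1])\n    return intersection / (area1 + area2 - intersection)\n\nassert abs(box_iou((0, 0, 2, 2), (1, 1, 3, 3)) - 1 / 7) < 1e-9  # intersection 1, union 7\n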

"},{"location":"metrics/#matching-ground-truths-with-predictions","title":"Matching Ground Truths with Predictions","text":"

To properly evaluate a detection, we must first find the best pairings of predictions to ground truths. We start by iterating over our predictions, ordering them by highest scores first. We pair each prediction with the ground truth that has the highest calculated IOU. Both the prediction and ground truth are now considered paired and removed from the pool of choices.

def rank_ious(\n    groundtruths: list,\n    predictions: list,\n) -> list[float]:\n    \"\"\"Ranks IOUs by unique pairings.\"\"\"\n\n    retval = []\n    groundtruths = set(groundtruths)\n    for prediction in sorted(predictions, key=lambda x : -x.score):\n        groundtruth = max(groundtruths, key=lambda x : calculate_iou(x, prediction))\n        groundtruths.remove(groundtruth)\n        retval.append(calculate_iou(groundtruth, prediction))\n    return retval\n
"},{"location":"metrics/#precision-recall-curve","title":"Precision-Recall Curve","text":"

We can now compute the precision-recall curve using our previously ranked IOUs. We do this by iterating through the ranked IOUs and cumulatively creating points from the running recall and precision.

def create_precision_recall_curve(\n    number_of_groundtruths: int,\n    ranked_ious: list[float],\n    threshold: float\n) -> list[tuple[float, float]]:\n    \"\"\"Creates the precision-recall curve from a list of IOUs and a threshold.\"\"\"\n\n    retval = []\n    count_tp = 0\n    for i in range(len(ranked_ious)):\n        if ranked_ious[i] >= threshold:\n            count_tp += 1\n        precision = count_tp / (i + 1)\n        recall = count_tp / number_of_groundtruths\n        retval.append((recall, precision))\n    return retval\n
"},{"location":"metrics/#calculating-average-precision","title":"Calculating Average Precision","text":"

Average precision is defined as the area under the precision-recall curve.

We will use a 101-point interpolation of the curve to be consistent with the COCO evaluator. The intent behind interpolation is to reduce the fuzziness that results from ranking pairs.

\\(AP = \\frac{1}{101} \\sum\\limits_{r\\in\\{ 0, 0.01, \\ldots , 1 \\}}\\rho_{interp}(r)\\)

\(\rho_{interp}(r) = \underset{\tilde{r}: \tilde{r} \ge r}{\max} \ \rho (\tilde{r})\)
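
A direct (unoptimized) sketch of these two formulas, reusing the (recall, precision) points produced by create_precision_recall_curve above:

def interpolated_average_precision(curve: list[tuple[float, float]]) -> float:\n    \"\"\"101-point interpolated AP over a list of (recall, precision) points.\"\"\"\n    total = 0.0\n    for i in range(101):\n        r = i / 100\n        # rho_interp(r): the maximum precision at any recall >= r\n        precisions = [p for recall, p in curve if recall >= r]\n        total += max(precisions) if precisions else 0.0\n    return total / 101\n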

"},{"location":"metrics/#references","title":"References","text":"
  • MS COCO Detection Evaluation
  • The PASCAL Visual Object Classes (VOC) Challenge
  • Mean Average Precision (mAP) Using the COCO Evaluator
"},{"location":"metrics/#average-recall-ar","title":"Average Recall (AR)","text":"

To calculate Average Recall (AR), we:

  1. Find the count of true positives above specified IOU and confidence thresholds for all images containing a ground truth of a particular class.
  2. Divide that count of true positives by the total number of ground truths to get the recall value per class and IOU threshold. Append that recall value to a list.
  3. Repeat steps 1 & 2 for multiple IOU thresholds (e.g., [.5, .75])
  4. Take the average of our list of recalls to arrive at the AR value per class.
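
A minimal sketch of steps 1-4 (it assumes predictions below the confidence threshold have already been dropped and that IOUs have been ranked per class as above):

def average_recall(\n    ranked_ious_by_class: dict[str, list[float]],\n    groundtruth_counts: dict[str, int],\n    iou_thresholds: tuple[float, ...] = (0.5, 0.75),\n) -> dict[str, float]:\n    \"\"\"Per-class AR: the mean recall across IOU thresholds.\"\"\"\n    retval = {}\n    for label, ious in ranked_ious_by_class.items():\n        recalls = []\n        for threshold in iou_thresholds:\n            true_positives = sum(1 for iou in ious if iou >= threshold)\n            recalls.append(true_positives / groundtruth_counts[label])\n        retval[label] = sum(recalls) / len(recalls)\n    return retval\n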

Note that this metric differs from COCO's calculation in two ways:

  • COCO averages across classes while calculating AR, while we calculate AR separately for each class. Our AR calculations match the original FAIR definition of AR, while our mAR calculations match what COCO calls AR.
  • COCO calculates three different AR metrics (AR@1, AR@5, AR@100) by considering only the top 1/5/100 most confident predictions during the matching process. Valor, on the other hand, allows users to input a recall_score_threshold value that will prevent low-confidence predictions from being counted as true positives when calculating AR.
"},{"location":"metrics/#precision-recall-curves","title":"Precision-Recall Curves","text":"

Precision-recall curves offer insight into which confidence threshold you should pick for your production pipeline. The PrecisionRecallCurve metric includes the true positives, false positives, true negatives, false negatives, precision, recall, and F1 score for each (label key, label value, confidence threshold) combination. When using the Valor Python client, the output will be formatted as follows:

pr_evaluation = evaluate_detection(\n    data=dataset,\n)\nprint(pr_evaluation)\n\n[...,\n{\n    \"type\": \"PrecisionRecallCurve\",\n    \"parameters\": {\n        \"label_key\": \"class\", # The key of the label.\n        \"pr_curve_iou_threshold\": 0.5, # Note that this value will be None for classification tasks. For detection tasks, we use 0.5 as the default threshold, but allow users to pass an optional `pr_curve_iou_threshold` parameter in their evaluation call.\n    },\n    \"value\": {\n        \"cat\": { # The value of the label.\n            \"0.05\": { # The confidence score threshold, ranging from 0.05 to 0.95 in increments of 0.05.\n                \"fn\": 0,\n                \"fp\": 1,\n                \"tp\": 3,\n                \"recall\": 1,\n                \"precision\": 0.75,\n                \"f1_score\": .857,\n            },\n            ...\n        },\n    }\n}]\n

It's important to note that these curves are computed slightly differently from our other aggregate metrics above:

"},{"location":"metrics/#classification-tasks","title":"Classification Tasks","text":"

Valor calculates its aggregate precision, recall, and F1 metrics by matching the highest confidence prediction with each groundtruth. One issue with this approach is that we may throw away useful information in cases where prediction labels all have similarly strong confidence scores. For example: if our top two predictions for a given ground truth are {\u201clabel\u201d: cat, \u201cscore\u201d:.91} and {\u201clabel\u201d: dog, \u201cscore\u201d:.90}, then our aggregated precision and recall metrics would penalize the dog label even though its confidence score was nearly equal to the cat label.

We think the approach above makes sense when calculating aggregate precision and recall metrics, but, when calculating the PrecisionRecallCurve value for each label, we consider all ground truth-prediction matches in order to treat each label as its own, separate binary classification problem.

"},{"location":"metrics/#detection-tasks","title":"Detection Tasks","text":"

The PrecisionRecallCurve values differ from the precision-recall curves used to calculate Average Precision in two subtle ways:

  • The PrecisionRecallCurve values visualize how precision and recall change as confidence thresholds vary from 0.05 to 0.95 in increments of 0.05. In contrast, the precision-recall curves used to calculate Average Precision are non-uniform; they vary over the actual confidence scores for each ground truth-prediction match.
  • If your pipeline predicts a label on an image, but that label doesn't exist on any ground truths in that particular image, then the PrecisionRecallCurve values will consider that prediction to be a false positive, whereas the other detection metrics will ignore that particular prediction.
"},{"location":"metrics/#detailedprecisionrecallcurve","title":"DetailedPrecisionRecallCurve","text":"

Valor also includes a more detailed version of PrecisionRecallCurve which can be useful for debugging your model's false positives and false negatives. When calculating DetailedPrecisionRecallCurve, Valor will classify false positives as either hallucinations or misclassifications and false negatives as either missed_detections or misclassifications using the following logic:

"},{"location":"metrics/#classification-tasks_1","title":"Classification Tasks","text":"
  • A false positive is a misclassification if there is a qualified prediction (with score >= score_threshold) with the same Label.key as the groundtruth on the datum, but the Label.value is incorrect. For example: if there's a photo with one groundtruth label on it (e.g., Label(key='animal', value='dog')), and we predicted another label value (e.g., Label(key='animal', value='cat')) on that datum, we'd say it's a misclassification since the key was correct but the value was not. Any false positives that do not meet this criterion are considered to be hallucinations.
  • Similarly, a false negative is a misclassification if there is a prediction with the same Label.key as the groundtruth on the datum, but the Label.value is incorrect. Any false negatives that do not meet this criterion are considered to be missed_detections.
"},{"location":"metrics/#object-detection-tasks","title":"Object Detection Tasks","text":"
  • A false positive is a misclassification if a) there is a qualified prediction with the same Label.key as the groundtruth on the datum, but the Label.value is incorrect, and b) the qualified prediction and groundtruth have an IOU >= pr_curve_iou_threshold. For example: if there's a photo with one groundtruth label on it (e.g., Label(key='animal', value='dog')), and we predicted another bounding box directly over that same object (e.g., Label(key='animal', value='cat')), we'd say it's a misclassification. Any false positives that do not meet these criteria are considered to be hallucinations.
  • A false negative is determined to be a misclassification if the two criteria above are met: a) there is a qualified prediction with the same Label.key as the groundtruth on the datum, but the Label.value is incorrect, and b) the qualified prediction and groundtruth have an IOU >= pr_curve_iou_threshold. Any false negatives that do not meet these criteria are considered to be missed_detections. (A schematic sketch of the false-positive half of this logic follows this list.)
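
The sketch below illustrates how a detection false positive could be sorted into misclassification versus hallucination. It is schematic rather than Valor's actual implementation: `iou` stands in for an IOU function you supply, and the dict keys are placeholders for the prediction's and ground truth's Label.key, Label.value, score, and bounding box.

def classify_false_positive(prediction, groundtruths, iou, score_threshold, pr_curve_iou_threshold):
    """Schematic: label a detection false positive as a misclassification or hallucination."""
    for gt in groundtruths:
        if (
            prediction["score"] >= score_threshold                      # a) qualified prediction...
            and prediction["key"] == gt["key"]                          # ...with the same Label.key
            and prediction["value"] != gt["value"]                      # ...but the wrong Label.value
            and iou(prediction["box"], gt["box"]) >= pr_curve_iou_threshold  # b) sufficient overlap
        ):
            return "misclassification"
    return "hallucination"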

The DetailedPrecisionRecallCurve output also includes up to n examples of each type of error, where n is set using pr_curve_max_examples. An example output is as follows:

# To retrieve more detailed examples for each `fn`, `fp`, and `tp`, look at the `DetailedPrecisionRecallCurve` metric
detailed_evaluation = evaluate_detection(
    data=dataset,
    pr_curve_max_examples=1, # The maximum number of examples to return for each observation type (e.g., hallucinations, misclassifications, etc.)
    metrics_to_return=[..., 'DetailedPrecisionRecallCurve'] # DetailedPrecisionRecallCurve isn't returned by default; the user must ask for it explicitly
)
print(detailed_evaluation)

[...,
{
    "type": "PrecisionRecallCurve",
    "parameters": {
        "label_key": "class", # The key of the label.
        "pr_curve_iou_threshold": 0.5,
    },
    "value": {
        "cat": { # The value of the label.
            "0.05": { # The confidence score threshold, ranging from 0.05 to 0.95 in increments of 0.05.
                "fp": {
                    "total": 1,
                    "observations": {
                        'hallucinations': {
                            "count": 1,
                            "examples": [
                                (
                                    'test_dataset',
                                    1,
                                    '{"type":"Polygon","coordinates":[[[464.08,105.09],[495.74,105.09],[495.74,146.99],[464.08,146.99],[464.08,105.91]]]}'
                                ) # There's one false positive for this (key, value, confidence threshold) combination, as indicated by the single tuple shown here. The tuple contains that observation's dataset name, datum ID, and coordinates in the form of a GeoJSON string. For classification tasks, this tuple will only contain the given observation's dataset name and datum ID.
                            ],
                        }
                    },
                },
                "tp": {
                    "total": 3,
                    "observations": {
                        'all': {
                            "count": 3,
                            "examples": [
                                (
                                    'test_dataset',
                                    2,
                                    '{"type":"Polygon","coordinates":[[[464.08,105.09],[495.74,105.09],[495.74,146.99],[464.08,146.99],[464.08,105.91]]]}'
                                ) # We only return one example since `pr_curve_max_examples` is set to 1 by default; update this argument at evaluation time to store and retrieve an arbitrary number of examples.
                            ],
                        },
                    }
                },
                "fn": {...},
            },
        },
    }
}]
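
To dig into individual errors afterwards, you can index straight into the metric's value dictionary. A minimal sketch, treating the metrics as plain dicts shaped like the printed output above:

# Grab the curve metric from the evaluation output above.
curve = next(m for m in detailed_evaluation if m["type"] == "PrecisionRecallCurve")

# Pull the stored hallucination examples for label value "cat" at threshold 0.05.
examples = curve["value"]["cat"]["0.05"]["fp"]["observations"]["hallucinations"]["examples"]
for dataset_name, datum_uid, geojson in examples:
    print(f"{dataset_name}/{datum_uid}: {geojson}")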
"},{"location":"technical_concepts/","title":"Technical Concepts","text":"

On this page, we'll describe many of the technical concepts underpinning Valor.

"},{"location":"technical_concepts/#high-level-workflow","title":"High-Level Workflow","text":"

The typical Valor workflow involves POSTing ground truth annotations (e.g., class labels, bounding boxes, segmentation masks, etc.) and model predictions to our API service. The service leverages these ground truths and predictions to compute evaluation metrics, and then stores the ground truths, predictions, and evaluation metrics centrally in Postgres. Users can also attach metadata to their Datasets, Models, GroundTruths, and Annotations; this metadata makes it easy to query for specific subsets of evaluations at a later date. Once an evaluation is stored in Valor, users can query those evaluations from Postgres via GET requests to the Valor API.
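
In Python-client terms, that workflow looks roughly like the following sketch; the host, dataset, and model names are illustrative, and the constructors are assumed to mirror the class descriptions later on this page:

from valor import Client, Dataset, Model

client = Client.connect("http://localhost:8000")  # illustrative host

dataset = Dataset(name="my-dataset")  # hypothetical name
client.create_dataset(dataset)
client.create_groundtruths(dataset, groundtruths=[...])  # your valor.GroundTruth objects
client.finalize_dataset(dataset)

model = Model(name="my-model")  # hypothetical name
client.create_model(model)
client.create_predictions(dataset, model, predictions=[...])  # your valor.Prediction objects
client.finalize_inferences(dataset, model)

# Later, query stored evaluations back out of Postgres via the API.
evaluations = client.get_evaluations(datasets=["my-dataset"], models=["my-model"])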

Note that Valor does not store raw data (such as underlying images) or facilitate model inference. Only the following items are stored in Postgres:

  • Ground truth annotations
  • Predictions outputted from a model
  • Metadata from any of Valor's various classes
  • Evaluation metrics computed by Valor
  • State related to any of the above
"},{"location":"technical_concepts/#supported-task-types","title":"Supported Task Types","text":"

As of May 2024, Valor supports the following types of supervised learning tasks and associated metrics:

  • Classification (including multi-label classification)
      • F1
      • ROC AUC
      • Accuracy
      • Precision
      • Recall
      • Precision Recall Curve
      • Detailed Precision Recall Curve
  • Object detection
      • AP
      • mAP
      • AP Averaged Over IOUs
      • mAP Averaged Over IOUs
      • Precision Recall Curve
      • Detailed Precision Recall Curve
  • Segmentation (including both instance and semantic segmentation)
      • IOU
      • mIOU

For descriptions of each of these metrics, see our Metrics page.

We expect the Valor framework to extend well to other types of supervised learning tasks and plan to expand our supported task types in future releases.

"},{"location":"technical_concepts/#components","title":"Components","text":"

We can think of Valor in terms of three orthogonal components:

"},{"location":"technical_concepts/#api","title":"API","text":"

The core of Valor is a back end REST API service. Users can call the API's endpoints directly (e.g., POST /datasets), or they can use our Python client to handle the API calls in their Python environment. All of Valor's state is stored in Postgres; the API itself is completely stateless.
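
For instance, you could create a dataset by POSTing to the service directly. A minimal sketch using the requests library; the host, port, and payload shape here are assumptions, so check the /docs page described below for the authoritative schema:

import requests

# POST /datasets, as referenced above.
resp = requests.post(
    "http://localhost:8000/datasets",  # illustrative host and port
    json={"name": "my-dataset", "metadata": {}},
)
resp.raise_for_status()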

Note that, after you start the API service in Docker, you'll be able to view FastAPI's automatically generated API documentation at https://<your host>/docs.

"},{"location":"technical_concepts/#postgresql","title":"PostgreSQL","text":"

PostgreSQL (a.k.a. Postgres) is an open-source relational database management system. We use Postgres to store all of Valor's various objects and states.

One of the most important reasons we chose Postgres was its PostGIS extension, which adds support for storing, indexing, and querying geographic data. PostGIS enables Valor to quickly filter prior evaluations using geographic coordinates, which is a critically important feature for any computer vision task involving satellite data.

"},{"location":"technical_concepts/#python-client","title":"Python Client","text":"

Finally, we created a client to make it easier for our users to play with Valor from their Python environment. All of Valor's validations and computations are handled by our API; the Python client simply provides convenient methods to call the API's endpoints.

"},{"location":"technical_concepts/#classes","title":"Classes","text":"

The Valor API and Python client both make use of six core classes:

"},{"location":"technical_concepts/#dataset","title":"Dataset","text":"

The highest-level class is a Dataset, which stores metadata and annotations associated with a particular set of data. Note that Dataset is an abstraction: You can have multiple Datasets that reference the exact same input data, which is useful if you need to update or version your data over time.

Datasets require a name at instantiation and can optionally take in various types of metadata that you want to associate with your data.
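
A minimal sketch, assuming the constructor mirrors the description above (the name and metadata keys are illustrative):

from valor import Dataset

dataset = Dataset(
    name="highway-cameras-v2",  # required
    metadata={"capture_month": "2024-05", "sensor": "rgb"},  # optional, illustrative keys
)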

"},{"location":"technical_concepts/#model","title":"Model","text":"

Models describe a particular instantiation of a machine learning model. We use the Model object to delineate between different model runs or between the same model run over time. Note that Models aren't children of Datasets; you can have one Model contain predictions for multiple Datasets.

Models require a name at instantiation and can optionally take in various types of metadata that you want to associate with your model.

"},{"location":"technical_concepts/#groundtruth","title":"GroundTruth","text":"

A GroundTruth object clarifies what the correct prediction should be for a given piece of data (e.g., an image). For an object detection task, for example, the GroundTruth would store a human-drawn bounding box that, when overlaid on an object, would correctly enclose the object that we're trying to predict.

GroundTruths take one Datum and a list of Annotations as arguments.
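
A minimal sketch, assuming the constructors mirror the class descriptions on this page (the UID and label are illustrative; depending on your client version, the Datum may also need its dataset name, per the Datum section below):

from valor import Annotation, Datum, GroundTruth, Label

groundtruth = GroundTruth(
    datum=Datum(uid="img_001"),  # hypothetical UID
    annotations=[
        Annotation.create(labels=[Label(key="class", value="dog")]),
    ],
)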

"},{"location":"technical_concepts/#prediction","title":"Prediction","text":"

A Prediction object describes the output of a machine learning model. For an object detection task, for example, the Prediction would describe a machine-generated bounding box enclosing the area where a computer vision model believes a certain class of object can be found.

Predictions take one Datum and a list of Annotations as arguments.
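
A minimal sketch mirroring the GroundTruth example above; attaching a confidence score to each predicted Label is assumed here, consistent with the scored predictions discussed on the Metrics page:

from valor import Annotation, Datum, Label, Prediction

prediction = Prediction(
    datum=Datum(uid="img_001"),  # the same datum the ground truth describes
    annotations=[
        Annotation.create(
            labels=[
                Label(key="class", value="dog", score=0.9),  # score assumed per the Metrics page
                Label(key="class", value="cat", score=0.1),
            ]
        ),
    ],
)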

"},{"location":"technical_concepts/#datum","title":"Datum","text":"

Datums are used to store metadata about GroundTruths or Predictions. This metadata can include user-supplied metadata (e.g., JSONs filled with configuration details) or geospatial coordinates (via the geospatial argument). Datums provide the vital link between GroundTruths / Predictions and Datasets, and they are useful when filtering your evaluations on specific conditions.

A Datum requires a universal ID (UID) and dataset name at instantiation, along with any metadata or geospatial dictionaries that you want to associate with your GroundTruth or Prediction.

"},{"location":"technical_concepts/#annotation","title":"Annotation","text":"

Annotations attach to both GroundTruths and Predictions, enabling users to add textual labels to these objects. If a GroundTruth depicts a bounding box around a cat, for example, the Annotation would be passed into the GroundTruth to clarify the correct label for the GroundTruth (e.g., class=cat) and any other labels the user wants to specify for that bounding box (e.g., breed=tabby).

Annotations require the user to specify their task type, labels, and metadata at instantiation. Users can also pass in various visual representations tailored to their specific task, such as bounding boxes, segmentations, or image rasters.

"},{"location":"technical_concepts/#authentication","title":"Authentication","text":"

The API can be run without authentication (the default), or with authentication using a single global username and password. To set this up, supply the following environment variables when running the back end, using either of the options below:

  • Set the environment variables VALOR_SECRET_KEY, VALOR_USERNAME, and VALOR_PASSWORD manually (e.g., export VALOR_SECRET_KEY=<secret key>)
  • Set these environment variables in a file named .env.auth, and place that file in the api directory. An example of such a file would look like:
VALOR_SECRET_KEY="secret key"
VALOR_USERNAME="username"
VALOR_PASSWORD="password"

VALOR_SECRET_KEY is the key used for encoding and decoding tokens, and should be a random string. VALOR_USERNAME and VALOR_PASSWORD are the username and password that will be used to authenticate requests.
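
Once authentication is enabled, clients must supply a token when connecting. A minimal sketch using the Client.connect signature documented below (the host and token are placeholders):

from valor import Client

client = Client.connect(
    "https://valor.example.com",       # illustrative host
    access_token="<your access token>",
)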

You can use the tests in integration_tests/test_client_auth.py to check whether your authenticator is running correctly.

"},{"location":"technical_concepts/#deployment-settings","title":"Deployment Settings","text":"

When deploying behind a proxy or with external routing, the API_ROOT_PATH environment variable should be used to set the root_path argument to fastapi.FastAPI (see https://fastapi.tiangolo.com/advanced/behind-a-proxy/#setting-the-root_path-in-the-fastapi-app).
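
For reference, a minimal sketch of how API_ROOT_PATH might be consumed; Valor's actual wiring may differ:

import os
from fastapi import FastAPI

# Serve the app under a proxy-stripped prefix such as "/valor".
app = FastAPI(root_path=os.environ.get("API_ROOT_PATH", ""))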

"},{"location":"technical_concepts/#release-process","title":"Release Process","text":"

A release is made by publishing a tag of the form vX.Y.Z (e.g., v0.1.0). This will trigger a GitHub action that will build and publish the Python client to PyPI. These releases should be created using the GitHub UI.

"},{"location":"client_api/Annotation/","title":"Annotation","text":"

Bases: StaticCollection

A class used to annotate GroundTruths and Predictions.

Attributes:

Name Type Description metadata Dictionary

A dictionary of metadata that describes the Annotation.

labels (List[Label], optional)

A list of labels to use for the Annotation.

bounding_box Box

A bounding box to assign to the Annotation.

polygon BoundingPolygon

A polygon to assign to the Annotation.

raster Raster

A raster to assign to the Annotation.

embedding List[float]

An embedding, described by a list of values with type float and a maximum length of 16,000.

is_instance (bool, optional)

A boolean describing whether we should treat the Raster attached to an annotation as an instance segmentation or not. If set to true, then the Annotation will be validated for use in object detection tasks. If set to false, then the Annotation will be validated for use in semantic segmentation tasks.

implied_task_types (list[str], optional)

The validated task types that are applicable to each Annotation. This field doesn't need to be set by the user.

Examples:

Classification

>>> Annotation.create(
...     labels=[
...         Label(key="class", value="dog"),
...         Label(key="category", value="animal"),
...     ]
... )

Object-Detection Box

>>> annotation = Annotation.create(
...     labels=[Label(key="k1", value="v1")],
...     bounding_box=Box(...),
... )

Object-Detection Polygon

>>> annotation = Annotation.create(
...     labels=[Label(key="k1", value="v1")],
...     polygon=BoundingPolygon(...),
... )

Object-Detection Raster

>>> annotation = Annotation.create(
...     labels=[Label(key="k1", value="v1")],
...     raster=Raster(...),
...     is_instance=True
... )

Semantic-Segmentation Raster

>>> annotation = Annotation.create(
...     labels=[Label(key="k1", value="v1")],
...     raster=Raster(...),
...     is_instance=False # or None
... )

Defining all supported annotation types is allowed!

>>> Annotation.create(
...     labels=[Label(key="k1", value="v1")],
...     bounding_box=Box(...),
...     polygon=BoundingPolygon(...),
...     raster=Raster(...),
... )
Source code in valor/schemas/symbolic/collections.py
class Annotation(StaticCollection):\n    \"\"\"\n    A class used to annotate `GroundTruths` and `Predictions`.\n\n    Attributes\n    ----------\n    metadata: Dictionary\n        A dictionary of metadata that describes the `Annotation`.\n    labels: List[Label], optional\n        A list of labels to use for the `Annotation`.\n    bounding_box: Box\n        A bounding box to assign to the `Annotation`.\n    polygon: BoundingPolygon\n        A polygon to assign to the `Annotation`.\n    raster: Raster\n        A raster to assign to the `Annotation`.\n    embedding: List[float]\n        An embedding, described by a list of values with type float and a maximum length of 16,000.\n    is_instance: bool, optional\n        A boolean describing whether we should treat the Raster attached to an annotation as an instance segmentation or not. If set to true, then the Annotation will be validated for use in object detection tasks. If set to false, then the Annotation will be validated for use in semantic segmentation tasks.\n    implied_task_types: list[str], optional\n        The validated task types that are applicable to each Annotation. Doesn't need to bet set by the user.\n\n    Examples\n    --------\n\n    Classification\n    >>> Annotation.create(\n    ...     labels=[\n    ...         Label(key=\"class\", value=\"dog\"),\n    ...         Label(key=\"category\", value=\"animal\"),\n    ...     ]\n    ... )\n\n    Object-Detection Box\n    >>> annotation = Annotation.create(\n    ...     labels=[Label(key=\"k1\", value=\"v1\")],\n    ...    bounding_box=box2,\n    ... )\n\n    Object-Detection Polygon\n    >>> annotation = Annotation.create(\n    ...     labels=[Label(key=\"k1\", value=\"v1\")],\n    ...     polygon=BoundingPolygon(...),\n    ... )\n\n    Object-Detection Raster\n    >>> annotation = Annotation.create(\n    ...     labels=[Label(key=\"k1\", value=\"v1\")],\n    ...     raster=Raster(...),\n    ...     is_instance=True\n    ... )\n\n    Semantic-Segmentation Raster\n    >>> annotation = Annotation.create(\n    ...     labels=[Label(key=\"k1\", value=\"v1\")],\n    ...     raster=Raster(...),\n    ...     is_instance=False # or None\n    ... )\n\n    Defining all supported annotation types is allowed!\n    >>> Annotation.create(\n    ...     labels=[Label(key=\"k1\", value=\"v1\")],\n    ...     bounding_box=Box(...),\n    ...     polygon=BoundingPolygon(...),\n    ...     raster=Raster(...),\n    ... 
)\n    \"\"\"\n\n    metadata: Dictionary = Dictionary.symbolic(\n        owner=\"annotation\", name=\"metadata\"\n    )\n    labels: SymbolicList[Label] = SymbolicList[Label].symbolic(\n        owner=\"annotation\", name=\"labels\"\n    )\n    bounding_box: Box = Box.symbolic(owner=\"annotation\", name=\"bounding_box\")\n    polygon: Polygon = Polygon.symbolic(owner=\"annotation\", name=\"polygon\")\n    raster: Raster = Raster.symbolic(owner=\"annotation\", name=\"raster\")\n    embedding: Embedding = Embedding.symbolic(\n        owner=\"annotation\", name=\"embedding\"\n    )\n    is_instance: Bool = Bool.symbolic(owner=\"annotation\", name=\"is_instance\")\n    implied_task_types: SymbolicList[String] = SymbolicList[String].symbolic(\n        owner=\"annotation\", name=\"implied_task_types\"\n    )\n\n    def __init__(\n        self,\n        *,\n        metadata: Optional[dict] = None,\n        labels: Optional[List[Label]] = None,\n        bounding_box: Optional[Box] = None,\n        polygon: Optional[Polygon] = None,\n        raster: Optional[Raster] = None,\n        embedding: Optional[Embedding] = None,\n        is_instance: Optional[bool] = None,\n        implied_task_types: Optional[List[String]] = None,\n    ):\n        \"\"\"\n        Constructs an annotation.\n\n        Parameters\n        ----------\n        metadata: Dict[str, Union[int, float, str, bool, datetime.datetime, datetime.date, datetime.time]]\n            A dictionary of metadata that describes the `Annotation`.\n        labels: List[Label]\n            A list of labels to use for the `Annotation`.\n        bounding_box: Box, optional\n            A bounding box annotation.\n        polygon: Polygon, optional\n            A polygon annotation.\n        raster: Raster, optional\n            A raster annotation.\n        embedding: List[float], optional\n            An embedding, described by a list of values with type float and a maximum length of 16,000.\n        is_instance: bool, optional\n            A boolean describing whether we should treat the Raster attached to an annotation as an instance segmentation or not. If set to true, then the Annotation will be validated for use in object detection tasks. If set to false, then the Annotation will be validated for use in semantic segmentation tasks.\n        implied_task_types: list[str], optional\n            The validated task types that are applicable to each Annotation. Doesn't need to bet set by the user.\n\n        \"\"\"\n        super().__init__(\n            metadata=metadata if metadata else dict(),\n            labels=labels if labels else list(),\n            bounding_box=bounding_box,\n            polygon=polygon,\n            raster=raster,\n            embedding=embedding,\n            is_instance=is_instance,\n            implied_task_types=implied_task_types,\n        )\n\n    @staticmethod\n    def formatting() -> Dict[str, Any]:\n        \"\"\"Attribute format mapping.\"\"\"\n        return {\n            \"bounding_box\": Box.nullable,\n            \"polygon\": Polygon.nullable,\n            \"raster\": Raster.nullable,\n            \"embedding\": Embedding.nullable,\n            \"is_instance\": Bool.nullable,\n            \"implied_task_types\": SymbolicList,\n        }\n
"},{"location":"client_api/Annotation/#valor.Annotation-functions","title":"Functions","text":""},{"location":"client_api/Annotation/#valor.Annotation.__init__","title":"valor.Annotation.__init__(*, metadata=None, labels=None, bounding_box=None, polygon=None, raster=None, embedding=None, is_instance=None, implied_task_types=None)","text":"

Constructs an annotation.

Parameters:

Name Type Description Default metadata Optional[dict]

A dictionary of metadata that describes the Annotation.

None labels Optional[List[Label]]

A list of labels to use for the Annotation.

None bounding_box Optional[Box]

A bounding box annotation.

None polygon Optional[Polygon]

A polygon annotation.

None raster Optional[Raster]

A raster annotation.

None embedding Optional[Embedding]

An embedding, described by a list of values with type float and a maximum length of 16,000.

None is_instance Optional[bool]

A boolean describing whether we should treat the Raster attached to an annotation as an instance segmentation or not. If set to true, then the Annotation will be validated for use in object detection tasks. If set to false, then the Annotation will be validated for use in semantic segmentation tasks.

None implied_task_types Optional[List[String]]

The validated task types that are applicable to each Annotation. This field doesn't need to be set by the user.

None Source code in valor/schemas/symbolic/collections.py
def __init__(\n    self,\n    *,\n    metadata: Optional[dict] = None,\n    labels: Optional[List[Label]] = None,\n    bounding_box: Optional[Box] = None,\n    polygon: Optional[Polygon] = None,\n    raster: Optional[Raster] = None,\n    embedding: Optional[Embedding] = None,\n    is_instance: Optional[bool] = None,\n    implied_task_types: Optional[List[String]] = None,\n):\n    \"\"\"\n    Constructs an annotation.\n\n    Parameters\n    ----------\n    metadata: Dict[str, Union[int, float, str, bool, datetime.datetime, datetime.date, datetime.time]]\n        A dictionary of metadata that describes the `Annotation`.\n    labels: List[Label]\n        A list of labels to use for the `Annotation`.\n    bounding_box: Box, optional\n        A bounding box annotation.\n    polygon: Polygon, optional\n        A polygon annotation.\n    raster: Raster, optional\n        A raster annotation.\n    embedding: List[float], optional\n        An embedding, described by a list of values with type float and a maximum length of 16,000.\n    is_instance: bool, optional\n        A boolean describing whether we should treat the Raster attached to an annotation as an instance segmentation or not. If set to true, then the Annotation will be validated for use in object detection tasks. If set to false, then the Annotation will be validated for use in semantic segmentation tasks.\n    implied_task_types: list[str], optional\n        The validated task types that are applicable to each Annotation. Doesn't need to bet set by the user.\n\n    \"\"\"\n    super().__init__(\n        metadata=metadata if metadata else dict(),\n        labels=labels if labels else list(),\n        bounding_box=bounding_box,\n        polygon=polygon,\n        raster=raster,\n        embedding=embedding,\n        is_instance=is_instance,\n        implied_task_types=implied_task_types,\n    )\n
"},{"location":"client_api/Annotation/#valor.Annotation.formatting","title":"valor.Annotation.formatting() staticmethod","text":"

Attribute format mapping.

Source code in valor/schemas/symbolic/collections.py
@staticmethod\ndef formatting() -> Dict[str, Any]:\n    \"\"\"Attribute format mapping.\"\"\"\n    return {\n        \"bounding_box\": Box.nullable,\n        \"polygon\": Polygon.nullable,\n        \"raster\": Raster.nullable,\n        \"embedding\": Embedding.nullable,\n        \"is_instance\": Bool.nullable,\n        \"implied_task_types\": SymbolicList,\n    }\n
"},{"location":"client_api/Client/","title":"Client","text":"

Valor client object for interacting with the api.

Parameters:

Name Type Description Default connection ClientConnection

Option to use an existing connection object.

None Source code in valor/coretypes.py
class Client:\n    \"\"\"\n    Valor client object for interacting with the api.\n\n    Parameters\n    ----------\n    connection : ClientConnection, optional\n        Option to use an existing connection object.\n    \"\"\"\n\n    def __init__(self, connection: Optional[ClientConnection] = None):\n        if not connection:\n            connection = get_connection()\n        self.conn = connection\n\n    @classmethod\n    def connect(\n        cls,\n        host: str,\n        access_token: Optional[str] = None,\n        reconnect: bool = False,\n    ) -> Client:\n        \"\"\"\n        Establishes a connection to the Valor API.\n\n        Parameters\n        ----------\n        host : str\n            The host to connect to. Should start with \"http://\" or \"https://\".\n        access_token : str\n            The access token for the host (if the host requires authentication).\n        \"\"\"\n        connect(host=host, access_token=access_token, reconnect=reconnect)\n        return cls(get_connection())\n\n    def get_labels(\n        self,\n        filter_by: Optional[FilterType] = None,\n    ) -> List[Label]:\n        \"\"\"\n        Gets all labels using an optional filter.\n\n        Parameters\n        ----------\n        filter_by : FilterType, optional\n            Optional constraints to filter by.\n\n        Returns\n        ------\n        List[valor.Label]\n            A list of labels.\n        \"\"\"\n        filters = _format_filter(filter_by)\n        filters = asdict(filters)\n        return [Label(**label) for label in self.conn.get_labels(filters)]\n\n    def get_labels_from_dataset(\n        self, dataset: Union[Dataset, str]\n    ) -> List[Label]:\n        \"\"\"\n        Get all labels associated with a dataset's ground truths.\n\n        Parameters\n        ----------\n        dataset : valor.Dataset\n            The dataset to search by.\n\n        Returns\n        ------\n        List[valor.Label]\n            A list of labels.\n        \"\"\"\n        dataset_name = (\n            dataset.name if isinstance(dataset, Dataset) else dataset\n        )\n        return [\n            Label(**label)\n            for label in self.conn.get_labels_from_dataset(dataset_name)  # type: ignore\n        ]\n\n    def get_labels_from_model(self, model: Union[Model, str]) -> List[Label]:\n        \"\"\"\n        Get all labels associated with a model's ground truths.\n\n        Parameters\n        ----------\n        model : valor.Model\n            The model to search by.\n\n        Returns\n        ------\n        List[valor.Label]\n            A list of labels.\n        \"\"\"\n        model_name = model.name if isinstance(model, Model) else model\n        return [\n            Label(**label)\n            for label in self.conn.get_labels_from_model(model_name)  # type: ignore\n        ]\n\n    def create_dataset(\n        self,\n        dataset: Union[Dataset, dict],\n    ) -> None:\n        \"\"\"\n        Creates a dataset.\n\n        Parameters\n        ----------\n        dataset : valor.Dataset\n            The dataset to create.\n        \"\"\"\n        if isinstance(dataset, Dataset):\n            dataset = dataset.encode_value()\n        self.conn.create_dataset(dataset)\n\n    def create_groundtruths(\n        self,\n        dataset: Dataset,\n        groundtruths: List[GroundTruth],\n        ignore_existing_datums: bool = False,\n    ):\n        \"\"\"\n        Creates ground truths.\n\n        Parameters\n        ----------\n\n        dataset : 
valor.Dataset\n            The dataset to create the ground truth for.\n        groundtruths : List[valor.GroundTruth]\n            The ground truths to create.\n        ignore_existing_datums : bool, default=False\n            If True, will ignore datums that already exist in the backend.\n            If False, will raise an error if any datums already exist.\n            Default is False.\n        \"\"\"\n        groundtruths_json = []\n        for groundtruth in groundtruths:\n            if not isinstance(groundtruth, GroundTruth):\n                raise TypeError(\n                    f\"Expected ground truth to be of type 'valor.GroundTruth' not '{type(groundtruth)}'.\"\n                )\n            if not isinstance(groundtruth.annotations._value, list):\n                raise TypeError\n            groundtruth_dict = groundtruth.encode_value()\n            groundtruth_dict[\"dataset_name\"] = dataset.name\n            groundtruths_json.append(groundtruth_dict)\n        self.conn.create_groundtruths(\n            groundtruths_json, ignore_existing_datums=ignore_existing_datums\n        )\n\n    def get_groundtruth(\n        self,\n        dataset: Union[Dataset, str],\n        datum: Union[Datum, str],\n    ) -> Union[GroundTruth, None]:\n        \"\"\"\n        Get a particular ground truth.\n\n        Parameters\n        ----------\n        dataset: Union[Dataset, str]\n            The dataset the datum belongs to.\n        datum: Union[Datum, str]\n            The desired datum.\n\n        Returns\n        ----------\n        Union[GroundTruth, None]\n            The matching ground truth or 'None' if it doesn't exist.\n        \"\"\"\n        dataset_name = (\n            dataset.name if isinstance(dataset, Dataset) else dataset\n        )\n        datum_uid = datum.uid if isinstance(datum, Datum) else datum\n        try:\n            resp = self.conn.get_groundtruth(\n                dataset_name=dataset_name, datum_uid=datum_uid  # type: ignore\n            )\n            resp.pop(\"dataset_name\")\n            return GroundTruth.decode_value(resp)\n        except ClientException as e:\n            if e.status_code == 404:\n                return None\n            raise e\n\n    def finalize_dataset(self, dataset: Union[Dataset, str]) -> None:\n        \"\"\"\n        Finalizes a dataset such that new ground truths cannot be added to it.\n\n        Parameters\n        ----------\n        dataset : str\n            The dataset to be finalized.\n        \"\"\"\n        dataset_name = (\n            dataset.name if isinstance(dataset, Dataset) else dataset\n        )\n        return self.conn.finalize_dataset(name=dataset_name)  # type: ignore\n\n    def get_dataset(\n        self,\n        name: str,\n    ) -> Union[Dataset, None]:\n        \"\"\"\n        Gets a dataset by name.\n\n        Parameters\n        ----------\n        name : str\n            The name of the dataset to fetch.\n\n        Returns\n        -------\n        Union[Dataset, None]\n            A Dataset with a matching name, or 'None' if one doesn't exist.\n        \"\"\"\n        dataset = Dataset.decode_value(\n            {\n                **self.conn.get_dataset(name),\n                \"connection\": self.conn,\n            }\n        )\n        return dataset\n\n    def get_datasets(\n        self,\n        filter_by: Optional[FilterType] = None,\n    ) -> List[Dataset]:\n        \"\"\"\n        Get all datasets, with an option to filter results according to some user-defined parameters.\n\n        
Parameters\n        ----------\n        filter_by : FilterType, optional\n            Optional constraints to filter by.\n\n        Returns\n        ------\n        List[valor.Dataset]\n            A list of datasets.\n        \"\"\"\n        filters = _format_filter(filter_by)\n        if isinstance(filters, Filter):\n            filters = asdict(filters)\n        dataset_list = []\n        for kwargs in self.conn.get_datasets(filters):\n            dataset = Dataset.decode_value({**kwargs, \"connection\": self.conn})\n            dataset_list.append(dataset)\n        return dataset_list\n\n    def get_datums(\n        self,\n        filter_by: Optional[FilterType] = None,\n    ) -> List[Datum]:\n        \"\"\"\n        Get all datums using an optional filter.\n\n        Parameters\n        ----------\n        filter_by : FilterType, optional\n            Optional constraints to filter by.\n\n        Returns\n        -------\n        List[valor.Datum]\n            A list datums.\n        \"\"\"\n        filters = _format_filter(filter_by)\n        if isinstance(filters, Filter):\n            filters = asdict(filters)\n        return [\n            Datum.decode_value(datum)\n            for datum in self.conn.get_datums(filters)\n        ]\n\n    def get_datum(\n        self,\n        dataset: Union[Dataset, str],\n        uid: str,\n    ) -> Union[Datum, None]:\n        \"\"\"\n        Get datum.\n        `GET` endpoint.\n        Parameters\n        ----------\n        dataset : valor.Dataset\n            The dataset the datum belongs to.\n        uid : str\n            The UID of the datum.\n        Returns\n        -------\n        valor.Datum\n            The requested datum or 'None' if it doesn't exist.\n        \"\"\"\n        dataset_name = (\n            dataset.name if isinstance(dataset, Dataset) else dataset\n        )\n        resp = self.conn.get_datum(dataset_name=dataset_name, uid=uid)  # type: ignore\n        return Datum.decode_value(resp)\n\n    def get_dataset_status(\n        self,\n        name: str,\n    ) -> Union[TableStatus, None]:\n        \"\"\"\n        Get the state of a given dataset.\n\n        Parameters\n        ----------\n        name : str\n            The name of the dataset we want to fetch the state of.\n\n        Returns\n        ------\n        TableStatus | None\n            The state of the dataset, or 'None' if the dataset does not exist.\n        \"\"\"\n        try:\n            return self.conn.get_dataset_status(name)\n        except ClientException as e:\n            if e.status_code == 404:\n                return None\n            raise e\n\n    def get_dataset_summary(self, name: str) -> DatasetSummary:\n        \"\"\"\n        Gets the summary of a dataset.\n\n        Parameters\n        ----------\n        name : str\n            The name of the dataset to create a summary for.\n\n        Returns\n        -------\n        DatasetSummary\n            A dataclass containing the dataset summary.\n        \"\"\"\n        return DatasetSummary(**self.conn.get_dataset_summary(name))\n\n    def delete_dataset(self, name: str, timeout: int = 0) -> None:\n        \"\"\"\n        Deletes a dataset.\n\n        Parameters\n        ----------\n        name : str\n            The name of the dataset to be deleted.\n        timeout : int\n            The number of seconds to wait in order to confirm that the dataset was deleted.\n        \"\"\"\n        self.conn.delete_dataset(name)\n        if timeout:\n            for _ in range(timeout):\n        
        try:\n                    self.get_dataset(name)\n                except DatasetDoesNotExistError:\n                    break\n                time.sleep(1)\n            else:\n                raise TimeoutError(\n                    \"Dataset wasn't deleted within timeout interval\"\n                )\n\n    def create_model(\n        self,\n        model: Union[Model, dict],\n    ):\n        \"\"\"\n        Creates a model.\n\n        Parameters\n        ----------\n        model : valor.Model\n            The model to create.\n        \"\"\"\n        if isinstance(model, Model):\n            model = model.encode_value()\n        self.conn.create_model(model)\n\n    def create_predictions(\n        self,\n        dataset: Dataset,\n        model: Model,\n        predictions: List[Prediction],\n    ) -> None:\n        \"\"\"\n        Creates predictions.\n\n        Parameters\n        ----------\n        dataset : valor.Dataset\n            The dataset that is being operated over.\n        model : valor.Model\n            The model making the prediction.\n        predictions : List[valor.Prediction]\n            The predictions to create.\n        \"\"\"\n        predictions_json = []\n        for prediction in predictions:\n            if not isinstance(prediction, Prediction):\n                raise TypeError(\n                    f\"Expected prediction to be of type 'valor.Prediction' not '{type(prediction)}'.\"\n                )\n            if not isinstance(prediction.annotations._value, list):\n                raise TypeError\n            prediction_dict = prediction.encode_value()\n            prediction_dict[\"dataset_name\"] = dataset.name\n            prediction_dict[\"model_name\"] = model.name\n            predictions_json.append(prediction_dict)\n        self.conn.create_predictions(predictions_json)\n\n    def get_prediction(\n        self,\n        dataset: Union[Dataset, str],\n        model: Union[Model, str],\n        datum: Union[Datum, str],\n    ) -> Union[Prediction, None]:\n        \"\"\"\n        Get a particular prediction.\n\n        Parameters\n        ----------\n        dataset: Union[Dataset, str]\n            The dataset the datum belongs to.\n        model: Union[Model, str]\n            The model that made the prediction.\n        datum: Union[Datum, str]\n            The desired datum.\n\n        Returns\n        ----------\n        Union[Prediction, None]\n            The matching prediction or 'None' if it doesn't exist.\n        \"\"\"\n        dataset_name = (\n            dataset.name if isinstance(dataset, Dataset) else dataset\n        )\n        model_name = model.name if isinstance(model, Model) else model\n        datum_uid = datum.uid if isinstance(datum, Datum) else datum\n\n        resp = self.conn.get_prediction(\n            dataset_name=dataset_name,  # type: ignore\n            model_name=model_name,  # type: ignore\n            datum_uid=datum_uid,  # type: ignore\n        )\n        resp.pop(\"dataset_name\")\n        resp.pop(\"model_name\")\n        return Prediction.decode_value(resp)\n\n    def finalize_inferences(\n        self, dataset: Union[Dataset, str], model: Union[Model, str]\n    ) -> None:\n        \"\"\"\n        Finalizes a model-dataset pairing such that new predictions cannot be added to it.\n        \"\"\"\n        dataset_name = (\n            dataset.name if isinstance(dataset, Dataset) else dataset\n        )\n        model_name = model.name if isinstance(model, Model) else model\n        return 
self.conn.finalize_inferences(\n            dataset_name=dataset_name,  # type: ignore\n            model_name=model_name,  # type: ignore\n        )\n\n    def get_model(\n        self,\n        name: str,\n    ) -> Union[Model, None]:\n        \"\"\"\n        Gets a model by name.\n\n        Parameters\n        ----------\n        name : str\n            The name of the model to fetch.\n\n        Returns\n        -------\n        Union[valor.Model, None]\n            A Model with matching name or 'None' if one doesn't exist.\n        \"\"\"\n        return Model.decode_value(\n            {\n                **self.conn.get_model(name),\n                \"connection\": self.conn,\n            }\n        )\n\n    def get_models(\n        self,\n        filter_by: Optional[FilterType] = None,\n    ) -> List[Model]:\n        \"\"\"\n        Get all models using an optional filter.\n\n        Parameters\n        ----------\n        filter_by : FilterType, optional\n            Optional constraints to filter by.\n\n        Returns\n        ------\n        List[valor.Model]\n            A list of models.\n        \"\"\"\n        filters = _format_filter(filter_by)\n        if isinstance(filters, Filter):\n            filters = asdict(filters)\n        model_list = []\n        for kwargs in self.conn.get_models(filters):\n            model = Model.decode_value({**kwargs, \"connection\": self.conn})\n            model_list.append(model)\n        return model_list\n\n    def get_model_status(\n        self,\n        dataset_name: str,\n        model_name: str,\n    ) -> Optional[TableStatus]:\n        \"\"\"\n        Get the state of a given model over a dataset.\n\n        Parameters\n        ----------\n        dataset_name : str\n            The name of the dataset that the model is operating over.\n        model_name : str\n            The name of the model we want to fetch the state of.\n\n        Returns\n        ------\n        Union[TableStatus, None]\n            The state of the model or 'None' if the model doesn't exist.\n        \"\"\"\n        try:\n            return self.conn.get_model_status(dataset_name, model_name)\n        except ClientException as e:\n            if e.status_code == 404:\n                return None\n            raise e\n\n    def get_model_eval_requests(\n        self, model: Union[Model, str]\n    ) -> List[Evaluation]:\n        \"\"\"\n        Get all evaluations that have been created for a model.\n\n        This does not return evaluation results.\n\n        `GET` endpoint.\n\n        Parameters\n        ----------\n        model : str\n            The model to search by.\n\n        Returns\n        -------\n        List[Evaluation]\n            A list of evaluations.\n        \"\"\"\n        model_name = model.name if isinstance(model, Model) else model\n        return [\n            Evaluation(**evaluation, connection=self.conn)\n            for evaluation in self.conn.get_model_eval_requests(model_name)  # type: ignore\n        ]\n\n    def delete_model(self, name: str, timeout: int = 0) -> None:\n        \"\"\"\n        Deletes a model.\n\n        Parameters\n        ----------\n        name : str\n            The name of the model to be deleted.\n        timeout : int\n            The number of seconds to wait in order to confirm that the model was deleted.\n        \"\"\"\n        self.conn.delete_model(name)\n        if timeout:\n            for _ in range(timeout):\n                try:\n                    self.get_model(name)\n                
except ModelDoesNotExistError:\n                    break\n                time.sleep(1)\n            else:\n                raise TimeoutError(\n                    \"Model wasn't deleted within timeout interval\"\n                )\n\n    def get_evaluations(\n        self,\n        *,\n        evaluation_ids: Optional[List[int]] = None,\n        models: Union[List[Model], List[str], None] = None,\n        datasets: Union[List[Dataset], List[str], None] = None,\n        metrics_to_sort_by: Optional[\n            Dict[str, Union[Dict[str, str], str]]\n        ] = None,\n    ) -> List[Evaluation]:\n        \"\"\"\n        Returns all evaluations associated with user-supplied dataset and/or model names.\n\n        Parameters\n        ----------\n        evaluation_ids : List[int], optional.\n            A list of job IDs to return metrics for.\n        models : Union[List[valor.Model], List[str]], optional\n            A list of model names that we want to return metrics for.\n        datasets : Union[List[valor.Dataset], List[str]], optional\n            A list of dataset names that we want to return metrics for.\n        metrics_to_sort_by: dict[str, str | dict[str, str]], optional\n            An optional dict of metric types to sort the evaluations by.\n\n        Returns\n        -------\n        List[valor.Evaluation]\n            A list of evaluations.\n        \"\"\"\n        if isinstance(datasets, list):\n            datasets = [  # type: ignore\n                element.name if isinstance(element, Dataset) else element\n                for element in datasets\n            ]\n        if isinstance(models, list):\n            models = [  # type: ignore\n                element.name if isinstance(element, Model) else element\n                for element in models\n            ]\n        return [\n            Evaluation(connection=self.conn, **evaluation)\n            for evaluation in self.conn.get_evaluations(\n                evaluation_ids=evaluation_ids,\n                models=models,  # type: ignore\n                datasets=datasets,  # type: ignore\n                metrics_to_sort_by=metrics_to_sort_by,\n            )\n        ]\n\n    def evaluate(\n        self, request: EvaluationRequest, allow_retries: bool = False\n    ) -> List[Evaluation]:\n        \"\"\"\n        Creates as many evaluations as necessary to fulfill the request.\n\n        Parameters\n        ----------\n        request : schemas.EvaluationRequest\n            The requested evaluation parameters.\n        allow_retries : bool, default = False\n            Option to retry previously failed evaluations.\n\n        Returns\n        -------\n        List[Evaluation]\n            A list of evaluations that meet the parameters.\n        \"\"\"\n        return [\n            Evaluation(**evaluation)\n            for evaluation in self.conn.evaluate(\n                request, allow_retries=allow_retries\n            )\n        ]\n
"},{"location":"client_api/Client/#valor.Client-functions","title":"Functions","text":""},{"location":"client_api/Client/#valor.Client.connect","title":"valor.Client.connect(host, access_token=None, reconnect=False) classmethod","text":"

Establishes a connection to the Valor API.

Parameters:

Name Type Description Default host str

The host to connect to. Should start with \"http://\" or \"https://\".

required access_token str

The access token for the host (if the host requires authentication).

None Source code in valor/coretypes.py
@classmethod\ndef connect(\n    cls,\n    host: str,\n    access_token: Optional[str] = None,\n    reconnect: bool = False,\n) -> Client:\n    \"\"\"\n    Establishes a connection to the Valor API.\n\n    Parameters\n    ----------\n    host : str\n        The host to connect to. Should start with \"http://\" or \"https://\".\n    access_token : str\n        The access token for the host (if the host requires authentication).\n    \"\"\"\n    connect(host=host, access_token=access_token, reconnect=reconnect)\n    return cls(get_connection())\n
"},{"location":"client_api/Client/#valor.Client.create_dataset","title":"valor.Client.create_dataset(dataset)","text":"

Creates a dataset.

Parameters:

Name Type Description Default dataset Dataset

The dataset to create.

required Source code in valor/coretypes.py
def create_dataset(\n    self,\n    dataset: Union[Dataset, dict],\n) -> None:\n    \"\"\"\n    Creates a dataset.\n\n    Parameters\n    ----------\n    dataset : valor.Dataset\n        The dataset to create.\n    \"\"\"\n    if isinstance(dataset, Dataset):\n        dataset = dataset.encode_value()\n    self.conn.create_dataset(dataset)\n
"},{"location":"client_api/Client/#valor.Client.create_groundtruths","title":"valor.Client.create_groundtruths(dataset, groundtruths, ignore_existing_datums=False)","text":"

Creates ground truths.

Parameters:

Name Type Description Default dataset Dataset

The dataset to create the ground truth for.

required groundtruths List[GroundTruth]

The ground truths to create.

required ignore_existing_datums bool

If True, will ignore datums that already exist in the backend. If False, will raise an error if any datums already exist. Default is False.

False Source code in valor/coretypes.py
def create_groundtruths(\n    self,\n    dataset: Dataset,\n    groundtruths: List[GroundTruth],\n    ignore_existing_datums: bool = False,\n):\n    \"\"\"\n    Creates ground truths.\n\n    Parameters\n    ----------\n\n    dataset : valor.Dataset\n        The dataset to create the ground truth for.\n    groundtruths : List[valor.GroundTruth]\n        The ground truths to create.\n    ignore_existing_datums : bool, default=False\n        If True, will ignore datums that already exist in the backend.\n        If False, will raise an error if any datums already exist.\n        Default is False.\n    \"\"\"\n    groundtruths_json = []\n    for groundtruth in groundtruths:\n        if not isinstance(groundtruth, GroundTruth):\n            raise TypeError(\n                f\"Expected ground truth to be of type 'valor.GroundTruth' not '{type(groundtruth)}'.\"\n            )\n        if not isinstance(groundtruth.annotations._value, list):\n            raise TypeError\n        groundtruth_dict = groundtruth.encode_value()\n        groundtruth_dict[\"dataset_name\"] = dataset.name\n        groundtruths_json.append(groundtruth_dict)\n    self.conn.create_groundtruths(\n        groundtruths_json, ignore_existing_datums=ignore_existing_datums\n    )\n
"},{"location":"client_api/Client/#valor.Client.create_model","title":"valor.Client.create_model(model)","text":"

Creates a model.

Parameters:

Name Type Description Default model Model

The model to create.

required Source code in valor/coretypes.py
def create_model(\n    self,\n    model: Union[Model, dict],\n):\n    \"\"\"\n    Creates a model.\n\n    Parameters\n    ----------\n    model : valor.Model\n        The model to create.\n    \"\"\"\n    if isinstance(model, Model):\n        model = model.encode_value()\n    self.conn.create_model(model)\n
"},{"location":"client_api/Client/#valor.Client.create_predictions","title":"valor.Client.create_predictions(dataset, model, predictions)","text":"

Creates predictions.

Parameters:

Name Type Description Default dataset Dataset

The dataset that is being operated over.

required model Model

The model making the prediction.

required predictions List[Prediction]

The predictions to create.

required Source code in valor/coretypes.py
def create_predictions(\n    self,\n    dataset: Dataset,\n    model: Model,\n    predictions: List[Prediction],\n) -> None:\n    \"\"\"\n    Creates predictions.\n\n    Parameters\n    ----------\n    dataset : valor.Dataset\n        The dataset that is being operated over.\n    model : valor.Model\n        The model making the prediction.\n    predictions : List[valor.Prediction]\n        The predictions to create.\n    \"\"\"\n    predictions_json = []\n    for prediction in predictions:\n        if not isinstance(prediction, Prediction):\n            raise TypeError(\n                f\"Expected prediction to be of type 'valor.Prediction' not '{type(prediction)}'.\"\n            )\n        if not isinstance(prediction.annotations._value, list):\n            raise TypeError\n        prediction_dict = prediction.encode_value()\n        prediction_dict[\"dataset_name\"] = dataset.name\n        prediction_dict[\"model_name\"] = model.name\n        predictions_json.append(prediction_dict)\n    self.conn.create_predictions(predictions_json)\n
"},{"location":"client_api/Client/#valor.Client.delete_dataset","title":"valor.Client.delete_dataset(name, timeout=0)","text":"

Deletes a dataset.

Parameters:

Name Type Description Default name str

The name of the dataset to be deleted.

required timeout int

The number of seconds to wait in order to confirm that the dataset was deleted.

0 Source code in valor/coretypes.py
def delete_dataset(self, name: str, timeout: int = 0) -> None:\n    \"\"\"\n    Deletes a dataset.\n\n    Parameters\n    ----------\n    name : str\n        The name of the dataset to be deleted.\n    timeout : int\n        The number of seconds to wait in order to confirm that the dataset was deleted.\n    \"\"\"\n    self.conn.delete_dataset(name)\n    if timeout:\n        for _ in range(timeout):\n            try:\n                self.get_dataset(name)\n            except DatasetDoesNotExistError:\n                break\n            time.sleep(1)\n        else:\n            raise TimeoutError(\n                \"Dataset wasn't deleted within timeout interval\"\n            )\n
"},{"location":"client_api/Client/#valor.Client.delete_model","title":"valor.Client.delete_model(name, timeout=0)","text":"

Deletes a model.

Parameters:

Name Type Description Default name str

The name of the model to be deleted.

required timeout int

The number of seconds to wait in order to confirm that the model was deleted.

0 Source code in valor/coretypes.py
def delete_model(self, name: str, timeout: int = 0) -> None:\n    \"\"\"\n    Deletes a model.\n\n    Parameters\n    ----------\n    name : str\n        The name of the model to be deleted.\n    timeout : int\n        The number of seconds to wait in order to confirm that the model was deleted.\n    \"\"\"\n    self.conn.delete_model(name)\n    if timeout:\n        for _ in range(timeout):\n            try:\n                self.get_model(name)\n            except ModelDoesNotExistError:\n                break\n            time.sleep(1)\n        else:\n            raise TimeoutError(\n                \"Model wasn't deleted within timeout interval\"\n            )\n
"},{"location":"client_api/Client/#valor.Client.evaluate","title":"valor.Client.evaluate(request, allow_retries=False)","text":"

Creates as many evaluations as necessary to fulfill the request.

Parameters:

Name Type Description Default request EvaluationRequest

The requested evaluation parameters.

required allow_retries bool

Option to retry previously failed evaluations.

False

Returns:

Type Description List[Evaluation]

A list of evaluations that meet the parameters.

Source code in valor/coretypes.py
def evaluate(\n    self, request: EvaluationRequest, allow_retries: bool = False\n) -> List[Evaluation]:\n    \"\"\"\n    Creates as many evaluations as necessary to fulfill the request.\n\n    Parameters\n    ----------\n    request : schemas.EvaluationRequest\n        The requested evaluation parameters.\n    allow_retries : bool, default = False\n        Option to retry previously failed evaluations.\n\n    Returns\n    -------\n    List[Evaluation]\n        A list of evaluations that meet the parameters.\n    \"\"\"\n    return [\n        Evaluation(**evaluation)\n        for evaluation in self.conn.evaluate(\n            request, allow_retries=allow_retries\n        )\n    ]\n
"},{"location":"client_api/Client/#valor.Client.finalize_dataset","title":"valor.Client.finalize_dataset(dataset)","text":"

Finalizes a dataset such that new ground truths cannot be added to it.

Parameters:

Name Type Description Default dataset str

The dataset to be finalized.

required Source code in valor/coretypes.py
def finalize_dataset(self, dataset: Union[Dataset, str]) -> None:\n    \"\"\"\n    Finalizes a dataset such that new ground truths cannot be added to it.\n\n    Parameters\n    ----------\n    dataset : str\n        The dataset to be finalized.\n    \"\"\"\n    dataset_name = (\n        dataset.name if isinstance(dataset, Dataset) else dataset\n    )\n    return self.conn.finalize_dataset(name=dataset_name)  # type: ignore\n
"},{"location":"client_api/Client/#valor.Client.finalize_inferences","title":"valor.Client.finalize_inferences(dataset, model)","text":"

Finalizes a model-dataset pairing such that new predictions cannot be added to it.

Source code in valor/coretypes.py
def finalize_inferences(\n    self, dataset: Union[Dataset, str], model: Union[Model, str]\n) -> None:\n    \"\"\"\n    Finalizes a model-dataset pairing such that new predictions cannot be added to it.\n    \"\"\"\n    dataset_name = (\n        dataset.name if isinstance(dataset, Dataset) else dataset\n    )\n    model_name = model.name if isinstance(model, Model) else model\n    return self.conn.finalize_inferences(\n        dataset_name=dataset_name,  # type: ignore\n        model_name=model_name,  # type: ignore\n    )\n
"},{"location":"client_api/Client/#valor.Client.get_dataset","title":"valor.Client.get_dataset(name)","text":"

Gets a dataset by name.

Parameters:

Name Type Description Default name str

The name of the dataset to fetch.

required

Returns:

Type Description Union[Dataset, None]

A Dataset with a matching name, or 'None' if one doesn't exist.

Source code in valor/coretypes.py
def get_dataset(\n    self,\n    name: str,\n) -> Union[Dataset, None]:\n    \"\"\"\n    Gets a dataset by name.\n\n    Parameters\n    ----------\n    name : str\n        The name of the dataset to fetch.\n\n    Returns\n    -------\n    Union[Dataset, None]\n        A Dataset with a matching name, or 'None' if one doesn't exist.\n    \"\"\"\n    dataset = Dataset.decode_value(\n        {\n            **self.conn.get_dataset(name),\n            \"connection\": self.conn,\n        }\n    )\n    return dataset\n
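A brief sketch of the lookup, assuming a hypothetical host and dataset name:

>>> from valor import Client
>>> client = Client.connect("http://localhost:8000")
>>> dataset = client.get_dataset("dataset1")
>>> if dataset is None:
...     print("no dataset named 'dataset1'")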
"},{"location":"client_api/Client/#valor.Client.get_dataset_status","title":"valor.Client.get_dataset_status(name)","text":"

Get the state of a given dataset.

Parameters:

Name Type Description Default name str

The name of the dataset we want to fetch the state of.

required

Returns:

Type Description TableStatus | None

The state of the dataset, or 'None' if the dataset does not exist.

Source code in valor/coretypes.py
def get_dataset_status(\n    self,\n    name: str,\n) -> Union[TableStatus, None]:\n    \"\"\"\n    Get the state of a given dataset.\n\n    Parameters\n    ----------\n    name : str\n        The name of the dataset we want to fetch the state of.\n\n    Returns\n    ------\n    TableStatus | None\n        The state of the dataset, or 'None' if the dataset does not exist.\n    \"\"\"\n    try:\n        return self.conn.get_dataset_status(name)\n    except ClientException as e:\n        if e.status_code == 404:\n            return None\n        raise e\n
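For example (names are placeholders), a None return distinguishes a missing dataset from any valid TableStatus:

>>> from valor import Client
>>> client = Client.connect("http://localhost:8000")
>>> status = client.get_dataset_status("dataset1")
>>> status is None  # True only if the dataset does not exist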
"},{"location":"client_api/Client/#valor.Client.get_dataset_summary","title":"valor.Client.get_dataset_summary(name)","text":"

Gets the summary of a dataset.

Parameters:

Name Type Description Default name str

The name of the dataset to create a summary for.

required

Returns:

Type Description DatasetSummary

A dataclass containing the dataset summary.

Source code in valor/coretypes.py
def get_dataset_summary(self, name: str) -> DatasetSummary:\n    \"\"\"\n    Gets the summary of a dataset.\n\n    Parameters\n    ----------\n    name : str\n        The name of the dataset to create a summary for.\n\n    Returns\n    -------\n    DatasetSummary\n        A dataclass containing the dataset summary.\n    \"\"\"\n    return DatasetSummary(**self.conn.get_dataset_summary(name))\n
"},{"location":"client_api/Client/#valor.Client.get_datasets","title":"valor.Client.get_datasets(filter_by=None)","text":"

Get all datasets, with an option to filter results according to some user-defined parameters.

Parameters:

Name Type Description Default filter_by FilterType

Optional constraints to filter by.

None

Returns:

Type Description List[Dataset]

A list of datasets.

Source code in valor/coretypes.py
def get_datasets(\n    self,\n    filter_by: Optional[FilterType] = None,\n) -> List[Dataset]:\n    \"\"\"\n    Get all datasets, with an option to filter results according to some user-defined parameters.\n\n    Parameters\n    ----------\n    filter_by : FilterType, optional\n        Optional constraints to filter by.\n\n    Returns\n    ------\n    List[valor.Dataset]\n        A list of datasets.\n    \"\"\"\n    filters = _format_filter(filter_by)\n    if isinstance(filters, Filter):\n        filters = asdict(filters)\n    dataset_list = []\n    for kwargs in self.conn.get_datasets(filters):\n        dataset = Dataset.decode_value({**kwargs, \"connection\": self.conn})\n        dataset_list.append(dataset)\n    return dataset_list\n
"},{"location":"client_api/Client/#valor.Client.get_datum","title":"valor.Client.get_datum(dataset, uid)","text":"

Get datum. GET endpoint.

Parameters:

Name Type Description Default dataset Dataset

The dataset the datum belongs to.

required uid str

The UID of the datum.

required

Returns:

Type Description Datum

The requested datum or 'None' if it doesn't exist.

Source code in valor/coretypes.py
def get_datum(\n    self,\n    dataset: Union[Dataset, str],\n    uid: str,\n) -> Union[Datum, None]:\n    \"\"\"\n    Get datum.\n    `GET` endpoint.\n    Parameters\n    ----------\n    dataset : valor.Dataset\n        The dataset the datum belongs to.\n    uid : str\n        The UID of the datum.\n    Returns\n    -------\n    valor.Datum\n        The requested datum or 'None' if it doesn't exist.\n    \"\"\"\n    dataset_name = (\n        dataset.name if isinstance(dataset, Dataset) else dataset\n    )\n    resp = self.conn.get_datum(dataset_name=dataset_name, uid=uid)  # type: ignore\n    return Datum.decode_value(resp)\n
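A small sketch (the dataset name and UID are placeholders); either a Dataset object or its name is accepted:

>>> from valor import Client
>>> client = Client.connect("http://localhost:8000")
>>> datum = client.get_datum(dataset="dataset1", uid="uid1")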
"},{"location":"client_api/Client/#valor.Client.get_datums","title":"valor.Client.get_datums(filter_by=None)","text":"

Get all datums using an optional filter.

Parameters:

Name Type Description Default filter_by FilterType

Optional constraints to filter by.

None

Returns:

Type Description List[Datum]

A list of datums.

Source code in valor/coretypes.py
def get_datums(\n    self,\n    filter_by: Optional[FilterType] = None,\n) -> List[Datum]:\n    \"\"\"\n    Get all datums using an optional filter.\n\n    Parameters\n    ----------\n    filter_by : FilterType, optional\n        Optional constraints to filter by.\n\n    Returns\n    -------\n    List[valor.Datum]\n        A list of datums.\n    \"\"\"\n    filters = _format_filter(filter_by)\n    if isinstance(filters, Filter):\n        filters = asdict(filters)\n    return [\n        Datum.decode_value(datum)\n        for datum in self.conn.get_datums(filters)\n    ]\n
"},{"location":"client_api/Client/#valor.Client.get_evaluations","title":"valor.Client.get_evaluations(*, evaluation_ids=None, models=None, datasets=None, metrics_to_sort_by=None)","text":"

Returns all evaluations associated with user-supplied dataset and/or model names.

Parameters:

Name Type Description Default evaluation_ids List[int], optional.

A list of job IDs to return metrics for.

None models Union[List[Model], List[str]]

A list of model names that we want to return metrics for.

None datasets Union[List[Dataset], List[str]]

A list of dataset names that we want to return metrics for.

None metrics_to_sort_by Optional[Dict[str, Union[Dict[str, str], str]]]

An optional dict of metric types to sort the evaluations by.

None

Returns:

Type Description List[Evaluation]

A list of evaluations.

Source code in valor/coretypes.py
def get_evaluations(\n    self,\n    *,\n    evaluation_ids: Optional[List[int]] = None,\n    models: Union[List[Model], List[str], None] = None,\n    datasets: Union[List[Dataset], List[str], None] = None,\n    metrics_to_sort_by: Optional[\n        Dict[str, Union[Dict[str, str], str]]\n    ] = None,\n) -> List[Evaluation]:\n    \"\"\"\n    Returns all evaluations associated with user-supplied dataset and/or model names.\n\n    Parameters\n    ----------\n    evaluation_ids : List[int], optional.\n        A list of job IDs to return metrics for.\n    models : Union[List[valor.Model], List[str]], optional\n        A list of model names that we want to return metrics for.\n    datasets : Union[List[valor.Dataset], List[str]], optional\n        A list of dataset names that we want to return metrics for.\n    metrics_to_sort_by: dict[str, str | dict[str, str]], optional\n        An optional dict of metric types to sort the evaluations by.\n\n    Returns\n    -------\n    List[valor.Evaluation]\n        A list of evaluations.\n    \"\"\"\n    if isinstance(datasets, list):\n        datasets = [  # type: ignore\n            element.name if isinstance(element, Dataset) else element\n            for element in datasets\n        ]\n    if isinstance(models, list):\n        models = [  # type: ignore\n            element.name if isinstance(element, Model) else element\n            for element in models\n        ]\n    return [\n        Evaluation(connection=self.conn, **evaluation)\n        for evaluation in self.conn.get_evaluations(\n            evaluation_ids=evaluation_ids,\n            models=models,  # type: ignore\n            datasets=datasets,  # type: ignore\n            metrics_to_sort_by=metrics_to_sort_by,\n        )\n    ]\n
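One possible call pattern, shown with made-up names; models and datasets may be passed as objects or as name strings:

>>> from valor import Client
>>> client = Client.connect("http://localhost:8000")
>>> evaluations = client.get_evaluations(datasets=["dataset1"], models=["model1"])
>>> [(e.id, e.status) for e in evaluations]  # inspect what came back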
"},{"location":"client_api/Client/#valor.Client.get_groundtruth","title":"valor.Client.get_groundtruth(dataset, datum)","text":"

Get a particular ground truth.

Parameters:

Name Type Description Default dataset Union[Dataset, str]

The dataset the datum belongs to.

required datum Union[Datum, str]

The desired datum.

required

Returns:

Type Description Union[GroundTruth, None]

The matching ground truth or 'None' if it doesn't exist.

Source code in valor/coretypes.py
def get_groundtruth(\n    self,\n    dataset: Union[Dataset, str],\n    datum: Union[Datum, str],\n) -> Union[GroundTruth, None]:\n    \"\"\"\n    Get a particular ground truth.\n\n    Parameters\n    ----------\n    dataset: Union[Dataset, str]\n        The dataset the datum belongs to.\n    datum: Union[Datum, str]\n        The desired datum.\n\n    Returns\n    ----------\n    Union[GroundTruth, None]\n        The matching ground truth or 'None' if it doesn't exist.\n    \"\"\"\n    dataset_name = (\n        dataset.name if isinstance(dataset, Dataset) else dataset\n    )\n    datum_uid = datum.uid if isinstance(datum, Datum) else datum\n    try:\n        resp = self.conn.get_groundtruth(\n            dataset_name=dataset_name, datum_uid=datum_uid  # type: ignore\n        )\n        resp.pop(\"dataset_name\")\n        return GroundTruth.decode_value(resp)\n    except ClientException as e:\n        if e.status_code == 404:\n            return None\n        raise e\n
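Sketched usage with placeholder identifiers; a None return means no matching ground truth exists:

>>> from valor import Client
>>> client = Client.connect("http://localhost:8000")
>>> gt = client.get_groundtruth(dataset="dataset1", datum="uid1")
>>> gt is None  # True if the datum has no ground truth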
"},{"location":"client_api/Client/#valor.Client.get_labels","title":"valor.Client.get_labels(filter_by=None)","text":"

Gets all labels using an optional filter.

Parameters:

Name Type Description Default filter_by FilterType

Optional constraints to filter by.

None

Returns:

Type Description List[Label]

A list of labels.

Source code in valor/coretypes.py
def get_labels(\n    self,\n    filter_by: Optional[FilterType] = None,\n) -> List[Label]:\n    \"\"\"\n    Gets all labels using an optional filter.\n\n    Parameters\n    ----------\n    filter_by : FilterType, optional\n        Optional constraints to filter by.\n\n    Returns\n    ------\n    List[valor.Label]\n        A list of labels.\n    \"\"\"\n    filters = _format_filter(filter_by)\n    filters = asdict(filters)\n    return [Label(**label) for label in self.conn.get_labels(filters)]\n
"},{"location":"client_api/Client/#valor.Client.get_labels_from_dataset","title":"valor.Client.get_labels_from_dataset(dataset)","text":"

Get all labels associated with a dataset's ground truths.

Parameters:

Name Type Description Default dataset Dataset

The dataset to search by.

required

Returns:

Type Description List[Label]

A list of labels.

Source code in valor/coretypes.py
def get_labels_from_dataset(\n    self, dataset: Union[Dataset, str]\n) -> List[Label]:\n    \"\"\"\n    Get all labels associated with a dataset's ground truths.\n\n    Parameters\n    ----------\n    dataset : valor.Dataset\n        The dataset to search by.\n\n    Returns\n    ------\n    List[valor.Label]\n        A list of labels.\n    \"\"\"\n    dataset_name = (\n        dataset.name if isinstance(dataset, Dataset) else dataset\n    )\n    return [\n        Label(**label)\n        for label in self.conn.get_labels_from_dataset(dataset_name)  # type: ignore\n    ]\n
"},{"location":"client_api/Client/#valor.Client.get_labels_from_model","title":"valor.Client.get_labels_from_model(model)","text":"

Get all labels associated with a model's predictions.

Parameters:

Name Type Description Default model Model

The model to search by.

required

Returns:

Type Description List[Label]

A list of labels.

Source code in valor/coretypes.py
def get_labels_from_model(self, model: Union[Model, str]) -> List[Label]:\n    \"\"\"\n    Get all labels associated with a model's predictions.\n\n    Parameters\n    ----------\n    model : valor.Model\n        The model to search by.\n\n    Returns\n    ------\n    List[valor.Label]\n        A list of labels.\n    \"\"\"\n    model_name = model.name if isinstance(model, Model) else model\n    return [\n        Label(**label)\n        for label in self.conn.get_labels_from_model(model_name)  # type: ignore\n    ]\n
"},{"location":"client_api/Client/#valor.Client.get_model","title":"valor.Client.get_model(name)","text":"

Gets a model by name.

Parameters:

Name Type Description Default name str

The name of the model to fetch.

required

Returns:

Type Description Union[Model, None]

A Model with a matching name, or 'None' if one doesn't exist.

Source code in valor/coretypes.py
def get_model(\n    self,\n    name: str,\n) -> Union[Model, None]:\n    \"\"\"\n    Gets a model by name.\n\n    Parameters\n    ----------\n    name : str\n        The name of the model to fetch.\n\n    Returns\n    -------\n    Union[valor.Model, None]\n        A Model with matching name or 'None' if one doesn't exist.\n    \"\"\"\n    return Model.decode_value(\n        {\n            **self.conn.get_model(name),\n            \"connection\": self.conn,\n        }\n    )\n
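For instance (the model name is hypothetical):

>>> from valor import Client
>>> client = Client.connect("http://localhost:8000")
>>> model = client.get_model("model1")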
"},{"location":"client_api/Client/#valor.Client.get_model_eval_requests","title":"valor.Client.get_model_eval_requests(model)","text":"

Get all evaluations that have been created for a model.

This does not return evaluation results.

GET endpoint.

Parameters:

Name Type Description Default model str

The model to search by.

required

Returns:

Type Description List[Evaluation]

A list of evaluations.

Source code in valor/coretypes.py
def get_model_eval_requests(\n    self, model: Union[Model, str]\n) -> List[Evaluation]:\n    \"\"\"\n    Get all evaluations that have been created for a model.\n\n    This does not return evaluation results.\n\n    `GET` endpoint.\n\n    Parameters\n    ----------\n    model : str\n        The model to search by.\n\n    Returns\n    -------\n    List[Evaluation]\n        A list of evaluations.\n    \"\"\"\n    model_name = model.name if isinstance(model, Model) else model\n    return [\n        Evaluation(**evaluation, connection=self.conn)\n        for evaluation in self.conn.get_model_eval_requests(model_name)  # type: ignore\n    ]\n
"},{"location":"client_api/Client/#valor.Client.get_model_status","title":"valor.Client.get_model_status(dataset_name, model_name)","text":"

Get the state of a given model over a dataset.

Parameters:

Name Type Description Default dataset_name str

The name of the dataset that the model is operating over.

required model_name str

The name of the model we want to fetch the state of.

required

Returns:

Type Description Union[TableStatus, None]

The state of the model or 'None' if the model doesn't exist.

Source code in valor/coretypes.py
def get_model_status(\n    self,\n    dataset_name: str,\n    model_name: str,\n) -> Optional[TableStatus]:\n    \"\"\"\n    Get the state of a given model over a dataset.\n\n    Parameters\n    ----------\n    dataset_name : str\n        The name of the dataset that the model is operating over.\n    model_name : str\n        The name of the model we want to fetch the state of.\n\n    Returns\n    ------\n    Union[TableStatus, None]\n        The state of the model or 'None' if the model doesn't exist.\n    \"\"\"\n    try:\n        return self.conn.get_model_status(dataset_name, model_name)\n    except ClientException as e:\n        if e.status_code == 404:\n            return None\n        raise e\n
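A quick sketch with placeholder names:

>>> from valor import Client
>>> client = Client.connect("http://localhost:8000")
>>> status = client.get_model_status(dataset_name="dataset1", model_name="model1")
>>> status is None  # True if the model doesn't exist over that dataset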
"},{"location":"client_api/Client/#valor.Client.get_models","title":"valor.Client.get_models(filter_by=None)","text":"

Get all models using an optional filter.

Parameters:

Name Type Description Default filter_by FilterType

Optional constraints to filter by.

None

Returns:

Type Description List[Model]

A list of models.

Source code in valor/coretypes.py
def get_models(\n    self,\n    filter_by: Optional[FilterType] = None,\n) -> List[Model]:\n    \"\"\"\n    Get all models using an optional filter.\n\n    Parameters\n    ----------\n    filter_by : FilterType, optional\n        Optional constraints to filter by.\n\n    Returns\n    ------\n    List[valor.Model]\n        A list of models.\n    \"\"\"\n    filters = _format_filter(filter_by)\n    if isinstance(filters, Filter):\n        filters = asdict(filters)\n    model_list = []\n    for kwargs in self.conn.get_models(filters):\n        model = Model.decode_value({**kwargs, \"connection\": self.conn})\n        model_list.append(model)\n    return model_list\n
"},{"location":"client_api/Client/#valor.Client.get_prediction","title":"valor.Client.get_prediction(dataset, model, datum)","text":"

Get a particular prediction.

Parameters:

Name Type Description Default dataset Union[Dataset, str]

The dataset the datum belongs to.

required model Union[Model, str]

The model that made the prediction.

required datum Union[Datum, str]

The desired datum.

required

Returns:

Type Description Union[Prediction, None]

The matching prediction or 'None' if it doesn't exist.

Source code in valor/coretypes.py
def get_prediction(\n    self,\n    dataset: Union[Dataset, str],\n    model: Union[Model, str],\n    datum: Union[Datum, str],\n) -> Union[Prediction, None]:\n    \"\"\"\n    Get a particular prediction.\n\n    Parameters\n    ----------\n    dataset: Union[Dataset, str]\n        The dataset the datum belongs to.\n    model: Union[Model, str]\n        The model that made the prediction.\n    datum: Union[Datum, str]\n        The desired datum.\n\n    Returns\n    ----------\n    Union[Prediction, None]\n        The matching prediction or 'None' if it doesn't exist.\n    \"\"\"\n    dataset_name = (\n        dataset.name if isinstance(dataset, Dataset) else dataset\n    )\n    model_name = model.name if isinstance(model, Model) else model\n    datum_uid = datum.uid if isinstance(datum, Datum) else datum\n\n    resp = self.conn.get_prediction(\n        dataset_name=dataset_name,  # type: ignore\n        model_name=model_name,  # type: ignore\n        datum_uid=datum_uid,  # type: ignore\n    )\n    resp.pop(\"dataset_name\")\n    resp.pop(\"model_name\")\n    return Prediction.decode_value(resp)\n
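Illustrative usage (all three identifiers are placeholders):

>>> from valor import Client
>>> client = Client.connect("http://localhost:8000")
>>> pred = client.get_prediction(dataset="dataset1", model="model1", datum="uid1")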
"},{"location":"client_api/Dataset/","title":"Dataset","text":"

Bases: StaticCollection

A class describing a given dataset.

Attributes:

Name Type Description name String

The name of the dataset.

metadata Dictionary

A dictionary of metadata that describes the dataset.

Examples:

>>> Dataset.create(name=\"dataset1\")\n>>> Dataset.create(name=\"dataset1\", metadata={})\n>>> Dataset.create(name=\"dataset1\", metadata={\"foo\": \"bar\", \"pi\": 3.14})\n
Source code in valor/coretypes.py
class Dataset(StaticCollection):\n    \"\"\"\n    A class describing a given dataset.\n\n    Attributes\n    ----------\n    name : String\n        The name of the dataset.\n    metadata : Dictionary\n        A dictionary of metadata that describes the dataset.\n\n    Examples\n    --------\n    >>> Dataset.create(name=\"dataset1\")\n    >>> Dataset.create(name=\"dataset1\", metadata={})\n    >>> Dataset.create(name=\"dataset1\", metadata={\"foo\": \"bar\", \"pi\": 3.14})\n    \"\"\"\n\n    name: String = String.symbolic(owner=\"dataset\", name=\"name\")\n    metadata: Dictionary = Dictionary.symbolic(\n        owner=\"dataset\", name=\"metadata\"\n    )\n\n    def __init__(\n        self,\n        *,\n        name: str,\n        metadata: Optional[dict] = None,\n        connection: Optional[ClientConnection] = None,\n    ):\n        \"\"\"\n        Creates a local instance of a dataset.\n\n        Use 'Dataset.create' classmethod to create a dataset with persistence.\n\n        Parameters\n        ----------\n        name : str\n            The name of the dataset.\n        metadata : dict, optional\n            A dictionary of metadata that describes the dataset.\n        connection : ClientConnection, optional\n            An initialized client connection.\n        \"\"\"\n        self.conn = connection\n        super().__init__(name=name, metadata=metadata if metadata else dict())\n\n    @classmethod\n    def create(\n        cls,\n        name: str,\n        metadata: Optional[Dict[str, Any]] = None,\n        connection: Optional[ClientConnection] = None,\n    ) -> Dataset:\n        \"\"\"\n        Creates a dataset that persists in the back end.\n\n        Parameters\n        ----------\n        name : str\n            The name of the dataset.\n        metadata : dict, optional\n            A dictionary of metadata that describes the dataset.\n        connection : ClientConnection, optional\n            An initialized client connection.\n        \"\"\"\n        dataset = cls(name=name, metadata=metadata, connection=connection)\n        Client(dataset.conn).create_dataset(dataset)\n        return dataset\n\n    @classmethod\n    def get(\n        cls,\n        name: str,\n        connection: Optional[ClientConnection] = None,\n    ) -> Union[Dataset, None]:\n        \"\"\"\n        Retrieves a dataset from the back end database.\n\n        Parameters\n        ----------\n        name : str\n            The name of the dataset.\n\n        Returns\n        -------\n        Union[valor.Dataset, None]\n            The dataset or 'None' if it doesn't exist.\n        \"\"\"\n        return Client(connection).get_dataset(name)\n\n    def add_groundtruth(\n        self,\n        groundtruth: GroundTruth,\n    ) -> None:\n        \"\"\"\n        Add a ground truth to the dataset.\n\n        Parameters\n        ----------\n        groundtruth : GroundTruth\n            The ground truth to create.\n        \"\"\"\n        Client(self.conn).create_groundtruths(\n            dataset=self,\n            groundtruths=[groundtruth],\n        )\n\n    def add_groundtruths(\n        self,\n        groundtruths: List[GroundTruth],\n        ignore_existing_datums: bool = False,\n    ) -> None:\n        \"\"\"\n        Add multiple ground truths to the dataset.\n\n        Parameters\n        ----------\n        groundtruths : List[GroundTruth]\n            The ground truths to create.\n        ignore_existing_datums : bool, default=False\n            If True, will ignore datums that already exist in the 
backend.\n            If False, will raise an error if any datums already exist.\n            Default is False.\n        \"\"\"\n        Client(self.conn).create_groundtruths(\n            dataset=self,\n            groundtruths=groundtruths,\n            ignore_existing_datums=ignore_existing_datums,\n        )\n\n    def get_groundtruth(\n        self,\n        datum: Union[Datum, str],\n    ) -> Union[GroundTruth, None]:\n        \"\"\"\n        Get a particular ground truth.\n\n        Parameters\n        ----------\n        datum: Union[Datum, str]\n            The desired datum.\n\n        Returns\n        ----------\n        Union[GroundTruth, None]\n            The matching ground truth or 'None' if it doesn't exist.\n        \"\"\"\n        return Client(self.conn).get_groundtruth(dataset=self, datum=datum)\n\n    def get_labels(\n        self,\n    ) -> List[Label]:\n        \"\"\"\n        Get all labels associated with a given dataset.\n\n        Returns\n        ----------\n        List[Label]\n            A list of `Labels` associated with the dataset.\n        \"\"\"\n        return Client(self.conn).get_labels_from_dataset(self)\n\n    def get_datums(\n        self, filter_by: Optional[FilterType] = None\n    ) -> List[Datum]:\n        \"\"\"\n        Get all datums associated with a given dataset.\n\n        Parameters\n        ----------\n        filter_by\n            Optional constraints to filter by.\n\n        Returns\n        ----------\n        List[Datum]\n            A list of `Datums` associated with the dataset.\n        \"\"\"\n        filters = _format_filter(filter_by)\n        if isinstance(filters, Filter):\n            filters = asdict(filters)\n\n        if filters.get(\"dataset_names\"):\n            raise ValueError(\n                \"Cannot filter by dataset_names when calling `Dataset.get_datums`.\"\n            )\n        filters[\"dataset_names\"] = [self.name]  # type: ignore\n        return Client(self.conn).get_datums(filter_by=filters)\n\n    def get_evaluations(\n        self,\n        metrics_to_sort_by: Optional[\n            Dict[str, Union[Dict[str, str], str]]\n        ] = None,\n    ) -> List[Evaluation]:\n        \"\"\"\n        Get all evaluations associated with a given dataset.\n\n        Parameters\n        ----------\n        metrics_to_sort_by: dict[str, str | dict[str, str]], optional\n            An optional dict of metric types to sort the evaluations by.\n\n        Returns\n        ----------\n        List[Evaluation]\n            A list of `Evaluations` associated with the dataset.\n        \"\"\"\n        return Client(self.conn).get_evaluations(\n            datasets=[self], metrics_to_sort_by=metrics_to_sort_by\n        )\n\n    def get_summary(self) -> DatasetSummary:\n        \"\"\"\n        Get the summary of a given dataset.\n\n        Returns\n        -------\n        DatasetSummary\n            The summary of the dataset. 
This class has the following fields:\n\n            name: name of the dataset\n\n            num_datums: total number of datums in the dataset\n\n            num_annotations: total number of labeled annotations in the dataset; if an\n            object (such as a bounding box) has multiple labels, then each label is counted separately\n\n            num_bounding_boxes: total number of bounding boxes in the dataset\n\n            num_polygons: total number of polygons in the dataset\n\n            num_rasters: total number of rasters in the dataset\n\n            labels: list of the unique labels in the dataset\n\n            datum_metadata: list of the unique metadata dictionaries in the dataset that are associated\n            to datums\n\n            groundtruth_annotation_metadata: list of the unique metadata dictionaries in the dataset that are\n            associated to annotations\n        \"\"\"\n        return Client(self.conn).get_dataset_summary(self.name)  # type: ignore\n\n    def finalize(\n        self,\n    ):\n        \"\"\"\n        Finalizes the dataset such that new ground truths cannot be added to it.\n        \"\"\"\n        return Client(self.conn).finalize_dataset(self)\n\n    def delete(\n        self,\n        timeout: int = 0,\n    ):\n        \"\"\"\n        Delete the dataset from the back end.\n\n        Parameters\n        ----------\n        timeout : int, default=0\n            Sets a timeout in seconds.\n        \"\"\"\n        Client(self.conn).delete_dataset(self.name, timeout)  # type: ignore\n
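Putting the pieces together, a hedged end-to-end sketch; the host and all names are hypothetical, and the imports are assumed to be re-exported from the package root:

>>> from valor import Annotation, Client, Dataset, Datum, GroundTruth, Label
>>> client = Client.connect("http://localhost:8000")
>>> dataset = Dataset.create(name="dataset1")
>>> dataset.add_groundtruth(
...     GroundTruth(
...         datum=Datum(uid="uid1"),
...         annotations=[Annotation(labels=[Label(key="k1", value="v1")])],
...     )
... )
>>> dataset.finalize()  # no further ground truths can be added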
"},{"location":"client_api/Dataset/#valor.Dataset-functions","title":"Functions","text":""},{"location":"client_api/Dataset/#valor.Dataset.__init__","title":"valor.Dataset.__init__(*, name, metadata=None, connection=None)","text":"

Creates a local instance of a dataset.

Use 'Dataset.create' classmethod to create a dataset with persistence.

Parameters:

Name Type Description Default name str

The name of the dataset.

required metadata dict

A dictionary of metadata that describes the dataset.

None connection ClientConnection

An initialized client connection.

None Source code in valor/coretypes.py
def __init__(\n    self,\n    *,\n    name: str,\n    metadata: Optional[dict] = None,\n    connection: Optional[ClientConnection] = None,\n):\n    \"\"\"\n    Creates a local instance of a dataset.\n\n    Use 'Dataset.create' classmethod to create a dataset with persistence.\n\n    Parameters\n    ----------\n    name : str\n        The name of the dataset.\n    metadata : dict, optional\n        A dictionary of metadata that describes the dataset.\n    connection : ClientConnection, optional\n        An initialized client connection.\n    \"\"\"\n    self.conn = connection\n    super().__init__(name=name, metadata=metadata if metadata else dict())\n
"},{"location":"client_api/Dataset/#valor.Dataset.add_groundtruth","title":"valor.Dataset.add_groundtruth(groundtruth)","text":"

Add a ground truth to the dataset.

Parameters:

Name Type Description Default groundtruth GroundTruth

The ground truth to create.

required Source code in valor/coretypes.py
def add_groundtruth(\n    self,\n    groundtruth: GroundTruth,\n) -> None:\n    \"\"\"\n    Add a ground truth to the dataset.\n\n    Parameters\n    ----------\n    groundtruth : GroundTruth\n        The ground truth to create.\n    \"\"\"\n    Client(self.conn).create_groundtruths(\n        dataset=self,\n        groundtruths=[groundtruth],\n    )\n
"},{"location":"client_api/Dataset/#valor.Dataset.add_groundtruths","title":"valor.Dataset.add_groundtruths(groundtruths, ignore_existing_datums=False)","text":"

Add multiple ground truths to the dataset.

Parameters:

Name Type Description Default groundtruths List[GroundTruth]

The ground truths to create.

required ignore_existing_datums bool

If True, will ignore datums that already exist in the backend. If False, will raise an error if any datums already exist. Default is False.

False Source code in valor/coretypes.py
def add_groundtruths(\n    self,\n    groundtruths: List[GroundTruth],\n    ignore_existing_datums: bool = False,\n) -> None:\n    \"\"\"\n    Add multiple ground truths to the dataset.\n\n    Parameters\n    ----------\n    groundtruths : List[GroundTruth]\n        The ground truths to create.\n    ignore_existing_datums : bool, default=False\n        If True, will ignore datums that already exist in the backend.\n        If False, will raise an error if any datums already exist.\n        Default is False.\n    \"\"\"\n    Client(self.conn).create_groundtruths(\n        dataset=self,\n        groundtruths=groundtruths,\n        ignore_existing_datums=ignore_existing_datums,\n    )\n
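A sketch of a bulk insert, assuming `dataset` is an existing Dataset (e.g., from the workflow above) and that the imports live at the package root; ignore_existing_datums skips duplicates rather than raising:

>>> from valor import Annotation, Datum, GroundTruth, Label
>>> gts = [
...     GroundTruth(
...         datum=Datum(uid=f"uid{i}"),
...         annotations=[Annotation(labels=[Label(key="k1", value="v1")])],
...     )
...     for i in range(3)
... ]
>>> dataset.add_groundtruths(gts, ignore_existing_datums=True)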
"},{"location":"client_api/Dataset/#valor.Dataset.create","title":"valor.Dataset.create(name, metadata=None, connection=None) classmethod","text":"

Creates a dataset that persists in the back end.

Parameters:

Name Type Description Default name str

The name of the dataset.

required metadata dict

A dictionary of metadata that describes the dataset.

None connection ClientConnection

An initialized client connection.

None Source code in valor/coretypes.py
@classmethod\ndef create(\n    cls,\n    name: str,\n    metadata: Optional[Dict[str, Any]] = None,\n    connection: Optional[ClientConnection] = None,\n) -> Dataset:\n    \"\"\"\n    Creates a dataset that persists in the back end.\n\n    Parameters\n    ----------\n    name : str\n        The name of the dataset.\n    metadata : dict, optional\n        A dictionary of metadata that describes the dataset.\n    connection : ClientConnection, optional\n        An initialized client connection.\n    \"\"\"\n    dataset = cls(name=name, metadata=metadata, connection=connection)\n    Client(dataset.conn).create_dataset(dataset)\n    return dataset\n
"},{"location":"client_api/Dataset/#valor.Dataset.delete","title":"valor.Dataset.delete(timeout=0)","text":"

Delete the dataset from the back end.

Parameters:

Name Type Description Default timeout int

Sets a timeout in seconds.

0 Source code in valor/coretypes.py
def delete(\n    self,\n    timeout: int = 0,\n):\n    \"\"\"\n    Delete the dataset from the back end.\n\n    Parameters\n    ----------\n    timeout : int, default=0\n        Sets a timeout in seconds.\n    \"\"\"\n    Client(self.conn).delete_dataset(self.name, timeout)  # type: ignore\n
"},{"location":"client_api/Dataset/#valor.Dataset.finalize","title":"valor.Dataset.finalize()","text":"

Finalizes the dataset such that new ground truths cannot be added to it.

Source code in valor/coretypes.py
def finalize(\n    self,\n):\n    \"\"\"\n    Finalizes the dataset such that new ground truths cannot be added to it.\n    \"\"\"\n    return Client(self.conn).finalize_dataset(self)\n
"},{"location":"client_api/Dataset/#valor.Dataset.get","title":"valor.Dataset.get(name, connection=None) classmethod","text":"

Retrieves a dataset from the back end database.

Parameters:

Name Type Description Default name str

The name of the dataset.

required

Returns:

Type Description Union[Dataset, None]

The dataset or 'None' if it doesn't exist.

Source code in valor/coretypes.py
@classmethod\ndef get(\n    cls,\n    name: str,\n    connection: Optional[ClientConnection] = None,\n) -> Union[Dataset, None]:\n    \"\"\"\n    Retrieves a dataset from the back end database.\n\n    Parameters\n    ----------\n    name : str\n        The name of the dataset.\n\n    Returns\n    -------\n    Union[valor.Dataset, None]\n        The dataset or 'None' if it doesn't exist.\n    \"\"\"\n    return Client(connection).get_dataset(name)\n
"},{"location":"client_api/Dataset/#valor.Dataset.get_datums","title":"valor.Dataset.get_datums(filter_by=None)","text":"

Get all datums associated with a given dataset.

Parameters:

Name Type Description Default filter_by Optional[FilterType]

Optional constraints to filter by.

None

Returns:

Type Description List[Datum]

A list of Datums associated with the dataset.

Source code in valor/coretypes.py
def get_datums(\n    self, filter_by: Optional[FilterType] = None\n) -> List[Datum]:\n    \"\"\"\n    Get all datums associated with a given dataset.\n\n    Parameters\n    ----------\n    filter_by\n        Optional constraints to filter by.\n\n    Returns\n    ----------\n    List[Datum]\n        A list of `Datums` associated with the dataset.\n    \"\"\"\n    filters = _format_filter(filter_by)\n    if isinstance(filters, Filter):\n        filters = asdict(filters)\n\n    if filters.get(\"dataset_names\"):\n        raise ValueError(\n            \"Cannot filter by dataset_names when calling `Dataset.get_datums`.\"\n        )\n    filters[\"dataset_names\"] = [self.name]  # type: ignore\n    return Client(self.conn).get_datums(filter_by=filters)\n
"},{"location":"client_api/Dataset/#valor.Dataset.get_evaluations","title":"valor.Dataset.get_evaluations(metrics_to_sort_by=None)","text":"

Get all evaluations associated with a given dataset.

Parameters:

Name Type Description Default metrics_to_sort_by Optional[Dict[str, Union[Dict[str, str], str]]]

An optional dict of metric types to sort the evaluations by.

None

Returns:

Type Description List[Evaluation]

A list of Evaluations associated with the dataset.

Source code in valor/coretypes.py
def get_evaluations(\n    self,\n    metrics_to_sort_by: Optional[\n        Dict[str, Union[Dict[str, str], str]]\n    ] = None,\n) -> List[Evaluation]:\n    \"\"\"\n    Get all evaluations associated with a given dataset.\n\n    Parameters\n    ----------\n    metrics_to_sort_by: dict[str, str | dict[str, str]], optional\n        An optional dict of metric types to sort the evaluations by.\n\n    Returns\n    ----------\n    List[Evaluation]\n        A list of `Evaluations` associated with the dataset.\n    \"\"\"\n    return Client(self.conn).get_evaluations(\n        datasets=[self], metrics_to_sort_by=metrics_to_sort_by\n    )\n
"},{"location":"client_api/Dataset/#valor.Dataset.get_groundtruth","title":"valor.Dataset.get_groundtruth(datum)","text":"

Get a particular ground truth.

Parameters:

Name Type Description Default datum Union[Datum, str]

The desired datum.

required

Returns:

Type Description Union[GroundTruth, None]

The matching ground truth or 'None' if it doesn't exist.

Source code in valor/coretypes.py
def get_groundtruth(\n    self,\n    datum: Union[Datum, str],\n) -> Union[GroundTruth, None]:\n    \"\"\"\n    Get a particular ground truth.\n\n    Parameters\n    ----------\n    datum: Union[Datum, str]\n        The desired datum.\n\n    Returns\n    ----------\n    Union[GroundTruth, None]\n        The matching ground truth or 'None' if it doesn't exist.\n    \"\"\"\n    return Client(self.conn).get_groundtruth(dataset=self, datum=datum)\n
"},{"location":"client_api/Dataset/#valor.Dataset.get_labels","title":"valor.Dataset.get_labels()","text":"

Get all labels associated with a given dataset.

Returns:

Type Description List[Label]

A list of Labels associated with the dataset.

Source code in valor/coretypes.py
def get_labels(\n    self,\n) -> List[Label]:\n    \"\"\"\n    Get all labels associated with a given dataset.\n\n    Returns\n    ----------\n    List[Label]\n        A list of `Labels` associated with the dataset.\n    \"\"\"\n    return Client(self.conn).get_labels_from_dataset(self)\n
"},{"location":"client_api/Dataset/#valor.Dataset.get_summary","title":"valor.Dataset.get_summary()","text":"

Get the summary of a given dataset.

Returns:

Type Description DatasetSummary

The summary of the dataset. This class has the following fields:

name: name of the dataset

num_datums: total number of datums in the dataset

num_annotations: total number of labeled annotations in the dataset; if an object (such as a bounding box) has multiple labels, then each label is counted separately

num_bounding_boxes: total number of bounding boxes in the dataset

num_polygons: total number of polygons in the dataset

num_rasters: total number of rasters in the dataset

labels: list of the unique labels in the dataset

datum_metadata: list of the unique metadata dictionaries in the dataset that are associated to datums

groundtruth_annotation_metadata: list of the unique metadata dictionaries in the dataset that are associated to annotations

Source code in valor/coretypes.py
def get_summary(self) -> DatasetSummary:\n    \"\"\"\n    Get the summary of a given dataset.\n\n    Returns\n    -------\n    DatasetSummary\n        The summary of the dataset. This class has the following fields:\n\n        name: name of the dataset\n\n        num_datums: total number of datums in the dataset\n\n        num_annotations: total number of labeled annotations in the dataset; if an\n        object (such as a bounding box) has multiple labels, then each label is counted separately\n\n        num_bounding_boxes: total number of bounding boxes in the dataset\n\n        num_polygons: total number of polygons in the dataset\n\n        num_rasters: total number of rasters in the dataset\n\n        labels: list of the unique labels in the dataset\n\n        datum_metadata: list of the unique metadata dictionaries in the dataset that are associated\n        to datums\n\n        groundtruth_annotation_metadata: list of the unique metadata dictionaries in the dataset that are\n        associated to annotations\n    \"\"\"\n    return Client(self.conn).get_dataset_summary(self.name)  # type: ignore\n
"},{"location":"client_api/Datum/","title":"Datum","text":"

Bases: StaticCollection

A class used to store information about a datum for either a 'GroundTruth' or a 'Prediction'.

Attributes:

Name Type Description uid String

The UID of the datum.

metadata Dictionary

A dictionary of metadata that describes the datum.

Examples:

>>> Datum(uid=\"uid1\")\n>>> Datum(uid=\"uid1\", metadata={})\n>>> Datum(uid=\"uid1\", metadata={\"foo\": \"bar\", \"pi\": 3.14})\n
Source code in valor/schemas/symbolic/collections.py
class Datum(StaticCollection):\n    \"\"\"\n    A class used to store information about a datum for either a 'GroundTruth' or a 'Prediction'.\n\n    Attributes\n    ----------\n    uid : String\n        The UID of the datum.\n    metadata : Dictionary\n        A dictionary of metadata that describes the datum.\n\n    Examples\n    --------\n    >>> Datum(uid=\"uid1\")\n    >>> Datum(uid=\"uid1\", metadata={})\n    >>> Datum(uid=\"uid1\", metadata={\"foo\": \"bar\", \"pi\": 3.14})\n    \"\"\"\n\n    uid: String = String.symbolic(owner=\"datum\", name=\"uid\")\n    metadata: Dictionary = Dictionary.symbolic(owner=\"datum\", name=\"metadata\")\n\n    def __init__(\n        self,\n        *,\n        uid: str,\n        metadata: Optional[dict] = None,\n    ):\n        \"\"\"\n        Constructs a datum.\n\n        Parameters\n        ----------\n        uid : str\n            The UID of the datum.\n        metadata : dict, optional\n            A dictionary of metadata that describes the datum.\n        \"\"\"\n        super().__init__(uid=uid, metadata=metadata if metadata else dict())\n
"},{"location":"client_api/Datum/#valor.Datum-functions","title":"Functions","text":""},{"location":"client_api/Datum/#valor.Datum.__init__","title":"valor.Datum.__init__(*, uid, metadata=None)","text":"

Constructs a datum.

Parameters:

Name Type Description Default uid str

The UID of the datum.

required metadata dict

A dictionary of metadata that describes the datum.

None Source code in valor/schemas/symbolic/collections.py
def __init__(\n    self,\n    *,\n    uid: str,\n    metadata: Optional[dict] = None,\n):\n    \"\"\"\n    Constructs a datum.\n\n    Parameters\n    ----------\n    uid : str\n        The UID of the datum.\n    metadata : dict, optional\n        A dictionary of metadata that describes the datum.\n    \"\"\"\n    super().__init__(uid=uid, metadata=metadata if metadata else dict())\n
"},{"location":"client_api/Evaluation/","title":"Evaluation","text":"

Wraps valor.client.Job to provide evaluation-specific members.

Source code in valor/coretypes.py
class Evaluation:\n    \"\"\"\n    Wraps `valor.client.Job` to provide evaluation-specifc members.\n    \"\"\"\n\n    def __init__(\n        self, connection: Optional[ClientConnection] = None, **kwargs\n    ):\n        \"\"\"\n        Defines important attributes of the API's `EvaluationResult`.\n\n        Attributes\n        ----------\n        id : int\n            The ID of the evaluation.\n        dataset_names : list[str]\n            The names of the datasets the model was evaluated over.\n        model_name : str\n            The name of the evaluated model.\n        filters : schemas.Filter\n            The filter used to select data partitions for evaluation.\n        status : EvaluationStatus\n            The status of the evaluation.\n        metrics : List[dict]\n            A list of metric dictionaries returned by the job.\n        confusion_matrices : List[dict]\n            A list of confusion matrix dictionaries returned by the job.\n        meta: dict[str, str | float | dict], optional\n            A dictionary of metadata describing the evaluation run.\n        \"\"\"\n        if not connection:\n            connection = get_connection()\n        self.conn = connection\n        self.update(**kwargs)\n\n    def update(\n        self,\n        *_,\n        id: int,\n        dataset_names: list[str],\n        model_name: str,\n        filters: Filter,\n        parameters: EvaluationParameters,\n        status: EvaluationStatus,\n        metrics: List[Dict],\n        confusion_matrices: List[Dict],\n        created_at: str,\n        meta: dict[str, str | float | dict] | None,\n        **kwargs,\n    ):\n        self.id = id\n        self.dataset_names = dataset_names\n        self.model_name = model_name\n        self.filters = (\n            Filter(**filters) if isinstance(filters, dict) else Filter()\n        )\n        self.parameters = (\n            EvaluationParameters(**parameters)\n            if isinstance(parameters, dict)\n            else parameters\n        )\n        self.status = EvaluationStatus(status)\n        self.metrics = metrics\n        self.meta = meta\n        self.confusion_matrices = confusion_matrices\n        self.kwargs = kwargs\n        self.ignored_pred_labels: Optional[List[Label]] = None\n        self.missing_pred_labels: Optional[List[Label]] = None\n        self.created_at = datetime.datetime.strptime(\n            created_at, \"%Y-%m-%dT%H:%M:%S.%fZ\"\n        ).replace(tzinfo=datetime.timezone.utc)\n\n        for k, v in kwargs.items():\n            setattr(self, k, v)\n\n    def poll(self) -> EvaluationStatus:\n        \"\"\"\n        Poll the back end.\n\n        Updates the evaluation with the latest state from the back end.\n\n        Returns\n        -------\n        enums.EvaluationStatus\n            The status of the evaluation.\n\n        Raises\n        ----------\n        ClientException\n            If an Evaluation with the given `evaluation_id` is not found.\n        \"\"\"\n        response = self.conn.get_evaluations(evaluation_ids=[self.id])\n        if not response:\n            raise EvaluationDoesNotExist(self.id)\n        self.update(**response[0])\n        return self.status\n\n    def wait_for_completion(\n        self,\n        *,\n        timeout: Optional[int] = None,\n        interval: float = 1.0,\n    ) -> EvaluationStatus:\n        \"\"\"\n        Blocking function that waits for evaluation to finish.\n\n        Parameters\n        ----------\n        timeout : int, optional\n            Length of timeout 
in seconds.\n        interval : float, default=1.0\n            Polling interval in seconds.\n        \"\"\"\n        t_start = time.time()\n        while self.poll() not in [\n            EvaluationStatus.DONE,\n            EvaluationStatus.FAILED,\n        ]:\n            time.sleep(interval)\n            if timeout and time.time() - t_start > timeout:\n                raise TimeoutError\n        return self.status\n\n    def __str__(self) -> str:\n        \"\"\"Dumps the object into a JSON formatted string.\"\"\"\n        return json.dumps(self.to_dict(), indent=4)\n\n    def to_dict(self) -> dict:\n        \"\"\"\n        Defines how a `valor.Evaluation` object is serialized into a dictionary.\n\n        Returns\n        ----------\n        dict\n            A dictionary describing an evaluation.\n        \"\"\"\n        return {\n            \"id\": self.id,\n            \"dataset_names\": self.dataset_names,\n            \"model_name\": self.model_name,\n            \"filters\": asdict(self.filters),\n            \"parameters\": asdict(self.parameters),\n            \"status\": self.status.value,\n            \"metrics\": self.metrics,\n            \"confusion_matrices\": self.confusion_matrices,\n            \"meta\": self.meta,\n            **self.kwargs,\n        }\n\n    def to_dataframe(\n        self,\n        stratify_by: Optional[Tuple[str, str]] = None,\n    ):\n        \"\"\"\n        Get all metrics associated with a Model and return them in a `pd.DataFrame`.\n\n        Returns\n        ----------\n        pd.DataFrame\n            Evaluation metrics being displayed in a `pd.DataFrame`.\n\n        Raises\n        ------\n        ModuleNotFoundError\n            This function requires the use of `pandas.DataFrame`.\n\n        \"\"\"\n        try:\n            import pandas as pd\n        except ModuleNotFoundError:\n            raise ModuleNotFoundError(\n                \"Must have pandas installed to use `get_metric_dataframes`.\"\n            )\n\n        if not stratify_by:\n            column_type = \"evaluation\"\n            column_name = self.id\n        else:\n            column_type = stratify_by[0]\n            column_name = stratify_by[1]\n\n        metrics = [\n            {**metric, column_type: column_name} for metric in self.metrics\n        ]\n        df = pd.DataFrame(metrics)\n        for k in [\"label\", \"parameters\"]:\n            df[k] = df[k].fillna(\"n/a\")\n        df[\"parameters\"] = df[\"parameters\"].apply(json.dumps)\n        df[\"label\"] = df[\"label\"].apply(\n            lambda x: f\"{x['key']}: {x['value']}\" if x != \"n/a\" else x\n        )\n        df = df.pivot(\n            index=[\"type\", \"parameters\", \"label\"], columns=[column_type]\n        )\n        return df\n
"},{"location":"client_api/Evaluation/#valor.Evaluation-functions","title":"Functions","text":""},{"location":"client_api/Evaluation/#valor.Evaluation.__init__","title":"valor.Evaluation.__init__(connection=None, **kwargs)","text":"

Defines important attributes of the API's EvaluationResult.

Attributes:

Name Type Description id int

The ID of the evaluation.

dataset_names list[str]

The names of the datasets the model was evaluated over.

model_name str

The name of the evaluated model.

filters Filter

The filter used to select data partitions for evaluation.

status EvaluationStatus

The status of the evaluation.

metrics List[dict]

A list of metric dictionaries returned by the job.

confusion_matrices List[dict]

A list of confusion matrix dictionaries returned by the job.

meta (dict[str, str | float | dict], optional)

A dictionary of metadata describing the evaluation run.

Source code in valor/coretypes.py
def __init__(\n    self, connection: Optional[ClientConnection] = None, **kwargs\n):\n    \"\"\"\n    Defines important attributes of the API's `EvaluationResult`.\n\n    Attributes\n    ----------\n    id : int\n        The ID of the evaluation.\n    dataset_names : list[str]\n        The names of the datasets the model was evaluated over.\n    model_name : str\n        The name of the evaluated model.\n    filters : schemas.Filter\n        The filter used to select data partitions for evaluation.\n    status : EvaluationStatus\n        The status of the evaluation.\n    metrics : List[dict]\n        A list of metric dictionaries returned by the job.\n    confusion_matrices : List[dict]\n        A list of confusion matrix dictionaries returned by the job.\n    meta: dict[str, str | float | dict], optional\n        A dictionary of metadata describing the evaluation run.\n    \"\"\"\n    if not connection:\n        connection = get_connection()\n    self.conn = connection\n    self.update(**kwargs)\n
"},{"location":"client_api/Evaluation/#valor.Evaluation.__str__","title":"valor.Evaluation.__str__()","text":"

Dumps the object into a JSON formatted string.

Source code in valor/coretypes.py
def __str__(self) -> str:\n    \"\"\"Dumps the object into a JSON formatted string.\"\"\"\n    return json.dumps(self.to_dict(), indent=4)\n
"},{"location":"client_api/Evaluation/#valor.Evaluation.poll","title":"valor.Evaluation.poll()","text":"

Poll the back end.

Updates the evaluation with the latest state from the back end.

Returns:

Type Description EvaluationStatus

The status of the evaluation.

Raises:

Type Description ClientException

If an Evaluation with the given evaluation_id is not found.

Source code in valor/coretypes.py
def poll(self) -> EvaluationStatus:\n    \"\"\"\n    Poll the back end.\n\n    Updates the evaluation with the latest state from the back end.\n\n    Returns\n    -------\n    enums.EvaluationStatus\n        The status of the evaluation.\n\n    Raises\n    ----------\n    ClientException\n        If an Evaluation with the given `evaluation_id` is not found.\n    \"\"\"\n    response = self.conn.get_evaluations(evaluation_ids=[self.id])\n    if not response:\n        raise EvaluationDoesNotExist(self.id)\n    self.update(**response[0])\n    return self.status\n
"},{"location":"client_api/Evaluation/#valor.Evaluation.to_dataframe","title":"valor.Evaluation.to_dataframe(stratify_by=None)","text":"

Get all metrics associated with a Model and return them in a pd.DataFrame.

Returns:

Type Description DataFrame

Evaluation metrics presented as a pd.DataFrame.

Raises:

Type Description ModuleNotFoundError

This function requires the use of pandas.DataFrame.

Source code in valor/coretypes.py
def to_dataframe(\n    self,\n    stratify_by: Optional[Tuple[str, str]] = None,\n):\n    \"\"\"\n    Get all metrics associated with a Model and return them in a `pd.DataFrame`.\n\n    Returns\n    ----------\n    pd.DataFrame\n        Evaluation metrics being displayed in a `pd.DataFrame`.\n\n    Raises\n    ------\n    ModuleNotFoundError\n        This function requires the use of `pandas.DataFrame`.\n\n    \"\"\"\n    try:\n        import pandas as pd\n    except ModuleNotFoundError:\n        raise ModuleNotFoundError(\n            \"Must have pandas installed to use `get_metric_dataframes`.\"\n        )\n\n    if not stratify_by:\n        column_type = \"evaluation\"\n        column_name = self.id\n    else:\n        column_type = stratify_by[0]\n        column_name = stratify_by[1]\n\n    metrics = [\n        {**metric, column_type: column_name} for metric in self.metrics\n    ]\n    df = pd.DataFrame(metrics)\n    for k in [\"label\", \"parameters\"]:\n        df[k] = df[k].fillna(\"n/a\")\n    df[\"parameters\"] = df[\"parameters\"].apply(json.dumps)\n    df[\"label\"] = df[\"label\"].apply(\n        lambda x: f\"{x['key']}: {x['value']}\" if x != \"n/a\" else x\n    )\n    df = df.pivot(\n        index=[\"type\", \"parameters\", \"label\"], columns=[column_type]\n    )\n    return df\n
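As a sketch (the evaluation ID is invented), assuming pandas is installed:

>>> from valor import Client
>>> client = Client.connect("http://localhost:8000")
>>> evaluation = client.get_evaluations(evaluation_ids=[1])[0]
>>> df = evaluation.to_dataframe()  # one metrics column keyed by the evaluation ID
>>> df = evaluation.to_dataframe(stratify_by=("model", evaluation.model_name))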
"},{"location":"client_api/Evaluation/#valor.Evaluation.to_dict","title":"valor.Evaluation.to_dict()","text":"

Defines how a valor.Evaluation object is serialized into a dictionary.

Returns:

Type Description dict

A dictionary describing an evaluation.

Source code in valor/coretypes.py
def to_dict(self) -> dict:\n    \"\"\"\n    Defines how a `valor.Evaluation` object is serialized into a dictionary.\n\n    Returns\n    ----------\n    dict\n        A dictionary describing an evaluation.\n    \"\"\"\n    return {\n        \"id\": self.id,\n        \"dataset_names\": self.dataset_names,\n        \"model_name\": self.model_name,\n        \"filters\": asdict(self.filters),\n        \"parameters\": asdict(self.parameters),\n        \"status\": self.status.value,\n        \"metrics\": self.metrics,\n        \"confusion_matrices\": self.confusion_matrices,\n        \"meta\": self.meta,\n        **self.kwargs,\n    }\n
"},{"location":"client_api/Evaluation/#valor.Evaluation.wait_for_completion","title":"valor.Evaluation.wait_for_completion(*, timeout=None, interval=1.0)","text":"

Blocking function that waits for evaluation to finish.

Parameters:

Name Type Description Default timeout int

Length of timeout in seconds.

None interval float

Polling interval in seconds.

1.0 Source code in valor/coretypes.py
def wait_for_completion(\n    self,\n    *,\n    timeout: Optional[int] = None,\n    interval: float = 1.0,\n) -> EvaluationStatus:\n    \"\"\"\n    Blocking function that waits for evaluation to finish.\n\n    Parameters\n    ----------\n    timeout : int, optional\n        Length of timeout in seconds.\n    interval : float, default=1.0\n        Polling interval in seconds.\n    \"\"\"\n    t_start = time.time()\n    while self.poll() not in [\n        EvaluationStatus.DONE,\n        EvaluationStatus.FAILED,\n    ]:\n        time.sleep(interval)\n        if timeout and time.time() - t_start > timeout:\n            raise TimeoutError\n    return self.status\n
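A usage sketch (the evaluation ID is hypothetical); a TimeoutError is raised if the deadline passes before the evaluation reaches a terminal state:

>>> from valor import Client
>>> client = Client.connect("http://localhost:8000")
>>> evaluation = client.get_evaluations(evaluation_ids=[1])[0]
>>> evaluation.wait_for_completion(timeout=600, interval=2.0)  # EvaluationStatus.DONE or .FAILED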
"},{"location":"client_api/Groundtruth/","title":"Groundtruth","text":"

Bases: StaticCollection

An object describing a ground truth (e.g., a human-drawn bounding box on an image).

Attributes:

Name Type Description datum Datum

The datum associated with the groundtruth.

annotations List[Annotation]

The list of annotations associated with the groundtruth.

Examples:

>>> GroundTruth(\n...     datum=Datum(uid=\"uid1\"),\n...     annotations=[\n...         Annotation(\n...             labels=[Label(key=\"k1\", value=\"v1\")],\n...         )\n...     ]\n... )\n
Source code in valor/coretypes.py
class GroundTruth(StaticCollection):\n    \"\"\"\n    An object describing a ground truth (e.g., a human-drawn bounding box on an image).\n\n    Attributes\n    ----------\n    datum : Datum\n        The datum associated with the groundtruth.\n    annotations : List[Annotation]\n        The list of annotations associated with the groundtruth.\n\n    Examples\n    --------\n    >>> GroundTruth(\n    ...     datum=Datum(uid=\"uid1\"),\n    ...     annotations=[\n    ...         Annotation(\n    ...             labels=[Label(key=\"k1\", value=\"v1\")],\n    ...         )\n    ...     ]\n    ... )\n    \"\"\"\n\n    datum: Datum = Datum.symbolic(owner=\"groundtruth\", name=\"datum\")\n    annotations: SymbolicList[Annotation] = SymbolicList[Annotation].symbolic(\n        owner=\"groundtruth\", name=\"annotations\"\n    )\n\n    def __init__(\n        self,\n        *,\n        datum: Datum,\n        annotations: List[Annotation],\n    ):\n        \"\"\"\n        Creates a ground truth.\n\n        Parameters\n        ----------\n        datum : Datum\n            The datum that the ground truth is operating over.\n        annotations : List[Annotation]\n            The list of ground truth annotations.\n        \"\"\"\n        super().__init__(datum=datum, annotations=annotations)\n\n        for annotation in self.annotations:\n            for label in annotation.labels:\n                if label.score is not None:\n                    raise ValueError(\n                        \"GroundTruth labels should not have scores.\"\n                    )\n
"},{"location":"client_api/Groundtruth/#valor.GroundTruth-functions","title":"Functions","text":""},{"location":"client_api/Groundtruth/#valor.GroundTruth.__init__","title":"valor.GroundTruth.__init__(*, datum, annotations)","text":"

Creates a ground truth.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `datum` | `Datum` | The datum that the ground truth is operating over. | *required* |
| `annotations` | `List[Annotation]` | The list of ground truth annotations. | *required* |

Source code in valor/coretypes.py
def __init__(\n    self,\n    *,\n    datum: Datum,\n    annotations: List[Annotation],\n):\n    \"\"\"\n    Creates a ground truth.\n\n    Parameters\n    ----------\n    datum : Datum\n        The datum that the ground truth is operating over.\n    annotations : List[Annotation]\n        The list of ground truth annotations.\n    \"\"\"\n    super().__init__(datum=datum, annotations=annotations)\n\n    for annotation in self.annotations:\n        for label in annotation.labels:\n            if label.score is not None:\n                raise ValueError(\n                    \"GroundTruth labels should not have scores.\"\n                )\n
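As the source above shows, the constructor rejects scored labels; a minimal sketch of that failure mode:

>>> GroundTruth(
...     datum=Datum(uid="uid1"),
...     annotations=[
...         Annotation(labels=[Label(key="k1", value="v1", score=0.9)]),
...     ],
... )
Traceback (most recent call last):
    ...
ValueError: GroundTruth labels should not have scores.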
"},{"location":"client_api/Label/","title":"Label","text":"

Bases: StaticCollection

An object for labeling datasets, models, and annotations.

Attributes:

| Name | Type | Description |
| --- | --- | --- |
| `key` | `String` | The class label key. |
| `value` | `String` | The class label value. |
| `score` | `Score` | The label score. |

Examples:

>>> Label(key=\"k1\", value=\"v1\")\n>>> Label(key=\"k1\", value=\"v1\", score=None)\n>>> Label(key=\"k1\", value=\"v1\", score=0.9)\n
Source code in valor/schemas/symbolic/collections.py
class Label(StaticCollection):\n    \"\"\"\n    An object for labeling datasets, models, and annotations.\n\n    Attributes\n    ----------\n    key : String\n        The class label key.\n    value : String\n        The class label value.\n    score : Score\n        The label score.\n\n    Examples\n    --------\n    >>> Label(key=\"k1\", value=\"v1\")\n    >>> Label(key=\"k1\", value=\"v1\", score=None)\n    >>> Label(key=\"k1\", value=\"v1\", score=0.9)\n    \"\"\"\n\n    key: String = String.symbolic(owner=\"label\", name=\"key\")\n    value: String = String.symbolic(owner=\"label\", name=\"value\")\n    score: Float = Float.symbolic(owner=\"label\", name=\"score\")\n\n    def __init__(\n        self,\n        *,\n        key: str,\n        value: str,\n        score: Union[float, np.floating, None] = None,\n    ):\n        \"\"\"\n        Initializes an instance of a label.\n\n        Attributes\n        ----------\n        key : str\n            The class label key.\n        value : str\n            The class label value.\n        score : float, optional\n            The label score.\n        \"\"\"\n        super().__init__(key=key, value=value, score=score)\n\n    @staticmethod\n    def formatting() -> Dict[str, Any]:\n        \"\"\"Attribute format mapping.\"\"\"\n        return {\n            \"score\": Float.nullable,\n        }\n\n    def tuple(self):\n        \"\"\"\n        Defines how the `Label` is turned into a tuple.\n\n        Returns\n        ----------\n        tuple\n            A tuple of the `Label's` arguments.\n        \"\"\"\n        return (self.key, self.value, self.score)\n
"},{"location":"client_api/Label/#valor.Label-functions","title":"Functions","text":""},{"location":"client_api/Label/#valor.Label.__init__","title":"valor.Label.__init__(*, key, value, score=None)","text":"

Initializes an instance of a label.

Attributes:

| Name | Type | Description |
| --- | --- | --- |
| `key` | `str` | The class label key. |
| `value` | `str` | The class label value. |
| `score` | `float`, optional | The label score. |

Source code in valor/schemas/symbolic/collections.py
def __init__(\n    self,\n    *,\n    key: str,\n    value: str,\n    score: Union[float, np.floating, None] = None,\n):\n    \"\"\"\n    Initializes an instance of a label.\n\n    Attributes\n    ----------\n    key : str\n        The class label key.\n    value : str\n        The class label value.\n    score : float, optional\n        The label score.\n    \"\"\"\n    super().__init__(key=key, value=value, score=score)\n
"},{"location":"client_api/Label/#valor.Label.formatting","title":"valor.Label.formatting() staticmethod","text":"

Attribute format mapping.

Source code in valor/schemas/symbolic/collections.py
@staticmethod\ndef formatting() -> Dict[str, Any]:\n    \"\"\"Attribute format mapping.\"\"\"\n    return {\n        \"score\": Float.nullable,\n    }\n
"},{"location":"client_api/Label/#valor.Label.tuple","title":"valor.Label.tuple()","text":"

Defines how the Label is turned into a tuple.

Returns:

| Type | Description |
| --- | --- |
| `tuple` | A tuple of the `Label`'s arguments. |

Source code in valor/schemas/symbolic/collections.py
def tuple(self):\n    \"\"\"\n    Defines how the `Label` is turned into a tuple.\n\n    Returns\n    ----------\n    tuple\n        A tuple of the `Label's` arguments.\n    \"\"\"\n    return (self.key, self.value, self.score)\n
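A small sketch of `tuple()`; the elements mirror `key`, `value`, and `score`, though the exact element types depend on Valor's symbolic value wrappers:

>>> label = Label(key="k1", value="v1", score=0.9)
>>> key, value, score = label.tuple()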
"},{"location":"client_api/Model/","title":"Model","text":"

Bases: StaticCollection

A class describing a model that was trained on a particular dataset.

Attributes:

| Name | Type | Description |
| --- | --- | --- |
| `name` | `String` | The name of the model. |
| `metadata` | `Dictionary` | A dictionary of metadata that describes the model. |

Examples:

>>> Model.create(name=\"model1\")\n>>> Model.create(name=\"model1\", metadata={})\n>>> Model.create(name=\"model1\", metadata={\"foo\": \"bar\", \"pi\": 3.14})\n
Source code in valor/coretypes.py
class Model(StaticCollection):\n    \"\"\"\n    A class describing a model that was trained on a particular dataset.\n\n    Attributes\n    ----------\n    name : String\n        The name of the model.\n    metadata : Dictionary\n        A dictionary of metadata that describes the model.\n\n    Examples\n    --------\n    >>> Model.create(name=\"model1\")\n    >>> Model.create(name=\"model1\", metadata={})\n    >>> Model.create(name=\"model1\", metadata={\"foo\": \"bar\", \"pi\": 3.14})\n    \"\"\"\n\n    name: String = String.symbolic(owner=\"model\", name=\"name\")\n    metadata: Dictionary = Dictionary.symbolic(owner=\"model\", name=\"metadata\")\n\n    def __init__(\n        self,\n        *,\n        name: str,\n        metadata: Optional[dict] = None,\n        connection: Optional[ClientConnection] = None,\n    ):\n        \"\"\"\n        Creates a local instance of a model.\n\n        Use 'Model.create' classmethod to create a model with persistence.\n\n        Parameters\n        ----------\n        name : String\n            The name of the model.\n        metadata : Dictionary\n            A dictionary of metadata that describes the model.\n        connection : ClientConnection, optional\n            An initialized client connection.\n        \"\"\"\n        self.conn = connection\n        super().__init__(name=name, metadata=metadata if metadata else dict())\n\n    @classmethod\n    def create(\n        cls,\n        name: str,\n        metadata: Optional[Dict[str, Any]] = None,\n        connection: Optional[ClientConnection] = None,\n        **_,\n    ) -> Model:\n        \"\"\"\n        Creates a model that persists in the back end.\n\n        Parameters\n        ----------\n        name : str\n            The name of the model.\n        metadata : dict, optional\n            A dictionary of metadata that describes the model.\n        connection : ClientConnection, optional\n            An initialized client connection.\n        \"\"\"\n        model = cls(name=name, metadata=metadata, connection=connection)\n        Client(connection).create_model(model)\n        return model\n\n    @classmethod\n    def get(\n        cls,\n        name: str,\n        connection: Optional[ClientConnection] = None,\n    ) -> Union[Model, None]:\n        \"\"\"\n        Retrieves a model from the back end database.\n\n        Parameters\n        ----------\n        name : str\n            The name of the model.\n        connection : ClientConnnetion, optional\n            An optional Valor client object for interacting with the API.\n\n        Returns\n        -------\n        Union[valor.Model, None]\n            The model or 'None' if it doesn't exist.\n        \"\"\"\n        return Client(connection).get_model(name)\n\n    def add_prediction(\n        self,\n        dataset: Dataset,\n        prediction: Prediction,\n    ) -> None:\n        \"\"\"\n        Add a prediction to the model.\n\n        Parameters\n        ----------\n        dataset : valor.Dataset\n            The dataset that is being operated over.\n        prediction : valor.Prediction\n            The prediction to create.\n        \"\"\"\n        Client(self.conn).create_predictions(\n            dataset=dataset,\n            model=self,\n            predictions=[prediction],\n        )\n\n    def add_predictions(\n        self,\n        dataset: Dataset,\n        predictions: List[Prediction],\n    ) -> None:\n        \"\"\"\n        Add multiple predictions to the model.\n\n        Parameters\n        ----------\n        
dataset : valor.Dataset\n            The dataset that is being operated over.\n        predictions : List[valor.Prediction]\n            The predictions to create.\n        \"\"\"\n        Client(self.conn).create_predictions(\n            dataset=dataset,\n            model=self,\n            predictions=predictions,\n        )\n\n    def get_prediction(\n        self, dataset: Union[Dataset, str], datum: Union[Datum, str]\n    ) -> Union[Prediction, None]:\n        \"\"\"\n        Get a particular prediction.\n\n        Parameters\n        ----------\n        dataset: Union[Dataset, str]\n            The dataset the datum belongs to.\n        datum: Union[Datum, str]\n            The desired datum.\n\n        Returns\n        ----------\n        Union[Prediction, None]\n            The matching prediction or 'None' if it doesn't exist.\n        \"\"\"\n        return Client(self.conn).get_prediction(\n            dataset=dataset, model=self, datum=datum\n        )\n\n    def finalize_inferences(self, dataset: Union[Dataset, str]) -> None:\n        \"\"\"\n        Finalizes the model over a dataset such that new predictions cannot be added to it.\n        \"\"\"\n        return Client(self.conn).finalize_inferences(\n            dataset=dataset, model=self\n        )\n\n    def _format_constraints(\n        self,\n        datasets: Optional[Union[Dataset, List[Dataset]]] = None,\n        filter_by: Optional[FilterType] = None,\n    ) -> Filter:\n        \"\"\"Formats the 'filter' for any evaluation requests.\"\"\"\n\n        # get list of dataset names\n        dataset_names_from_obj = []\n        if isinstance(datasets, list):\n            dataset_names_from_obj = [dataset.name for dataset in datasets]\n        elif isinstance(datasets, Dataset):\n            dataset_names_from_obj = [datasets.name]\n\n        # create a 'schemas.Filter' object from the constraints.\n        filters = _format_filter(filter_by)\n\n        # reset model name\n        filters.model_names = None\n        filters.model_metadata = None\n\n        # set dataset names\n        if not filters.dataset_names:\n            filters.dataset_names = []\n        filters.dataset_names.extend(dataset_names_from_obj)  # type: ignore\n        return filters\n\n    def _create_label_map(\n        self,\n        label_map: Optional[Dict[Label, Label]],\n    ) -> Union[List[List[List[str]]], None]:\n        \"\"\"Convert a dictionary of label maps to a serializable list format.\"\"\"\n        if not label_map:\n            return None\n\n        if not isinstance(label_map, dict) or not all(\n            [\n                isinstance(key, Label) and isinstance(value, Label)\n                for key, value in label_map.items()\n            ]\n        ):\n            raise TypeError(\n                \"label_map should be a dictionary with valid Labels for both the key and value.\"\n            )\n\n        return_value = []\n        for key, value in label_map.items():\n            if not all(\n                [\n                    (isinstance(v.key, str) and isinstance(v.value, str))\n                    for v in [key, value]\n                ]\n            ):\n                raise TypeError\n            return_value.append(\n                [\n                    [key.key, key.value],\n                    [value.key, value.value],\n                ]\n            )\n        return return_value\n\n    def evaluate_classification(\n        self,\n        datasets: Union[Dataset, List[Dataset]],\n        filter_by: 
Optional[FilterType] = None,\n        label_map: Optional[Dict[Label, Label]] = None,\n        pr_curve_max_examples: int = 1,\n        metrics_to_return: Optional[List[MetricType]] = None,\n        allow_retries: bool = False,\n    ) -> Evaluation:\n        \"\"\"\n        Start a classification evaluation job.\n\n        Parameters\n        ----------\n        datasets : Union[Dataset, List[Dataset]], optional\n            The dataset or list of datasets to evaluate against.\n        filter_by : FilterType, optional\n            Optional set of constraints to filter evaluation by.\n        label_map : Dict[Label, Label], optional\n            Optional mapping of individual labels to a grouper label. Useful when you need to evaluate performance using labels that differ across datasets and models.\n        metrics_to_return: List[MetricType], optional\n            The list of metrics to compute, store, and return to the user.\n        allow_retries : bool, default = False\n            Option to retry previously failed evaluations.\n\n        Returns\n        -------\n        Evaluation\n            A job object that can be used to track the status of the job and get the metrics of it upon completion.\n        \"\"\"\n        if not datasets and not filter_by:\n            raise ValueError(\n                \"Evaluation requires the definition of either datasets, dataset filters or both.\"\n            )\n        elif metrics_to_return and not set(metrics_to_return).issubset(\n            MetricType.classification()\n        ):\n            raise ValueError(\n                f\"The following metrics are not supported for classification: '{set(metrics_to_return) - MetricType.classification()}'\"\n            )\n\n        # format request\n        filters = self._format_constraints(datasets, filter_by)\n        datasets = datasets if isinstance(datasets, list) else [datasets]\n        request = EvaluationRequest(\n            dataset_names=[dataset.name for dataset in datasets],  # type: ignore - issue #604\n            model_names=[self.name],  # type: ignore - issue #604\n            filters=filters,\n            parameters=EvaluationParameters(\n                task_type=TaskType.CLASSIFICATION,\n                label_map=self._create_label_map(label_map=label_map),\n                pr_curve_max_examples=pr_curve_max_examples,\n                metrics_to_return=metrics_to_return,\n            ),\n        )\n\n        # create evaluation\n        evaluation = Client(self.conn).evaluate(\n            request, allow_retries=allow_retries\n        )\n        if len(evaluation) != 1:\n            raise RuntimeError\n        return evaluation[0]\n\n    def evaluate_detection(\n        self,\n        datasets: Union[Dataset, List[Dataset]],\n        filter_by: Optional[FilterType] = None,\n        convert_annotations_to_type: Optional[AnnotationType] = None,\n        iou_thresholds_to_compute: Optional[List[float]] = None,\n        iou_thresholds_to_return: Optional[List[float]] = None,\n        label_map: Optional[Dict[Label, Label]] = None,\n        recall_score_threshold: float = 0,\n        metrics_to_return: Optional[List[MetricType]] = None,\n        pr_curve_iou_threshold: float = 0.5,\n        pr_curve_max_examples: int = 1,\n        allow_retries: bool = False,\n    ) -> Evaluation:\n        \"\"\"\n        Start an object-detection evaluation job.\n\n        Parameters\n        ----------\n        datasets : Union[Dataset, List[Dataset]], optional\n            The dataset or list of 
datasets to evaluate against.\n        filter_by : FilterType, optional\n            Optional set of constraints to filter evaluation by.\n        convert_annotations_to_type : enums.AnnotationType, optional\n            Forces the object detection evaluation to compute over this type.\n        iou_thresholds_to_compute : List[float], optional\n            Thresholds to compute mAP against.\n        iou_thresholds_to_return : List[float], optional\n            Thresholds to return AP for. Must be subset of `iou_thresholds_to_compute`.\n        label_map : Dict[Label, Label], optional\n            Optional mapping of individual labels to a grouper label. Useful when you need to evaluate performance using labels that differ across datasets and models.\n        recall_score_threshold: float, default=0\n            The confidence score threshold for use when determining whether to count a prediction as a true positive or not while calculating Average Recall.\n        metrics_to_return: List[MetricType], optional\n            The list of metrics to compute, store, and return to the user.\n        pr_curve_iou_threshold: float, optional\n            The IOU threshold to use when calculating precision-recall curves. Defaults to 0.5.\n        pr_curve_max_examples: int, optional\n            The maximum number of datum examples to store when calculating PR curves.\n        allow_retries : bool, default = False\n            Option to retry previously failed evaluations.\n\n        Returns\n        -------\n        Evaluation\n            A job object that can be used to track the status of the job and get the metrics of it upon completion.\n        \"\"\"\n        if metrics_to_return and not set(metrics_to_return).issubset(\n            MetricType.object_detection()\n        ):\n            raise ValueError(\n                f\"The following metrics are not supported for object detection: '{set(metrics_to_return) - MetricType.object_detection()}'\"\n            )\n\n        if iou_thresholds_to_compute is None:\n            iou_thresholds_to_compute = [\n                round(0.5 + 0.05 * i, 2) for i in range(10)\n            ]\n        if iou_thresholds_to_return is None:\n            iou_thresholds_to_return = [0.5, 0.75]\n\n        # format request\n        parameters = EvaluationParameters(\n            task_type=TaskType.OBJECT_DETECTION,\n            convert_annotations_to_type=convert_annotations_to_type,\n            iou_thresholds_to_compute=iou_thresholds_to_compute,\n            iou_thresholds_to_return=iou_thresholds_to_return,\n            label_map=self._create_label_map(label_map=label_map),\n            recall_score_threshold=recall_score_threshold,\n            metrics_to_return=metrics_to_return,\n            pr_curve_iou_threshold=pr_curve_iou_threshold,\n            pr_curve_max_examples=pr_curve_max_examples,\n        )\n        filters = self._format_constraints(datasets, filter_by)\n        datasets = datasets if isinstance(datasets, list) else [datasets]\n        request = EvaluationRequest(\n            dataset_names=[dataset.name for dataset in datasets],  # type: ignore - issue #604\n            model_names=[self.name],  # type: ignore - issue #604\n            filters=filters,\n            parameters=parameters,\n        )\n\n        # create evaluation\n        evaluation = Client(self.conn).evaluate(\n            request, allow_retries=allow_retries\n        )\n        if len(evaluation) != 1:\n            raise RuntimeError\n        return evaluation[0]\n\n    def 
evaluate_segmentation(\n        self,\n        datasets: Union[Dataset, List[Dataset]],\n        filter_by: Optional[FilterType] = None,\n        label_map: Optional[Dict[Label, Label]] = None,\n        metrics_to_return: Optional[List[MetricType]] = None,\n        allow_retries: bool = False,\n    ) -> Evaluation:\n        \"\"\"\n        Start a semantic-segmentation evaluation job.\n\n        Parameters\n        ----------\n        datasets : Union[Dataset, List[Dataset]], optional\n            The dataset or list of datasets to evaluate against.\n        filter_by : FilterType, optional\n            Optional set of constraints to filter evaluation by.\n        label_map : Dict[Label, Label], optional\n            Optional mapping of individual labels to a grouper label. Useful when you need to evaluate performance using labels that differ across datasets and models.\n        metrics_to_return: List[MetricType], optional\n            The list of metrics to compute, store, and return to the user.\n        allow_retries : bool, default = False\n            Option to retry previously failed evaluations.\n\n        Returns\n        -------\n        Evaluation\n            A job object that can be used to track the status of the job and get the metrics of it upon completion\n        \"\"\"\n        if metrics_to_return and not set(metrics_to_return).issubset(\n            MetricType.semantic_segmentation()\n        ):\n            raise ValueError(\n                f\"The following metrics are not supported for semantic segmentation: '{set(metrics_to_return) - MetricType.semantic_segmentation()}'\"\n            )\n\n        # format request\n        filters = self._format_constraints(datasets, filter_by)\n        datasets = datasets if isinstance(datasets, list) else [datasets]\n        request = EvaluationRequest(\n            dataset_names=[dataset.name for dataset in datasets],  # type: ignore - issue #604\n            model_names=[self.name],  # type: ignore - issue #604\n            filters=filters,\n            parameters=EvaluationParameters(\n                task_type=TaskType.SEMANTIC_SEGMENTATION,\n                label_map=self._create_label_map(label_map=label_map),\n                metrics_to_return=metrics_to_return,\n            ),\n        )\n\n        # create evaluation\n        evaluation = Client(self.conn).evaluate(\n            request, allow_retries=allow_retries\n        )\n        if len(evaluation) != 1:\n            raise RuntimeError\n        return evaluation[0]\n\n    def delete(self, timeout: int = 0):\n        \"\"\"\n        Delete the `Model` object from the back end.\n\n        Parameters\n        ----------\n        timeout : int, default=0\n            Sets a timeout in seconds.\n        \"\"\"\n        Client(self.conn).delete_model(self.name, timeout)  # type: ignore\n\n    def get_labels(\n        self,\n    ) -> List[Label]:\n        \"\"\"\n        Get all labels associated with a given model.\n\n        Returns\n        ----------\n        List[Label]\n            A list of `Labels` associated with the model.\n        \"\"\"\n        return Client(self.conn).get_labels_from_model(self)\n\n    def get_evaluations(\n        self,\n        metrics_to_sort_by: Optional[\n            Dict[str, Union[Dict[str, str], str]]\n        ] = None,\n    ) -> List[Evaluation]:\n        \"\"\"\n        Get all evaluations associated with a given model.\n\n        Parameters\n        ----------\n        metrics_to_sort_by: dict[str, str | dict[str, str]], optional\n 
           An optional dict of metric types to sort the evaluations by.\n\n\n        Returns\n        ----------\n        List[Evaluation]\n            A list of `Evaluations` associated with the model.\n        \"\"\"\n        return Client(self.conn).get_evaluations(\n            models=[self], metrics_to_sort_by=metrics_to_sort_by\n        )\n
"},{"location":"client_api/Model/#valor.Model-functions","title":"Functions","text":""},{"location":"client_api/Model/#valor.Model.__init__","title":"valor.Model.__init__(*, name, metadata=None, connection=None)","text":"

Creates a local instance of a model.

Use 'Model.create' classmethod to create a model with persistence.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `name` | `String` | The name of the model. | *required* |
| `metadata` | `Dictionary` | A dictionary of metadata that describes the model. | `None` |
| `connection` | `ClientConnection` | An initialized client connection. | `None` |

Source code in valor/coretypes.py
def __init__(\n    self,\n    *,\n    name: str,\n    metadata: Optional[dict] = None,\n    connection: Optional[ClientConnection] = None,\n):\n    \"\"\"\n    Creates a local instance of a model.\n\n    Use 'Model.create' classmethod to create a model with persistence.\n\n    Parameters\n    ----------\n    name : String\n        The name of the model.\n    metadata : Dictionary\n        A dictionary of metadata that describes the model.\n    connection : ClientConnection, optional\n        An initialized client connection.\n    \"\"\"\n    self.conn = connection\n    super().__init__(name=name, metadata=metadata if metadata else dict())\n
"},{"location":"client_api/Model/#valor.Model.add_prediction","title":"valor.Model.add_prediction(dataset, prediction)","text":"

Add a prediction to the model.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `dataset` | `Dataset` | The dataset that is being operated over. | *required* |
| `prediction` | `Prediction` | The prediction to create. | *required* |

Source code in valor/coretypes.py
def add_prediction(\n    self,\n    dataset: Dataset,\n    prediction: Prediction,\n) -> None:\n    \"\"\"\n    Add a prediction to the model.\n\n    Parameters\n    ----------\n    dataset : valor.Dataset\n        The dataset that is being operated over.\n    prediction : valor.Prediction\n        The prediction to create.\n    \"\"\"\n    Client(self.conn).create_predictions(\n        dataset=dataset,\n        model=self,\n        predictions=[prediction],\n    )\n
"},{"location":"client_api/Model/#valor.Model.add_predictions","title":"valor.Model.add_predictions(dataset, predictions)","text":"

Add multiple predictions to the model.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `dataset` | `Dataset` | The dataset that is being operated over. | *required* |
| `predictions` | `List[Prediction]` | The predictions to create. | *required* |

Source code in valor/coretypes.py
def add_predictions(\n    self,\n    dataset: Dataset,\n    predictions: List[Prediction],\n) -> None:\n    \"\"\"\n    Add multiple predictions to the model.\n\n    Parameters\n    ----------\n    dataset : valor.Dataset\n        The dataset that is being operated over.\n    predictions : List[valor.Prediction]\n        The predictions to create.\n    \"\"\"\n    Client(self.conn).create_predictions(\n        dataset=dataset,\n        model=self,\n        predictions=predictions,\n    )\n
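A batching sketch (assumes `dataset`, `model`, and a list `preds` of `Prediction` objects already exist; see `finalize_inferences` below for locking in the uploads):

>>> model.add_predictions(dataset=dataset, predictions=preds)
>>> model.finalize_inferences(dataset)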
"},{"location":"client_api/Model/#valor.Model.create","title":"valor.Model.create(name, metadata=None, connection=None, **_) classmethod","text":"

Creates a model that persists in the back end.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `name` | `str` | The name of the model. | *required* |
| `metadata` | `dict` | A dictionary of metadata that describes the model. | `None` |
| `connection` | `ClientConnection` | An initialized client connection. | `None` |

Source code in valor/coretypes.py
@classmethod\ndef create(\n    cls,\n    name: str,\n    metadata: Optional[Dict[str, Any]] = None,\n    connection: Optional[ClientConnection] = None,\n    **_,\n) -> Model:\n    \"\"\"\n    Creates a model that persists in the back end.\n\n    Parameters\n    ----------\n    name : str\n        The name of the model.\n    metadata : dict, optional\n        A dictionary of metadata that describes the model.\n    connection : ClientConnection, optional\n        An initialized client connection.\n    \"\"\"\n    model = cls(name=name, metadata=metadata, connection=connection)\n    Client(connection).create_model(model)\n    return model\n
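A hedged create-then-fetch sketch (assumes a running Valor API and an established client connection; the metadata is illustrative):

>>> model = Model.create(name="model1", metadata={"framework": "torch"})
>>> fetched = Model.get("model1")  # returns None if the model doesn't exist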
"},{"location":"client_api/Model/#valor.Model.delete","title":"valor.Model.delete(timeout=0)","text":"

Delete the Model object from the back end.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `timeout` | `int` | Sets a timeout in seconds. | `0` |

Source code in valor/coretypes.py
def delete(self, timeout: int = 0):\n    \"\"\"\n    Delete the `Model` object from the back end.\n\n    Parameters\n    ----------\n    timeout : int, default=0\n        Sets a timeout in seconds.\n    \"\"\"\n    Client(self.conn).delete_model(self.name, timeout)  # type: ignore\n
"},{"location":"client_api/Model/#valor.Model.evaluate_classification","title":"valor.Model.evaluate_classification(datasets, filter_by=None, label_map=None, pr_curve_max_examples=1, metrics_to_return=None, allow_retries=False)","text":"

Start a classification evaluation job.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `datasets` | `Union[Dataset, List[Dataset]]` | The dataset or list of datasets to evaluate against. | *required* |
| `filter_by` | `FilterType` | Optional set of constraints to filter evaluation by. | `None` |
| `label_map` | `Dict[Label, Label]` | Optional mapping of individual labels to a grouper label. Useful when you need to evaluate performance using labels that differ across datasets and models. | `None` |
| `metrics_to_return` | `Optional[List[MetricType]]` | The list of metrics to compute, store, and return to the user. | `None` |
| `allow_retries` | `bool` | Option to retry previously failed evaluations. | `False` |

Returns:

| Type | Description |
| --- | --- |
| `Evaluation` | A job object that can be used to track the status of the job and get the metrics of it upon completion. |

Source code in valor/coretypes.py
def evaluate_classification(\n    self,\n    datasets: Union[Dataset, List[Dataset]],\n    filter_by: Optional[FilterType] = None,\n    label_map: Optional[Dict[Label, Label]] = None,\n    pr_curve_max_examples: int = 1,\n    metrics_to_return: Optional[List[MetricType]] = None,\n    allow_retries: bool = False,\n) -> Evaluation:\n    \"\"\"\n    Start a classification evaluation job.\n\n    Parameters\n    ----------\n    datasets : Union[Dataset, List[Dataset]], optional\n        The dataset or list of datasets to evaluate against.\n    filter_by : FilterType, optional\n        Optional set of constraints to filter evaluation by.\n    label_map : Dict[Label, Label], optional\n        Optional mapping of individual labels to a grouper label. Useful when you need to evaluate performance using labels that differ across datasets and models.\n    metrics_to_return: List[MetricType], optional\n        The list of metrics to compute, store, and return to the user.\n    allow_retries : bool, default = False\n        Option to retry previously failed evaluations.\n\n    Returns\n    -------\n    Evaluation\n        A job object that can be used to track the status of the job and get the metrics of it upon completion.\n    \"\"\"\n    if not datasets and not filter_by:\n        raise ValueError(\n            \"Evaluation requires the definition of either datasets, dataset filters or both.\"\n        )\n    elif metrics_to_return and not set(metrics_to_return).issubset(\n        MetricType.classification()\n    ):\n        raise ValueError(\n            f\"The following metrics are not supported for classification: '{set(metrics_to_return) - MetricType.classification()}'\"\n        )\n\n    # format request\n    filters = self._format_constraints(datasets, filter_by)\n    datasets = datasets if isinstance(datasets, list) else [datasets]\n    request = EvaluationRequest(\n        dataset_names=[dataset.name for dataset in datasets],  # type: ignore - issue #604\n        model_names=[self.name],  # type: ignore - issue #604\n        filters=filters,\n        parameters=EvaluationParameters(\n            task_type=TaskType.CLASSIFICATION,\n            label_map=self._create_label_map(label_map=label_map),\n            pr_curve_max_examples=pr_curve_max_examples,\n            metrics_to_return=metrics_to_return,\n        ),\n    )\n\n    # create evaluation\n    evaluation = Client(self.conn).evaluate(\n        request, allow_retries=allow_retries\n    )\n    if len(evaluation) != 1:\n        raise RuntimeError\n    return evaluation[0]\n
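A minimal invocation sketch (assumes ground truths and predictions have already been ingested and finalized for `dataset`):

>>> evaluation = model.evaluate_classification(datasets=dataset)
>>> evaluation.wait_for_completion()
>>> metrics = evaluation.metrics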
"},{"location":"client_api/Model/#valor.Model.evaluate_detection","title":"valor.Model.evaluate_detection(datasets, filter_by=None, convert_annotations_to_type=None, iou_thresholds_to_compute=None, iou_thresholds_to_return=None, label_map=None, recall_score_threshold=0, metrics_to_return=None, pr_curve_iou_threshold=0.5, pr_curve_max_examples=1, allow_retries=False)","text":"

Start an object-detection evaluation job.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `datasets` | `Union[Dataset, List[Dataset]]` | The dataset or list of datasets to evaluate against. | *required* |
| `filter_by` | `FilterType` | Optional set of constraints to filter evaluation by. | `None` |
| `convert_annotations_to_type` | `AnnotationType` | Forces the object detection evaluation to compute over this type. | `None` |
| `iou_thresholds_to_compute` | `List[float]` | Thresholds to compute mAP against. | `None` |
| `iou_thresholds_to_return` | `List[float]` | Thresholds to return AP for. Must be a subset of `iou_thresholds_to_compute`. | `None` |
| `label_map` | `Dict[Label, Label]` | Optional mapping of individual labels to a grouper label. Useful when you need to evaluate performance using labels that differ across datasets and models. | `None` |
| `recall_score_threshold` | `float` | The confidence score threshold for use when determining whether to count a prediction as a true positive while calculating Average Recall. | `0` |
| `metrics_to_return` | `Optional[List[MetricType]]` | The list of metrics to compute, store, and return to the user. | `None` |
| `pr_curve_iou_threshold` | `float` | The IOU threshold to use when calculating precision-recall curves. | `0.5` |
| `pr_curve_max_examples` | `int` | The maximum number of datum examples to store when calculating PR curves. | `1` |
| `allow_retries` | `bool` | Option to retry previously failed evaluations. | `False` |

Returns:

| Type | Description |
| --- | --- |
| `Evaluation` | A job object that can be used to track the status of the job and get the metrics of it upon completion. |

Source code in valor/coretypes.py
def evaluate_detection(\n    self,\n    datasets: Union[Dataset, List[Dataset]],\n    filter_by: Optional[FilterType] = None,\n    convert_annotations_to_type: Optional[AnnotationType] = None,\n    iou_thresholds_to_compute: Optional[List[float]] = None,\n    iou_thresholds_to_return: Optional[List[float]] = None,\n    label_map: Optional[Dict[Label, Label]] = None,\n    recall_score_threshold: float = 0,\n    metrics_to_return: Optional[List[MetricType]] = None,\n    pr_curve_iou_threshold: float = 0.5,\n    pr_curve_max_examples: int = 1,\n    allow_retries: bool = False,\n) -> Evaluation:\n    \"\"\"\n    Start an object-detection evaluation job.\n\n    Parameters\n    ----------\n    datasets : Union[Dataset, List[Dataset]], optional\n        The dataset or list of datasets to evaluate against.\n    filter_by : FilterType, optional\n        Optional set of constraints to filter evaluation by.\n    convert_annotations_to_type : enums.AnnotationType, optional\n        Forces the object detection evaluation to compute over this type.\n    iou_thresholds_to_compute : List[float], optional\n        Thresholds to compute mAP against.\n    iou_thresholds_to_return : List[float], optional\n        Thresholds to return AP for. Must be subset of `iou_thresholds_to_compute`.\n    label_map : Dict[Label, Label], optional\n        Optional mapping of individual labels to a grouper label. Useful when you need to evaluate performance using labels that differ across datasets and models.\n    recall_score_threshold: float, default=0\n        The confidence score threshold for use when determining whether to count a prediction as a true positive or not while calculating Average Recall.\n    metrics_to_return: List[MetricType], optional\n        The list of metrics to compute, store, and return to the user.\n    pr_curve_iou_threshold: float, optional\n        The IOU threshold to use when calculating precision-recall curves. 
Defaults to 0.5.\n    pr_curve_max_examples: int, optional\n        The maximum number of datum examples to store when calculating PR curves.\n    allow_retries : bool, default = False\n        Option to retry previously failed evaluations.\n\n    Returns\n    -------\n    Evaluation\n        A job object that can be used to track the status of the job and get the metrics of it upon completion.\n    \"\"\"\n    if metrics_to_return and not set(metrics_to_return).issubset(\n        MetricType.object_detection()\n    ):\n        raise ValueError(\n            f\"The following metrics are not supported for object detection: '{set(metrics_to_return) - MetricType.object_detection()}'\"\n        )\n\n    if iou_thresholds_to_compute is None:\n        iou_thresholds_to_compute = [\n            round(0.5 + 0.05 * i, 2) for i in range(10)\n        ]\n    if iou_thresholds_to_return is None:\n        iou_thresholds_to_return = [0.5, 0.75]\n\n    # format request\n    parameters = EvaluationParameters(\n        task_type=TaskType.OBJECT_DETECTION,\n        convert_annotations_to_type=convert_annotations_to_type,\n        iou_thresholds_to_compute=iou_thresholds_to_compute,\n        iou_thresholds_to_return=iou_thresholds_to_return,\n        label_map=self._create_label_map(label_map=label_map),\n        recall_score_threshold=recall_score_threshold,\n        metrics_to_return=metrics_to_return,\n        pr_curve_iou_threshold=pr_curve_iou_threshold,\n        pr_curve_max_examples=pr_curve_max_examples,\n    )\n    filters = self._format_constraints(datasets, filter_by)\n    datasets = datasets if isinstance(datasets, list) else [datasets]\n    request = EvaluationRequest(\n        dataset_names=[dataset.name for dataset in datasets],  # type: ignore - issue #604\n        model_names=[self.name],  # type: ignore - issue #604\n        filters=filters,\n        parameters=parameters,\n    )\n\n    # create evaluation\n    evaluation = Client(self.conn).evaluate(\n        request, allow_retries=allow_retries\n    )\n    if len(evaluation) != 1:\n        raise RuntimeError\n    return evaluation[0]\n
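A sketch with custom IoU thresholds; the values are illustrative, and `iou_thresholds_to_return` must stay a subset of `iou_thresholds_to_compute` as noted above:

>>> evaluation = model.evaluate_detection(
...     datasets=dataset,
...     iou_thresholds_to_compute=[0.25, 0.5, 0.75],
...     iou_thresholds_to_return=[0.5],
... )
>>> evaluation.wait_for_completion()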
"},{"location":"client_api/Model/#valor.Model.evaluate_segmentation","title":"valor.Model.evaluate_segmentation(datasets, filter_by=None, label_map=None, metrics_to_return=None, allow_retries=False)","text":"

Start a semantic-segmentation evaluation job.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `datasets` | `Union[Dataset, List[Dataset]]` | The dataset or list of datasets to evaluate against. | *required* |
| `filter_by` | `FilterType` | Optional set of constraints to filter evaluation by. | `None` |
| `label_map` | `Dict[Label, Label]` | Optional mapping of individual labels to a grouper label. Useful when you need to evaluate performance using labels that differ across datasets and models. | `None` |
| `metrics_to_return` | `Optional[List[MetricType]]` | The list of metrics to compute, store, and return to the user. | `None` |
| `allow_retries` | `bool` | Option to retry previously failed evaluations. | `False` |

Returns:

| Type | Description |
| --- | --- |
| `Evaluation` | A job object that can be used to track the status of the job and get the metrics of it upon completion. |

Source code in valor/coretypes.py
def evaluate_segmentation(\n    self,\n    datasets: Union[Dataset, List[Dataset]],\n    filter_by: Optional[FilterType] = None,\n    label_map: Optional[Dict[Label, Label]] = None,\n    metrics_to_return: Optional[List[MetricType]] = None,\n    allow_retries: bool = False,\n) -> Evaluation:\n    \"\"\"\n    Start a semantic-segmentation evaluation job.\n\n    Parameters\n    ----------\n    datasets : Union[Dataset, List[Dataset]], optional\n        The dataset or list of datasets to evaluate against.\n    filter_by : FilterType, optional\n        Optional set of constraints to filter evaluation by.\n    label_map : Dict[Label, Label], optional\n        Optional mapping of individual labels to a grouper label. Useful when you need to evaluate performance using labels that differ across datasets and models.\n    metrics_to_return: List[MetricType], optional\n        The list of metrics to compute, store, and return to the user.\n    allow_retries : bool, default = False\n        Option to retry previously failed evaluations.\n\n    Returns\n    -------\n    Evaluation\n        A job object that can be used to track the status of the job and get the metrics of it upon completion\n    \"\"\"\n    if metrics_to_return and not set(metrics_to_return).issubset(\n        MetricType.semantic_segmentation()\n    ):\n        raise ValueError(\n            f\"The following metrics are not supported for semantic segmentation: '{set(metrics_to_return) - MetricType.semantic_segmentation()}'\"\n        )\n\n    # format request\n    filters = self._format_constraints(datasets, filter_by)\n    datasets = datasets if isinstance(datasets, list) else [datasets]\n    request = EvaluationRequest(\n        dataset_names=[dataset.name for dataset in datasets],  # type: ignore - issue #604\n        model_names=[self.name],  # type: ignore - issue #604\n        filters=filters,\n        parameters=EvaluationParameters(\n            task_type=TaskType.SEMANTIC_SEGMENTATION,\n            label_map=self._create_label_map(label_map=label_map),\n            metrics_to_return=metrics_to_return,\n        ),\n    )\n\n    # create evaluation\n    evaluation = Client(self.conn).evaluate(\n        request, allow_retries=allow_retries\n    )\n    if len(evaluation) != 1:\n        raise RuntimeError\n    return evaluation[0]\n
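A sketch of grouping labels via `label_map` (the key and value labels are illustrative):

>>> evaluation = model.evaluate_segmentation(
...     datasets=dataset,
...     label_map={
...         Label(key="class", value="truck"): Label(key="class", value="vehicle"),
...     },
... )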
"},{"location":"client_api/Model/#valor.Model.finalize_inferences","title":"valor.Model.finalize_inferences(dataset)","text":"

Finalizes the model over a dataset such that new predictions cannot be added to it.

Source code in valor/coretypes.py
def finalize_inferences(self, dataset: Union[Dataset, str]) -> None:\n    \"\"\"\n    Finalizes the model over a dataset such that new predictions cannot be added to it.\n    \"\"\"\n    return Client(self.conn).finalize_inferences(\n        dataset=dataset, model=self\n    )\n
"},{"location":"client_api/Model/#valor.Model.get","title":"valor.Model.get(name, connection=None) classmethod","text":"

Retrieves a model from the back end database.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `name` | `str` | The name of the model. | *required* |
| `connection` | `ClientConnection`, optional | An optional Valor client object for interacting with the API. | `None` |

Returns:

| Type | Description |
| --- | --- |
| `Union[Model, None]` | The model or `None` if it doesn't exist. |

Source code in valor/coretypes.py
@classmethod\ndef get(\n    cls,\n    name: str,\n    connection: Optional[ClientConnection] = None,\n) -> Union[Model, None]:\n    \"\"\"\n    Retrieves a model from the back end database.\n\n    Parameters\n    ----------\n    name : str\n        The name of the model.\n    connection : ClientConnection, optional\n        An optional Valor client object for interacting with the API.\n\n    Returns\n    -------\n    Union[valor.Model, None]\n        The model or 'None' if it doesn't exist.\n    \"\"\"\n    return Client(connection).get_model(name)\n
"},{"location":"client_api/Model/#valor.Model.get_evaluations","title":"valor.Model.get_evaluations(metrics_to_sort_by=None)","text":"

Get all evaluations associated with a given model.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `metrics_to_sort_by` | `Optional[Dict[str, Union[Dict[str, str], str]]]` | An optional dict of metric types to sort the evaluations by. | `None` |

Returns:

| Type | Description |
| --- | --- |
| `List[Evaluation]` | A list of `Evaluations` associated with the model. |

Source code in valor/coretypes.py
def get_evaluations(\n    self,\n    metrics_to_sort_by: Optional[\n        Dict[str, Union[Dict[str, str], str]]\n    ] = None,\n) -> List[Evaluation]:\n    \"\"\"\n    Get all evaluations associated with a given model.\n\n    Parameters\n    ----------\n    metrics_to_sort_by: dict[str, str | dict[str, str]], optional\n        An optional dict of metric types to sort the evaluations by.\n\n\n    Returns\n    ----------\n    List[Evaluation]\n        A list of `Evaluations` associated with the model.\n    \"\"\"\n    return Client(self.conn).get_evaluations(\n        models=[self], metrics_to_sort_by=metrics_to_sort_by\n    )\n
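A minimal retrieval sketch; no sort key is passed here, and any `metrics_to_sort_by` argument must follow the `dict[str, str | dict[str, str]]` shape documented above:

>>> evaluations = model.get_evaluations()
>>> statuses = [evaluation.status for evaluation in evaluations]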
"},{"location":"client_api/Model/#valor.Model.get_labels","title":"valor.Model.get_labels()","text":"

Get all labels associated with a given model.

Returns:

| Type | Description |
| --- | --- |
| `List[Label]` | A list of `Labels` associated with the model. |

Source code in valor/coretypes.py
def get_labels(\n    self,\n) -> List[Label]:\n    \"\"\"\n    Get all labels associated with a given model.\n\n    Returns\n    ----------\n    List[Label]\n        A list of `Labels` associated with the model.\n    \"\"\"\n    return Client(self.conn).get_labels_from_model(self)\n
"},{"location":"client_api/Model/#valor.Model.get_prediction","title":"valor.Model.get_prediction(dataset, datum)","text":"

Get a particular prediction.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `dataset` | `Union[Dataset, str]` | The dataset the datum belongs to. | *required* |
| `datum` | `Union[Datum, str]` | The desired datum. | *required* |

Returns:

| Type | Description |
| --- | --- |
| `Union[Prediction, None]` | The matching prediction or `None` if it doesn't exist. |

Source code in valor/coretypes.py
def get_prediction(\n    self, dataset: Union[Dataset, str], datum: Union[Datum, str]\n) -> Union[Prediction, None]:\n    \"\"\"\n    Get a particular prediction.\n\n    Parameters\n    ----------\n    dataset: Union[Dataset, str]\n        The dataset the datum belongs to.\n    datum: Union[Datum, str]\n        The desired datum.\n\n    Returns\n    ----------\n    Union[Prediction, None]\n        The matching prediction or 'None' if it doesn't exist.\n    \"\"\"\n    return Client(self.conn).get_prediction(\n        dataset=dataset, model=self, datum=datum\n    )\n
"},{"location":"client_api/Prediction/","title":"Prediction","text":"

Bases: StaticCollection

An object describing a prediction (e.g., a machine-drawn bounding box on an image).

Attributes:

| Name | Type | Description |
| --- | --- | --- |
| `datum` | `Datum` | The datum associated with the prediction. |
| `annotations` | `List[Annotation]` | The list of annotations associated with the prediction. |

Examples:

>>> Prediction(\n...     datum=Datum(uid=\"uid1\"),\n...     annotations=[\n...         Annotation(\n...             labels=[\n...                 Label(key=\"k1\", value=\"v1\", score=0.9),\n...                 Label(key=\"k1\", value=\"v1\", score=0.1)\n...             ],\n...         )\n...     ]\n... )\n
Source code in valor/coretypes.py
class Prediction(StaticCollection):\n    \"\"\"\n    An object describing a prediction (e.g., a machine-drawn bounding box on an image).\n\n    Attributes\n    ----------\n    datum : Datum\n        The datum associated with the prediction.\n    annotations : List[Annotation]\n        The list of annotations associated with the prediction.\n\n    Examples\n    --------\n    >>> Prediction(\n    ...     datum=Datum(uid=\"uid1\"),\n    ...     annotations=[\n    ...         Annotation(\n    ...             labels=[\n    ...                 Label(key=\"k1\", value=\"v1\", score=0.9),\n    ...                 Label(key=\"k1\", value=\"v1\", score=0.1)\n    ...             ],\n    ...         )\n    ...     ]\n    ... )\n    \"\"\"\n\n    datum: Datum = Datum.symbolic(owner=\"prediction\", name=\"datum\")\n    annotations: SymbolicList[Annotation] = SymbolicList[Annotation].symbolic(\n        owner=\"prediction\", name=\"annotations\"\n    )\n\n    def __init__(\n        self,\n        *,\n        datum: Datum,\n        annotations: List[Annotation],\n    ):\n        \"\"\"\n        Creates a prediction.\n\n        Parameters\n        ----------\n        datum : Datum\n            The datum that the prediction is operating over.\n        annotations : List[Annotation]\n            The list of predicted annotations.\n        \"\"\"\n        super().__init__(datum=datum, annotations=annotations)\n
"},{"location":"client_api/Prediction/#valor.Prediction-functions","title":"Functions","text":""},{"location":"client_api/Prediction/#valor.Prediction.__init__","title":"valor.Prediction.__init__(*, datum, annotations)","text":"

Creates a prediction.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `datum` | `Datum` | The datum that the prediction is operating over. | *required* |
| `annotations` | `List[Annotation]` | The list of predicted annotations. | *required* |

Source code in valor/coretypes.py
def __init__(\n    self,\n    *,\n    datum: Datum,\n    annotations: List[Annotation],\n):\n    \"\"\"\n    Creates a prediction.\n\n    Parameters\n    ----------\n    datum : Datum\n        The datum that the prediction is operating over.\n    annotations : List[Annotation]\n        The list of predicted annotations.\n    \"\"\"\n    super().__init__(datum=datum, annotations=annotations)\n
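A sketch pairing a `Prediction` with `Model.add_prediction` (assumes `dataset` and `model` from the earlier sketches):

>>> prediction = Prediction(
...     datum=Datum(uid="uid1"),
...     annotations=[
...         Annotation(labels=[Label(key="k1", value="v1", score=0.9)]),
...     ],
... )
>>> model.add_prediction(dataset, prediction)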
"},{"location":"client_api/Viz/","title":"Viz","text":""},{"location":"client_api/Viz/#valor.viz-classes","title":"Classes","text":""},{"location":"client_api/Viz/#valor.viz-functions","title":"Functions","text":""},{"location":"client_api/Viz/#valor.viz.create_combined_segmentation_mask","title":"valor.viz.create_combined_segmentation_mask(annotated_datum, label_key, filter_on_instance_segmentations=False)","text":"

Creates a combined segmentation mask from a list of segmentations.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `annotated_datum` | `Union[GroundTruth, Prediction]` | The ground truth or prediction whose segmentation annotations will be combined; all of its rasters must share the same image dimensions. | *required* |
| `label_key` | `str` | The label key to use. | *required* |
| `filter_on_instance_segmentations` | `bool` | Whether to filter on instance segmentations or not. | `False` |

Returns:

| Type | Description |
| --- | --- |
| `tuple` | The first element of the tuple is the combined mask, as an RGB PIL image. The second element is a color legend: it's a dict with the unique labels as keys and the PIL image swatches as values. |

Raises:

| Type | Description |
| --- | --- |
| `RuntimeError` | If all segmentations don't belong to the same image, or there is a segmentation that doesn't have `label_key` as the key of one of its labels. |
| `ValueError` | If there aren't any segmentations. |

Source code in valor/viz.py
def create_combined_segmentation_mask(\n    annotated_datum: Union[GroundTruth, Prediction],\n    label_key: str,\n    filter_on_instance_segmentations: bool = False,\n) -> Tuple[Image.Image, Dict[str, Image.Image]]:\n    \"\"\"\n    Creates a combined segmentation mask from a list of segmentations.\n\n    Parameters\n    -------\n    annotated_datum : Union[GroundTruth, Prediction]\n        A list of segmentations. These all must have the same `image` attribute.\n    label_key : str\n        The label key to use.\n    filter_on_instance_segmentations : bool, optional\n        Whether to filter on instance segmentations or not.\n\n    Returns\n    -------\n    tuple\n        The first element of the tuple is the combined mask, as an RGB PIL image. The second\n        element is a color legend: it's a dict with the unique labels as keys and the\n        PIL image swatches as values.\n\n    Raises\n    ------\n    RuntimeError\n        If all segmentations don't belong to the same image or there is a\n        segmentation that doesn't have `label_key` as the key of one of its labels.\n    ValueError\n        If there aren't any segmentations.\n    \"\"\"\n\n    # validate input type\n    if not isinstance(annotated_datum, (GroundTruth, Prediction)):\n        raise ValueError(\"Expected either a 'GroundTruth' or 'Prediction'\")\n\n    # verify there are a nonzero number of annotations\n    if len(annotated_datum.annotations) == 0:\n        raise ValueError(\"annotations cannot be empty.\")\n\n    # validate raster size\n    img_h = None\n    img_w = None\n    for annotation in annotated_datum.annotations:\n        raster = annotation.raster\n        if raster.get_value() is None:\n            raise ValueError(\"No raster exists.\")\n        if img_h is None:\n            img_h = raster.height\n        if img_w is None:\n            img_w = raster.width\n        if (img_h != raster.height) or (img_w != raster.width):\n            raise ValueError(\n                f\"Size mismatch between rasters. {(img_h, img_w)} != {(raster.height, raster.width)}\"\n            )\n    if img_h is None or img_w is None:\n        raise ValueError(\n            f\"Segmentation bounds not properly defined. 
{(img_h, img_w)}\"\n        )\n\n    # unpack raster annotations\n    annotations: List[Annotation] = []\n    for annotation in annotated_datum.annotations:\n        if (\n            annotation.is_instance or False\n        ) == filter_on_instance_segmentations:\n            annotations.append(annotation)\n\n    # unpack label values\n    label_values = []\n    for annotation in annotations:\n        for label in annotation.labels:\n            if label.key == label_key:\n                label_values.append(label.value)\n    if not label_values:\n        raise RuntimeError(\n            f\"Annotation doesn't have a label with key `{label_key}`\"\n        )\n\n    # assign label coloring\n    unique_label_values = list(set(label_values))\n    label_value_to_color = {\n        v: COLOR_MAP[i] for i, v in enumerate(unique_label_values)\n    }\n    seg_colors = [label_value_to_color[v] for v in label_values]\n\n    # create mask\n    combined_mask = np.zeros((img_h, img_w, 3), dtype=np.uint8)\n    for annotation, color in zip(annotations, seg_colors):\n        raster = annotation.raster\n        if raster.get_value() is None:\n            raise ValueError(\"No raster exists.\")\n        if raster.array is not None:\n            if raster.geometry is None:\n                mask = raster.array\n            elif isinstance(raster.geometry, schemas.MultiPolygon):\n                mask = _polygons_to_binary_mask(\n                    raster.geometry.to_polygons(),\n                    img_w=img_w,\n                    img_h=img_h,\n                )\n            elif isinstance(raster.geometry, (schemas.Box, schemas.Polygon)):\n                mask = _polygons_to_binary_mask(\n                    [raster.geometry],\n                    img_w=img_w,\n                    img_h=img_h,\n                )\n            else:\n                continue\n            combined_mask[np.where(mask)] = color\n        else:\n            continue\n\n    legend = {\n        v: Image.new(\"RGB\", (20, 20), color)\n        for v, color in label_value_to_color.items()\n    }\n\n    return Image.fromarray(combined_mask), legend\n
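A display sketch (assumes `groundtruth` is a `GroundTruth` whose annotations carry rasters labeled under the illustrative key "class"):

>>> from valor.viz import create_combined_segmentation_mask
>>> mask, legend = create_combined_segmentation_mask(
...     groundtruth,
...     label_key="class",
...     filter_on_instance_segmentations=False,
... )
>>> mask.show()  # an RGB PIL image; `legend` maps label values to color swatches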
"},{"location":"client_api/Viz/#valor.viz.draw_bounding_box_on_image","title":"valor.viz.draw_bounding_box_on_image(bounding_box, img, color=(255, 0, 0))","text":"

Draws a bounding polygon on an image. This operation is not done in place.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `bounding_box` | `Box` | Bounding box to draw on the image. | *required* |
| `img` | `Image` | Pillow image to draw on. | *required* |
| `color` | `Tuple[int, int, int]` | RGB tuple of the color to use. | `(255, 0, 0)` |

Returns:

| Type | Description |
| --- | --- |
| `img` | Pillow image with bounding box drawn on it. |

Source code in valor/viz.py
def draw_bounding_box_on_image(\n    bounding_box: schemas.Box,\n    img: Image.Image,\n    color: Tuple[int, int, int] = (255, 0, 0),\n) -> Image.Image:\n    \"\"\"Draws a bounding polygon on an image. This operation is not done in place.\n\n    Parameters\n    ----------\n    bounding_box\n        Bounding box to draw on the image.\n    img\n        Pillow image to draw on.\n    color\n        RGB tuple of the color to use.\n\n    Returns\n    -------\n    img\n        Pillow image with bounding box drawn on it.\n    \"\"\"\n    coords = bounding_box.get_value()\n    return _draw_bounding_polygon_on_image(\n        schemas.Polygon(coords), img, color=color, inplace=False\n    )\n
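A sketch (assumes `box` is a `schemas.Box` and `img` a Pillow image; the green color is illustrative):

>>> from valor.viz import draw_bounding_box_on_image
>>> annotated = draw_bounding_box_on_image(box, img, color=(0, 255, 0))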
"},{"location":"client_api/Viz/#valor.viz.draw_detections_on_image","title":"valor.viz.draw_detections_on_image(detections, img)","text":"

Draws detections (bounding boxes and labels) on an image.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `detections` | `List[Union[GroundTruth, Prediction]]` | A list of `GroundTruths` or `Predictions` to draw on the image. | *required* |
| `img` | `Image` | The image to draw the detections on. | *required* |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `img` | `Image` | An image with the detections drawn on. |

Source code in valor/viz.py
def draw_detections_on_image(\n    detections: Sequence[Union[GroundTruth, Prediction]],\n    img: Image.Image,\n) -> Image.Image:\n    \"\"\"\n    Draws detections (bounding boxes and labels) on an image.\n    Parameters\n    -------\n    detections : List[Union[GroundTruth, Prediction]]\n        A list of `GroundTruths` or `Predictions` to draw on the image.\n    img : Image.Image\n        The image to draw the detections on.\n    Returns\n    -------\n    img : Image.Image\n        An image with the detections drawn on.\n    \"\"\"\n\n    annotations = []\n    for datum in detections:\n        annotations.extend(datum.annotations)\n\n    for i, detection in enumerate(annotations):\n        if detection.raster and detection.is_instance is True:\n            img = _draw_detection_on_image(detection, img, inplace=i != 0)\n    return img\n
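A sketch overlaying both a ground truth and a prediction (both assumed to carry instance-segmentation rasters, per the `is_instance` filter in the source above):

>>> from valor.viz import draw_detections_on_image
>>> annotated = draw_detections_on_image([groundtruth, prediction], img)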
"},{"location":"client_api/Viz/#valor.viz.draw_raster_on_image","title":"valor.viz.draw_raster_on_image(raster, img, color=(255, 0, 0), alpha=0.4)","text":"

Draws the raster on top of an image. This operation is not done in place.

Parameters:

Name Type Description Default raster Raster

Raster mask to overlay on the image.

required img Image

Pillow image to draw on.

required color Tuple[int, int, int]

RGB tuple of the color to use.

(255, 0, 0) alpha float

Alpha (transparency) value of the mask. 0 is fully transparent, 1 is fully opaque.

0.4 Source code in valor/viz.py
def draw_raster_on_image(\n    raster: schemas.Raster,\n    img: Image.Image,\n    color: Tuple[int, int, int] = (255, 0, 0),\n    alpha: float = 0.4,\n) -> Image.Image:\n    \"\"\"Draws the raster on top of an image. This operation is not done in place.\n\n    Parameters\n    ----------\n    raster\n        Raster mask to overlay on the image.\n    img\n        Pillow image to draw on.\n    color\n        RGB tuple of the color to use.\n    alpha\n        Alpha (transparency) value of the mask. 0 is fully transparent, 1 is fully opaque.\n\n    Returns\n    -------\n    Image.Image\n        The image with the raster overlaid.\n    \"\"\"\n    img = img.copy()\n    binary_mask = raster.array\n    mask_arr = np.zeros(\n        (binary_mask.shape[0], binary_mask.shape[1], 3), dtype=np.uint8\n    )\n    mask_arr[binary_mask] = color\n    mask_img = Image.fromarray(mask_arr)\n\n    if mask_img.size != img.size:\n        raise ValueError(\"Input image and raster must be the same size.\")\n    blend = Image.blend(img, mask_img, alpha=alpha)\n    img.paste(blend, (0, 0), mask=Image.fromarray(binary_mask))\n\n    return img\n
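A minimal sketch, assuming the import paths valor.schemas and valor.viz; the mask must match the image size or a ValueError is raised:

>>> import numpy as np\n>>> from PIL import Image\n>>> from valor.schemas import Raster\n>>> from valor.viz import draw_raster_on_image\n>>> mask = np.zeros((100, 100), dtype=bool)\n>>> mask[25:75, 25:75] = True\n>>> img = Image.new(\"RGB\", (100, 100))\n>>> overlay = draw_raster_on_image(Raster.from_numpy(mask), img, color=(0, 0, 255), alpha=0.5)\n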
"},{"location":"client_api/Schemas/Filters/","title":"Filters","text":""},{"location":"client_api/Schemas/Filters/#valor.schemas.filters-classes","title":"Classes","text":""},{"location":"client_api/Schemas/Filters/#valor.schemas.filters.Constraint","title":"valor.schemas.filters.Constraint dataclass","text":"

Represents a constraint with a value and an operator.

Attributes:

Name Type Description value Any

The value associated with the constraint.

operator str

The operator used to define the constraint.

Source code in valor/schemas/filters.py
@dataclass\nclass Constraint:\n    \"\"\"\n    Represents a constraint with a value and an operator.\n\n    Attributes:\n        value : Any\n            The value associated with the constraint.\n        operator : str\n            The operator used to define the constraint.\n    \"\"\"\n\n    value: Any\n    operator: str\n
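For example, a constraint that keeps scores of at least 0.5 (a sketch; the set of operator strings accepted by the back end is defined by the filtering API):

>>> from valor.schemas.filters import Constraint\n>>> Constraint(value=0.5, operator=\">=\")\nConstraint(value=0.5, operator='>=')\n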
"},{"location":"client_api/Schemas/Filters/#valor.schemas.filters.Filter","title":"valor.schemas.filters.Filter dataclass","text":"

Used to filter Evaluations according to specific, user-defined criteria.

Attributes:

Name Type Description dataset_names (List[str], optional)

A list of Dataset names to filter on.

dataset_metadata (Dict[str, List[Constraint]], optional)

A dictionary of Dataset metadata to filter on.

model_names (List[str], optional)

A list of Model names to filter on.

model_metadata (Dict[str, List[Constraint]], optional)

A dictionary of Model metadata to filter on.

datum_uids (List[str], optional)

A list of Datum UIDs to filter on.

datum_metadata (Dict[str, List[Constraint]], optional)

A dictionary of Datum metadata to filter on.

task_types (List[TaskType], optional)

A list of task types to filter on.

annotation_metadata (Dict[str, List[Constraint]], optional)

A dictionary of Annotation metadata to filter on.

require_bounding_box (bool, optional)

A toggle for filtering by bounding boxes.

bounding_box_area (bool, optional)

An optional constraint to filter by bounding box area.

require_polygon (bool, optional)

A toggle for filtering by polygons.

polygon_area (bool, optional)

An optional constraint to filter by polygon area.

require_raster (bool, optional)

A toggle for filtering by rasters.

raster_area (bool, optional)

An optional constraint to filter by raster area.

labels (List[Label], optional)

A list of `Labels` to filter on.

label_ids (List[int], optional)

A list of label row IDs.

label_keys (List[str], optional)

A list of Label keys to filter on.

label_scores (List[Constraint], optional)

A list of Constraints which are used to filter Evaluations according to the Model's prediction scores.

Raises:

Type Description TypeError

If value isn't of the correct type.

ValueError

If the operator doesn't match one of the allowed patterns.

Source code in valor/schemas/filters.py
@dataclass\nclass Filter:\n    \"\"\"\n    Used to filter Evaluations according to specific, user-defined criteria.\n\n    Attributes\n    ----------\n    dataset_names : List[str], optional\n        A list of `Dataset` names to filter on.\n    dataset_metadata : Dict[str, List[Constraint]], optional\n        A dictionary of `Dataset` metadata to filter on.\n    model_names : List[str], optional\n        A list of `Model` names to filter on.\n    model_metadata : Dict[str, List[Constraint]], optional\n        A dictionary of `Model` metadata to filter on.\n    datum_uids : List[str], optional\n        A list of `Datum` UIDs to filter on.\n    datum_metadata : Dict[str, List[Constraint]], optional\n        A dictionary of `Datum` metadata to filter on.\n    task_types : List[TaskType], optional\n        A list of task types to filter on.\n    annotation_metadata : Dict[str, List[Constraint]], optional\n        A dictionary of `Annotation` metadata to filter on.\n    require_bounding_box : bool, optional\n        A toggle for filtering by bounding boxes.\n    bounding_box_area : bool, optional\n        An optional constraint to filter by bounding box area.\n    require_polygon : bool, optional\n        A toggle for filtering by polygons.\n    polygon_area : bool, optional\n        An optional constraint to filter by polygon area.\n    require_raster : bool, optional\n        A toggle for filtering by rasters.\n    raster_area : bool, optional\n        An optional constraint to filter by raster area.\n    labels : List[Label], optional\n        A list of `Labels' to filter on.\n    label_ids : List[int], optional\n        A list of label row id's.\n    label_keys : List[str], optional\n        A list of `Label` keys to filter on.\n    label_scores : List[Constraint], optional\n        A list of `Constraints` which are used to filter `Evaluations` according to the `Model`'s prediction scores.\n\n    Raises\n    ------\n    TypeError\n        If `value` isn't of the correct type.\n    ValueError\n        If the `operator` doesn't match one of the allowed patterns.\n    \"\"\"\n\n    # datasets\n    dataset_names: Optional[List[str]] = None\n    dataset_metadata: Optional[Dict[str, List[Constraint]]] = None\n\n    # models\n    model_names: Optional[List[str]] = None\n    model_metadata: Optional[Dict[str, List[Constraint]]] = None\n\n    # datums\n    datum_uids: Optional[List[str]] = None\n    datum_metadata: Optional[Dict[str, List[Constraint]]] = None\n\n    # annotations\n    task_types: Optional[List[TaskType]] = None\n    annotation_metadata: Optional[Dict[str, List[Constraint]]] = None\n\n    # geometries\n    require_bounding_box: Optional[bool] = None\n    bounding_box_area: Optional[List[Constraint]] = None\n    require_polygon: Optional[bool] = None\n    polygon_area: Optional[List[Constraint]] = None\n    require_raster: Optional[bool] = None\n    raster_area: Optional[List[Constraint]] = None\n\n    # labels\n    labels: Optional[List[Dict[str, str]]] = None\n    label_ids: Optional[List[int]] = None\n    label_keys: Optional[List[str]] = None\n    label_scores: Optional[List[Constraint]] = None\n\n    @staticmethod\n    def _supports_and():\n        return {\n            \"area\",\n            \"score\",\n            \"metadata\",\n        }\n\n    @staticmethod\n    def _supports_or():\n        return {\n            \"name\",\n            \"uid\",\n            \"task_type\",\n            \"labels\",\n            \"keys\",\n        }\n\n    def __post_init__(self):\n        def 
_unpack_metadata(metadata: Optional[dict]) -> Union[dict, None]:\n            if metadata is None:\n                return None\n            for k, vlist in metadata.items():\n                metadata[k] = [\n                    v if isinstance(v, Constraint) else Constraint(**v)\n                    for v in vlist\n                ]\n            return metadata\n\n        # unpack metadata\n        self.dataset_metadata = _unpack_metadata(self.dataset_metadata)\n        self.model_metadata = _unpack_metadata(self.model_metadata)\n        self.datum_metadata = _unpack_metadata(self.datum_metadata)\n        self.annotation_metadata = _unpack_metadata(self.annotation_metadata)\n\n        def _unpack_list(\n            vlist: Optional[list], object_type: type\n        ) -> Optional[list]:\n            def _handle_conversion(v, object_type):\n                if object_type is Constraint:\n                    return object_type(**v)\n                else:\n                    return object_type(v)\n\n            if vlist is None:\n                return None\n\n            return [\n                (\n                    v\n                    if isinstance(v, object_type)\n                    else _handle_conversion(v=v, object_type=object_type)\n                )\n                for v in vlist\n            ]\n\n        # unpack tasktypes\n        self.task_types = _unpack_list(self.task_types, TaskType)\n\n        # unpack area\n        self.bounding_box_area = _unpack_list(\n            self.bounding_box_area, Constraint\n        )\n        self.polygon_area = _unpack_list(self.polygon_area, Constraint)\n        self.raster_area = _unpack_list(self.raster_area, Constraint)\n\n        # scores\n        self.label_scores = _unpack_list(self.label_scores, Constraint)\n\n    @classmethod\n    def create(cls, expressions: List[Any]):\n        \"\"\"\n        Parses a list of `BinaryExpression` to create a `schemas.Filter` object.\n\n        Parameters\n        ----------\n        expressions: Sequence[Union[BinaryExpression, Sequence[BinaryExpression]]]\n            A list of (lists of) `BinaryExpressions' to parse into a `Filter` object.\n        \"\"\"\n\n        constraints = _parse_listed_expressions(expressions)\n\n        # create filter\n        filter_request = cls()\n\n        # metadata constraints\n        for attr in [\n            \"dataset_metadata\",\n            \"model_metadata\",\n            \"datum_metadata\",\n            \"annotation_metadata\",\n            \"bounding_box_area\",\n            \"polygon_area\",\n            \"raster_area\",\n            \"label_scores\",\n        ]:\n            if attr in constraints:\n                setattr(filter_request, attr, constraints[attr])\n\n        # boolean constraints\n        for attr in [\n            \"require_bounding_box\",\n            \"require_polygon\",\n            \"require_raster\",\n        ]:\n            if attr in constraints:\n                for constraint in constraints[attr]:\n                    if constraint.operator == \"exists\":\n                        setattr(filter_request, attr, True)\n                    elif constraint.operator == \"is_none\":\n                        setattr(filter_request, attr, False)\n\n        # equality constraints\n        for attr in [\n            \"dataset_names\",\n            \"model_names\",\n            \"datum_uids\",\n            \"task_types\",\n            \"label_keys\",\n        ]:\n            if attr in constraints:\n                setattr(\n               
     filter_request,\n                    attr,\n                    [expr.value for expr in constraints[attr]],\n                )\n\n        # edge case - label list\n        if \"labels\" in constraints:\n            setattr(\n                filter_request,\n                \"labels\",\n                [\n                    {label[\"key\"]: label[\"value\"]}\n                    for labels in constraints[\"labels\"]\n                    for label in labels.value\n                ],\n            )\n\n        return filter_request\n
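A sketch of constructing a Filter directly; __post_init__ converts any plain dictionaries in the metadata and constraint lists into Constraint objects:

>>> from valor.schemas.filters import Constraint, Filter\n>>> f = Filter(\n...     dataset_names=[\"dataset1\"],\n...     label_keys=[\"class\"],\n...     label_scores=[Constraint(value=0.5, operator=\">=\")],\n... )\n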
"},{"location":"client_api/Schemas/Filters/#valor.schemas.filters.Filter-functions","title":"Functions","text":""},{"location":"client_api/Schemas/Filters/#valor.schemas.filters.Filter.create","title":"valor.schemas.filters.Filter.create(expressions) classmethod","text":"

Parses a list of BinaryExpressions to create a schemas.Filter object.

Parameters:

Name Type Description Default expressions List[Any]

A list of (lists of) `BinaryExpressions` to parse into a `Filter` object.

required Source code in valor/schemas/filters.py
@classmethod\ndef create(cls, expressions: List[Any]):\n    \"\"\"\n    Parses a list of `BinaryExpression` to create a `schemas.Filter` object.\n\n    Parameters\n    ----------\n    expressions: Sequence[Union[BinaryExpression, Sequence[BinaryExpression]]]\n        A list of (lists of) `BinaryExpressions' to parse into a `Filter` object.\n    \"\"\"\n\n    constraints = _parse_listed_expressions(expressions)\n\n    # create filter\n    filter_request = cls()\n\n    # metadata constraints\n    for attr in [\n        \"dataset_metadata\",\n        \"model_metadata\",\n        \"datum_metadata\",\n        \"annotation_metadata\",\n        \"bounding_box_area\",\n        \"polygon_area\",\n        \"raster_area\",\n        \"label_scores\",\n    ]:\n        if attr in constraints:\n            setattr(filter_request, attr, constraints[attr])\n\n    # boolean constraints\n    for attr in [\n        \"require_bounding_box\",\n        \"require_polygon\",\n        \"require_raster\",\n    ]:\n        if attr in constraints:\n            for constraint in constraints[attr]:\n                if constraint.operator == \"exists\":\n                    setattr(filter_request, attr, True)\n                elif constraint.operator == \"is_none\":\n                    setattr(filter_request, attr, False)\n\n    # equality constraints\n    for attr in [\n        \"dataset_names\",\n        \"model_names\",\n        \"datum_uids\",\n        \"task_types\",\n        \"label_keys\",\n    ]:\n        if attr in constraints:\n            setattr(\n                filter_request,\n                attr,\n                [expr.value for expr in constraints[attr]],\n            )\n\n    # edge case - label list\n    if \"labels\" in constraints:\n        setattr(\n            filter_request,\n            \"labels\",\n            [\n                {label[\"key\"]: label[\"value\"]}\n                for labels in constraints[\"labels\"]\n                for label in labels.value\n            ],\n        )\n\n    return filter_request\n
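A sketch, assuming that symbolic attributes on valor objects (e.g. Datum.uid, Label.score) yield BinaryExpressions when compared against values:

>>> from valor import Datum, Label\n>>> from valor.schemas.filters import Filter\n>>> f = Filter.create([Datum.uid == \"uid1\", Label.score > 0.5])\n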
"},{"location":"client_api/Schemas/Evaluation/EvaluationParameters/","title":"EvaluationParameters","text":""},{"location":"client_api/Schemas/Evaluation/EvaluationParameters/#valor.schemas.evaluation-classes","title":"Classes","text":""},{"location":"client_api/Schemas/Evaluation/EvaluationParameters/#valor.schemas.evaluation.EvaluationParameters","title":"valor.schemas.evaluation.EvaluationParameters dataclass","text":"

Defines parameters for evaluation methods.

Attributes:

Name Type Description task_type TaskType

The task type of a given evaluation.

label_map Optional[List[List[List[str]]]]

Optional mapping of individual labels to a grouper label. Useful when you need to evaluate performance using labels that differ across datasets and models.

metrics_to_return (List[MetricType], optional)

The list of metrics to compute, store, and return to the user.

convert_annotations_to_type AnnotationType | None = None

The type to convert all annotations to.

iou_thresholds_to_compute (List[float], optional)

A list of floats describing which Intersection over Union (IoU) thresholds to use when calculating metrics (e.g., mAP).

iou_thresholds_to_return (List[float], optional)

A list of floats describing which Intersection over Union (IoU) thresholds to calculate a metric for. Must be a subset of iou_thresholds_to_compute.

recall_score_threshold float, default=0

The confidence score threshold above which a prediction is counted as a true positive when calculating Average Recall.

pr_curve_iou_threshold (float, optional)

The IoU threshold to use when calculating precision-recall curves for object detection tasks. Defaults to 0.5.

pr_curve_max_examples int

The maximum number of datum examples to store when calculating PR curves.

Source code in valor/schemas/evaluation.py
@dataclass\nclass EvaluationParameters:\n    \"\"\"\n    Defines parameters for evaluation methods.\n\n    Attributes\n    ----------\n    task_type: TaskType\n        The task type of a given evaluation.\n    label_map: Optional[List[List[List[str]]]]\n        Optional mapping of individual labels to a grouper label. Useful when you need to evaluate performance using labels that differ across datasets and models.\n    metrics_to_return: List[MetricType], optional\n        The list of metrics to compute, store, and return to the user.\n    convert_annotations_to_type: AnnotationType | None = None\n        The type to convert all annotations to.\n    iou_thresholds_to_compute: List[float], optional\n        A list of floats describing which Intersection over Union (IoU) thresholds to use when calculating metrics (e.g., mAP).\n    iou_thresholds_to_return: List[float], optional\n        A list of floats describing which Intersection over Union (IoU) thresholds to calculate a metric for. Must be a subset of `iou_thresholds_to_compute`.\n    recall_score_threshold: float, default=0\n        The confidence score threshold above which a prediction is counted as a true positive when calculating Average Recall.\n    pr_curve_iou_threshold: float, optional\n        The IoU threshold to use when calculating precision-recall curves for object detection tasks. Defaults to 0.5.\n    pr_curve_max_examples: int\n        The maximum number of datum examples to store when calculating PR curves.\n    \"\"\"\n\n    task_type: TaskType\n    label_map: Optional[List[List[List[str]]]] = None\n    metrics_to_return: Optional[List[MetricType]] = None\n\n    convert_annotations_to_type: Optional[AnnotationType] = None\n    iou_thresholds_to_compute: Optional[List[float]] = None\n    iou_thresholds_to_return: Optional[List[float]] = None\n    recall_score_threshold: float = 0\n    pr_curve_iou_threshold: float = 0.5\n    pr_curve_max_examples: int = 1\n
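A sketch of parameters for an object-detection evaluation, assuming TaskType is importable from valor.enums:

>>> from valor.enums import TaskType\n>>> from valor.schemas.evaluation import EvaluationParameters\n>>> params = EvaluationParameters(\n...     task_type=TaskType.OBJECT_DETECTION,\n...     iou_thresholds_to_compute=[0.5, 0.75],\n...     iou_thresholds_to_return=[0.5, 0.75],\n... )\n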
"},{"location":"client_api/Schemas/Evaluation/EvaluationParameters/#valor.schemas.evaluation.EvaluationRequest","title":"valor.schemas.evaluation.EvaluationRequest dataclass","text":"

An evaluation request.

Defines important attributes of the API's EvaluationRequest.

Attributes:

Name Type Description dataset_names List[str]

The list of datasets we want to evaluate by name.

model_names List[str]

The list of models we want to evaluate by name.

filters Filter

The filter object used to define what the model(s) is evaluating against.

parameters EvaluationParameters

Any parameters that are used to modify an evaluation method.

Source code in valor/schemas/evaluation.py
@dataclass\nclass EvaluationRequest:\n    \"\"\"\n    An evaluation request.\n\n    Defines important attributes of the API's `EvaluationRequest`.\n\n    Attributes\n    ----------\n    dataset_names : List[str]\n        The list of datasets we want to evaluate by name.\n    model_names : List[str]\n        The list of models we want to evaluate by name.\n    filters : schemas.Filter\n        The filter object used to define what the model(s) is evaluating against.\n    parameters : EvaluationParameters\n        Any parameters that are used to modify an evaluation method.\n    \"\"\"\n\n    dataset_names: Union[str, List[str]]\n    model_names: Union[str, List[str]]\n    parameters: EvaluationParameters\n    filters: Optional[Filter] = field(default=None)\n\n    def __post_init__(self):\n        if isinstance(self.filters, dict):\n            self.filters = Filter(**self.filters)\n        if isinstance(self.parameters, dict):\n            self.parameters = EvaluationParameters(**self.parameters)\n
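A sketch of a complete request, reusing the TaskType import from the EvaluationParameters example above; __post_init__ also accepts plain dictionaries for filters and parameters and converts them:

>>> from valor.schemas.evaluation import EvaluationRequest\n>>> request = EvaluationRequest(\n...     dataset_names=[\"dataset1\"],\n...     model_names=[\"model1\"],\n...     parameters={\"task_type\": TaskType.OBJECT_DETECTION},\n... )\n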
"},{"location":"client_api/Schemas/Spatial/Box/","title":"Box","text":"

Bases: Polygon

A Box is a polygon that is constrained to 4 unique points.

Note that this does not need to be axis-aligned.

Parameters:

Name Type Description Default value List[List[Tuple[float, float]]]

A polygon value representing a box.

required

Attributes:

Name Type Description area polygon boundary holes xmin xmax ymin ymax

Examples:

>>> Box([[(0,0), (0,1), (1,1), (1,0), (0,0)]])\n

Create a Box using extrema.

>>> Box.from_extrema(\n...     xmin=0, xmax=1,\n...     ymin=0, ymax=1,\n... )\n
Source code in valor/schemas/symbolic/types.py
class Box(Polygon):\n    \"\"\"\n    A Box is a polygon that is constrained to 4 unique points.\n\n    Note that this does not need to be axis-aligned.\n\n    Parameters\n    ----------\n    value : List[List[Tuple[float, float]]], optional\n        An polygon value representing a box.\n\n    Attributes\n    ----------\n    area\n    polygon\n    boundary\n    holes\n    xmin\n    xmax\n    ymin\n    ymax\n\n    Examples\n    --------\n    >>> Box([[(0,0), (0,1), (1,1), (1,0), (0,0)]])\n\n    Create a Box using extrema.\n    >>> Box.from_extrema(\n    ...     xmin=0, xmax=1,\n    ...     ymin=0, ymax=1,\n    ... )\n    \"\"\"\n\n    def __init__(\n        self, value: typing.List[typing.List[typing.Tuple[float, float]]]\n    ):\n        super().__init__(value=value)\n\n    @classmethod\n    def __validate__(cls, value: typing.Any):\n        \"\"\"\n        Validates typing.\n\n        Parameters\n        ----------\n        value : typing.Any\n            The value to validate.\n\n        Raises\n        ------\n        TypeError\n            If the value type is not supported.\n        \"\"\"\n        Polygon.__validate__(value)\n        if len(value) != 1:\n            raise ValueError(\"Box should not contain holes.\")\n        elif len(value[0]) != 5:\n            raise ValueError(\"Box should consist of four unique points.\")\n\n    @classmethod\n    def decode_value(\n        cls,\n        value: typing.Optional[typing.List[typing.List[typing.List[float]]]],\n    ):\n        \"\"\"Decode object from JSON compatible dictionary.\"\"\"\n        if value is None:\n            return None\n        return super().decode_value(value)\n\n    @classmethod\n    def from_extrema(\n        cls,\n        xmin: float,\n        xmax: float,\n        ymin: float,\n        ymax: float,\n    ):\n        \"\"\"\n        Create a Box from extrema values.\n\n        Parameters\n        ----------\n        xmin : float\n            Minimum x-coordinate of the bounding box.\n        xmax : float\n            Maximum x-coordinate of the bounding box.\n        ymin : float\n            Minimum y-coordinate of the bounding box.\n        ymax : float\n            Maximum y-coordinate of the bounding box.\n\n        Returns\n        -------\n        Box\n            A Box created from the provided extrema values.\n        \"\"\"\n        points = [\n            [\n                (xmin, ymin),\n                (xmax, ymin),\n                (xmax, ymax),\n                (xmin, ymax),\n                (xmin, ymin),\n            ]\n        ]\n        return cls(value=points)\n\n    def to_polygon(self) -> Polygon:\n        \"\"\"\n        Converts box to a generic polygon.\n\n        Returns\n        -------\n        Polygon\n            The box as a Polygon.\n        \"\"\"\n        return Polygon(self.get_value())\n
"},{"location":"client_api/Schemas/Spatial/Box/#valor.schemas.Box-functions","title":"Functions","text":""},{"location":"client_api/Schemas/Spatial/Box/#valor.schemas.Box.__validate__","title":"valor.schemas.Box.__validate__(value) classmethod","text":"

Validates typing.

Parameters:

Name Type Description Default value Any

The value to validate.

required

Raises:

Type Description TypeError

If the value type is not supported.

ValueError

If the value contains holes or does not consist of four unique points.

Source code in valor/schemas/symbolic/types.py
@classmethod\ndef __validate__(cls, value: typing.Any):\n    \"\"\"\n    Validates typing.\n\n    Parameters\n    ----------\n    value : typing.Any\n        The value to validate.\n\n    Raises\n    ------\n    TypeError\n        If the value type is not supported.\n    ValueError\n        If the value contains holes or does not consist of four unique points.\n    \"\"\"\n    Polygon.__validate__(value)\n    if len(value) != 1:\n        raise ValueError(\"Box should not contain holes.\")\n    elif len(value[0]) != 5:\n        raise ValueError(\"Box should consist of four unique points.\")\n
"},{"location":"client_api/Schemas/Spatial/Box/#valor.schemas.Box.decode_value","title":"valor.schemas.Box.decode_value(value) classmethod","text":"

Decode object from JSON compatible dictionary.

Source code in valor/schemas/symbolic/types.py
@classmethod\ndef decode_value(\n    cls,\n    value: typing.Optional[typing.List[typing.List[typing.List[float]]]],\n):\n    \"\"\"Decode object from JSON compatible dictionary.\"\"\"\n    if value is None:\n        return None\n    return super().decode_value(value)\n
"},{"location":"client_api/Schemas/Spatial/Box/#valor.schemas.Box.from_extrema","title":"valor.schemas.Box.from_extrema(xmin, xmax, ymin, ymax) classmethod","text":"

Create a Box from extrema values.

Parameters:

Name Type Description Default xmin float

Minimum x-coordinate of the bounding box.

required xmax float

Maximum x-coordinate of the bounding box.

required ymin float

Minimum y-coordinate of the bounding box.

required ymax float

Maximum y-coordinate of the bounding box.

required

Returns:

Type Description Box

A Box created from the provided extrema values.

Source code in valor/schemas/symbolic/types.py
@classmethod\ndef from_extrema(\n    cls,\n    xmin: float,\n    xmax: float,\n    ymin: float,\n    ymax: float,\n):\n    \"\"\"\n    Create a Box from extrema values.\n\n    Parameters\n    ----------\n    xmin : float\n        Minimum x-coordinate of the bounding box.\n    xmax : float\n        Maximum x-coordinate of the bounding box.\n    ymin : float\n        Minimum y-coordinate of the bounding box.\n    ymax : float\n        Maximum y-coordinate of the bounding box.\n\n    Returns\n    -------\n    Box\n        A Box created from the provided extrema values.\n    \"\"\"\n    points = [\n        [\n            (xmin, ymin),\n            (xmax, ymin),\n            (xmax, ymax),\n            (xmin, ymax),\n            (xmin, ymin),\n        ]\n    ]\n    return cls(value=points)\n
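For example, the following builds the closed ring [(0, 0), (2, 0), (2, 1), (0, 1), (0, 0)]:

>>> box = Box.from_extrema(xmin=0, xmax=2, ymin=0, ymax=1)\n>>> box.to_polygon().boundary\n[(0, 0), (2, 0), (2, 1), (0, 1), (0, 0)]\n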
"},{"location":"client_api/Schemas/Spatial/Box/#valor.schemas.Box.to_polygon","title":"valor.schemas.Box.to_polygon()","text":"

Converts box to a generic polygon.

Returns:

Type Description Polygon

The box as a Polygon.

Source code in valor/schemas/symbolic/types.py
def to_polygon(self) -> Polygon:\n    \"\"\"\n    Converts box to a generic polygon.\n\n    Returns\n    -------\n    Polygon\n        The box as a Polygon.\n    \"\"\"\n    return Polygon(self.get_value())\n
"},{"location":"client_api/Schemas/Spatial/LineString/","title":"LineString","text":"

Bases: Spatial

Represents a line.

Follows the GeoJSON specification (RFC 7946).

Parameters:

Name Type Description Default value List[Tuple[float, float]]

A linestring.

required

Examples:

Create a line.

>>> LineString([(0,0), (0,1), (1,1)])\n
Source code in valor/schemas/symbolic/types.py
class LineString(Spatial):\n    \"\"\"\n    Represents a line.\n\n    Follows the GeoJSON specification (RFC 7946).\n\n    Parameters\n    ----------\n    value : List[Tuple[float, float]], optional\n        A linestring.\n\n    Examples\n    --------\n    Create a line.\n    >>> LineString([(0,0), (0,1), (1,1)])\n    \"\"\"\n\n    def __init__(self, value: typing.List[typing.Tuple[float, float]]):\n        super().__init__(value=value)\n\n    @classmethod\n    def __validate__(cls, value: typing.Any):\n        MultiPoint.__validate__(value)\n        if len(value) < 2:\n            raise ValueError(\n                \"At least two points are required to make a line.\"\n            )\n\n    @classmethod\n    def decode_value(\n        cls, value: typing.Optional[typing.List[typing.List[float]]]\n    ):\n        \"\"\"Decode object from JSON compatible dictionary.\"\"\"\n        if value is None:\n            return None\n        return cls([(point[0], point[1]) for point in value])\n
"},{"location":"client_api/Schemas/Spatial/LineString/#valor.schemas.LineString-functions","title":"Functions","text":""},{"location":"client_api/Schemas/Spatial/LineString/#valor.schemas.LineString.decode_value","title":"valor.schemas.LineString.decode_value(value) classmethod","text":"

Decode object from JSON compatible dictionary.

Source code in valor/schemas/symbolic/types.py
@classmethod\ndef decode_value(\n    cls, value: typing.Optional[typing.List[typing.List[float]]]\n):\n    \"\"\"Decode object from JSON compatible dictionary.\"\"\"\n    if value is None:\n        return None\n    return cls([(point[0], point[1]) for point in value])\n
"},{"location":"client_api/Schemas/Spatial/MultiLineString/","title":"MultiLineString","text":"

Bases: Spatial

Represents a list of lines.

Follows the GeoJSON specification (RFC 7946).

Parameters:

Name Type Description Default value List[List[Tuple[float, float]]]

A multilinestring.

required

Examples:

Create a single line.

>>> MultiLineString([[(0,0), (0,1), (1,1), (0,0)]])\n

Create 3 lines.

>>> MultiLineString(\n...     [\n...         [(0,0), (0,1), (1,1)],\n...         [(0.1, 0.1), (0.1, 0.2), (0.2, 0.2)],\n...         [(0.6, 0.6), (0.6, 0.7), (0.7, 0.7)],\n...     ]\n... )\n
Source code in valor/schemas/symbolic/types.py
class MultiLineString(Spatial):\n    \"\"\"\n    Represents a list of lines.\n\n    Follows the GeoJSON specification (RFC 7946).\n\n    Parameters\n    ----------\n    value : List[List[Tuple[float, float]]], optional\n        A multilinestring.\n\n    Examples\n    --------\n    Create a single line.\n    >>> MultiLineString([[(0,0), (0,1), (1,1), (0,0)]])\n\n    Create 3 lines.\n    >>> MultiLineString(\n    ...     [\n    ...         [(0,0), (0,1), (1,1)],\n    ...         [(0.1, 0.1), (0.1, 0.2), (0.2, 0.2)],\n    ...         [(0.6, 0.6), (0.6, 0.7), (0.7, 0.7)],\n    ...     ]\n    ... )\n    \"\"\"\n\n    def __init__(\n        self,\n        value: typing.List[typing.List[typing.Tuple[float, float]]],\n    ):\n        super().__init__(value=value)\n\n    @classmethod\n    def __validate__(cls, value: typing.Any):\n        if not isinstance(value, list):\n            raise TypeError(\n                f\"Expected type 'List[List[Tuple[float, float]]]' received type '{type(value).__name__}'\"\n            )\n        for line in value:\n            LineString.__validate__(line)\n\n    @classmethod\n    def decode_value(\n        cls,\n        value: typing.Optional[typing.List[typing.List[typing.List[float]]]],\n    ):\n        \"\"\"Decode object from JSON compatible dictionary.\"\"\"\n        if value is None:\n            return None\n        return cls(\n            [[(point[0], point[1]) for point in line] for line in value]\n        )\n
"},{"location":"client_api/Schemas/Spatial/MultiLineString/#valor.schemas.MultiLineString-functions","title":"Functions","text":""},{"location":"client_api/Schemas/Spatial/MultiLineString/#valor.schemas.MultiLineString.decode_value","title":"valor.schemas.MultiLineString.decode_value(value) classmethod","text":"

Decode object from JSON compatible dictionary.

Source code in valor/schemas/symbolic/types.py
@classmethod\ndef decode_value(\n    cls,\n    value: typing.Optional[typing.List[typing.List[typing.List[float]]]],\n):\n    \"\"\"Decode object from JSON compatible dictionary.\"\"\"\n    if value is None:\n        return None\n    return cls(\n        [[(point[0], point[1]) for point in line] for line in value]\n    )\n
"},{"location":"client_api/Schemas/Spatial/MultiPoint/","title":"MultiPoint","text":"

Bases: Spatial

Represents a list of points.

Follows the GeoJSON specification (RFC 7946).

Parameters:

Name Type Description Default value List[Tuple[float, float]]

A multipoint.

required

Examples:

>>> MultiPoint([(0,0), (0,1), (1,1)])\n
Source code in valor/schemas/symbolic/types.py
class MultiPoint(Spatial):\n    \"\"\"\n    Represents a list of points.\n\n    Follows the GeoJSON specification (RFC 7946).\n\n    Parameters\n    ----------\n    value : List[Tuple[float, float]], optional\n        A multipoint.\n\n    Examples\n    --------\n    >>> MultiPoint([(0,0), (0,1), (1,1)])\n    \"\"\"\n\n    def __init__(\n        self,\n        value: typing.List[typing.Tuple[float, float]],\n    ):\n        super().__init__(value=value)\n\n    @classmethod\n    def __validate__(cls, value: typing.Any):\n        if not isinstance(value, list):\n            raise TypeError(\n                f\"Expected 'typing.List[typing.Tuple[float, float]]' received type '{type(value).__name__}'\"\n            )\n        for point in value:\n            Point.__validate__(point)\n\n    @classmethod\n    def decode_value(\n        cls, value: typing.Optional[typing.List[typing.List[float]]]\n    ):\n        \"\"\"Decode object from JSON compatible dictionary.\"\"\"\n        if value is None:\n            return None\n        return cls([(point[0], point[1]) for point in value])\n
"},{"location":"client_api/Schemas/Spatial/MultiPoint/#valor.schemas.MultiPoint-functions","title":"Functions","text":""},{"location":"client_api/Schemas/Spatial/MultiPoint/#valor.schemas.MultiPoint.decode_value","title":"valor.schemas.MultiPoint.decode_value(value) classmethod","text":"

Decode object from JSON compatible dictionary.

Source code in valor/schemas/symbolic/types.py
@classmethod\ndef decode_value(\n    cls, value: typing.Optional[typing.List[typing.List[float]]]\n):\n    \"\"\"Decode object from JSON compatible dictionary.\"\"\"\n    if value is None:\n        return None\n    return cls([(point[0], point[1]) for point in value])\n
"},{"location":"client_api/Schemas/Spatial/Point/","title":"Point","text":"

Bases: Spatial, Equatable

Represents a point in 2D space.

Follows the GeoJSON specification (RFC 7946).

Parameters:

Name Type Description Default value Tuple[float, float]

A point.

required

Examples:

>>> Point((1,2))\n
Source code in valor/schemas/symbolic/types.py
class Point(Spatial, Equatable):\n    \"\"\"\n    Represents a point in 2D space.\n\n    Follows the GeoJSON specification (RFC 7946).\n\n    Parameters\n    ----------\n    value : Tuple[float, float], optional\n        A point.\n\n    Examples\n    --------\n    >>> Point((1,2))\n    \"\"\"\n\n    def __init__(\n        self,\n        value: typing.Tuple[float, float],\n    ):\n        super().__init__(value=value)\n\n    @classmethod\n    def __validate__(cls, value: typing.Any):\n        if not isinstance(value, tuple):\n            raise TypeError(\n                f\"Expected type 'typing.Tuple[float, float]' received type '{type(value).__name__}'\"\n            )\n        elif len(value) != 2:\n            raise ValueError(\"\")\n        for item in value:\n            if not isinstance(item, (int, float, np.floating)):\n                raise TypeError(\n                    f\"Expected type '{float.__name__}' received type '{type(item).__name__}'\"\n                )\n\n    @classmethod\n    def decode_value(cls, value: typing.Optional[typing.List[float]]):\n        \"\"\"Decode object from JSON compatible dictionary.\"\"\"\n        if value is None:\n            return None\n        return cls((value[0], value[1]))\n\n    def encode_value(self) -> typing.Any:\n        \"\"\"Encode object to JSON compatible dictionary.\"\"\"\n        value = self.get_value()\n        if value is None:\n            return None\n        return (float(value[0]), float(value[1]))\n\n    def tuple(self):\n        return self.get_value()\n\n    def resize(\n        self,\n        og_img_h=10,\n        og_img_w=10,\n        new_img_h=100,\n        new_img_w=100,\n    ):\n        value = self.get_value()\n        h_ratio = new_img_h / og_img_h\n        w_ratio = new_img_w / og_img_w\n        return Point((value[0] * h_ratio, value[1] * w_ratio))\n\n    @property\n    def x(self):\n        return self.get_value()[0]\n\n    @property\n    def y(self):\n        return self.get_value()[1]\n
"},{"location":"client_api/Schemas/Spatial/Point/#valor.schemas.Point-functions","title":"Functions","text":""},{"location":"client_api/Schemas/Spatial/Point/#valor.schemas.Point.decode_value","title":"valor.schemas.Point.decode_value(value) classmethod","text":"

Decode object from JSON compatible dictionary.

Source code in valor/schemas/symbolic/types.py
@classmethod\ndef decode_value(cls, value: typing.Optional[typing.List[float]]):\n    \"\"\"Decode object from JSON compatible dictionary.\"\"\"\n    if value is None:\n        return None\n    return cls((value[0], value[1]))\n
"},{"location":"client_api/Schemas/Spatial/Point/#valor.schemas.Point.encode_value","title":"valor.schemas.Point.encode_value()","text":"

Encode object to JSON compatible dictionary.

Source code in valor/schemas/symbolic/types.py
def encode_value(self) -> typing.Any:\n    \"\"\"Encode object to JSON compatible dictionary.\"\"\"\n    value = self.get_value()\n    if value is None:\n        return None\n    return (float(value[0]), float(value[1]))\n
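A round-trip sketch: encoding casts the coordinates to floats, and decode_value rebuilds a Point from the JSON list form:

>>> from valor.schemas import Point\n>>> Point((1, 2)).encode_value()\n(1.0, 2.0)\n>>> Point.decode_value([1.0, 2.0]).tuple()\n(1.0, 2.0)\n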
"},{"location":"client_api/Schemas/Spatial/Polygon/","title":"Polygon","text":"

Bases: Spatial

Represents a polygon with a boundary and optional holes.

Follows the GeoJSON specification (RFC 7946).

Parameters:

Name Type Description Default value List[List[Tuple[float, float]]]

A polygon.

required

Attributes:

Name Type Description area Float boundary List[Tuple[float, float]] holes List[List[Tuple[float, float]]] xmin float xmax float ymin float ymax float

Examples:

Create a polygon without any holes.

>>> Polygon([[(0,0), (0,1), (1,1), (0,0)]])\n

Create a polygon with 2 holes.

>>> Polygon(\n...     [\n...         [(0,0), (0,1), (1,1), (0,0)],\n...         [(0.1, 0.1), (0.1, 0.2), (0.2, 0.2), (0.1, 0.1)],\n...         [(0.6, 0.6), (0.6, 0.7), (0.7, 0.7), (0.6, 0.6)],\n...     ]\n... )\n
Source code in valor/schemas/symbolic/types.py
class Polygon(Spatial):\n    \"\"\"\n    Represents a polygon with a boundary and optional holes.\n\n    Follows the GeoJSON specification (RFC 7946).\n\n    Parameters\n    ----------\n    value : List[List[Tuple[float, float]]], optional\n        A polygon.\n\n    Attributes\n    ----------\n    area\n    boundary\n    holes\n    xmin\n    xmax\n    ymin\n    ymax\n\n    Examples\n    --------\n    Create a polygon without any holes.\n    >>> Polygon([[(0,0), (0,1), (1,1), (0,0)]])\n\n    Create a polygon with 2 holes.\n    >>> Polygon(\n    ...     [\n    ...         [(0,0), (0,1), (1,1), (0,0)],\n    ...         [(0.1, 0.1), (0.1, 0.2), (0.2, 0.2), (0.1, 0.1)],\n    ...         [(0.6, 0.6), (0.6, 0.7), (0.7, 0.7), (0.6, 0.6)],\n    ...     ]\n    ... )\n    \"\"\"\n\n    def __init__(\n        self, value: typing.List[typing.List[typing.Tuple[float, float]]]\n    ):\n        super().__init__(value=value)\n\n    @classmethod\n    def __validate__(cls, value: typing.Any):\n        MultiLineString.__validate__(value)\n        for line in value:\n            if not (len(line) >= 4 and line[0] == line[-1]):\n                raise ValueError(\n                    \"Polygons are defined by at least 4 points with the first point being repeated at the end.\"\n                )\n\n    @classmethod\n    def decode_value(\n        cls,\n        value: typing.Optional[typing.List[typing.List[typing.List[float]]]],\n    ):\n        \"\"\"Decode object from JSON compatible dictionary.\"\"\"\n        if value is None:\n            return None\n        return cls(\n            [\n                [(point[0], point[1]) for point in subpolygon]\n                for subpolygon in value\n            ]\n        )\n\n    @property\n    def area(self) -> Float:\n        \"\"\"\n        Symbolic representation of area.\n        \"\"\"\n        if not isinstance(self._value, Symbol):\n            raise ValueError\n        return Float.symbolic(\n            owner=self._value._owner,\n            name=self._value._name,\n            key=self._value._key,\n            attribute=\"area\",\n        )\n\n    @property\n    def boundary(self) -> typing.List[typing.Tuple[float, float]]:\n        \"\"\"\n        The boundary of the polygon.\n\n        Returns\n        -------\n        List[Tuple(float, float)]\n            A list of points.\n        \"\"\"\n        value = self.get_value()\n        if value is None:\n            raise ValueError(\"Polygon is 'None'\")\n        return value[0]\n\n    @property\n    def holes(self) -> typing.List[typing.List[typing.Tuple[float, float]]]:\n        \"\"\"\n        typing.Any holes in the polygon.\n\n        Returns\n        -------\n        List[List[Tuple(float, float)]]\n            A list of holes.\n        \"\"\"\n        value = self.get_value()\n        if value is None:\n            raise ValueError(\"Polygon is 'None'\")\n        return value[1:]\n\n    @property\n    def xmin(self) -> float:\n        \"\"\"\n        Minimum x-value.\n\n        Returns\n        -------\n        float\n        \"\"\"\n        return min([p[0] for p in self.boundary])\n\n    @property\n    def xmax(self) -> float:\n        \"\"\"\n        Maximum x-value.\n\n        Returns\n        -------\n        float\n        \"\"\"\n        return max([p[0] for p in self.boundary])\n\n    @property\n    def ymin(self) -> float:\n        \"\"\"\n        Minimum y-value.\n\n        Returns\n        -------\n        float\n        \"\"\"\n        return min([p[1] for p in self.boundary])\n\n    
@property\n    def ymax(self) -> float:\n        \"\"\"\n        Maximum y-value.\n\n        Returns\n        -------\n        float\n        \"\"\"\n        return max([p[1] for p in self.boundary])\n
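The extrema properties are computed from the boundary ring only (holes are ignored); a quick sketch:

>>> from valor.schemas import Polygon\n>>> polygon = Polygon([[(0, 0), (0, 2), (1, 2), (1, 0), (0, 0)]])\n>>> (polygon.xmin, polygon.xmax, polygon.ymin, polygon.ymax)\n(0, 1, 0, 2)\n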
"},{"location":"client_api/Schemas/Spatial/Polygon/#valor.schemas.Polygon-attributes","title":"Attributes","text":""},{"location":"client_api/Schemas/Spatial/Polygon/#valor.schemas.Polygon.area","title":"valor.schemas.Polygon.area: Float property","text":"

Symbolic representation of area.

"},{"location":"client_api/Schemas/Spatial/Polygon/#valor.schemas.Polygon.boundary","title":"valor.schemas.Polygon.boundary: typing.List[typing.Tuple[float, float]] property","text":"

The boundary of the polygon.

Returns:

Type Description List[Tuple(float, float)]

A list of points.

"},{"location":"client_api/Schemas/Spatial/Polygon/#valor.schemas.Polygon.holes","title":"valor.schemas.Polygon.holes: typing.List[typing.List[typing.Tuple[float, float]]] property","text":"

Any holes in the polygon.

Returns:

Type Description List[List[Tuple(float, float)]]

A list of holes.

"},{"location":"client_api/Schemas/Spatial/Polygon/#valor.schemas.Polygon.xmax","title":"valor.schemas.Polygon.xmax: float property","text":"

Maximum x-value.

Returns:

Type Description float"},{"location":"client_api/Schemas/Spatial/Polygon/#valor.schemas.Polygon.xmin","title":"valor.schemas.Polygon.xmin: float property","text":"

Minimum x-value.

Returns:

Type Description float"},{"location":"client_api/Schemas/Spatial/Polygon/#valor.schemas.Polygon.ymax","title":"valor.schemas.Polygon.ymax: float property","text":"

Maximum y-value.

Returns:

Type Description float"},{"location":"client_api/Schemas/Spatial/Polygon/#valor.schemas.Polygon.ymin","title":"valor.schemas.Polygon.ymin: float property","text":"

Minimum y-value.

Returns:

Type Description float"},{"location":"client_api/Schemas/Spatial/Polygon/#valor.schemas.Polygon-functions","title":"Functions","text":""},{"location":"client_api/Schemas/Spatial/Polygon/#valor.schemas.Polygon.decode_value","title":"valor.schemas.Polygon.decode_value(value) classmethod","text":"

Decode object from JSON compatible dictionary.

Source code in valor/schemas/symbolic/types.py
@classmethod\ndef decode_value(\n    cls,\n    value: typing.Optional[typing.List[typing.List[typing.List[float]]]],\n):\n    \"\"\"Decode object from JSON compatible dictionary.\"\"\"\n    if value is None:\n        return None\n    return cls(\n        [\n            [(point[0], point[1]) for point in subpolygon]\n            for subpolygon in value\n        ]\n    )\n
"},{"location":"client_api/Schemas/Spatial/Raster/","title":"Raster","text":"

Bases: Spatial

Represents a binary mask.

Parameters:

Name Type Description Default value Dict[str, Union[ndarray, str, None]]

A raster value.

required

Attributes:

Name Type Description area Float array ndarray geometry Union[Box, Polygon, MultiPolygon] height int width int

Raises:

Type Description TypeError

If encoding is not a string.

Examples:

Generate a random mask.

>>> import numpy.random\n>>> height = 640\n>>> width = 480\n>>> array = numpy.random.rand(height, width)\n

Convert to binary mask.

>>> mask = (array > 0.5)\n

Create Raster.

>>> Raster.from_numpy(mask)\n
Source code in valor/schemas/symbolic/types.py
class Raster(Spatial):\n    \"\"\"\n    Represents a binary mask.\n\n    Parameters\n    ----------\n    value : Dict[str, typing.Union[np.ndarray, str, None]], optional\n        An raster value.\n\n    Attributes\n    ----------\n    area\n    array\n    geometry\n    height\n    width\n\n    Raises\n    ------\n    TypeError\n        If `encoding` is not a string.\n\n    Examples\n    --------\n    Generate a random mask.\n    >>> import numpy.random\n    >>> height = 640\n    >>> width = 480\n    >>> array = numpy.random.rand(height, width)\n\n    Convert to binary mask.\n    >>> mask = (array > 0.5)\n\n    Create Raster.\n    >>> Raster.from_numpy(mask)\n    \"\"\"\n\n    def __init__(\n        self,\n        value: typing.Dict[\n            str, typing.Union[np.ndarray, Box, Polygon, MultiPolygon, None]\n        ],\n    ):\n        \"\"\"\n        Initialize and instance of a raster.\n\n        Parameters\n        ----------\n        value : Dict[str, Union[np.ndarray, Box, Polygon, MultiPolygon, None]]\n            The raster in dictionary format {\"mask\": <np.ndarray>, \"geometry\": <geometry | None>}.\n        \"\"\"\n        super().__init__(value)\n\n    @classmethod\n    def __validate__(cls, value: typing.Any):\n        \"\"\"\n        Validates typing.\n\n        Parameters\n        ----------\n        value : Any\n            The value to validate.\n\n        Raises\n        ------\n        TypeError\n            If the value type is not supported.\n        \"\"\"\n        if not isinstance(value, dict):\n            raise TypeError(\n                \"Raster should contain a dictionary describing a mask and optionally a geometry.\"\n            )\n        elif set(value.keys()) != {\"mask\", \"geometry\"}:\n            raise ValueError(\n                \"Raster should be described by a dictionary with keys 'mask' and 'geometry'\"\n            )\n        elif not isinstance(value[\"mask\"], np.ndarray):\n            raise TypeError(\n                f\"Expected mask to have type '{np.ndarray}' receieved type '{value['mask']}'\"\n            )\n        elif len(value[\"mask\"].shape) != 2:\n            raise ValueError(\"raster only supports 2d arrays\")\n        elif value[\"mask\"].dtype != bool:\n            raise ValueError(\n                f\"Expecting a binary mask (i.e. 
of dtype bool) but got dtype {value['mask'].dtype}\"\n            )\n        elif (\n            value[\"geometry\"] is not None\n            and not Polygon.supports(value[\"geometry\"])\n            and not MultiPolygon.supports(value[\"geometry\"])\n        ):\n            raise TypeError(\n                \"Expected geometry to conform to either Polygon or MultiPolygon or be 'None'\"\n            )\n\n    def encode_value(self) -> typing.Any:\n        \"\"\"Encode object to JSON compatible dictionary.\"\"\"\n        value = self.get_value()\n        if value is None:\n            return None\n        f = io.BytesIO()\n        PIL.Image.fromarray(value[\"mask\"]).save(f, format=\"PNG\")\n        f.seek(0)\n        mask_bytes = f.read()\n        f.close()\n        return {\n            \"mask\": b64encode(mask_bytes).decode(),\n            \"geometry\": value[\"geometry\"],\n        }\n\n    @classmethod\n    def decode_value(cls, value: typing.Any):\n        \"\"\"Decode object from JSON compatible dictionary.\"\"\"\n        if value is None:\n            return None\n        if not (\n            isinstance(value, dict)\n            and set(value.keys()) == {\"mask\", \"geometry\"}\n        ):\n            raise ValueError(\n                f\"Improperly formatted raster encoding. Received '{value}'\"\n            )\n        mask_bytes = b64decode(value[\"mask\"])\n        with io.BytesIO(mask_bytes) as f:\n            img = PIL.Image.open(f)\n            value = {\n                \"mask\": np.array(img),\n                \"geometry\": value[\"geometry\"],\n            }\n        return cls(value=value)\n\n    @classmethod\n    def from_numpy(cls, mask: np.ndarray):\n        \"\"\"\n        Create a Raster object from a NumPy array.\n\n        Parameters\n        ----------\n        mask : np.ndarray\n            The 2D binary array representing the mask.\n\n        Returns\n        -------\n        Raster\n\n        Raises\n        ------\n        ValueError\n            If the input array is not 2D or not of dtype bool.\n        \"\"\"\n        return cls(value={\"mask\": mask, \"geometry\": None})\n\n    @classmethod\n    def from_geometry(\n        cls,\n        geometry: typing.Union[Box, Polygon, MultiPolygon],\n        height: int,\n        width: int,\n    ):\n        \"\"\"\n        Create a Raster object from a geometric mask.\n\n        Parameters\n        ----------\n        geometry : Union[Box, Polygon, MultiPolygon]\n            Defines the bitmask as a geometry. 
Overrides any existing mask.\n        height : int\n            The intended height of the binary mask.\n        width : int\n            The intended width of the binary mask.\n\n        Returns\n        -------\n        Raster\n        \"\"\"\n        bitmask = np.full((int(height), int(width)), False)\n        return cls(value={\"mask\": bitmask, \"geometry\": geometry.get_value()})\n\n    @property\n    def area(self) -> Float:\n        \"\"\"\n        Symbolic representation of area.\n        \"\"\"\n        if not isinstance(self._value, Symbol):\n            raise ValueError\n        return Float.symbolic(\n            owner=self._value._owner,\n            name=self._value._name,\n            key=self._value._key,\n            attribute=\"area\",\n        )\n\n    @property\n    def array(self) -> np.ndarray:\n        \"\"\"\n        The bitmask as a numpy array.\n\n        Returns\n        -------\n        Optional[np.ndarray]\n            A 2D binary array representing the mask if it exists.\n        \"\"\"\n        value = self.get_value()\n        if value[\"geometry\"] is not None:\n            warnings.warn(\n                \"Raster array does not contain bitmask as this is a geometry-defined raster.\",\n                RuntimeWarning,\n            )\n        return value[\"mask\"]\n\n    @property\n    def geometry(self) -> typing.Union[Box, Polygon, MultiPolygon]:\n        \"\"\"\n        The geometric mask if it exists.\n\n        Returns\n        -------\n        Box | Polygon | MultiPolygon | None\n            The geometry if it exists.\n        \"\"\"\n        return self.get_value()[\"geometry\"]\n\n    @property\n    def height(self) -> int:\n        \"\"\"Returns the height of the raster if it exists.\"\"\"\n        return self.array.shape[0]\n\n    @property\n    def width(self) -> int:\n        \"\"\"Returns the width of the raster if it exists.\"\"\"\n        return self.array.shape[1]\n
"},{"location":"client_api/Schemas/Spatial/Raster/#valor.schemas.Raster-attributes","title":"Attributes","text":""},{"location":"client_api/Schemas/Spatial/Raster/#valor.schemas.Raster.area","title":"valor.schemas.Raster.area: Float property","text":"

Symbolic representation of area.

"},{"location":"client_api/Schemas/Spatial/Raster/#valor.schemas.Raster.array","title":"valor.schemas.Raster.array: np.ndarray property","text":"

The bitmask as a numpy array.

Returns:

Type Description Optional[ndarray]

A 2D binary array representing the mask if it exists.

"},{"location":"client_api/Schemas/Spatial/Raster/#valor.schemas.Raster.geometry","title":"valor.schemas.Raster.geometry: typing.Union[Box, Polygon, MultiPolygon] property","text":"

The geometric mask if it exists.

Returns:

Type Description Box | Polygon | MultiPolygon | None

The geometry if it exists.

"},{"location":"client_api/Schemas/Spatial/Raster/#valor.schemas.Raster.height","title":"valor.schemas.Raster.height: int property","text":"

Returns the height of the raster if it exists.

"},{"location":"client_api/Schemas/Spatial/Raster/#valor.schemas.Raster.width","title":"valor.schemas.Raster.width: int property","text":"

Returns the width of the raster if it exists.

"},{"location":"client_api/Schemas/Spatial/Raster/#valor.schemas.Raster-functions","title":"Functions","text":""},{"location":"client_api/Schemas/Spatial/Raster/#valor.schemas.Raster.__init__","title":"valor.schemas.Raster.__init__(value)","text":"

Initialize an instance of a raster.

Parameters:

Name Type Description Default value Dict[str, Union[ndarray, Box, Polygon, MultiPolygon, None]]

The raster in dictionary format {\"mask\": <np.ndarray>, \"geometry\": <geometry | None>}. required Source code in valor/schemas/symbolic/types.py

def __init__(\n    self,\n    value: typing.Dict[\n        str, typing.Union[np.ndarray, Box, Polygon, MultiPolygon, None]\n    ],\n):\n    \"\"\"\n    Initialize an instance of a raster.\n\n    Parameters\n    ----------\n    value : Dict[str, Union[np.ndarray, Box, Polygon, MultiPolygon, None]]\n        The raster in dictionary format {\"mask\": <np.ndarray>, \"geometry\": <geometry | None>}.\n    \"\"\"\n    super().__init__(value)\n
"},{"location":"client_api/Schemas/Spatial/Raster/#valor.schemas.Raster.__validate__","title":"valor.schemas.Raster.__validate__(value) classmethod","text":"

Validates typing.

Parameters:

Name Type Description Default value Any

The value to validate.

required

Raises:

Type Description TypeError

If the value type is not supported.

Source code in valor/schemas/symbolic/types.py
@classmethod\ndef __validate__(cls, value: typing.Any):\n    \"\"\"\n    Validates typing.\n\n    Parameters\n    ----------\n    value : Any\n        The value to validate.\n\n    Raises\n    ------\n    TypeError\n        If the value type is not supported.\n    \"\"\"\n    if not isinstance(value, dict):\n        raise TypeError(\n            \"Raster should contain a dictionary describing a mask and optionally a geometry.\"\n        )\n    elif set(value.keys()) != {\"mask\", \"geometry\"}:\n        raise ValueError(\n            \"Raster should be described by a dictionary with keys 'mask' and 'geometry'\"\n        )\n    elif not isinstance(value[\"mask\"], np.ndarray):\n        raise TypeError(\n            f\"Expected mask to have type '{np.ndarray}' received type '{value['mask']}'\"\n        )\n    elif len(value[\"mask\"].shape) != 2:\n        raise ValueError(\"raster only supports 2d arrays\")\n    elif value[\"mask\"].dtype != bool:\n        raise ValueError(\n            f\"Expecting a binary mask (i.e. of dtype bool) but got dtype {value['mask'].dtype}\"\n        )\n    elif (\n        value[\"geometry\"] is not None\n        and not Polygon.supports(value[\"geometry\"])\n        and not MultiPolygon.supports(value[\"geometry\"])\n    ):\n        raise TypeError(\n            \"Expected geometry to conform to either Polygon or MultiPolygon or be 'None'\"\n        )\n
"},{"location":"client_api/Schemas/Spatial/Raster/#valor.schemas.Raster.decode_value","title":"valor.schemas.Raster.decode_value(value) classmethod","text":"

Decode object from JSON compatible dictionary.

Source code in valor/schemas/symbolic/types.py
@classmethod\ndef decode_value(cls, value: typing.Any):\n    \"\"\"Decode object from JSON compatible dictionary.\"\"\"\n    if value is None:\n        return None\n    if not (\n        isinstance(value, dict)\n        and set(value.keys()) == {\"mask\", \"geometry\"}\n    ):\n        raise ValueError(\n            f\"Improperly formatted raster encoding. Received '{value}'\"\n        )\n    mask_bytes = b64decode(value[\"mask\"])\n    with io.BytesIO(mask_bytes) as f:\n        img = PIL.Image.open(f)\n        value = {\n            \"mask\": np.array(img),\n            \"geometry\": value[\"geometry\"],\n        }\n    return cls(value=value)\n
"},{"location":"client_api/Schemas/Spatial/Raster/#valor.schemas.Raster.encode_value","title":"valor.schemas.Raster.encode_value()","text":"

Encode object to JSON compatible dictionary.

Source code in valor/schemas/symbolic/types.py
def encode_value(self) -> typing.Any:\n    \"\"\"Encode object to JSON compatible dictionary.\"\"\"\n    value = self.get_value()\n    if value is None:\n        return None\n    f = io.BytesIO()\n    PIL.Image.fromarray(value[\"mask\"]).save(f, format=\"PNG\")\n    f.seek(0)\n    mask_bytes = f.read()\n    f.close()\n    return {\n        \"mask\": b64encode(mask_bytes).decode(),\n        \"geometry\": value[\"geometry\"],\n    }\n
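Together with decode_value above, this gives a lossless round trip through the JSON-compatible form: the mask is serialized as a base64-encoded PNG and the geometry passes through unchanged. A minimal sketch:

```python
import numpy as np

from valor.schemas import Raster

mask = np.zeros((8, 8), dtype=bool)
mask[:4, :4] = True

original = Raster({"mask": mask, "geometry": None})

# encode_value() -> {"mask": "<base64-encoded PNG>", "geometry": None}
encoded = original.encode_value()

# decode_value() reads the PNG back into a 2D boolean NumPy array.
restored = Raster.decode_value(encoded)
assert np.array_equal(restored.get_value()["mask"], mask)
```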
"},{"location":"client_api/Schemas/Spatial/Raster/#valor.schemas.Raster.from_geometry","title":"valor.schemas.Raster.from_geometry(geometry, height, width) classmethod","text":"

Create a Raster object from a geometric mask.

Parameters:

Name Type Description Default geometry Union[Box, Polygon, MultiPolygon]

Defines the bitmask as a geometry. Overrides any existing mask.

required height int

The intended height of the binary mask.

required width int

The intended width of the binary mask.

required

Returns:

Type Description Raster Source code in valor/schemas/symbolic/types.py
@classmethod\ndef from_geometry(\n    cls,\n    geometry: typing.Union[Box, Polygon, MultiPolygon],\n    height: int,\n    width: int,\n):\n    \"\"\"\n    Create a Raster object from a geometric mask.\n\n    Parameters\n    ----------\n    geometry : Union[Box, Polygon, MultiPolygon]\n        Defines the bitmask as a geometry. Overrides any existing mask.\n    height : int\n        The intended height of the binary mask.\n    width : int\n        The intended width of the binary mask.\n\n    Returns\n    -------\n    Raster\n    \"\"\"\n    bitmask = np.full((int(height), int(width)), False)\n    return cls(value={\"mask\": bitmask, \"geometry\": geometry.get_value()})\n
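A sketch of building a raster from a polygon (the coordinates are arbitrary, and this assumes Polygon is importable from valor.schemas alongside Raster and takes a list of closed (x, y) rings). Note that the stored bitmask starts out all-False at the requested size; the geometry carries the shape:

```python
from valor.schemas import Polygon, Raster

# A simple triangle: one outer ring, closed by repeating the first point.
triangle = Polygon([[(10, 10), (60, 10), (10, 60), (10, 10)]])

raster = Raster.from_geometry(triangle, height=100, width=100)
```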
"},{"location":"client_api/Schemas/Spatial/Raster/#valor.schemas.Raster.from_numpy","title":"valor.schemas.Raster.from_numpy(mask) classmethod","text":"

Create a Raster object from a NumPy array.

Parameters:

Name Type Description Default mask ndarray

The 2D binary array representing the mask.

required

Returns:

Type Description Raster

Raises:

Type Description ValueError

If the input array is not 2D or not of dtype bool.

Source code in valor/schemas/symbolic/types.py
@classmethod\ndef from_numpy(cls, mask: np.ndarray):\n    \"\"\"\n    Create a Raster object from a NumPy array.\n\n    Parameters\n    ----------\n    mask : np.ndarray\n        The 2D binary array representing the mask.\n\n    Returns\n    -------\n    Raster\n\n    Raises\n    ------\n    ValueError\n        If the input array is not 2D or not of dtype bool.\n    \"\"\"\n    return cls(value={\"mask\": mask, \"geometry\": None})\n
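from_numpy is the most direct constructor when the mask already exists as an array; the geometry is simply set to None. A minimal sketch:

```python
import numpy as np

from valor.schemas import Raster

mask = np.zeros((32, 32), dtype=bool)
mask[8:24, 8:24] = True  # foreground square

raster = Raster.from_numpy(mask)
```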
"}]} \ No newline at end of file diff --git a/sitemap.xml.gz b/sitemap.xml.gz index b145c17f8c4610aa97ff04f37c5737b721083402..4ea1fef372ae1a35723f30b1d1ee4e822e3ab74c 100644 GIT binary patch delta 13 Ucmb=gXP58h;9%&;p2%JS02$u{!vFvP delta 13 Ucmb=gXP58h;AohfK9Riw038MdN&o-= diff --git a/static/openapi.json b/static/openapi.json index 35cbe9175..c7d69594c 100644 --- a/static/openapi.json +++ b/static/openapi.json @@ -1 +1 @@ -{"openapi": "3.1.0", "info": {"title": "FastAPI", "version": "0.1.0"}, "paths": {"/groundtruths": {"post": {"tags": ["GroundTruths"], "summary": "Create Groundtruths", "description": "Create a ground truth in the database.\n\nPOST Endpoint: `/groundtruths`\n\nParameters\n----------\ngroundtruths : list[schemas.GroundTruth]\n The ground truths to add to the database.\ndb : Session\n The database session to use. This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.\nignore_existing_datums : bool, optional\n If True, will ignore datums that already exist in the database.\n\nRaises\n------\nHTTPException (404)\n If the dataset or datum doesn't exist.\nHTTPException (409)\n If the dataset has been finalized, or if the datum already exists.", "operationId": "create_groundtruths_groundtruths_post", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "ignore_existing_datums", "in": "query", "required": false, "schema": {"type": "boolean", "default": false, "title": "Ignore Existing Datums"}}], "requestBody": {"required": true, "content": {"application/json": {"schema": {"type": "array", "items": {"$ref": "#/components/schemas/GroundTruth-Input"}, "title": "Groundtruths"}}}}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/groundtruths/dataset/{dataset_name}/datum/{uid}": {"get": {"tags": ["GroundTruths"], "summary": "Get Groundtruth", "description": "Fetch a ground truth from the database.\n\nGET Endpoint: `/groundtruths/dataset/{dataset_name}/datum/{uid}`\n\nParameters\n----------\ndataset_name : str\n The name of the dataset to fetch the ground truth from.\nuid : str\n The UID of the ground truth.\ndb : Session\n The database session to use. 
This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.\n\nReturns\n-------\nschemas.GroundTruth\n The ground truth requested by the user.\n\nRaises\n------\nHTTPException (404)\n If the dataset or datum does not exist.", "operationId": "get_groundtruth_groundtruths_dataset__dataset_name__datum__uid__get", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "dataset_name", "in": "path", "required": true, "schema": {"type": "string", "title": "Dataset Name"}}, {"name": "uid", "in": "path", "required": true, "schema": {"type": "string", "title": "Uid"}}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"anyOf": [{"$ref": "#/components/schemas/GroundTruth-Output"}, {"type": "null"}], "title": "Response Get Groundtruth Groundtruths Dataset Dataset Name Datum Uid Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/predictions": {"post": {"tags": ["Predictions"], "summary": "Create Predictions", "description": "Create a prediction in the database.\n\nPOST Endpoint: `/predictions`\n\nParameters\n----------\npredictions : list[schemas.Prediction]\n The predictions to add to the database.\ndb : Session\n The database session to use. This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.\n\nRaises\n------\nHTTPException (404)\n If the dataset, model, or datum doesn't exist.\nHTTPException (409)\n If the model has been finalized, or if the dataset has not been finalized.", "operationId": "create_predictions_predictions_post", "requestBody": {"content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/Prediction-Input"}, "type": "array", "title": "Predictions"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"OptionalHTTPBearer": []}]}}, "/predictions/model/{model_name}/dataset/{dataset_name}/datum/{uid}": {"get": {"tags": ["Predictions"], "summary": "Get Prediction", "description": "Fetch a prediction from the database.\n\nGET Endpoint: `/predictions/model/{model_name}/dataset/{dataset_name}/datum/{uid}`\n\nParameters\n----------\nmodel_name : str\n The name of the model associated with the prediction.\ndataset_name : str\n The name of the dataset associated with the prediction.\nuid : str\n The UID associated with the prediction.\ndb : Session\n The database session to use. 
This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.\n\nReturns\n-------\nschemas.Prediction\n The requested prediction.\n\nRaises\n------\nHTTPException (404)\n If the dataset or datum doesn't exist.", "operationId": "get_prediction_predictions_model__model_name__dataset__dataset_name__datum__uid__get", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "model_name", "in": "path", "required": true, "schema": {"type": "string", "title": "Model Name"}}, {"name": "dataset_name", "in": "path", "required": true, "schema": {"type": "string", "title": "Dataset Name"}}, {"name": "uid", "in": "path", "required": true, "schema": {"type": "string", "title": "Uid"}}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"anyOf": [{"$ref": "#/components/schemas/Prediction-Output"}, {"type": "null"}], "title": "Response Get Prediction Predictions Model Model Name Dataset Dataset Name Datum Uid Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/labels": {"get": {"tags": ["Labels"], "summary": "Get Labels", "description": "Fetch labels using optional JSON strings as query parameters.", "operationId": "get_labels_labels_get", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "offset", "in": "query", "required": false, "schema": {"type": "integer", "default": 0, "title": "Offset"}}, {"name": "limit", "in": "query", "required": false, "schema": {"type": "integer", "default": -1, "title": "Limit"}}, {"name": "dataset_names", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Dataset Names"}}, {"name": "dataset_metadata", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Dataset Metadata"}}, {"name": "model_names", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Model Names"}}, {"name": "model_metadata", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Model Metadata"}}, {"name": "datum_uids", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Datum Uids"}}, {"name": "datum_metadata", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Datum Metadata"}}, {"name": "task_types", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Task Types"}}, {"name": "annotation_metadata", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Annotation Metadata"}}, {"name": "require_bounding_box", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Require Bounding Box"}}, {"name": "bounding_box_area", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Bounding Box Area"}}, {"name": "require_polygon", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Require Polygon"}}, {"name": "polygon_area", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Polygon Area"}}, {"name": "require_raster", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], 
"title": "Require Raster"}}, {"name": "raster_area", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Raster Area"}}, {"name": "labels", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Labels"}}, {"name": "label_ids", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Label Ids"}}, {"name": "label_keys", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Label Keys"}}, {"name": "label_scores", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Label Scores"}}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"type": "array", "items": {"$ref": "#/components/schemas/Label"}, "title": "Response Get Labels Labels Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/labels/dataset/{dataset_name}": {"get": {"tags": ["Labels"], "summary": "Get Labels From Dataset", "description": "Fetch all labels for a particular dataset from the database.\n\nGET Endpoint: `/labels/dataset/{dataset_name}`\n\nParameters\n----------\nresponse: Response\n The FastAPI response object. Used to return a content-range header to the user.\ndataset_name : str\n The name of the dataset.\noffset : int, optional\n The start index of the items to return.\nlimit : int, optional\n The number of items to return. Returns all items when set to -1. Returns all items when set to -1.\ndb : Session\n The database session to use. This parameter is a sqlalchemy dependency and shouldn't be submitted by the user\n-------\nlist[schemas.Label]\n A list of all labels associated with the dataset in the database.\n\nRaises\n------\nHTTPException (404)\n If the dataset doesn't exist.", "operationId": "get_labels_from_dataset_labels_dataset__dataset_name__get", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "dataset_name", "in": "path", "required": true, "schema": {"type": "string", "title": "Dataset Name"}}, {"name": "offset", "in": "query", "required": false, "schema": {"type": "integer", "default": 0, "title": "Offset"}}, {"name": "limit", "in": "query", "required": false, "schema": {"type": "integer", "default": -1, "title": "Limit"}}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"type": "array", "items": {"$ref": "#/components/schemas/Label"}, "title": "Response Get Labels From Dataset Labels Dataset Dataset Name Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/labels/model/{model_name}": {"get": {"tags": ["Labels"], "summary": "Get Labels From Model", "description": "Fetch all labels for a particular model from the database.\n\nGET Endpoint: `/labels/model/{model_name}`\n\nParameters\n----------\nresponse: Response\n The FastAPI response object. Used to return a content-range header to the user.\nmodel_name : str\n The name of the model.\noffset : int, optional\n The start index of the items to return.\nlimit : int, optional\n The number of items to return. Returns all items when set to -1.\ndb : Session\n The database session to use. 
This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.\n\nReturns\n-------\nlist[schemas.Label]\n A list of all labels associated with the model in the database.\n\nRaises\n------\nHTTPException (404)\n If the model doesn't exist.", "operationId": "get_labels_from_model_labels_model__model_name__get", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "model_name", "in": "path", "required": true, "schema": {"type": "string", "title": "Model Name"}}, {"name": "offset", "in": "query", "required": false, "schema": {"type": "integer", "default": 0, "title": "Offset"}}, {"name": "limit", "in": "query", "required": false, "schema": {"type": "integer", "default": -1, "title": "Limit"}}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"type": "array", "items": {"$ref": "#/components/schemas/Label"}, "title": "Response Get Labels From Model Labels Model Model Name Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/datasets": {"post": {"tags": ["Datasets"], "summary": "Create Dataset", "description": "Create a dataset in the database.\n\nPOST Endpoint: `/datasets`\n\nParameters\n----------\ndataset : schemas.Dataset\n The dataset to add to the database.\ndb : Session\n The database session to use. This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.\n\nRaises\n------\nHTTPException (409)\n If the dataset already exists.", "operationId": "create_dataset_datasets_post", "security": [{"OptionalHTTPBearer": []}], "requestBody": {"required": true, "content": {"application/json": {"schema": {"$ref": "#/components/schemas/Dataset"}}}}, "responses": {"201": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}, "get": {"tags": ["Datasets"], "summary": "Get Datasets", "description": "Fetch datasets using optional JSON strings as query parameters.", "operationId": "get_datasets_datasets_get", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "offset", "in": "query", "required": false, "schema": {"type": "integer", "default": 0, "title": "Offset"}}, {"name": "limit", "in": "query", "required": false, "schema": {"type": "integer", "default": -1, "title": "Limit"}}, {"name": "dataset_names", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Dataset Names"}}, {"name": "dataset_metadata", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Dataset Metadata"}}, {"name": "model_names", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Model Names"}}, {"name": "model_metadata", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Model Metadata"}}, {"name": "datum_uids", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Datum Uids"}}, {"name": "datum_metadata", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Datum Metadata"}}, {"name": "task_types", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Task Types"}}, {"name": 
"annotation_metadata", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Annotation Metadata"}}, {"name": "require_bounding_box", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Require Bounding Box"}}, {"name": "bounding_box_area", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Bounding Box Area"}}, {"name": "require_polygon", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Require Polygon"}}, {"name": "polygon_area", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Polygon Area"}}, {"name": "require_raster", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Require Raster"}}, {"name": "raster_area", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Raster Area"}}, {"name": "labels", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Labels"}}, {"name": "label_ids", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Label Ids"}}, {"name": "label_keys", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Label Keys"}}, {"name": "label_scores", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Label Scores"}}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"type": "array", "items": {"$ref": "#/components/schemas/Dataset"}, "title": "Response Get Datasets Datasets Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/datasets/{dataset_name}": {"get": {"tags": ["Datasets"], "summary": "Get Dataset", "description": "Fetch a particular dataset from the database.\n\nGET Endpoint: `/datasets/{dataset_name}`\n\nParameters\n----------\ndataset_name : str\n The name of the dataset.\ndb : Session\n The database session to use. This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.\n\nReturns\n-------\nschemas.Dataset\n The requested dataset.\n\nRaises\n------\nHTTPException (404)\n If the dataset doesn't exist.", "operationId": "get_dataset_datasets__dataset_name__get", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "dataset_name", "in": "path", "required": true, "schema": {"type": "string", "title": "Dataset Name"}}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/Dataset"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}, "delete": {"tags": ["Datasets"], "summary": "Delete Dataset", "description": "Delete a dataset from the database.\n\nDELETE Endpoint: `/datasets/{dataset_name}`\n\nParameters\n----------\ndataset_name : str\n The name of the dataset.\nbackground_tasks: BackgroundTasks\n A FastAPI `BackgroundTasks` object to process the deletion asyncronously. This parameter is a FastAPI dependency and shouldn't be submitted by the user.\ndb : Session\n The database session to use. 
This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.\n\nRaises\n------\nHTTPException (404)\n If the dataset doesn't exist.\nHTTPException (409)\n If the dataset isn't in the correct state to be deleted.", "operationId": "delete_dataset_datasets__dataset_name__delete", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "dataset_name", "in": "path", "required": true, "schema": {"type": "string", "title": "Dataset Name"}}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/datasets/{dataset_name}/status": {"get": {"tags": ["Datasets"], "summary": "Get Dataset Status", "description": "Fetch the status of a dataset.\n\nGET Endpoint: `/datasets/{dataset_name}/status`\n\nParameters\n----------\ndataset_name : str\n The name of the dataset.\ndb : Session\n The database session to use. This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.\n\nReturns\n-------\nenums.TableStatus\n The requested state.\n\nRaises\n------\nHTTPException (404)\n If the dataset doesn't exist.", "operationId": "get_dataset_status_datasets__dataset_name__status_get", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "dataset_name", "in": "path", "required": true, "schema": {"type": "string", "title": "Dataset Name"}}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/TableStatus"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/datasets/{dataset_name}/summary": {"get": {"tags": ["Datasets"], "summary": "Get Dataset Summary", "description": "Get the summary of a dataset.\n\nGET Endpoint: `/datasets/{dataset_name}/summary`\n\nParameters\n----------\ndataset_name : str\n The name of the dataset.\ndb : Session\n The database session to use. This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.\n\nReturns\n-------\nschemas.DatasetSummary\n The dataset summary.\n\nRaises\n------\nHTTPException (404)\n If the dataset doesn't exist.", "operationId": "get_dataset_summary_datasets__dataset_name__summary_get", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "dataset_name", "in": "path", "required": true, "schema": {"type": "string", "title": "Dataset Name"}}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/DatasetSummary"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/datasets/{dataset_name}/finalize": {"put": {"tags": ["Datasets"], "summary": "Finalize Dataset", "description": "Finalizes a dataset for evaluation.\n\nPUT Endpoint: `/datasets/{dataset_name}/finalize`\n\nParameters\n----------\ndataset_name : str\n The name of the dataset.\ndb : Session\n The database session to use. 
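A hypothetical end-to-end sketch of the dataset lifecycle endpoints in this section (status, summary, finalize, delete), again assuming a local deployment and a placeholder dataset name:

```python
import requests

BASE = "http://localhost:8000"  # assumed local Valor deployment

# Current state of the dataset (a TableStatus value).
status = requests.get(f"{BASE}/datasets/my-dataset/status").json()

# Summary statistics: datum/annotation counts, task types, labels, ...
summary = requests.get(f"{BASE}/datasets/my-dataset/summary").json()

# Finalize before evaluation (409 if the dataset is empty).
requests.put(f"{BASE}/datasets/my-dataset/finalize").raise_for_status()

# Deletion is processed as a background task on the server.
requests.delete(f"{BASE}/datasets/my-dataset").raise_for_status()
```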
This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.\n\nRaises\n------\nHTTPException (409)\n If the dataset is empty.\nHTTPException (404)\n If the dataset doesn't exist.", "operationId": "finalize_dataset_datasets__dataset_name__finalize_put", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "dataset_name", "in": "path", "required": true, "schema": {"type": "string", "title": "Dataset Name"}}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/data": {"get": {"tags": ["Datums"], "summary": "Get Datums", "description": "Fetch datums using optional JSON strings as query parameters.", "operationId": "get_datums_data_get", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "offset", "in": "query", "required": false, "schema": {"type": "integer", "default": 0, "title": "Offset"}}, {"name": "limit", "in": "query", "required": false, "schema": {"type": "integer", "default": -1, "title": "Limit"}}, {"name": "dataset_names", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Dataset Names"}}, {"name": "dataset_metadata", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Dataset Metadata"}}, {"name": "model_names", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Model Names"}}, {"name": "model_metadata", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Model Metadata"}}, {"name": "datum_uids", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Datum Uids"}}, {"name": "datum_metadata", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Datum Metadata"}}, {"name": "task_types", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Task Types"}}, {"name": "annotation_metadata", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Annotation Metadata"}}, {"name": "require_bounding_box", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Require Bounding Box"}}, {"name": "bounding_box_area", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Bounding Box Area"}}, {"name": "require_polygon", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Require Polygon"}}, {"name": "polygon_area", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Polygon Area"}}, {"name": "require_raster", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Require Raster"}}, {"name": "raster_area", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Raster Area"}}, {"name": "labels", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Labels"}}, {"name": "label_ids", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Label Ids"}}, 
{"name": "label_keys", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Label Keys"}}, {"name": "label_scores", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Label Scores"}}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"type": "array", "items": {"$ref": "#/components/schemas/Datum"}, "title": "Response Get Datums Data Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/data/dataset/{dataset_name}/uid/{uid}": {"get": {"tags": ["Datums"], "summary": "Get Datum", "description": "Fetch a particular datum.\nGET Endpoint: `/data/dataset/{dataset_name}/uid/{uid}`\nParameters\n----------\ndataset_name : str\n The name of the dataset.\nuid : str\n The UID of the datum.\ndb : Session\n The database session to use. This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.\nReturns\n-------\nschemas.Datum\n The requested datum.\nRaises\n------\nHTTPException (404)\n If the dataset or datum doesn't exist.", "operationId": "get_datum_data_dataset__dataset_name__uid__uid__get", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "dataset_name", "in": "path", "required": true, "schema": {"type": "string", "title": "Dataset Name"}}, {"name": "uid", "in": "path", "required": true, "schema": {"type": "string", "title": "Uid"}}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"anyOf": [{"$ref": "#/components/schemas/Datum"}, {"type": "null"}], "title": "Response Get Datum Data Dataset Dataset Name Uid Uid Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/models": {"post": {"tags": ["Models"], "summary": "Create Model", "description": "Create a model in the database.\n\nPOST Endpoint: `/models`\n\nParameters\n----------\nmodel : schemas.Model\n The model to add to the database.\ndb : Session\n The database session to use. 
This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.\n\nRaises\n------\nHTTPException (409)\n If the model already exists.", "operationId": "create_model_models_post", "security": [{"OptionalHTTPBearer": []}], "requestBody": {"required": true, "content": {"application/json": {"schema": {"$ref": "#/components/schemas/Model"}}}}, "responses": {"201": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}, "get": {"tags": ["Models"], "summary": "Get Models", "description": "Fetch models using optional JSON strings as query parameters.", "operationId": "get_models_models_get", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "offset", "in": "query", "required": false, "schema": {"type": "integer", "default": 0, "title": "Offset"}}, {"name": "limit", "in": "query", "required": false, "schema": {"type": "integer", "default": -1, "title": "Limit"}}, {"name": "dataset_names", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Dataset Names"}}, {"name": "dataset_metadata", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Dataset Metadata"}}, {"name": "model_names", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Model Names"}}, {"name": "model_metadata", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Model Metadata"}}, {"name": "datum_uids", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Datum Uids"}}, {"name": "datum_metadata", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Datum Metadata"}}, {"name": "task_types", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Task Types"}}, {"name": "annotation_metadata", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Annotation Metadata"}}, {"name": "require_bounding_box", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Require Bounding Box"}}, {"name": "bounding_box_area", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Bounding Box Area"}}, {"name": "require_polygon", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Require Polygon"}}, {"name": "polygon_area", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Polygon Area"}}, {"name": "require_raster", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Require Raster"}}, {"name": "raster_area", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Raster Area"}}, {"name": "labels", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Labels"}}, {"name": "label_ids", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Label Ids"}}, 
{"name": "label_keys", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Label Keys"}}, {"name": "label_scores", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Label Scores"}}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"type": "array", "items": {"$ref": "#/components/schemas/Model"}, "title": "Response Get Models Models Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/models/{model_name}": {"get": {"tags": ["Models"], "summary": "Get Model", "description": "Fetch a particular model.\n\nGET Endpoint: `/models/{model_name}`\n\nParameters\n----------\nmodel_name : str\n The name of the model.\ndb : Session\n The database session to use. This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.\n\nReturns\n-------\nschemas.Model\n The requested model.\n\nRaises\n------\nHTTPException (404)\n If the model datum doesn't exist.", "operationId": "get_model_models__model_name__get", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "model_name", "in": "path", "required": true, "schema": {"type": "string", "title": "Model Name"}}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/Model"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}, "delete": {"tags": ["Models"], "summary": "Delete Model", "description": "Delete a model from the database.\n\nDELETE Endpoint: `/models/{model_name}`\n\nParameters\n----------\nmodel_name : str\n The name of the model.\ndb : Session\n The database session to use. This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.\n\nRaises\n------\nHTTPException (404)\n If the model doesn't exist.\nHTTPException (409)\n If the model isn't in the correct state to be deleted.", "operationId": "delete_model_models__model_name__delete", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "model_name", "in": "path", "required": true, "schema": {"type": "string", "title": "Model Name"}}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/models/{model_name}/eval-requests": {"get": {"tags": ["Models"], "summary": "Get Model Eval Requests", "description": "Fetch a particular model.\n\nGET Endpoint: `/models/{model_name}`\n\nParameters\n----------\nmodel_name : str\n The name of the model.\ndb : Session\n The database session to use. 
This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.\n\nReturns\n-------\nlist[EvaluationResponse]\n The evaluation requests associated with the model.\n\nRaises\n------\nHTTPException (404)\n If the model doesn't exist.", "operationId": "get_model_eval_requests_models__model_name__eval_requests_get", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "model_name", "in": "path", "required": true, "schema": {"type": "string", "title": "Model Name"}}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"type": "array", "items": {"$ref": "#/components/schemas/EvaluationResponse"}, "title": "Response Get Model Eval Requests Models Model Name Eval Requests Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/models/{model_name}/dataset/{dataset_name}/status": {"get": {"tags": ["Models"], "summary": "Get Model Status", "description": "Fetch the status of a model over a dataset.\n\nParameters\n----------\ndataset_name : str\n The name of the dataset.\nmodel_name : str\n The name of the model.\ndb : Session\n The database session to use. This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.\n\nReturns\n-------\nenums.TableStatus\n The requested state.\n\nRaises\n------\nHTTPException (404)\n If the model doesn't exist.", "operationId": "get_model_status_models__model_name__dataset__dataset_name__status_get", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "dataset_name", "in": "path", "required": true, "schema": {"type": "string", "title": "Dataset Name"}}, {"name": "model_name", "in": "path", "required": true, "schema": {"type": "string", "title": "Model Name"}}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/TableStatus"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/models/{model_name}/datasets/{dataset_name}/finalize": {"put": {"tags": ["Models"], "summary": "Finalize Inferences", "description": "Finalize a model prior to evaluation.\n\nPUT Endpoint: `/models/{model_name}/datasets/{dataset_name}/finalize`\n\nParameters\n----------\ndataset_name : str\n The name of the dataset.\nmodel_name : str\n The name of the model.\ndb : Session\n The database session to use. 
This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.\n\n\nRaises\n------\nHTTPException (400)\n If the dataset or model are empty.\nHTTPException (404)\n If the dataset or model do not exist.", "operationId": "finalize_inferences_models__model_name__datasets__dataset_name__finalize_put", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "dataset_name", "in": "path", "required": true, "schema": {"type": "string", "title": "Dataset Name"}}, {"name": "model_name", "in": "path", "required": true, "schema": {"type": "string", "title": "Model Name"}}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/evaluations": {"post": {"tags": ["Evaluations"], "summary": "Create Or Get Evaluations", "description": "Create a new evaluation.\n\nPOST Endpoint: `/evaluations`\n\nParameters\n----------\nresponse: Response\n The FastAPI response object. Used to return a content-range header to the user.\njob_request: schemas.EvaluationJob\n The job request for the evaluation.\nbackground_tasks: BackgroundTasks\n A FastAPI `BackgroundTasks` object to process the creation asynchronously. This parameter is a FastAPI dependency and shouldn't be submitted by the user.\nallow_retries: bool, default = False\n Determines whether failed evaluations are restarted.\ndb : Session\n The database session to use. This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.\n\nReturns\n-------\nlist[schemas.EvaluationResponse]\n A list of evaluation response objects.\n\nRaises\n------\nHTTPException (400)\n If the task type of the evaluation job doesn't exist, or if another ValueError is thrown.\nHTTPException (404)\n If the dataset or model does not exist.\nHTTPException (405)\n If the dataset or model hasn't been finalized.\nHTTPException (409)\n If there is a state exception when creating the evaluation.", "operationId": "create_or_get_evaluations_evaluations_post", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "allow_retries", "in": "query", "required": false, "schema": {"type": "boolean", "default": false, "title": "Allow Retries"}}], "requestBody": {"required": true, "content": {"application/json": {"schema": {"$ref": "#/components/schemas/EvaluationRequest"}}}}, "responses": {"202": {"description": "Successful Response", "content": {"application/json": {"schema": {"type": "array", "items": {"$ref": "#/components/schemas/EvaluationResponse"}, "title": "Response Create Or Get Evaluations Evaluations Post"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}, "get": {"tags": ["Evaluations"], "summary": "Get Evaluations", "description": "Fetch all metrics associated with user-supplied dataset and model names. Users\nmay query using model names, dataset names, or both. All metrics for all specified\nmodels and datasets will be returned in a list of Evaluations.\n\nThis endpoint can handle multiple dataset and model names. For example, you can use\n`/evaluations?models=first_model,second_model&datasets=test_dataset` to get all evaluations\nrelated to `test_dataset` and either `first_model` or `second_model`.\n\nGET Endpoint: `/evaluations`\n\nParameters\n----------\nresponse: Response\n The FastAPI response object. 
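A sketch of the comma-separated query form described above (model and dataset names taken from the docstring's own example; assumes a local deployment):

```python
import requests

BASE = "http://localhost:8000"  # assumed local Valor deployment

# All evaluations touching test_dataset and either of the two models.
resp = requests.get(
    f"{BASE}/evaluations",
    params={"models": "first_model,second_model", "datasets": "test_dataset"},
)
evaluations = resp.json()  # list[EvaluationResponse]
```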
Used to return a content-range header to the user.\ndatasets : str\n An optional set of dataset names to return metrics for.\nmodels : str\n An optional set of model names to return metrics for.\nevaluation_ids : str\n An optional set of evaluation_ids to return metrics for.\ndb : Session\n The database session to use. This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.\noffset : int, optional\n The start index of the items to return.\nlimit : int, optional\n The number of items to return. Returns all items when set to -1.\nmetrics_to_sort_by: str, optional\n An optional dict of metric types to sort the evaluations by.\n\nReturns\n-------\nlist[schemas.Evaluation]\n A list of evaluations.\n\nRaises\n------\nHTTPException (400)\n If a ValueError is thrown.\nHTTPException (404)\n If the dataset or model doesn't exist.", "operationId": "get_evaluations_evaluations_get", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "datasets", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Datasets"}}, {"name": "models", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Models"}}, {"name": "evaluation_ids", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Evaluation Ids"}}, {"name": "offset", "in": "query", "required": false, "schema": {"type": "integer", "default": 0, "title": "Offset"}}, {"name": "limit", "in": "query", "required": false, "schema": {"type": "integer", "default": -1, "title": "Limit"}}, {"name": "metrics_to_sort_by", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Metrics To Sort By"}}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"type": "array", "items": {"$ref": "#/components/schemas/EvaluationResponse"}, "title": "Response Get Evaluations Evaluations Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/token": {"post": {"tags": ["Authentication"], "summary": "Login For Access Token", "operationId": "login_for_access_token_token_post", "requestBody": {"content": {"application/x-www-form-urlencoded": {"schema": {"$ref": "#/components/schemas/Body_login_for_access_token_token_post"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"type": "string", "title": "Response Login For Access Token Token Post"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/api-version": {"get": {"tags": ["Info"], "summary": "Get Api Version", "description": "Return the API's version.\n\nGET Endpoint: `/api-version`\n\nReturns\n-------\nschemas.APIVersion\n A response object containing the API's version number.", "operationId": "get_api_version_api_version_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/APIVersion"}}}}}, "security": [{"OptionalHTTPBearer": []}]}}, "/health": {"get": {"tags": ["Status"], "summary": "Health", "description": "Return 200 if the service is up.\n\nGET Endpoint: `/health`\n\nReturns\n-------\nschemas.Health\n A response indicating that the service is up and running.", "operationId": 
"health_health_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}}}}, "/ready": {"get": {"tags": ["Status"], "summary": "Ready", "description": "Return 200 if the service is up and connected to the database.\n\nGET Endpoint: `/ready`\n\nReturns\n-------\nschemas.Readiness\n A response indicating that the service is up and connected to the database.", "operationId": "ready_ready_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}}}}}, "components": {"schemas": {"APIVersion": {"properties": {"api_version": {"type": "string", "title": "Api Version"}}, "type": "object", "required": ["api_version"], "title": "APIVersion", "description": "Defines an API version string which is sent back to the user after their authentication is confirmed.\n\nAttributes\n----------\napi_version : str\n The API version."}, "Annotation-Input": {"properties": {"metadata": {"additionalProperties": {"anyOf": [{"type": "boolean"}, {"type": "integer"}, {"type": "number"}, {"type": "string"}, {"additionalProperties": {"anyOf": [{"type": "string"}, {"type": "number"}, {"additionalProperties": {"anyOf": [{"type": "string"}, {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, {"items": {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, "type": "array"}, {"items": {"items": {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, "type": "array"}, "type": "array"}]}, "type": "object"}]}, "type": "object"}]}, "type": "object", "title": "Metadata", "default": {}}, "labels": {"items": {"$ref": "#/components/schemas/Label"}, "type": "array", "title": "Labels", "default": []}, "bounding_box": {"anyOf": [{"$ref": "#/components/schemas/Box"}, {"type": "null"}]}, "polygon": {"anyOf": [{"$ref": "#/components/schemas/Polygon"}, {"type": "null"}]}, "raster": {"anyOf": [{"$ref": "#/components/schemas/Raster"}, {"type": "null"}]}, "embedding": {"anyOf": [{"items": {"type": "number"}, "type": "array"}, {"type": "null"}], "title": "Embedding"}, "is_instance": {"anyOf": [{"type": "boolean"}, {"type": "null"}], "title": "Is Instance"}, "implied_task_types": {"anyOf": [{"items": {"type": "string"}, "type": "array"}, {"type": "null"}], "title": "Implied Task Types"}}, "additionalProperties": false, "type": "object", "title": "Annotation", "description": "A class used to annotate 'GroundTruths' and 'Predictions'.\n\nAttributes\n----------\nmetadata: dict, optional\n A dictionary of metadata that describes the 'Annotation'.\nlabels: List[Label], optional\n A list of labels to use for the 'Annotation'.\nbounding_box: BoundingBox, optional\n A bounding box to assign to the 'Annotation'.\npolygon: Polygon, optional\n A polygon to assign to the 'Annotation'.\nraster: Raster, optional\n A raster to assign to the 'Annotation'.\nembedding: list[float], optional\n A jsonb to assign to the 'Annotation'.\nis_instance: bool, optional\n A boolean describing whether we should treat the Raster attached to an annotation as an instance segmentation or not. If set to true, then the Annotation will be validated for use in object detection tasks. 
If set to false, then the Annotation will be validated for use in semantic segmentation tasks.\nimplied_task_types: list[str], optional\n The validated task types that are applicable to each Annotation. Doesn't need to be set by the user."}, "Annotation-Output": {"properties": {"metadata": {"additionalProperties": {"anyOf": [{"type": "boolean"}, {"type": "integer"}, {"type": "number"}, {"type": "string"}, {"additionalProperties": {"anyOf": [{"type": "string"}, {"type": "number"}, {"additionalProperties": {"anyOf": [{"type": "string"}, {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, {"items": {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, "type": "array"}, {"items": {"items": {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, "type": "array"}, "type": "array"}]}, "type": "object"}]}, "type": "object"}]}, "type": "object", "title": "Metadata", "default": {}}, "labels": {"items": {"$ref": "#/components/schemas/Label"}, "type": "array", "title": "Labels", "default": []}, "bounding_box": {"anyOf": [{"type": "object"}, {"type": "null"}]}, "polygon": {"anyOf": [{"type": "object"}, {"type": "null"}]}, "raster": {"anyOf": [{"type": "object"}, {"type": "null"}]}, "embedding": {"anyOf": [{"items": {"type": "number"}, "type": "array"}, {"type": "null"}], "title": "Embedding"}, "is_instance": {"anyOf": [{"type": "boolean"}, {"type": "null"}], "title": "Is Instance"}, "implied_task_types": {"anyOf": [{"items": {"type": "string"}, "type": "array"}, {"type": "null"}], "title": "Implied Task Types"}}, "additionalProperties": false, "type": "object", "title": "Annotation", "description": "A class used to annotate 'GroundTruths' and 'Predictions'.\n\nAttributes\n----------\nmetadata: dict, optional\n A dictionary of metadata that describes the 'Annotation'.\nlabels: List[Label], optional\n A list of labels to use for the 'Annotation'.\nbounding_box: BoundingBox, optional\n A bounding box to assign to the 'Annotation'.\npolygon: Polygon, optional\n A polygon to assign to the 'Annotation'.\nraster: Raster, optional\n A raster to assign to the 'Annotation'.\nembedding: list[float], optional\n A jsonb to assign to the 'Annotation'.\nis_instance: bool, optional\n A boolean describing whether we should treat the Raster attached to an annotation as an instance segmentation or not. If set to true, then the Annotation will be validated for use in object detection tasks. If set to false, then the Annotation will be validated for use in semantic segmentation tasks.\nimplied_task_types: list[str], optional\n The validated task types that are applicable to each Annotation. 
Doesn't need to be set by the user."}, "AnnotationType": {"type": "string", "enum": ["none", "box", "polygon", "multipolygon", "raster"], "title": "AnnotationType"}, "Body_login_for_access_token_token_post": {"properties": {"grant_type": {"anyOf": [{"type": "string", "pattern": "password"}, {"type": "null"}], "title": "Grant Type"}, "username": {"type": "string", "title": "Username"}, "password": {"type": "string", "title": "Password"}, "scope": {"type": "string", "title": "Scope", "default": ""}, "client_id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Client Id"}, "client_secret": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Client Secret"}}, "type": "object", "required": ["username", "password"], "title": "Body_login_for_access_token_token_post"}, "BooleanFilter": {"properties": {"value": {"type": "boolean", "title": "Value"}, "operator": {"type": "string", "title": "Operator", "default": "=="}}, "additionalProperties": false, "type": "object", "required": ["value"], "title": "BooleanFilter", "description": "Used to filter on boolean values that meet some user-defined condition.\n\nAttributes\n----------\nvalue : bool\n The value to compare the specific field against.\noperator : str\n The operator to use for comparison. Should be one of `[\"==\", \"!=\"]`.\n\nRaises\n------\nValueError\n If the `operator` doesn't match one of the allowed patterns."}, "Box": {"properties": {"value": {"items": {"items": {"prefixItems": [{"anyOf": [{"type": "integer"}, {"type": "number"}]}, {"anyOf": [{"type": "integer"}, {"type": "number"}]}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, "type": "array", "title": "Value"}}, "additionalProperties": false, "type": "object", "required": ["value"], "title": "Box", "description": "Describes a Box in (x,y) coordinates.\n\nAttributes\n----------\nvalue : list[list[tuple[int | float, int | float]]]\n A list of coordinates describing the Box.\n\nRaises\n------\nValueError\n If the value doesn't conform to the type."}, "ConfusionMatrixEntry": {"properties": {"prediction": {"type": "string", "title": "Prediction"}, "groundtruth": {"type": "string", "title": "Groundtruth"}, "count": {"type": "integer", "title": "Count"}}, "type": "object", "required": ["prediction", "groundtruth", "count"], "title": "ConfusionMatrixEntry", "description": "Describes one element in a confusion matrix.\n\nAttributes\n----------\nprediction : str\n The prediction.\ngroundtruth : str\n The ground truth.\ncount : int\n The value of the element in the matrix."}, "ConfusionMatrixResponse": {"properties": {"label_key": {"type": "string", "title": "Label Key"}, "entries": {"items": {"$ref": "#/components/schemas/ConfusionMatrixEntry"}, "type": "array", "title": "Entries"}}, "type": "object", "required": ["label_key", "entries"], "title": "ConfusionMatrixResponse", "description": "A response object used for HTTP responses since they won't contain matrix or label map attributes."}, "Dataset": {"properties": {"name": {"type": "string", "title": "Name"}, "metadata": {"additionalProperties": {"anyOf": [{"type": "boolean"}, {"type": "integer"}, {"type": "number"}, {"type": "string"}, {"additionalProperties": {"anyOf": [{"type": "string"}, {"type": "number"}, {"additionalProperties": {"anyOf": [{"type": "string"}, {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": 
"array"}, {"items": {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, "type": "array"}, {"items": {"items": {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, "type": "array"}, "type": "array"}]}, "type": "object"}]}, "type": "object"}]}, "type": "object", "title": "Metadata", "default": {}}}, "additionalProperties": false, "type": "object", "required": ["name"], "title": "Dataset", "description": "A class describing a given dataset.\n\nAttributes\n----------\nname : str\n The name of the dataset.\nmetadata : dict, optional\n A dictionary of metadata that describes the dataset."}, "DatasetSummary": {"properties": {"name": {"type": "string", "title": "Name"}, "num_datums": {"type": "integer", "title": "Num Datums"}, "num_annotations": {"type": "integer", "title": "Num Annotations"}, "num_bounding_boxes": {"type": "integer", "title": "Num Bounding Boxes"}, "num_polygons": {"type": "integer", "title": "Num Polygons"}, "num_rasters": {"type": "integer", "title": "Num Rasters"}, "task_types": {"items": {"$ref": "#/components/schemas/TaskType"}, "type": "array", "title": "Task Types"}, "labels": {"items": {"$ref": "#/components/schemas/Label"}, "type": "array", "title": "Labels"}, "datum_metadata": {"items": {"additionalProperties": {"anyOf": [{"type": "boolean"}, {"type": "integer"}, {"type": "number"}, {"type": "string"}, {"additionalProperties": {"anyOf": [{"type": "string"}, {"type": "number"}, {"additionalProperties": {"anyOf": [{"type": "string"}, {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, {"items": {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, "type": "array"}, {"items": {"items": {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, "type": "array"}, "type": "array"}]}, "type": "object"}]}, "type": "object"}]}, "type": "object"}, "type": "array", "title": "Datum Metadata"}, "annotation_metadata": {"items": {"additionalProperties": {"anyOf": [{"type": "boolean"}, {"type": "integer"}, {"type": "number"}, {"type": "string"}, {"additionalProperties": {"anyOf": [{"type": "string"}, {"type": "number"}, {"additionalProperties": {"anyOf": [{"type": "string"}, {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, {"items": {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, "type": "array"}, {"items": {"items": {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, "type": "array"}, "type": "array"}]}, "type": "object"}]}, "type": "object"}]}, "type": "object"}, "type": "array", "title": "Annotation Metadata"}}, "type": "object", "required": ["name", "num_datums", "num_annotations", "num_bounding_boxes", "num_polygons", "num_rasters", "task_types", "labels", "datum_metadata", "annotation_metadata"], "title": "DatasetSummary"}, "Date": 
{"properties": {"value": {"type": "string", "title": "Value"}}, "additionalProperties": false, "type": "object", "required": ["value"], "title": "Date", "description": "An object describing a date.\n\nAttributes\n----------\nvalue : str\n Date in ISO format."}, "DateTime": {"properties": {"value": {"type": "string", "title": "Value"}}, "additionalProperties": false, "type": "object", "required": ["value"], "title": "DateTime", "description": "An object describing a date and time.\n\nAttributes\n----------\nvalue : str\n Datetime in ISO format."}, "DateTimeFilter": {"properties": {"value": {"anyOf": [{"$ref": "#/components/schemas/DateTime"}, {"$ref": "#/components/schemas/Date"}, {"$ref": "#/components/schemas/Time"}, {"$ref": "#/components/schemas/Duration"}], "title": "Value"}, "operator": {"type": "string", "title": "Operator", "default": "=="}}, "additionalProperties": false, "type": "object", "required": ["value"], "title": "DateTimeFilter", "description": "Used to filter on datetime values that meet some user-defined condition.\n\nAttributes\n----------\nvalue : DateTime\n The value to compare the specific field against.\noperator : str\n The operator to use for comparison. Should be one of `[\">\", \"<\", \">=\", \"<=\", \"==\", \"!=\"]`.\n\nRaises\n------\nValueError\n If the `operator` doesn't match one of the allowed patterns."}, "Datum": {"properties": {"uid": {"type": "string", "title": "Uid"}, "metadata": {"additionalProperties": {"anyOf": [{"type": "boolean"}, {"type": "integer"}, {"type": "number"}, {"type": "string"}, {"additionalProperties": {"anyOf": [{"type": "string"}, {"type": "number"}, {"additionalProperties": {"anyOf": [{"type": "string"}, {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, {"items": {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, "type": "array"}, {"items": {"items": {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, "type": "array"}, "type": "array"}]}, "type": "object"}]}, "type": "object"}]}, "type": "object", "title": "Metadata", "default": {}}}, "additionalProperties": false, "type": "object", "required": ["uid"], "title": "Datum", "description": "A class used to store datum information about 'GroundTruths' and 'Predictions'.\n\nAttributes\n----------\nuid : str\n The UID of the datum.\nmetadata : dict, optional\n A dictionary of metadata that describes the datum."}, "Duration": {"properties": {"value": {"type": "number", "title": "Value"}}, "additionalProperties": false, "type": "object", "required": ["value"], "title": "Duration", "description": "An object describing a time duration.\n\nAttributes\n----------\nvalue : float\n Time duration in seconds."}, "EvaluationParameters": {"properties": {"task_type": {"$ref": "#/components/schemas/TaskType"}, "metrics_to_return": {"anyOf": [{"items": {"type": "string"}, "type": "array"}, {"type": "null"}], "title": "Metrics To Return"}, "label_map": {"anyOf": [{"items": {"items": {"items": {"type": "string"}, "type": "array"}, "type": "array"}, "type": "array"}, {"type": "null"}], "title": "Label Map"}, "convert_annotations_to_type": {"anyOf": [{"$ref": "#/components/schemas/AnnotationType"}, {"type": "null"}]}, "iou_thresholds_to_compute": {"anyOf": 
[{"items": {"type": "number"}, "type": "array"}, {"type": "null"}], "title": "Iou Thresholds To Compute"}, "iou_thresholds_to_return": {"anyOf": [{"items": {"type": "number"}, "type": "array"}, {"type": "null"}], "title": "Iou Thresholds To Return"}, "recall_score_threshold": {"anyOf": [{"type": "number"}, {"type": "null"}], "title": "Recall Score Threshold", "default": 0}, "pr_curve_iou_threshold": {"type": "number", "title": "Pr Curve Iou Threshold", "default": 0.5}, "pr_curve_max_examples": {"type": "integer", "title": "Pr Curve Max Examples", "default": 1}}, "additionalProperties": false, "type": "object", "required": ["task_type"], "title": "EvaluationParameters", "description": "Defines parameters for evaluation methods.\n\nAttributes\n----------\ntask_type: TaskType\n The task type of a given evaluation.\nlabel_map: Optional[List[List[List[str]]]]\n Optional mapping of individual labels to a grouper label. Useful when you need to evaluate performance using labels that differ across datasets and models.\nmetrics: List[str], optional\n The list of metrics to compute, store, and return to the user.\nconvert_annotations_to_type: AnnotationType | None = None\n The type to convert all annotations to.\niou_thresholds_to_compute: List[float], optional\n A list of floats describing which Intersection over Unions (IoUs) to use when calculating metrics (i.e., mAP).\niou_thresholds_to_return: List[float], optional\n A list of floats describing which Intersection over Union (IoUs) thresholds to calculate a metric for. Must be a subset of `iou_thresholds_to_compute`.\nrecall_score_threshold: float, default=0\n The confidence score threshold for use when determining whether to count a prediction as a true positive or not while calculating Average Recall.\npr_curve_iou_threshold: float, optional\n The IOU threshold to use when calculating precision-recall curves for object detection tasks. 
Defaults to 0.5.\npr_curve_max_examples: int\n The maximum number of datum examples to store when calculating PR curves."}, "EvaluationRequest": {"properties": {"model_names": {"items": {"type": "string"}, "type": "array", "title": "Model Names"}, "datum_filter": {"$ref": "#/components/schemas/Filter-Input"}, "parameters": {"$ref": "#/components/schemas/EvaluationParameters"}}, "additionalProperties": false, "type": "object", "required": ["model_names", "datum_filter", "parameters"], "title": "EvaluationRequest", "description": "Request for evaluation.\n\nAttributes\n----------\nmodel_names : list[str]\n The model(s) to evaluate.\ndatum_filter : schemas.Filter\n The filter object used to define what datums the model is evaluating over.\nparameters : EvaluationParameters\n Any parameters that are used to modify an evaluation method."}, "EvaluationResponse": {"properties": {"id": {"type": "integer", "title": "Id"}, "model_name": {"type": "string", "title": "Model Name"}, "datum_filter": {"$ref": "#/components/schemas/Filter-Output"}, "parameters": {"$ref": "#/components/schemas/EvaluationParameters"}, "status": {"$ref": "#/components/schemas/EvaluationStatus"}, "created_at": {"type": "string", "format": "date-time", "title": "Created At"}, "meta": {"anyOf": [{"additionalProperties": {"anyOf": [{"type": "string"}, {"type": "integer"}, {"type": "number"}]}, "type": "object"}, {"type": "null"}], "title": "Meta"}, "metrics": {"anyOf": [{"items": {"$ref": "#/components/schemas/Metric"}, "type": "array"}, {"type": "null"}], "title": "Metrics"}, "confusion_matrices": {"anyOf": [{"items": {"$ref": "#/components/schemas/ConfusionMatrixResponse"}, "type": "array"}, {"type": "null"}], "title": "Confusion Matrices"}, "ignored_pred_labels": {"anyOf": [{"items": {"$ref": "#/components/schemas/Label"}, "type": "array"}, {"type": "null"}], "title": "Ignored Pred Labels"}, "missing_pred_labels": {"anyOf": [{"items": {"$ref": "#/components/schemas/Label"}, "type": "array"}, {"type": "null"}], "title": "Missing Pred Labels"}}, "additionalProperties": true, "type": "object", "required": ["id", "model_name", "datum_filter", "parameters", "status", "created_at", "meta"], "title": "EvaluationResponse", "description": "An object for storing the returned results of a model evaluation (where groundtruths are compared with predictions to measure performance).\n\nAttributes\n----------\nid : int\n The ID of the evaluation.\nmodel_name : str\n The name of the evaluated model.\ndatum_filter : schemas.Filter\n The evaluation filter used in the evaluation.\nparameters : schemas.EvaluationParameters\n Any parameters used by the evaluation method.\nstatus : str\n The status of the evaluation.\ncreated_at: datetime.datetime\n The time the evaluation was created.\nmetrics : List[Metric]\n A list of metrics associated with the evaluation.\nconfusion_matrices: List[ConfusionMatrixResponse]\n A list of confusion matrices associated with the evaluation.\nmissing_pred_labels: List[Label], optional\n A list of ground truth labels that aren't associated with any predictions.\nignored_pred_labels: List[Label], optional\n A list of prediction labels that aren't associated with any ground truths.\nmeta: dict[str, str | int | float]\n Metadata about the evaluation run."}, "EvaluationStatus": {"type": "string", "enum": ["pending", "running", "done", "failed", "deleting"], "title": "EvaluationStatus"}, "Filter-Input": {"properties": {"dataset_names": {"anyOf": [{"items": {"type": "string"}, "type": "array"}, {"type": 
"null"}], "title": "Dataset Names"}, "dataset_metadata": {"anyOf": [{"additionalProperties": {"items": {"anyOf": [{"$ref": "#/components/schemas/StringFilter"}, {"$ref": "#/components/schemas/NumericFilter"}, {"$ref": "#/components/schemas/DateTimeFilter"}, {"$ref": "#/components/schemas/BooleanFilter"}, {"$ref": "#/components/schemas/GeospatialFilter"}]}, "type": "array"}, "type": "object"}, {"type": "null"}], "title": "Dataset Metadata"}, "model_names": {"anyOf": [{"items": {"type": "string"}, "type": "array"}, {"type": "null"}], "title": "Model Names"}, "model_metadata": {"anyOf": [{"additionalProperties": {"items": {"anyOf": [{"$ref": "#/components/schemas/StringFilter"}, {"$ref": "#/components/schemas/NumericFilter"}, {"$ref": "#/components/schemas/DateTimeFilter"}, {"$ref": "#/components/schemas/BooleanFilter"}, {"$ref": "#/components/schemas/GeospatialFilter"}]}, "type": "array"}, "type": "object"}, {"type": "null"}], "title": "Model Metadata"}, "datum_uids": {"anyOf": [{"items": {"type": "string"}, "type": "array"}, {"type": "null"}], "title": "Datum Uids"}, "datum_metadata": {"anyOf": [{"additionalProperties": {"items": {"anyOf": [{"$ref": "#/components/schemas/StringFilter"}, {"$ref": "#/components/schemas/NumericFilter"}, {"$ref": "#/components/schemas/DateTimeFilter"}, {"$ref": "#/components/schemas/BooleanFilter"}, {"$ref": "#/components/schemas/GeospatialFilter"}]}, "type": "array"}, "type": "object"}, {"type": "null"}], "title": "Datum Metadata"}, "task_types": {"anyOf": [{"items": {"$ref": "#/components/schemas/TaskType"}, "type": "array"}, {"type": "null"}], "title": "Task Types"}, "annotation_metadata": {"anyOf": [{"additionalProperties": {"items": {"anyOf": [{"$ref": "#/components/schemas/StringFilter"}, {"$ref": "#/components/schemas/NumericFilter"}, {"$ref": "#/components/schemas/DateTimeFilter"}, {"$ref": "#/components/schemas/BooleanFilter"}, {"$ref": "#/components/schemas/GeospatialFilter"}]}, "type": "array"}, "type": "object"}, {"type": "null"}], "title": "Annotation Metadata"}, "require_bounding_box": {"anyOf": [{"type": "boolean"}, {"type": "null"}], "title": "Require Bounding Box"}, "bounding_box_area": {"anyOf": [{"items": {"$ref": "#/components/schemas/NumericFilter"}, "type": "array"}, {"type": "null"}], "title": "Bounding Box Area"}, "require_polygon": {"anyOf": [{"type": "boolean"}, {"type": "null"}], "title": "Require Polygon"}, "polygon_area": {"anyOf": [{"items": {"$ref": "#/components/schemas/NumericFilter"}, "type": "array"}, {"type": "null"}], "title": "Polygon Area"}, "require_raster": {"anyOf": [{"type": "boolean"}, {"type": "null"}], "title": "Require Raster"}, "raster_area": {"anyOf": [{"items": {"$ref": "#/components/schemas/NumericFilter"}, "type": "array"}, {"type": "null"}], "title": "Raster Area"}, "labels": {"anyOf": [{"items": {"additionalProperties": {"type": "string"}, "type": "object"}, "type": "array"}, {"type": "null"}], "title": "Labels"}, "label_ids": {"anyOf": [{"items": {"type": "integer"}, "type": "array"}, {"type": "null"}], "title": "Label Ids"}, "label_keys": {"anyOf": [{"items": {"type": "string"}, "type": "array"}, {"type": "null"}], "title": "Label Keys"}, "label_scores": {"anyOf": [{"items": {"$ref": "#/components/schemas/NumericFilter"}, "type": "array"}, {"type": "null"}], "title": "Label Scores"}}, "additionalProperties": false, "type": "object", "title": "Filter", "description": "Used to filter Evaluations according to specific, user-defined criteria.\n\nAttributes\n----------\ndataset_names: List[str], default=None\n 
A list of `Dataset` names to filter on.\ndataset_metadata: Dict[str, list[StringFilter | NumericFilter | DateTimeFilter | BooleanFilter | GeospatialFilter]], default=None\n A dictionary of `Dataset` metadata to filter on.\nmodel_names: List[str], default=None\n A list of `Model` names to filter on.\nmodel_metadata: Dict[str, list[StringFilter | NumericFilter | DateTimeFilter | BooleanFilter | GeospatialFilter]], default=None\n A dictionary of `Model` metadata to filter on.\ndatum_metadata: Dict[str, list[StringFilter | NumericFilter | DateTimeFilter | BooleanFilter | GeospatialFilter]], default=None\n A dictionary of `Datum` metadata to filter on.\ntask_types: List[TaskType], default=None\n A list of task types to filter on.\nannotation_metadata: Dict[str, list[StringFilter | NumericFilter | DateTimeFilter | BooleanFilter | GeospatialFilter]], default=None\n A dictionary of `Annotation` metadata to filter on.\nrequire_bounding_box : bool, optional\n A toggle for filtering by bounding boxes.\nbounding_box_area : List[NumericFilter], optional\n An optional constraint to filter by bounding box area.\nrequire_polygon : bool, optional\n A toggle for filtering by polygons.\npolygon_area : List[NumericFilter], optional\n An optional constraint to filter by polygon area.\nrequire_raster : bool, optional\n A toggle for filtering by rasters.\nraster_area : List[NumericFilter], optional\n An optional constraint to filter by raster area.\nlabels: List[Dict[str, str]], default=None\n A list of `Labels` to filter on.\nlabel_ids: List[int], default=None\n A list of `Label` IDs to filter on.\nlabel_keys: List[str], default=None\n A list of `Label` keys to filter on.\nlabel_scores: List[ValueFilter], default=None\n A list of `ValueFilters` which are used to filter `Evaluations` according to the `Model`'s prediction scores."}, "Filter-Output": {"properties": {"dataset_names": {"anyOf": [{"items": {"type": "string"}, "type": "array"}, {"type": "null"}], "title": "Dataset Names"}, "dataset_metadata": {"anyOf": [{"additionalProperties": {"items": {"anyOf": [{"$ref": "#/components/schemas/StringFilter"}, {"$ref": "#/components/schemas/NumericFilter"}, {"$ref": "#/components/schemas/DateTimeFilter"}, {"$ref": "#/components/schemas/BooleanFilter"}, {"$ref": "#/components/schemas/GeospatialFilter"}]}, "type": "array"}, "type": "object"}, {"type": "null"}], "title": "Dataset Metadata"}, "model_names": {"anyOf": [{"items": {"type": "string"}, "type": "array"}, {"type": "null"}], "title": "Model Names"}, "model_metadata": {"anyOf": [{"additionalProperties": {"items": {"anyOf": [{"$ref": "#/components/schemas/StringFilter"}, {"$ref": "#/components/schemas/NumericFilter"}, {"$ref": "#/components/schemas/DateTimeFilter"}, {"$ref": "#/components/schemas/BooleanFilter"}, {"$ref": "#/components/schemas/GeospatialFilter"}]}, "type": "array"}, "type": "object"}, {"type": "null"}], "title": "Model Metadata"}, "datum_uids": {"anyOf": [{"items": {"type": "string"}, "type": "array"}, {"type": "null"}], "title": "Datum Uids"}, "datum_metadata": {"anyOf": [{"additionalProperties": {"items": {"anyOf": [{"$ref": "#/components/schemas/StringFilter"}, {"$ref": "#/components/schemas/NumericFilter"}, {"$ref": "#/components/schemas/DateTimeFilter"}, {"$ref": "#/components/schemas/BooleanFilter"}, {"$ref": "#/components/schemas/GeospatialFilter"}]}, "type": "array"}, "type": "object"}, {"type": "null"}], "title": "Datum Metadata"}, "task_types": {"anyOf": [{"items": {"$ref": "#/components/schemas/TaskType"}, "type": "array"}, {"type": "null"}], "title": "Task Types"}, 
"annotation_metadata": {"anyOf": [{"additionalProperties": {"items": {"anyOf": [{"$ref": "#/components/schemas/StringFilter"}, {"$ref": "#/components/schemas/NumericFilter"}, {"$ref": "#/components/schemas/DateTimeFilter"}, {"$ref": "#/components/schemas/BooleanFilter"}, {"$ref": "#/components/schemas/GeospatialFilter"}]}, "type": "array"}, "type": "object"}, {"type": "null"}], "title": "Annotation Metadata"}, "require_bounding_box": {"anyOf": [{"type": "boolean"}, {"type": "null"}], "title": "Require Bounding Box"}, "bounding_box_area": {"anyOf": [{"items": {"$ref": "#/components/schemas/NumericFilter"}, "type": "array"}, {"type": "null"}], "title": "Bounding Box Area"}, "require_polygon": {"anyOf": [{"type": "boolean"}, {"type": "null"}], "title": "Require Polygon"}, "polygon_area": {"anyOf": [{"items": {"$ref": "#/components/schemas/NumericFilter"}, "type": "array"}, {"type": "null"}], "title": "Polygon Area"}, "require_raster": {"anyOf": [{"type": "boolean"}, {"type": "null"}], "title": "Require Raster"}, "raster_area": {"anyOf": [{"items": {"$ref": "#/components/schemas/NumericFilter"}, "type": "array"}, {"type": "null"}], "title": "Raster Area"}, "labels": {"anyOf": [{"items": {"additionalProperties": {"type": "string"}, "type": "object"}, "type": "array"}, {"type": "null"}], "title": "Labels"}, "label_ids": {"anyOf": [{"items": {"type": "integer"}, "type": "array"}, {"type": "null"}], "title": "Label Ids"}, "label_keys": {"anyOf": [{"items": {"type": "string"}, "type": "array"}, {"type": "null"}], "title": "Label Keys"}, "label_scores": {"anyOf": [{"items": {"$ref": "#/components/schemas/NumericFilter"}, "type": "array"}, {"type": "null"}], "title": "Label Scores"}}, "additionalProperties": false, "type": "object", "title": "Filter", "description": "Used to filter Evaluations according to specific, user-defined criteria.\n\nAttributes\n----------\ndataset_names: List[str], default=None\n A list of `Dataset` names to filter on.\ndataset_metadata: Dict[str, list[StringFilter | NumericFilter | DateTimeFilter | BooleanFilter | GeospatialFilter]], default=None\n A dictionary of `Dataset` metadata to filter on.\nmodel_names: List[str], default=None\n A list of `Model` names to filter on.\nmodel_metadata: Dict[str, list[StringFilter | NumericFilter | DateTimeFilter | BooleanFilter | GeospatialFilter]], default=None\n A dictionary of `Model` metadata to filter on.\ndatum_metadata: Dict[str, list[StringFilter | NumericFilter | DateTimeFilter | BooleanFilter | GeospatialFilter]], default=None\n A dictionary of `Datum` metadata to filter on.\ntask_types: List[TaskType], default=None\n A list of task types to filter on.\nannotation_metadata: Dict[str, list[StringFilter | NumericFilter | DateTimeFilter | BooleanFilter | GeospatialFilter]], default=None\n A dictionary of `Annotation` metadata to filter on.\nrequire_bounding_box : bool, optional\n A toggle for filtering by bounding boxes.\nbounding_box_area : bool, optional\n An optional constraint to filter by bounding box area.\nrequire_polygon : bool, optional\n A toggle for filtering by polygons.\npolygon_area : bool, optional\n An optional constraint to filter by polygon area.\nrequire_raster : bool, optional\n A toggle for filtering by rasters.\nraster_area : bool, optional\n An optional constraint to filter by raster area.\nlabels: List[Dict[str, str]], default=None\n A dictionary of `Labels' to filter on.\nlabel_ids: List[int], default=None\n A list of `Label` IDs to filter on.\nlabel_keys: List[str] = None, default=None\n A list of 
`Label` keys to filter on.\nlabel_scores: List[ValueFilter], default=None\n A list of `ValueFilters` which are used to filter `Evaluations` according to the `Model`'s prediction scores."}, "GeoJSON": {"properties": {"type": {"type": "string", "title": "Type"}, "coordinates": {"anyOf": [{"items": {"type": "number"}, "type": "array"}, {"items": {"items": {"type": "number"}, "type": "array"}, "type": "array"}, {"items": {"items": {"items": {"type": "number"}, "type": "array"}, "type": "array"}, "type": "array"}, {"items": {"items": {"items": {"items": {"type": "number"}, "type": "array"}, "type": "array"}, "type": "array"}, "type": "array"}], "title": "Coordinates"}}, "type": "object", "required": ["type", "coordinates"], "title": "GeoJSON"}, "GeospatialFilter": {"properties": {"value": {"$ref": "#/components/schemas/GeoJSON"}, "operator": {"type": "string", "title": "Operator", "default": "intersect"}}, "additionalProperties": false, "type": "object", "required": ["value"], "title": "GeospatialFilter", "description": "Used to filter on geospatial coordinates.\n\nAttributes\n----------\nvalue : GeoJSON\n A dictionary containing a Point, Polygon, or MultiPolygon. Mirrors `shapely's` `GeoJSON` format.\noperator : str\n The operator to use for comparison. Should be one of `intersect`, `inside`, or `outside`."}, "GroundTruth-Input": {"properties": {"dataset_name": {"type": "string", "title": "Dataset Name"}, "datum": {"$ref": "#/components/schemas/Datum"}, "annotations": {"items": {"$ref": "#/components/schemas/Annotation-Input"}, "type": "array", "title": "Annotations"}}, "additionalProperties": false, "type": "object", "required": ["dataset_name", "datum", "annotations"], "title": "GroundTruth", "description": "An object describing a ground truth (e.g., a human-drawn bounding box on an image).\n\nAttributes\n----------\ndataset_name: str\n The name of the dataset this ground truth belongs to.\ndatum : Datum\n The datum this ground truth annotates.\nannotations : List[Annotation]\n The list of annotations that this ground truth applies."}, "GroundTruth-Output": {"properties": {"dataset_name": {"type": "string", "title": "Dataset Name"}, "datum": {"$ref": "#/components/schemas/Datum"}, "annotations": {"items": {"$ref": "#/components/schemas/Annotation-Output"}, "type": "array", "title": "Annotations"}}, "additionalProperties": false, "type": "object", "required": ["dataset_name", "datum", "annotations"], "title": "GroundTruth", "description": "An object describing a ground truth (e.g., a human-drawn bounding box on an image).\n\nAttributes\n----------\ndataset_name: str\n The name of the dataset this ground truth belongs to.\ndatum : Datum\n The datum this ground truth annotates.\nannotations : List[Annotation]\n The list of annotations that this ground truth applies."}, "HTTPValidationError": {"properties": {"detail": {"items": {"$ref": "#/components/schemas/ValidationError"}, "type": "array", "title": "Detail"}}, "type": "object", "title": "HTTPValidationError"}, "Label": {"properties": {"key": {"type": "string", "title": "Key"}, "value": {"type": "string", "title": "Value"}, "score": {"anyOf": [{"type": "number"}, {"type": "null"}], "title": "Score"}}, "additionalProperties": false, "type": "object", "required": ["key", "value"], "title": "Label", "description": "An object for labeling datasets, models, and annotations.\n\nAttributes\n----------\nkey : str\n The label key. (e.g. 'class', 'category')\nvalue : str\n The label's value. (e.g. 
'dog', 'cat')\nscore : float, optional\n A score assigned to the label in the case of a prediction."}, "Metric": {"properties": {"type": {"type": "string", "title": "Type"}, "parameters": {"anyOf": [{"type": "object"}, {"type": "null"}], "title": "Parameters"}, "value": {"anyOf": [{"type": "number"}, {"type": "object"}, {"type": "null"}], "title": "Value"}, "label": {"anyOf": [{"$ref": "#/components/schemas/Label"}, {"type": "null"}]}}, "type": "object", "required": ["type"], "title": "Metric", "description": "A metric response from the API.\n\nAttributes\n----------\ntype : str\n The type of metric.\nparameters : dict\n The parameters of the metric.\nvalue : float\n The value of the metric.\nlabel : Label\n The `Label` for the metric."}, "Model": {"properties": {"name": {"type": "string", "title": "Name"}, "metadata": {"additionalProperties": {"anyOf": [{"type": "boolean"}, {"type": "integer"}, {"type": "number"}, {"type": "string"}, {"additionalProperties": {"anyOf": [{"type": "string"}, {"type": "number"}, {"additionalProperties": {"anyOf": [{"type": "string"}, {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, {"items": {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, "type": "array"}, {"items": {"items": {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, "type": "array"}, "type": "array"}]}, "type": "object"}]}, "type": "object"}]}, "type": "object", "title": "Metadata", "default": {}}}, "additionalProperties": false, "type": "object", "required": ["name"], "title": "Model", "description": "A class describing a model that was trained on a particular dataset.\n\nAttributes\n----------\nname : str\n The name of the model.\nmetadata : dict, optional\n A dictionary of metadata that describes the model."}, "MultiPolygon": {"properties": {"value": {"items": {"items": {"items": {"prefixItems": [{"anyOf": [{"type": "integer"}, {"type": "number"}]}, {"anyOf": [{"type": "integer"}, {"type": "number"}]}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, "type": "array"}, "type": "array", "title": "Value"}}, "additionalProperties": false, "type": "object", "required": ["value"], "title": "MultiPolygon", "description": "Describes a MultiPolygon in (x,y) coordinates.\n\nAttributes\n----------\nvalue : list[list[list[list[int | float]]]]\n A list of coordinates describing the MultiPolygon.\n\nRaises\n------\nValueError\n If the value doesn't conform to the type."}, "NumericFilter": {"properties": {"value": {"type": "number", "title": "Value"}, "operator": {"type": "string", "title": "Operator", "default": "=="}}, "additionalProperties": false, "type": "object", "required": ["value"], "title": "NumericFilter", "description": "Used to filter on numeric values that meet some user-defined condition.\n\nAttributes\n----------\nvalue : float\n The value to compare the specific field against.\noperator : str\n The operator to use for comparison. 
Should be one of `[\">\", \"<\", \">=\", \"<=\", \"==\", \"!=\"]`.\n\nRaises\n------\nValueError\n If the `operator` doesn't match one of the allowed patterns."}, "Polygon": {"properties": {"value": {"items": {"items": {"prefixItems": [{"anyOf": [{"type": "integer"}, {"type": "number"}]}, {"anyOf": [{"type": "integer"}, {"type": "number"}]}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, "type": "array", "title": "Value"}}, "additionalProperties": false, "type": "object", "required": ["value"], "title": "Polygon", "description": "Describes a Polygon in (x,y) coordinates.\n\nAttributes\n----------\nvalue : list[list[tuple[int | float, int | float]]]\n A list of coordinates describing the Polygon.\n\nRaises\n------\nValueError\n If the value doesn't conform to the type."}, "Prediction-Input": {"properties": {"dataset_name": {"type": "string", "title": "Dataset Name"}, "model_name": {"type": "string", "title": "Model Name"}, "datum": {"$ref": "#/components/schemas/Datum"}, "annotations": {"items": {"$ref": "#/components/schemas/Annotation-Input"}, "type": "array", "title": "Annotations"}}, "additionalProperties": false, "type": "object", "required": ["dataset_name", "model_name", "datum", "annotations"], "title": "Prediction", "description": "An object describing a prediction (e.g., a machine-drawn bounding box on an image).\n\nAttributes\n----------\ndataset_name: str\n The name of the dataset this prediction belongs to.\nmodel_name : str\n The name of the model that produced the prediction.\ndatum : Datum\n The datum this prediction annotates.\nannotations : List[Annotation]\n The list of annotations that this prediction applies."}, "Prediction-Output": {"properties": {"dataset_name": {"type": "string", "title": "Dataset Name"}, "model_name": {"type": "string", "title": "Model Name"}, "datum": {"$ref": "#/components/schemas/Datum"}, "annotations": {"items": {"$ref": "#/components/schemas/Annotation-Output"}, "type": "array", "title": "Annotations"}}, "additionalProperties": false, "type": "object", "required": ["dataset_name", "model_name", "datum", "annotations"], "title": "Prediction", "description": "An object describing a prediction (e.g., a machine-drawn bounding box on an image).\n\nAttributes\n----------\ndataset_name: str\n The name of the dataset this prediction belongs to.\nmodel_name : str\n The name of the model that produced the prediction.\ndatum : Datum\n The datum this prediction annotates.\nannotations : List[Annotation]\n The list of annotations that this prediction applies."}, "Raster": {"properties": {"mask": {"type": "string", "title": "Mask"}, "geometry": {"anyOf": [{"$ref": "#/components/schemas/Box"}, {"$ref": "#/components/schemas/Polygon"}, {"$ref": "#/components/schemas/MultiPolygon"}, {"type": "null"}], "title": "Geometry"}}, "additionalProperties": false, "type": "object", "required": ["mask"], "title": "Raster", "description": "Describes a raster in geometric space.\n\nAttributes\n----------\nmask : str\n The mask describing the raster.\ngeometry : Box | Polygon | MultiPolygon, optional\n Option to define raster by a geometry. 
Overrides the bitmask.\n\nRaises\n------\nValueError\n If the image format is not PNG.\n If the image mode is not binary."}, "StringFilter": {"properties": {"value": {"type": "string", "title": "Value"}, "operator": {"type": "string", "title": "Operator", "default": "=="}}, "additionalProperties": false, "type": "object", "required": ["value"], "title": "StringFilter", "description": "Used to filter on string values that meet some user-defined condition.\n\nAttributes\n----------\nvalue : str\n The value to compare the specific field against.\noperator : str\n The operator to use for comparison. Should be one of `[\"==\", \"!=\"]`.\n\nRaises\n------\nValueError\n If the `operator` doesn't match one of the allowed patterns."}, "TableStatus": {"type": "string", "enum": ["creating", "finalized", "deleting"], "title": "TableStatus"}, "TaskType": {"type": "string", "enum": ["skip", "empty", "classification", "object-detection", "semantic-segmentation", "embedding"], "title": "TaskType"}, "Time": {"properties": {"value": {"type": "string", "title": "Value"}}, "additionalProperties": false, "type": "object", "required": ["value"], "title": "Time", "description": "An object describing a time.\n\nAttributes\n----------\nvalue : str\n Time in ISO format."}, "ValidationError": {"properties": {"loc": {"items": {"anyOf": [{"type": "string"}, {"type": "integer"}]}, "type": "array", "title": "Location"}, "msg": {"type": "string", "title": "Message"}, "type": {"type": "string", "title": "Error Type"}}, "type": "object", "required": ["loc", "msg", "type"], "title": "ValidationError"}}, "securitySchemes": {"OptionalHTTPBearer": {"type": "http", "scheme": "bearer"}}}} \ No newline at end of file +{"openapi": "3.1.0", "info": {"title": "FastAPI", "version": "0.1.0"}, "paths": {"/groundtruths": {"post": {"tags": ["GroundTruths"], "summary": "Create Groundtruths", "description": "Create a ground truth in the database.\n\nPOST Endpoint: `/groundtruths`\n\nParameters\n----------\ngroundtruths : list[schemas.GroundTruth]\n The ground truths to add to the database.\ndb : Session\n The database session to use. 
This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.\nignore_existing_datums : bool, optional\n If True, will ignore datums that already exist in the database.\n\nRaises\n------\nHTTPException (404)\n If the dataset or datum doesn't exist.\nHTTPException (409)\n If the dataset has been finalized, or if the datum already exists.", "operationId": "create_groundtruths_groundtruths_post", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "ignore_existing_datums", "in": "query", "required": false, "schema": {"type": "boolean", "default": false, "title": "Ignore Existing Datums"}}], "requestBody": {"required": true, "content": {"application/json": {"schema": {"type": "array", "items": {"$ref": "#/components/schemas/GroundTruth-Input"}, "title": "Groundtruths"}}}}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/groundtruths/dataset/{dataset_name}/datum/{uid}": {"get": {"tags": ["GroundTruths"], "summary": "Get Groundtruth", "description": "Fetch a ground truth from the database.\n\nGET Endpoint: `/groundtruths/dataset/{dataset_name}/datum/{uid}`\n\nParameters\n----------\ndataset_name : str\n The name of the dataset to fetch the ground truth from.\nuid : str\n The UID of the ground truth.\ndb : Session\n The database session to use. This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.\n\nReturns\n-------\nschemas.GroundTruth\n The ground truth requested by the user.\n\nRaises\n------\nHTTPException (404)\n If the dataset or datum does not exist.", "operationId": "get_groundtruth_groundtruths_dataset__dataset_name__datum__uid__get", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "dataset_name", "in": "path", "required": true, "schema": {"type": "string", "title": "Dataset Name"}}, {"name": "uid", "in": "path", "required": true, "schema": {"type": "string", "title": "Uid"}}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"anyOf": [{"$ref": "#/components/schemas/GroundTruth-Output"}, {"type": "null"}], "title": "Response Get Groundtruth Groundtruths Dataset Dataset Name Datum Uid Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/predictions": {"post": {"tags": ["Predictions"], "summary": "Create Predictions", "description": "Create a prediction in the database.\n\nPOST Endpoint: `/predictions`\n\nParameters\n----------\npredictions : list[schemas.Prediction]\n The predictions to add to the database.\ndb : Session\n The database session to use. 
This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.\n\nRaises\n------\nHTTPException (404)\n If the dataset, model, or datum doesn't exist.\nHTTPException (409)\n If the model has been finalized, or if the dataset has not been finalized.", "operationId": "create_predictions_predictions_post", "requestBody": {"content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/Prediction-Input"}, "type": "array", "title": "Predictions"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"OptionalHTTPBearer": []}]}}, "/predictions/model/{model_name}/dataset/{dataset_name}/datum/{uid}": {"get": {"tags": ["Predictions"], "summary": "Get Prediction", "description": "Fetch a prediction from the database.\n\nGET Endpoint: `/predictions/model/{model_name}/dataset/{dataset_name}/datum/{uid}`\n\nParameters\n----------\nmodel_name : str\n The name of the model associated with the prediction.\ndataset_name : str\n The name of the dataset associated with the prediction.\nuid : str\n The UID associated with the prediction.\ndb : Session\n The database session to use. This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.\n\nReturns\n-------\nschemas.Prediction\n The requested prediction.\n\nRaises\n------\nHTTPException (404)\n If the dataset or datum doesn't exist.", "operationId": "get_prediction_predictions_model__model_name__dataset__dataset_name__datum__uid__get", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "model_name", "in": "path", "required": true, "schema": {"type": "string", "title": "Model Name"}}, {"name": "dataset_name", "in": "path", "required": true, "schema": {"type": "string", "title": "Dataset Name"}}, {"name": "uid", "in": "path", "required": true, "schema": {"type": "string", "title": "Uid"}}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"anyOf": [{"$ref": "#/components/schemas/Prediction-Output"}, {"type": "null"}], "title": "Response Get Prediction Predictions Model Model Name Dataset Dataset Name Datum Uid Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/labels": {"get": {"tags": ["Labels"], "summary": "Get Labels", "description": "Fetch labels using optional JSON strings as query parameters.", "operationId": "get_labels_labels_get", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "offset", "in": "query", "required": false, "schema": {"type": "integer", "default": 0, "title": "Offset"}}, {"name": "limit", "in": "query", "required": false, "schema": {"type": "integer", "default": -1, "title": "Limit"}}, {"name": "dataset_names", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Dataset Names"}}, {"name": "dataset_metadata", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Dataset Metadata"}}, {"name": "model_names", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Model Names"}}, {"name": "model_metadata", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": 
"Model Metadata"}}, {"name": "datum_uids", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Datum Uids"}}, {"name": "datum_metadata", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Datum Metadata"}}, {"name": "task_types", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Task Types"}}, {"name": "annotation_metadata", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Annotation Metadata"}}, {"name": "require_bounding_box", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Require Bounding Box"}}, {"name": "bounding_box_area", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Bounding Box Area"}}, {"name": "require_polygon", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Require Polygon"}}, {"name": "polygon_area", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Polygon Area"}}, {"name": "require_raster", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Require Raster"}}, {"name": "raster_area", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Raster Area"}}, {"name": "labels", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Labels"}}, {"name": "label_ids", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Label Ids"}}, {"name": "label_keys", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Label Keys"}}, {"name": "label_scores", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Label Scores"}}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"type": "array", "items": {"$ref": "#/components/schemas/Label"}, "title": "Response Get Labels Labels Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/labels/dataset/{dataset_name}": {"get": {"tags": ["Labels"], "summary": "Get Labels From Dataset", "description": "Fetch all labels for a particular dataset from the database.\n\nGET Endpoint: `/labels/dataset/{dataset_name}`\n\nParameters\n----------\nresponse: Response\n The FastAPI response object. Used to return a content-range header to the user.\ndataset_name : str\n The name of the dataset.\noffset : int, optional\n The start index of the items to return.\nlimit : int, optional\n The number of items to return. Returns all items when set to -1. Returns all items when set to -1.\ndb : Session\n The database session to use. 
This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.\n\nReturns\n-------\nlist[schemas.Label]\n A list of all labels associated with the dataset in the database.\n\nRaises\n------\nHTTPException (404)\n If the dataset doesn't exist.", "operationId": "get_labels_from_dataset_labels_dataset__dataset_name__get", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "dataset_name", "in": "path", "required": true, "schema": {"type": "string", "title": "Dataset Name"}}, {"name": "offset", "in": "query", "required": false, "schema": {"type": "integer", "default": 0, "title": "Offset"}}, {"name": "limit", "in": "query", "required": false, "schema": {"type": "integer", "default": -1, "title": "Limit"}}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"type": "array", "items": {"$ref": "#/components/schemas/Label"}, "title": "Response Get Labels From Dataset Labels Dataset Dataset Name Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/labels/model/{model_name}": {"get": {"tags": ["Labels"], "summary": "Get Labels From Model", "description": "Fetch all labels for a particular model from the database.\n\nGET Endpoint: `/labels/model/{model_name}`\n\nParameters\n----------\nresponse: Response\n The FastAPI response object. Used to return a content-range header to the user.\nmodel_name : str\n The name of the model.\noffset : int, optional\n The start index of the items to return.\nlimit : int, optional\n The number of items to return. Returns all items when set to -1.\ndb : Session\n The database session to use. This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.\n\nReturns\n-------\nlist[schemas.Label]\n A list of all labels associated with the model in the database.\n\nRaises\n------\nHTTPException (404)\n If the model doesn't exist.", "operationId": "get_labels_from_model_labels_model__model_name__get", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "model_name", "in": "path", "required": true, "schema": {"type": "string", "title": "Model Name"}}, {"name": "offset", "in": "query", "required": false, "schema": {"type": "integer", "default": 0, "title": "Offset"}}, {"name": "limit", "in": "query", "required": false, "schema": {"type": "integer", "default": -1, "title": "Limit"}}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"type": "array", "items": {"$ref": "#/components/schemas/Label"}, "title": "Response Get Labels From Model Labels Model Model Name Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/datasets": {"post": {"tags": ["Datasets"], "summary": "Create Dataset", "description": "Create a dataset in the database.\n\nPOST Endpoint: `/datasets`\n\nParameters\n----------\ndataset : schemas.Dataset\n The dataset to add to the database.\ndb : Session\n The database session to use. 
This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.\n\nRaises\n------\nHTTPException (409)\n If the dataset already exists.", "operationId": "create_dataset_datasets_post", "security": [{"OptionalHTTPBearer": []}], "requestBody": {"required": true, "content": {"application/json": {"schema": {"$ref": "#/components/schemas/Dataset"}}}}, "responses": {"201": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}, "get": {"tags": ["Datasets"], "summary": "Get Datasets", "description": "Fetch datasets using optional JSON strings as query parameters.", "operationId": "get_datasets_datasets_get", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "offset", "in": "query", "required": false, "schema": {"type": "integer", "default": 0, "title": "Offset"}}, {"name": "limit", "in": "query", "required": false, "schema": {"type": "integer", "default": -1, "title": "Limit"}}, {"name": "dataset_names", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Dataset Names"}}, {"name": "dataset_metadata", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Dataset Metadata"}}, {"name": "model_names", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Model Names"}}, {"name": "model_metadata", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Model Metadata"}}, {"name": "datum_uids", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Datum Uids"}}, {"name": "datum_metadata", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Datum Metadata"}}, {"name": "task_types", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Task Types"}}, {"name": "annotation_metadata", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Annotation Metadata"}}, {"name": "require_bounding_box", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Require Bounding Box"}}, {"name": "bounding_box_area", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Bounding Box Area"}}, {"name": "require_polygon", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Require Polygon"}}, {"name": "polygon_area", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Polygon Area"}}, {"name": "require_raster", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Require Raster"}}, {"name": "raster_area", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Raster Area"}}, {"name": "labels", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Labels"}}, {"name": "label_ids", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Label Ids"}}, {"name": "label_keys", "in": "query", "required": false, "schema": {"anyOf": 
[{"type": "string"}, {"type": "null"}], "title": "Label Keys"}}, {"name": "label_scores", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Label Scores"}}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"type": "array", "items": {"$ref": "#/components/schemas/Dataset"}, "title": "Response Get Datasets Datasets Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/datasets/{dataset_name}": {"get": {"tags": ["Datasets"], "summary": "Get Dataset", "description": "Fetch a particular dataset from the database.\n\nGET Endpoint: `/datasets/{dataset_name}`\n\nParameters\n----------\ndataset_name : str\n The name of the dataset.\ndb : Session\n The database session to use. This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.\n\nReturns\n-------\nschemas.Dataset\n The requested dataset.\n\nRaises\n------\nHTTPException (404)\n If the dataset doesn't exist.", "operationId": "get_dataset_datasets__dataset_name__get", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "dataset_name", "in": "path", "required": true, "schema": {"type": "string", "title": "Dataset Name"}}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/Dataset"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}, "delete": {"tags": ["Datasets"], "summary": "Delete Dataset", "description": "Delete a dataset from the database.\n\nDELETE Endpoint: `/datasets/{dataset_name}`\n\nParameters\n----------\ndataset_name : str\n The name of the dataset.\nbackground_tasks: BackgroundTasks\n A FastAPI `BackgroundTasks` object to process the deletion asyncronously. This parameter is a FastAPI dependency and shouldn't be submitted by the user.\ndb : Session\n The database session to use. This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.\n\nRaises\n------\nHTTPException (404)\n If the dataset doesn't exist.\nHTTPException (409)\n If the dataset isn't in the correct state to be deleted.", "operationId": "delete_dataset_datasets__dataset_name__delete", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "dataset_name", "in": "path", "required": true, "schema": {"type": "string", "title": "Dataset Name"}}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/datasets/{dataset_name}/status": {"get": {"tags": ["Datasets"], "summary": "Get Dataset Status", "description": "Fetch the status of a dataset.\n\nGET Endpoint: `/datasets/{dataset_name}/status`\n\nParameters\n----------\ndataset_name : str\n The name of the dataset.\ndb : Session\n The database session to use. 
This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.\n\nReturns\n-------\nenums.TableStatus\n The requested state.\n\nRaises\n------\nHTTPException (404)\n If the dataset doesn't exist.", "operationId": "get_dataset_status_datasets__dataset_name__status_get", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "dataset_name", "in": "path", "required": true, "schema": {"type": "string", "title": "Dataset Name"}}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/TableStatus"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/datasets/{dataset_name}/summary": {"get": {"tags": ["Datasets"], "summary": "Get Dataset Summary", "description": "Get the summary of a dataset.\n\nGET Endpoint: `/datasets/{dataset_name}/summary`\n\nParameters\n----------\ndataset_name : str\n The name of the dataset.\ndb : Session\n The database session to use. This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.\n\nReturns\n-------\nschemas.DatasetSummary\n The dataset summary.\n\nRaises\n------\nHTTPException (404)\n If the dataset doesn't exist.", "operationId": "get_dataset_summary_datasets__dataset_name__summary_get", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "dataset_name", "in": "path", "required": true, "schema": {"type": "string", "title": "Dataset Name"}}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/DatasetSummary"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/datasets/{dataset_name}/finalize": {"put": {"tags": ["Datasets"], "summary": "Finalize Dataset", "description": "Finalizes a dataset for evaluation.\n\nPUT Endpoint: `/datasets/{dataset_name}/finalize`\n\nParameters\n----------\ndataset_name : str\n The name of the dataset.\ndb : Session\n The database session to use. 
This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.\n\nRaises\n------\nHTTPException (409)\n If the dataset is empty.\nHTTPException (404)\n If the dataset doesn't exist.", "operationId": "finalize_dataset_datasets__dataset_name__finalize_put", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "dataset_name", "in": "path", "required": true, "schema": {"type": "string", "title": "Dataset Name"}}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/data": {"get": {"tags": ["Datums"], "summary": "Get Datums", "description": "Fetch datums using optional JSON strings as query parameters.", "operationId": "get_datums_data_get", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "offset", "in": "query", "required": false, "schema": {"type": "integer", "default": 0, "title": "Offset"}}, {"name": "limit", "in": "query", "required": false, "schema": {"type": "integer", "default": -1, "title": "Limit"}}, {"name": "dataset_names", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Dataset Names"}}, {"name": "dataset_metadata", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Dataset Metadata"}}, {"name": "model_names", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Model Names"}}, {"name": "model_metadata", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Model Metadata"}}, {"name": "datum_uids", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Datum Uids"}}, {"name": "datum_metadata", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Datum Metadata"}}, {"name": "task_types", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Task Types"}}, {"name": "annotation_metadata", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Annotation Metadata"}}, {"name": "require_bounding_box", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Require Bounding Box"}}, {"name": "bounding_box_area", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Bounding Box Area"}}, {"name": "require_polygon", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Require Polygon"}}, {"name": "polygon_area", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Polygon Area"}}, {"name": "require_raster", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Require Raster"}}, {"name": "raster_area", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Raster Area"}}, {"name": "labels", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Labels"}}, {"name": "label_ids", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Label Ids"}}, 
{"name": "label_keys", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Label Keys"}}, {"name": "label_scores", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Label Scores"}}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"type": "array", "items": {"$ref": "#/components/schemas/Datum"}, "title": "Response Get Datums Data Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/data/dataset/{dataset_name}/uid/{uid}": {"get": {"tags": ["Datums"], "summary": "Get Datum", "description": "Fetch a particular datum.\nGET Endpoint: `/data/dataset/{dataset_name}/uid/{uid}`\nParameters\n----------\ndataset_name : str\n The name of the dataset.\nuid : str\n The UID of the datum.\ndb : Session\n The database session to use. This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.\nReturns\n-------\nschemas.Datum\n The requested datum.\nRaises\n------\nHTTPException (404)\n If the dataset or datum doesn't exist.", "operationId": "get_datum_data_dataset__dataset_name__uid__uid__get", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "dataset_name", "in": "path", "required": true, "schema": {"type": "string", "title": "Dataset Name"}}, {"name": "uid", "in": "path", "required": true, "schema": {"type": "string", "title": "Uid"}}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"anyOf": [{"$ref": "#/components/schemas/Datum"}, {"type": "null"}], "title": "Response Get Datum Data Dataset Dataset Name Uid Uid Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/models": {"post": {"tags": ["Models"], "summary": "Create Model", "description": "Create a model in the database.\n\nPOST Endpoint: `/models`\n\nParameters\n----------\nmodel : schemas.Model\n The model to add to the database.\ndb : Session\n The database session to use. 
This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.\n\nRaises\n------\nHTTPException (409)\n    If the model already exists.", "operationId": "create_model_models_post", "security": [{"OptionalHTTPBearer": []}], "requestBody": {"required": true, "content": {"application/json": {"schema": {"$ref": "#/components/schemas/Model"}}}}, "responses": {"201": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}, "get": {"tags": ["Models"], "summary": "Get Models", "description": "Fetch models using optional JSON strings as query parameters.", "operationId": "get_models_models_get", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "offset", "in": "query", "required": false, "schema": {"type": "integer", "default": 0, "title": "Offset"}}, {"name": "limit", "in": "query", "required": false, "schema": {"type": "integer", "default": -1, "title": "Limit"}}, {"name": "dataset_names", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Dataset Names"}}, {"name": "dataset_metadata", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Dataset Metadata"}}, {"name": "model_names", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Model Names"}}, {"name": "model_metadata", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Model Metadata"}}, {"name": "datum_uids", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Datum Uids"}}, {"name": "datum_metadata", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Datum Metadata"}}, {"name": "task_types", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Task Types"}}, {"name": "annotation_metadata", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Annotation Metadata"}}, {"name": "require_bounding_box", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Require Bounding Box"}}, {"name": "bounding_box_area", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Bounding Box Area"}}, {"name": "require_polygon", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Require Polygon"}}, {"name": "polygon_area", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Polygon Area"}}, {"name": "require_raster", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Require Raster"}}, {"name": "raster_area", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Raster Area"}}, {"name": "labels", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Labels"}}, {"name": "label_ids", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Label Ids"}}, 
{"name": "label_keys", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Label Keys"}}, {"name": "label_scores", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Label Scores"}}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"type": "array", "items": {"$ref": "#/components/schemas/Model"}, "title": "Response Get Models Models Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/models/{model_name}": {"get": {"tags": ["Models"], "summary": "Get Model", "description": "Fetch a particular model.\n\nGET Endpoint: `/models/{model_name}`\n\nParameters\n----------\nmodel_name : str\n The name of the model.\ndb : Session\n The database session to use. This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.\n\nReturns\n-------\nschemas.Model\n The requested model.\n\nRaises\n------\nHTTPException (404)\n If the model datum doesn't exist.", "operationId": "get_model_models__model_name__get", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "model_name", "in": "path", "required": true, "schema": {"type": "string", "title": "Model Name"}}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/Model"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}, "delete": {"tags": ["Models"], "summary": "Delete Model", "description": "Delete a model from the database.\n\nDELETE Endpoint: `/models/{model_name}`\n\nParameters\n----------\nmodel_name : str\n The name of the model.\ndb : Session\n The database session to use. This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.\n\nRaises\n------\nHTTPException (404)\n If the model doesn't exist.\nHTTPException (409)\n If the model isn't in the correct state to be deleted.", "operationId": "delete_model_models__model_name__delete", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "model_name", "in": "path", "required": true, "schema": {"type": "string", "title": "Model Name"}}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/models/{model_name}/eval-requests": {"get": {"tags": ["Models"], "summary": "Get Model Eval Requests", "description": "Fetch a particular model.\n\nGET Endpoint: `/models/{model_name}`\n\nParameters\n----------\nmodel_name : str\n The name of the model.\ndb : Session\n The database session to use. 
This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.\n\nReturns\n-------\nlist[EvaluationResponse]\n    The evaluation requests associated with the model.\n\nRaises\n------\nHTTPException (404)\n    If the model doesn't exist.", "operationId": "get_model_eval_requests_models__model_name__eval_requests_get", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "model_name", "in": "path", "required": true, "schema": {"type": "string", "title": "Model Name"}}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"type": "array", "items": {"$ref": "#/components/schemas/EvaluationResponse"}, "title": "Response Get Model Eval Requests Models Model Name Eval Requests Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/models/{model_name}/dataset/{dataset_name}/status": {"get": {"tags": ["Models"], "summary": "Get Model Status", "description": "Fetch the status of a model over a dataset.\n\nGET Endpoint: `/models/{model_name}/dataset/{dataset_name}/status`\n\nParameters\n----------\ndataset_name : str\n    The name of the dataset.\nmodel_name : str\n    The name of the model.\ndb : Session\n    The database session to use. This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.\n\nReturns\n-------\nenums.TableStatus\n    The requested state.\n\nRaises\n------\nHTTPException (404)\n    If the model doesn't exist.", "operationId": "get_model_status_models__model_name__dataset__dataset_name__status_get", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "dataset_name", "in": "path", "required": true, "schema": {"type": "string", "title": "Dataset Name"}}, {"name": "model_name", "in": "path", "required": true, "schema": {"type": "string", "title": "Model Name"}}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/TableStatus"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/models/{model_name}/datasets/{dataset_name}/finalize": {"put": {"tags": ["Models"], "summary": "Finalize Inferences", "description": "Finalize a model prior to evaluation.\n\nPUT Endpoint: `/models/{model_name}/datasets/{dataset_name}/finalize`\n\nParameters\n----------\ndataset_name : str\n    The name of the dataset.\nmodel_name : str\n    The name of the model.\ndb : Session\n    The database session to use. 
This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.\n\nRaises\n------\nHTTPException (400)\n    If the dataset or model is empty.\nHTTPException (404)\n    If the dataset or model does not exist.", "operationId": "finalize_inferences_models__model_name__datasets__dataset_name__finalize_put", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "dataset_name", "in": "path", "required": true, "schema": {"type": "string", "title": "Dataset Name"}}, {"name": "model_name", "in": "path", "required": true, "schema": {"type": "string", "title": "Model Name"}}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/evaluations": {"post": {"tags": ["Evaluations"], "summary": "Create Or Get Evaluations", "description": "Create a new evaluation.\n\nPOST Endpoint: `/evaluations`\n\nParameters\n----------\nresponse: Response\n    The FastAPI response object. Used to return a content-range header to the user.\njob_request: schemas.EvaluationRequest\n    The job request for the evaluation.\nbackground_tasks: BackgroundTasks\n    A FastAPI `BackgroundTasks` object to process the creation asynchronously. This parameter is a FastAPI dependency and shouldn't be submitted by the user.\nallow_retries: bool, default = False\n    Determines whether failed evaluations are restarted.\ndb : Session\n    The database session to use. This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.\n\nReturns\n-------\nlist[schemas.EvaluationResponse]\n    A list of evaluation response objects.\n\nRaises\n------\nHTTPException (400)\n    If the task type of the evaluation job doesn't exist, or if another ValueError is thrown.\nHTTPException (404)\n    If the dataset or model does not exist.\nHTTPException (405)\n    If the dataset or model hasn't been finalized.\nHTTPException (409)\n    If there is a state exception when creating the evaluation.", "operationId": "create_or_get_evaluations_evaluations_post", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "allow_retries", "in": "query", "required": false, "schema": {"type": "boolean", "default": false, "title": "Allow Retries"}}], "requestBody": {"required": true, "content": {"application/json": {"schema": {"$ref": "#/components/schemas/EvaluationRequest"}}}}, "responses": {"202": {"description": "Successful Response", "content": {"application/json": {"schema": {"type": "array", "items": {"$ref": "#/components/schemas/EvaluationResponse"}, "title": "Response Create Or Get Evaluations Evaluations Post"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}, "get": {"tags": ["Evaluations"], "summary": "Get Evaluations", "description": "Fetch all metrics associated with user-supplied dataset and model names. Users\nmay query using model names, dataset names, or both. All metrics for all specified\nmodels and datasets will be returned in a list of Evaluations.\n\nThis endpoint can handle multiple dataset and model names. For example, you can use\n`/evaluations?models=first_model,second_model&datasets=test_dataset` to get all evaluations\nrelated to `test_dataset` and either `first_model` or `second_model`.\n\nGET Endpoint: `/evaluations`\n\nParameters\n----------\nresponse: Response\n    The FastAPI response object. 
Used to return a content-range header to the user.\ndatasets : str\n An optional set of dataset names to return metrics for\nmodels : str\n An optional set of model names to return metrics for\nevaluation_ids : str\n An optional set of evaluation_ids to return metrics for\ndb : Session\n The database session to use. This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.\noffset : int, optional\n The start index of the items to return.\nlimit : int, optional\n The number of items to return. Returns all items when set to -1.\nmetrics_to_sort_by: str, optional\n An optional dict of metric types to sort the evaluations by.\n\nReturns\n-------\nlist[schemas.Evaluation]\n A list of evaluations.\n\nRaises\n------\nHTTPException (400)\n If a ValueError is thrown.\nHTTPException (404)\n If the dataset or model doesn't exist.", "operationId": "get_evaluations_evaluations_get", "security": [{"OptionalHTTPBearer": []}], "parameters": [{"name": "datasets", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Datasets"}}, {"name": "models", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Models"}}, {"name": "evaluation_ids", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Evaluation Ids"}}, {"name": "offset", "in": "query", "required": false, "schema": {"type": "integer", "default": 0, "title": "Offset"}}, {"name": "limit", "in": "query", "required": false, "schema": {"type": "integer", "default": -1, "title": "Limit"}}, {"name": "metrics_to_sort_by", "in": "query", "required": false, "schema": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Metrics To Sort By"}}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"type": "array", "items": {"$ref": "#/components/schemas/EvaluationResponse"}, "title": "Response Get Evaluations Evaluations Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/token": {"post": {"tags": ["Authentication"], "summary": "Login For Access Token", "operationId": "login_for_access_token_token_post", "requestBody": {"content": {"application/x-www-form-urlencoded": {"schema": {"$ref": "#/components/schemas/Body_login_for_access_token_token_post"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"type": "string", "title": "Response Login For Access Token Token Post"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/api-version": {"get": {"tags": ["Info"], "summary": "Get Api Version", "description": "Return the API's version.\n\nGET Endpoint: `/api-version`\n\nReturns\n-------\nschemas.APIVersion\n A response object containing the API's version number.", "operationId": "get_api_version_api_version_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/APIVersion"}}}}}, "security": [{"OptionalHTTPBearer": []}]}}, "/health": {"get": {"tags": ["Status"], "summary": "Health", "description": "Return 200 if the service is up.\n\nGET Endpoint: `/health`\n\nReturns\n-------\nschemas.Health\n A response indicating that the service is up and running.", "operationId": 
"health_health_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}}}}, "/ready": {"get": {"tags": ["Status"], "summary": "Ready", "description": "Return 200 if the service is up and connected to the database.\n\nGET Endpoint: `/ready`\n\nReturns\n-------\nschemas.Readiness\n A response indicating that the service is up and connected to the database.", "operationId": "ready_ready_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}}}}}, "components": {"schemas": {"APIVersion": {"properties": {"api_version": {"type": "string", "title": "Api Version"}}, "type": "object", "required": ["api_version"], "title": "APIVersion", "description": "Defines an API version string which is sent back to the user after their authentication is confirmed.\n\nAttributes\n----------\napi_version : str\n The API version."}, "Annotation-Input": {"properties": {"metadata": {"additionalProperties": {"anyOf": [{"type": "boolean"}, {"type": "integer"}, {"type": "number"}, {"type": "string"}, {"additionalProperties": {"anyOf": [{"type": "string"}, {"type": "number"}, {"additionalProperties": {"anyOf": [{"type": "string"}, {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, {"items": {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, "type": "array"}, {"items": {"items": {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, "type": "array"}, "type": "array"}]}, "type": "object"}]}, "type": "object"}]}, "type": "object", "title": "Metadata", "default": {}}, "labels": {"items": {"$ref": "#/components/schemas/Label"}, "type": "array", "title": "Labels", "default": []}, "bounding_box": {"anyOf": [{"$ref": "#/components/schemas/Box"}, {"type": "null"}]}, "polygon": {"anyOf": [{"$ref": "#/components/schemas/Polygon"}, {"type": "null"}]}, "raster": {"anyOf": [{"$ref": "#/components/schemas/Raster"}, {"type": "null"}]}, "embedding": {"anyOf": [{"items": {"type": "number"}, "type": "array"}, {"type": "null"}], "title": "Embedding"}, "is_instance": {"anyOf": [{"type": "boolean"}, {"type": "null"}], "title": "Is Instance"}, "implied_task_types": {"anyOf": [{"items": {"type": "string"}, "type": "array"}, {"type": "null"}], "title": "Implied Task Types"}}, "additionalProperties": false, "type": "object", "title": "Annotation", "description": "A class used to annotate 'GroundTruths' and 'Predictions'.\n\nAttributes\n----------\nmetadata: dict, optional\n A dictionary of metadata that describes the 'Annotation'.\nlabels: List[Label], optional\n A list of labels to use for the 'Annotation'.\nbounding_box: BoundingBox, optional\n A bounding box to assign to the 'Annotation'.\npolygon: Polygon, optional\n A polygon to assign to the 'Annotation'.\nraster: Raster, optional\n A raster to assign to the 'Annotation'.\nembedding: list[float], optional\n A jsonb to assign to the 'Annotation'.\nis_instance: bool, optional\n A boolean describing whether we should treat the Raster attached to an annotation as an instance segmentation or not. If set to true, then the Annotation will be validated for use in object detection tasks. 
If set to false, then the Annotation will be validated for use in semantic segmentation tasks.\nimplied_task_types: list[str], optional\n    The validated task types that are applicable to each Annotation. Doesn't need to be set by the user."}, "Annotation-Output": {"properties": {"metadata": {"additionalProperties": {"anyOf": [{"type": "boolean"}, {"type": "integer"}, {"type": "number"}, {"type": "string"}, {"additionalProperties": {"anyOf": [{"type": "string"}, {"type": "number"}, {"additionalProperties": {"anyOf": [{"type": "string"}, {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, {"items": {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, "type": "array"}, {"items": {"items": {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, "type": "array"}, "type": "array"}]}, "type": "object"}]}, "type": "object"}]}, "type": "object", "title": "Metadata", "default": {}}, "labels": {"items": {"$ref": "#/components/schemas/Label"}, "type": "array", "title": "Labels", "default": []}, "bounding_box": {"anyOf": [{"type": "object"}, {"type": "null"}]}, "polygon": {"anyOf": [{"type": "object"}, {"type": "null"}]}, "raster": {"anyOf": [{"type": "object"}, {"type": "null"}]}, "embedding": {"anyOf": [{"items": {"type": "number"}, "type": "array"}, {"type": "null"}], "title": "Embedding"}, "is_instance": {"anyOf": [{"type": "boolean"}, {"type": "null"}], "title": "Is Instance"}, "implied_task_types": {"anyOf": [{"items": {"type": "string"}, "type": "array"}, {"type": "null"}], "title": "Implied Task Types"}}, "additionalProperties": false, "type": "object", "title": "Annotation", "description": "A class used to annotate 'GroundTruths' and 'Predictions'.\n\nAttributes\n----------\nmetadata: dict, optional\n    A dictionary of metadata that describes the 'Annotation'.\nlabels: List[Label], optional\n    A list of labels to use for the 'Annotation'.\nbounding_box: BoundingBox, optional\n    A bounding box to assign to the 'Annotation'.\npolygon: Polygon, optional\n    A polygon to assign to the 'Annotation'.\nraster: Raster, optional\n    A raster to assign to the 'Annotation'.\nembedding: list[float], optional\n    A jsonb to assign to the 'Annotation'.\nis_instance: bool, optional\n    A boolean describing whether we should treat the Raster attached to an annotation as an instance segmentation or not. If set to true, then the Annotation will be validated for use in object detection tasks. If set to false, then the Annotation will be validated for use in semantic segmentation tasks.\nimplied_task_types: list[str], optional\n    The validated task types that are applicable to each Annotation. 
Doesn't need to be set by the user."}, "AnnotationType": {"type": "string", "enum": ["none", "box", "polygon", "multipolygon", "raster"], "title": "AnnotationType"}, "Body_login_for_access_token_token_post": {"properties": {"grant_type": {"anyOf": [{"type": "string", "pattern": "password"}, {"type": "null"}], "title": "Grant Type"}, "username": {"type": "string", "title": "Username"}, "password": {"type": "string", "title": "Password"}, "scope": {"type": "string", "title": "Scope", "default": ""}, "client_id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Client Id"}, "client_secret": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Client Secret"}}, "type": "object", "required": ["username", "password"], "title": "Body_login_for_access_token_token_post"}, "BooleanFilter": {"properties": {"value": {"type": "boolean", "title": "Value"}, "operator": {"type": "string", "title": "Operator", "default": "=="}}, "additionalProperties": false, "type": "object", "required": ["value"], "title": "BooleanFilter", "description": "Used to filter on boolean values that meet some user-defined condition.\n\nAttributes\n----------\nvalue : bool\n    The value to compare the specific field against.\noperator : str\n    The operator to use for comparison. Should be one of `[\"==\", \"!=\"]`.\n\nRaises\n------\nValueError\n    If the `operator` doesn't match one of the allowed patterns."}, "Box": {"properties": {"value": {"items": {"items": {"prefixItems": [{"anyOf": [{"type": "integer"}, {"type": "number"}]}, {"anyOf": [{"type": "integer"}, {"type": "number"}]}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, "type": "array", "title": "Value"}}, "additionalProperties": false, "type": "object", "required": ["value"], "title": "Box", "description": "Describes a Box in (x,y) coordinates.\n\nAttributes\n----------\nvalue : list[list[tuple[int | float, int | float]]]\n    A list of coordinates describing the Box.\n\nRaises\n------\nValueError\n    If the value doesn't conform to the type."}, "ConfusionMatrixEntry": {"properties": {"prediction": {"type": "string", "title": "Prediction"}, "groundtruth": {"type": "string", "title": "Groundtruth"}, "count": {"type": "integer", "title": "Count"}}, "type": "object", "required": ["prediction", "groundtruth", "count"], "title": "ConfusionMatrixEntry", "description": "Describes one element in a confusion matrix.\n\nAttributes\n----------\nprediction : str\n    The prediction.\ngroundtruth : str\n    The ground truth.\ncount : int\n    The value of the element in the matrix."}, "ConfusionMatrixResponse": {"properties": {"label_key": {"type": "string", "title": "Label Key"}, "entries": {"items": {"$ref": "#/components/schemas/ConfusionMatrixEntry"}, "type": "array", "title": "Entries"}}, "type": "object", "required": ["label_key", "entries"], "title": "ConfusionMatrixResponse", "description": "A response object used for HTTP responses since they won't contain matrix or label map attributes."}, "Dataset": {"properties": {"name": {"type": "string", "title": "Name"}, "metadata": {"additionalProperties": {"anyOf": [{"type": "boolean"}, {"type": "integer"}, {"type": "number"}, {"type": "string"}, {"additionalProperties": {"anyOf": [{"type": "string"}, {"type": "number"}, {"additionalProperties": {"anyOf": [{"type": "string"}, {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": 
"array"}, {"items": {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, "type": "array"}, {"items": {"items": {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, "type": "array"}, "type": "array"}]}, "type": "object"}]}, "type": "object"}]}, "type": "object", "title": "Metadata", "default": {}}}, "additionalProperties": false, "type": "object", "required": ["name"], "title": "Dataset", "description": "A class describing a given dataset.\n\nAttributes\n----------\nname : str\n The name of the dataset.\nmetadata : dict, optional\n A dictionary of metadata that describes the dataset."}, "DatasetSummary": {"properties": {"name": {"type": "string", "title": "Name"}, "num_datums": {"type": "integer", "title": "Num Datums"}, "num_annotations": {"type": "integer", "title": "Num Annotations"}, "num_bounding_boxes": {"type": "integer", "title": "Num Bounding Boxes"}, "num_polygons": {"type": "integer", "title": "Num Polygons"}, "num_rasters": {"type": "integer", "title": "Num Rasters"}, "task_types": {"items": {"$ref": "#/components/schemas/TaskType"}, "type": "array", "title": "Task Types"}, "labels": {"items": {"$ref": "#/components/schemas/Label"}, "type": "array", "title": "Labels"}, "datum_metadata": {"items": {"additionalProperties": {"anyOf": [{"type": "boolean"}, {"type": "integer"}, {"type": "number"}, {"type": "string"}, {"additionalProperties": {"anyOf": [{"type": "string"}, {"type": "number"}, {"additionalProperties": {"anyOf": [{"type": "string"}, {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, {"items": {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, "type": "array"}, {"items": {"items": {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, "type": "array"}, "type": "array"}]}, "type": "object"}]}, "type": "object"}]}, "type": "object"}, "type": "array", "title": "Datum Metadata"}, "annotation_metadata": {"items": {"additionalProperties": {"anyOf": [{"type": "boolean"}, {"type": "integer"}, {"type": "number"}, {"type": "string"}, {"additionalProperties": {"anyOf": [{"type": "string"}, {"type": "number"}, {"additionalProperties": {"anyOf": [{"type": "string"}, {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, {"items": {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, "type": "array"}, {"items": {"items": {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, "type": "array"}, "type": "array"}]}, "type": "object"}]}, "type": "object"}]}, "type": "object"}, "type": "array", "title": "Annotation Metadata"}}, "type": "object", "required": ["name", "num_datums", "num_annotations", "num_bounding_boxes", "num_polygons", "num_rasters", "task_types", "labels", "datum_metadata", "annotation_metadata"], "title": "DatasetSummary"}, "Date": 
{"properties": {"value": {"type": "string", "title": "Value"}}, "additionalProperties": false, "type": "object", "required": ["value"], "title": "Date", "description": "An object describing a date.\n\nAttributes\n----------\nvalue : str\n Date in ISO format."}, "DateTime": {"properties": {"value": {"type": "string", "title": "Value"}}, "additionalProperties": false, "type": "object", "required": ["value"], "title": "DateTime", "description": "An object describing a date and time.\n\nAttributes\n----------\nvalue : str\n Datetime in ISO format."}, "DateTimeFilter": {"properties": {"value": {"anyOf": [{"$ref": "#/components/schemas/DateTime"}, {"$ref": "#/components/schemas/Date"}, {"$ref": "#/components/schemas/Time"}, {"$ref": "#/components/schemas/Duration"}], "title": "Value"}, "operator": {"type": "string", "title": "Operator", "default": "=="}}, "additionalProperties": false, "type": "object", "required": ["value"], "title": "DateTimeFilter", "description": "Used to filter on datetime values that meet some user-defined condition.\n\nAttributes\n----------\nvalue : DateTime\n The value to compare the specific field against.\noperator : str\n The operator to use for comparison. Should be one of `[\">\", \"<\", \">=\", \"<=\", \"==\", \"!=\"]`.\n\nRaises\n------\nValueError\n If the `operator` doesn't match one of the allowed patterns."}, "Datum": {"properties": {"uid": {"type": "string", "title": "Uid"}, "metadata": {"additionalProperties": {"anyOf": [{"type": "boolean"}, {"type": "integer"}, {"type": "number"}, {"type": "string"}, {"additionalProperties": {"anyOf": [{"type": "string"}, {"type": "number"}, {"additionalProperties": {"anyOf": [{"type": "string"}, {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, {"items": {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, "type": "array"}, {"items": {"items": {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, "type": "array"}, "type": "array"}]}, "type": "object"}]}, "type": "object"}]}, "type": "object", "title": "Metadata", "default": {}}}, "additionalProperties": false, "type": "object", "required": ["uid"], "title": "Datum", "description": "A class used to store datum information about 'GroundTruths' and 'Predictions'.\n\nAttributes\n----------\nuid : str\n The UID of the datum.\nmetadata : dict, optional\n A dictionary of metadata that describes the datum."}, "Duration": {"properties": {"value": {"type": "number", "title": "Value"}}, "additionalProperties": false, "type": "object", "required": ["value"], "title": "Duration", "description": "An object describing a time duration.\n\nAttributes\n----------\nvalue : float\n Time duration in seconds."}, "EvaluationParameters": {"properties": {"task_type": {"$ref": "#/components/schemas/TaskType"}, "metrics_to_return": {"anyOf": [{"items": {"$ref": "#/components/schemas/MetricType"}, "type": "array"}, {"type": "null"}], "title": "Metrics To Return"}, "label_map": {"anyOf": [{"items": {"items": {"items": {"type": "string"}, "type": "array"}, "type": "array"}, "type": "array"}, {"type": "null"}], "title": "Label Map"}, "convert_annotations_to_type": {"anyOf": [{"$ref": "#/components/schemas/AnnotationType"}, {"type": "null"}]}, 
"iou_thresholds_to_compute": {"anyOf": [{"items": {"type": "number"}, "type": "array"}, {"type": "null"}], "title": "Iou Thresholds To Compute"}, "iou_thresholds_to_return": {"anyOf": [{"items": {"type": "number"}, "type": "array"}, {"type": "null"}], "title": "Iou Thresholds To Return"}, "recall_score_threshold": {"anyOf": [{"type": "number"}, {"type": "null"}], "title": "Recall Score Threshold", "default": 0}, "pr_curve_iou_threshold": {"type": "number", "title": "Pr Curve Iou Threshold", "default": 0.5}, "pr_curve_max_examples": {"type": "integer", "title": "Pr Curve Max Examples", "default": 1}}, "additionalProperties": false, "type": "object", "required": ["task_type"], "title": "EvaluationParameters", "description": "Defines parameters for evaluation methods.\n\nAttributes\n----------\ntask_type: TaskType\n The task type of a given evaluation.\nlabel_map: Optional[List[List[List[str]]]]\n Optional mapping of individual labels to a grouper label. Useful when you need to evaluate performance using labels that differ across datasets and models.\nmetrics_to_return: List[str], optional\n The list of metrics to compute, store, and return to the user.\nconvert_annotations_to_type: AnnotationType | None = None\n The type to convert all annotations to.\niou_thresholds_to_compute: List[float], optional\n A list of floats describing which Intersection over Unions (IoUs) to use when calculating metrics (i.e., mAP).\niou_thresholds_to_return: List[float], optional\n A list of floats describing which Intersection over Union (IoUs) thresholds to calculate a metric for. Must be a subset of `iou_thresholds_to_compute`.\nrecall_score_threshold: float, default=0\n The confidence score threshold for use when determining whether to count a prediction as a true positive or not while calculating Average Recall.\npr_curve_iou_threshold: float, optional\n The IOU threshold to use when calculating precision-recall curves for object detection tasks. 
Defaults to 0.5.\npr_curve_max_examples: int\n    The maximum number of datum examples to store when calculating PR curves."}, "EvaluationRequest": {"properties": {"dataset_names": {"items": {"type": "string"}, "type": "array", "title": "Dataset Names"}, "model_names": {"items": {"type": "string"}, "type": "array", "title": "Model Names"}, "filters": {"allOf": [{"$ref": "#/components/schemas/Filter-Input"}], "default": {}}, "parameters": {"$ref": "#/components/schemas/EvaluationParameters"}}, "additionalProperties": false, "type": "object", "required": ["dataset_names", "model_names", "parameters"], "title": "EvaluationRequest", "description": "Request for evaluation.\n\nAttributes\n----------\ndataset_names : list[str]\n    The names of the evaluated datasets.\nmodel_names : list[str]\n    The model(s) to evaluate.\nfilters : schemas.Filter, optional\n    The filter object used to define what data to evaluate.\nparameters : EvaluationParameters\n    Any parameters that are used to modify an evaluation method."}, "EvaluationResponse": {"properties": {"id": {"type": "integer", "title": "Id"}, "dataset_names": {"items": {"type": "string"}, "type": "array", "title": "Dataset Names"}, "model_name": {"type": "string", "title": "Model Name"}, "filters": {"$ref": "#/components/schemas/Filter-Output"}, "parameters": {"$ref": "#/components/schemas/EvaluationParameters"}, "status": {"$ref": "#/components/schemas/EvaluationStatus"}, "created_at": {"type": "string", "format": "date-time", "title": "Created At"}, "meta": {"anyOf": [{"additionalProperties": {"anyOf": [{"type": "string"}, {"type": "integer"}, {"type": "number"}]}, "type": "object"}, {"type": "null"}], "title": "Meta"}, "metrics": {"anyOf": [{"items": {"$ref": "#/components/schemas/Metric"}, "type": "array"}, {"type": "null"}], "title": "Metrics"}, "confusion_matrices": {"anyOf": [{"items": {"$ref": "#/components/schemas/ConfusionMatrixResponse"}, "type": "array"}, {"type": "null"}], "title": "Confusion Matrices"}, "ignored_pred_labels": {"anyOf": [{"items": {"$ref": "#/components/schemas/Label"}, "type": "array"}, {"type": "null"}], "title": "Ignored Pred Labels"}, "missing_pred_labels": {"anyOf": [{"items": {"$ref": "#/components/schemas/Label"}, "type": "array"}, {"type": "null"}], "title": "Missing Pred Labels"}}, "additionalProperties": true, "type": "object", "required": ["id", "dataset_names", "model_name", "filters", "parameters", "status", "created_at", "meta"], "title": "EvaluationResponse", "description": "An object for storing the returned results of a model evaluation (where groundtruths are compared with predictions to measure performance).\n\nAttributes\n----------\nid : int\n    The ID of the evaluation.\ndataset_names : list[str]\n    The names of the evaluated datasets.\nmodel_name : str\n    The name of the evaluated model.\nfilters : schemas.Filter\n    The evaluation filter used in the evaluation.\nparameters : schemas.EvaluationParameters\n    Any parameters used by the evaluation method.\nstatus : str\n    The status of the evaluation.\ncreated_at: datetime.datetime\n    The time the evaluation was created.\nmetrics : List[Metric]\n    A list of metrics associated with the evaluation.\nconfusion_matrices: List[ConfusionMatrixResponse]\n    A list of confusion matrices associated with the evaluation.\nmissing_pred_labels: List[Label], optional\n    A list of ground truth labels that aren't associated with any predictions.\nignored_pred_labels: List[Label], optional\n    A list of prediction labels that aren't associated with any ground 
truths.\nmeta: dict[str, str | int | float]\n Metadata about the evaluation run."}, "EvaluationStatus": {"type": "string", "enum": ["pending", "running", "done", "failed", "deleting"], "title": "EvaluationStatus"}, "Filter-Input": {"properties": {"dataset_names": {"anyOf": [{"items": {"type": "string"}, "type": "array"}, {"type": "null"}], "title": "Dataset Names"}, "dataset_metadata": {"anyOf": [{"additionalProperties": {"items": {"anyOf": [{"$ref": "#/components/schemas/StringFilter"}, {"$ref": "#/components/schemas/NumericFilter"}, {"$ref": "#/components/schemas/DateTimeFilter"}, {"$ref": "#/components/schemas/BooleanFilter"}, {"$ref": "#/components/schemas/GeospatialFilter"}]}, "type": "array"}, "type": "object"}, {"type": "null"}], "title": "Dataset Metadata"}, "model_names": {"anyOf": [{"items": {"type": "string"}, "type": "array"}, {"type": "null"}], "title": "Model Names"}, "model_metadata": {"anyOf": [{"additionalProperties": {"items": {"anyOf": [{"$ref": "#/components/schemas/StringFilter"}, {"$ref": "#/components/schemas/NumericFilter"}, {"$ref": "#/components/schemas/DateTimeFilter"}, {"$ref": "#/components/schemas/BooleanFilter"}, {"$ref": "#/components/schemas/GeospatialFilter"}]}, "type": "array"}, "type": "object"}, {"type": "null"}], "title": "Model Metadata"}, "datum_uids": {"anyOf": [{"items": {"type": "string"}, "type": "array"}, {"type": "null"}], "title": "Datum Uids"}, "datum_metadata": {"anyOf": [{"additionalProperties": {"items": {"anyOf": [{"$ref": "#/components/schemas/StringFilter"}, {"$ref": "#/components/schemas/NumericFilter"}, {"$ref": "#/components/schemas/DateTimeFilter"}, {"$ref": "#/components/schemas/BooleanFilter"}, {"$ref": "#/components/schemas/GeospatialFilter"}]}, "type": "array"}, "type": "object"}, {"type": "null"}], "title": "Datum Metadata"}, "task_types": {"anyOf": [{"items": {"$ref": "#/components/schemas/TaskType"}, "type": "array"}, {"type": "null"}], "title": "Task Types"}, "annotation_metadata": {"anyOf": [{"additionalProperties": {"items": {"anyOf": [{"$ref": "#/components/schemas/StringFilter"}, {"$ref": "#/components/schemas/NumericFilter"}, {"$ref": "#/components/schemas/DateTimeFilter"}, {"$ref": "#/components/schemas/BooleanFilter"}, {"$ref": "#/components/schemas/GeospatialFilter"}]}, "type": "array"}, "type": "object"}, {"type": "null"}], "title": "Annotation Metadata"}, "require_bounding_box": {"anyOf": [{"type": "boolean"}, {"type": "null"}], "title": "Require Bounding Box"}, "bounding_box_area": {"anyOf": [{"items": {"$ref": "#/components/schemas/NumericFilter"}, "type": "array"}, {"type": "null"}], "title": "Bounding Box Area"}, "require_polygon": {"anyOf": [{"type": "boolean"}, {"type": "null"}], "title": "Require Polygon"}, "polygon_area": {"anyOf": [{"items": {"$ref": "#/components/schemas/NumericFilter"}, "type": "array"}, {"type": "null"}], "title": "Polygon Area"}, "require_raster": {"anyOf": [{"type": "boolean"}, {"type": "null"}], "title": "Require Raster"}, "raster_area": {"anyOf": [{"items": {"$ref": "#/components/schemas/NumericFilter"}, "type": "array"}, {"type": "null"}], "title": "Raster Area"}, "labels": {"anyOf": [{"items": {"additionalProperties": {"type": "string"}, "type": "object"}, "type": "array"}, {"type": "null"}], "title": "Labels"}, "label_ids": {"anyOf": [{"items": {"type": "integer"}, "type": "array"}, {"type": "null"}], "title": "Label Ids"}, "label_keys": {"anyOf": [{"items": {"type": "string"}, "type": "array"}, {"type": "null"}], "title": "Label Keys"}, "label_scores": {"anyOf": [{"items": 
{"$ref": "#/components/schemas/NumericFilter"}, "type": "array"}, {"type": "null"}], "title": "Label Scores"}}, "additionalProperties": false, "type": "object", "title": "Filter", "description": "Used to filter Evaluations according to specific, user-defined criteria.\n\nAttributes\n----------\ndataset_names: List[str], default=None\n A list of `Dataset` names to filter on.\ndataset_metadata: Dict[str, list[StringFilter | NumericFilter | DateTimeFilter | BooleanFilter | GeospatialFilter]], default=None\n A dictionary of `Dataset` metadata to filter on.\nmodel_names: List[str], default=None\n A list of `Model` names to filter on.\nmodel_metadata: Dict[str, list[StringFilter | NumericFilter | DateTimeFilter | BooleanFilter | GeospatialFilter]], default=None\n A dictionary of `Model` metadata to filter on.\ndatum_metadata: Dict[str, list[StringFilter | NumericFilter | DateTimeFilter | BooleanFilter | GeospatialFilter]], default=None\n A dictionary of `Datum` metadata to filter on.\ntask_types: List[TaskType], default=None\n A list of task types to filter on.\nannotation_metadata: Dict[str, list[StringFilter | NumericFilter | DateTimeFilter | BooleanFilter | GeospatialFilter]], default=None\n A dictionary of `Annotation` metadata to filter on.\nrequire_bounding_box : bool, optional\n A toggle for filtering by bounding boxes.\nbounding_box_area : bool, optional\n An optional constraint to filter by bounding box area.\nrequire_polygon : bool, optional\n A toggle for filtering by polygons.\npolygon_area : bool, optional\n An optional constraint to filter by polygon area.\nrequire_raster : bool, optional\n A toggle for filtering by rasters.\nraster_area : bool, optional\n An optional constraint to filter by raster area.\nlabels: List[Dict[str, str]], default=None\n A dictionary of `Labels' to filter on.\nlabel_ids: List[int], default=None\n A list of `Label` IDs to filter on.\nlabel_keys: List[str] = None, default=None\n A list of `Label` keys to filter on.\nlabel_scores: List[ValueFilter], default=None\n A list of `ValueFilters` which are used to filter `Evaluations` according to the `Model`'s prediction scores."}, "Filter-Output": {"properties": {"dataset_names": {"anyOf": [{"items": {"type": "string"}, "type": "array"}, {"type": "null"}], "title": "Dataset Names"}, "dataset_metadata": {"anyOf": [{"additionalProperties": {"items": {"anyOf": [{"$ref": "#/components/schemas/StringFilter"}, {"$ref": "#/components/schemas/NumericFilter"}, {"$ref": "#/components/schemas/DateTimeFilter"}, {"$ref": "#/components/schemas/BooleanFilter"}, {"$ref": "#/components/schemas/GeospatialFilter"}]}, "type": "array"}, "type": "object"}, {"type": "null"}], "title": "Dataset Metadata"}, "model_names": {"anyOf": [{"items": {"type": "string"}, "type": "array"}, {"type": "null"}], "title": "Model Names"}, "model_metadata": {"anyOf": [{"additionalProperties": {"items": {"anyOf": [{"$ref": "#/components/schemas/StringFilter"}, {"$ref": "#/components/schemas/NumericFilter"}, {"$ref": "#/components/schemas/DateTimeFilter"}, {"$ref": "#/components/schemas/BooleanFilter"}, {"$ref": "#/components/schemas/GeospatialFilter"}]}, "type": "array"}, "type": "object"}, {"type": "null"}], "title": "Model Metadata"}, "datum_uids": {"anyOf": [{"items": {"type": "string"}, "type": "array"}, {"type": "null"}], "title": "Datum Uids"}, "datum_metadata": {"anyOf": [{"additionalProperties": {"items": {"anyOf": [{"$ref": "#/components/schemas/StringFilter"}, {"$ref": "#/components/schemas/NumericFilter"}, {"$ref": 
"#/components/schemas/DateTimeFilter"}, {"$ref": "#/components/schemas/BooleanFilter"}, {"$ref": "#/components/schemas/GeospatialFilter"}]}, "type": "array"}, "type": "object"}, {"type": "null"}], "title": "Datum Metadata"}, "task_types": {"anyOf": [{"items": {"$ref": "#/components/schemas/TaskType"}, "type": "array"}, {"type": "null"}], "title": "Task Types"}, "annotation_metadata": {"anyOf": [{"additionalProperties": {"items": {"anyOf": [{"$ref": "#/components/schemas/StringFilter"}, {"$ref": "#/components/schemas/NumericFilter"}, {"$ref": "#/components/schemas/DateTimeFilter"}, {"$ref": "#/components/schemas/BooleanFilter"}, {"$ref": "#/components/schemas/GeospatialFilter"}]}, "type": "array"}, "type": "object"}, {"type": "null"}], "title": "Annotation Metadata"}, "require_bounding_box": {"anyOf": [{"type": "boolean"}, {"type": "null"}], "title": "Require Bounding Box"}, "bounding_box_area": {"anyOf": [{"items": {"$ref": "#/components/schemas/NumericFilter"}, "type": "array"}, {"type": "null"}], "title": "Bounding Box Area"}, "require_polygon": {"anyOf": [{"type": "boolean"}, {"type": "null"}], "title": "Require Polygon"}, "polygon_area": {"anyOf": [{"items": {"$ref": "#/components/schemas/NumericFilter"}, "type": "array"}, {"type": "null"}], "title": "Polygon Area"}, "require_raster": {"anyOf": [{"type": "boolean"}, {"type": "null"}], "title": "Require Raster"}, "raster_area": {"anyOf": [{"items": {"$ref": "#/components/schemas/NumericFilter"}, "type": "array"}, {"type": "null"}], "title": "Raster Area"}, "labels": {"anyOf": [{"items": {"additionalProperties": {"type": "string"}, "type": "object"}, "type": "array"}, {"type": "null"}], "title": "Labels"}, "label_ids": {"anyOf": [{"items": {"type": "integer"}, "type": "array"}, {"type": "null"}], "title": "Label Ids"}, "label_keys": {"anyOf": [{"items": {"type": "string"}, "type": "array"}, {"type": "null"}], "title": "Label Keys"}, "label_scores": {"anyOf": [{"items": {"$ref": "#/components/schemas/NumericFilter"}, "type": "array"}, {"type": "null"}], "title": "Label Scores"}}, "additionalProperties": false, "type": "object", "title": "Filter", "description": "Used to filter Evaluations according to specific, user-defined criteria.\n\nAttributes\n----------\ndataset_names: List[str], default=None\n A list of `Dataset` names to filter on.\ndataset_metadata: Dict[str, list[StringFilter | NumericFilter | DateTimeFilter | BooleanFilter | GeospatialFilter]], default=None\n A dictionary of `Dataset` metadata to filter on.\nmodel_names: List[str], default=None\n A list of `Model` names to filter on.\nmodel_metadata: Dict[str, list[StringFilter | NumericFilter | DateTimeFilter | BooleanFilter | GeospatialFilter]], default=None\n A dictionary of `Model` metadata to filter on.\ndatum_metadata: Dict[str, list[StringFilter | NumericFilter | DateTimeFilter | BooleanFilter | GeospatialFilter]], default=None\n A dictionary of `Datum` metadata to filter on.\ntask_types: List[TaskType], default=None\n A list of task types to filter on.\nannotation_metadata: Dict[str, list[StringFilter | NumericFilter | DateTimeFilter | BooleanFilter | GeospatialFilter]], default=None\n A dictionary of `Annotation` metadata to filter on.\nrequire_bounding_box : bool, optional\n A toggle for filtering by bounding boxes.\nbounding_box_area : bool, optional\n An optional constraint to filter by bounding box area.\nrequire_polygon : bool, optional\n A toggle for filtering by polygons.\npolygon_area : bool, optional\n An optional constraint to filter by polygon 
area.\nrequire_raster : bool, optional\n    A toggle for filtering by rasters.\nraster_area : bool, optional\n    An optional constraint to filter by raster area.\nlabels: List[Dict[str, str]], default=None\n    A list of `Labels` to filter on.\nlabel_ids: List[int], default=None\n    A list of `Label` IDs to filter on.\nlabel_keys: List[str], default=None\n    A list of `Label` keys to filter on.\nlabel_scores: List[ValueFilter], default=None\n    A list of `ValueFilters` which are used to filter `Evaluations` according to the `Model`'s prediction scores."}, "GeoJSON": {"properties": {"type": {"type": "string", "title": "Type"}, "coordinates": {"anyOf": [{"items": {"type": "number"}, "type": "array"}, {"items": {"items": {"type": "number"}, "type": "array"}, "type": "array"}, {"items": {"items": {"items": {"type": "number"}, "type": "array"}, "type": "array"}, "type": "array"}, {"items": {"items": {"items": {"items": {"type": "number"}, "type": "array"}, "type": "array"}, "type": "array"}, "type": "array"}], "title": "Coordinates"}}, "type": "object", "required": ["type", "coordinates"], "title": "GeoJSON"}, "GeospatialFilter": {"properties": {"value": {"$ref": "#/components/schemas/GeoJSON"}, "operator": {"type": "string", "title": "Operator", "default": "intersect"}}, "additionalProperties": false, "type": "object", "required": ["value"], "title": "GeospatialFilter", "description": "Used to filter on geospatial coordinates.\n\nAttributes\n----------\nvalue : GeoJSON\n    A dictionary containing a Point, Polygon, or MultiPolygon. Mirrors `shapely's` `GeoJSON` format.\noperator : str\n    The operator to use for comparison. Should be one of `intersect`, `inside`, or `outside`."}, "GroundTruth-Input": {"properties": {"dataset_name": {"type": "string", "title": "Dataset Name"}, "datum": {"$ref": "#/components/schemas/Datum"}, "annotations": {"items": {"$ref": "#/components/schemas/Annotation-Input"}, "type": "array", "title": "Annotations"}}, "additionalProperties": false, "type": "object", "required": ["dataset_name", "datum", "annotations"], "title": "GroundTruth", "description": "An object describing a ground truth (e.g., a human-drawn bounding box on an image).\n\nAttributes\n----------\ndataset_name: str\n    The name of the dataset this ground truth belongs to.\ndatum : Datum\n    The datum this ground truth annotates.\nannotations : List[Annotation]\n    The list of annotations that this ground truth applies to."}, "GroundTruth-Output": {"properties": {"dataset_name": {"type": "string", "title": "Dataset Name"}, "datum": {"$ref": "#/components/schemas/Datum"}, "annotations": {"items": {"$ref": "#/components/schemas/Annotation-Output"}, "type": "array", "title": "Annotations"}}, "additionalProperties": false, "type": "object", "required": ["dataset_name", "datum", "annotations"], "title": "GroundTruth", "description": "An object describing a ground truth (e.g., a human-drawn bounding box on an image).\n\nAttributes\n----------\ndataset_name: str\n    The name of the dataset this ground truth belongs to.\ndatum : Datum\n    The datum this ground truth annotates.\nannotations : List[Annotation]\n    The list of annotations that this ground truth applies to."}, "HTTPValidationError": {"properties": {"detail": {"items": {"$ref": "#/components/schemas/ValidationError"}, "type": "array", "title": "Detail"}}, "type": "object", "title": "HTTPValidationError"}, "Label": {"properties": {"key": {"type": "string", "title": "Key"}, "value": {"type": "string", "title": "Value"}, "score": {"anyOf": [{"type": "number"}, {"type": 
"null"}], "title": "Score"}}, "additionalProperties": false, "type": "object", "required": ["key", "value"], "title": "Label", "description": "An object for labeling datasets, models, and annotations.\n\nAttributes\n----------\nkey : str\n The label key. (e.g. 'class', 'category')\nvalue : str\n The label's value. (e.g. 'dog', 'cat')\nscore : float, optional\n A score assigned to the label in the case of a prediction."}, "Metric": {"properties": {"type": {"type": "string", "title": "Type"}, "parameters": {"anyOf": [{"type": "object"}, {"type": "null"}], "title": "Parameters"}, "value": {"anyOf": [{"type": "number"}, {"type": "object"}, {"type": "null"}], "title": "Value"}, "label": {"anyOf": [{"$ref": "#/components/schemas/Label"}, {"type": "null"}]}}, "type": "object", "required": ["type"], "title": "Metric", "description": "A metric response from the API.\n\nAttributes\n----------\ntype : str\n The type of metric.\nparameters : dict\n The parameters of the metric.\nvalue : float\n The value of the metric.\nlabel : Label\n The `Label` for the metric."}, "MetricType": {"type": "string", "enum": ["Accuracy", "Precision", "Recall", "F1", "ROCAUC", "AP", "AR", "mAP", "mAR", "APAveragedOverIOUs", "mAPAveragedOverIOUs", "IOU", "mIOU", "PrecisionRecallCurve", "DetailedPrecisionRecallCurve"], "title": "MetricType"}, "Model": {"properties": {"name": {"type": "string", "title": "Name"}, "metadata": {"additionalProperties": {"anyOf": [{"type": "boolean"}, {"type": "integer"}, {"type": "number"}, {"type": "string"}, {"additionalProperties": {"anyOf": [{"type": "string"}, {"type": "number"}, {"additionalProperties": {"anyOf": [{"type": "string"}, {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, {"items": {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, "type": "array"}, {"items": {"items": {"items": {"prefixItems": [{"type": "number"}, {"type": "number"}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, "type": "array"}, "type": "array"}]}, "type": "object"}]}, "type": "object"}]}, "type": "object", "title": "Metadata", "default": {}}}, "additionalProperties": false, "type": "object", "required": ["name"], "title": "Model", "description": "A class describing a model that was trained on a particular dataset.\n\nAttributes\n----------\nname : str\n The name of the model.\nmetadata : dict, optional\n A dictionary of metadata that describes the model."}, "MultiPolygon": {"properties": {"value": {"items": {"items": {"items": {"prefixItems": [{"anyOf": [{"type": "integer"}, {"type": "number"}]}, {"anyOf": [{"type": "integer"}, {"type": "number"}]}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, "type": "array"}, "type": "array", "title": "Value"}}, "additionalProperties": false, "type": "object", "required": ["value"], "title": "MultiPolygon", "description": "Describes a MultiPolygon in (x,y) coordinates.\n\nAttributes\n----------\nvalue : list[list[list[list[int | float]]]]\n A list of coordinates describing the MultiPolygon.\n\nRaises\n------\nValueError\n If the value doesn't conform to the type."}, "NumericFilter": {"properties": {"value": {"type": "number", "title": "Value"}, "operator": {"type": "string", "title": "Operator", "default": "=="}}, "additionalProperties": false, "type": "object", 
"required": ["value"], "title": "NumericFilter", "description": "Used to filter on numeric values that meet some user-defined condition.\n\nAttributes\n----------\nvalue : float\n The value to compare the specific field against.\noperator : str\n The operator to use for comparison. Should be one of `[\">\", \"<\", \">=\", \"<=\", \"==\", \"!=\"]`.\n\nRaises\n------\nValueError\n If the `operator` doesn't match one of the allowed patterns."}, "Polygon": {"properties": {"value": {"items": {"items": {"prefixItems": [{"anyOf": [{"type": "integer"}, {"type": "number"}]}, {"anyOf": [{"type": "integer"}, {"type": "number"}]}], "type": "array", "maxItems": 2, "minItems": 2}, "type": "array"}, "type": "array", "title": "Value"}}, "additionalProperties": false, "type": "object", "required": ["value"], "title": "Polygon", "description": "Describes a Polygon in (x,y) coordinates.\n\nAttributes\n----------\nvalue : list[list[tuple[int | float, int | float]]]\n A list of coordinates describing the Box.\n\nRaises\n------\nValueError\n If the value doesn't conform to the type."}, "Prediction-Input": {"properties": {"dataset_name": {"type": "string", "title": "Dataset Name"}, "model_name": {"type": "string", "title": "Model Name"}, "datum": {"$ref": "#/components/schemas/Datum"}, "annotations": {"items": {"$ref": "#/components/schemas/Annotation-Input"}, "type": "array", "title": "Annotations"}}, "additionalProperties": false, "type": "object", "required": ["dataset_name", "model_name", "datum", "annotations"], "title": "Prediction", "description": "An object describing a prediction (e.g., a machine-drawn bounding box on an image).\n\nAttributes\n----------\ndataset_name: str\n The name of the dataset this ground truth belongs to.\nmodel_name : str\n The name of the model that produced the prediction.\ndatum : Datum\n The datum this ground truth annotates.\nannotations : List[Annotation]\n The list of annotations that this ground truth applies."}, "Prediction-Output": {"properties": {"dataset_name": {"type": "string", "title": "Dataset Name"}, "model_name": {"type": "string", "title": "Model Name"}, "datum": {"$ref": "#/components/schemas/Datum"}, "annotations": {"items": {"$ref": "#/components/schemas/Annotation-Output"}, "type": "array", "title": "Annotations"}}, "additionalProperties": false, "type": "object", "required": ["dataset_name", "model_name", "datum", "annotations"], "title": "Prediction", "description": "An object describing a prediction (e.g., a machine-drawn bounding box on an image).\n\nAttributes\n----------\ndataset_name: str\n The name of the dataset this ground truth belongs to.\nmodel_name : str\n The name of the model that produced the prediction.\ndatum : Datum\n The datum this ground truth annotates.\nannotations : List[Annotation]\n The list of annotations that this ground truth applies."}, "Raster": {"properties": {"mask": {"type": "string", "title": "Mask"}, "geometry": {"anyOf": [{"$ref": "#/components/schemas/Box"}, {"$ref": "#/components/schemas/Polygon"}, {"$ref": "#/components/schemas/MultiPolygon"}, {"type": "null"}], "title": "Geometry"}}, "additionalProperties": false, "type": "object", "required": ["mask"], "title": "Raster", "description": "Describes a raster in geometric space.\n\nAttributes\n----------\nmask : str\n The mask describing the raster.\ngeometry : Box | Polygon | MultiPolygon, optional\n Option to define raster by a geometry. 
Overrides the bitmask.\n\nRaises\n------\nValueError\n If the image format is not PNG.\n If the image mode is not binary."}, "StringFilter": {"properties": {"value": {"type": "string", "title": "Value"}, "operator": {"type": "string", "title": "Operator", "default": "=="}}, "additionalProperties": false, "type": "object", "required": ["value"], "title": "StringFilter", "description": "Used to filter on string values that meet some user-defined condition.\n\nAttributes\n----------\nvalue : str\n The value to compare the specific field against.\noperator : str\n The operator to use for comparison. Should be one of `[\"==\", \"!=\"]`.\n\nRaises\n------\nValueError\n If the `operator` doesn't match one of the allowed patterns."}, "TableStatus": {"type": "string", "enum": ["creating", "finalized", "deleting"], "title": "TableStatus"}, "TaskType": {"type": "string", "enum": ["skip", "empty", "classification", "object-detection", "semantic-segmentation", "embedding"], "title": "TaskType"}, "Time": {"properties": {"value": {"type": "string", "title": "Value"}}, "additionalProperties": false, "type": "object", "required": ["value"], "title": "Time", "description": "An object describing a time.\n\nAttributes\n----------\nvalue : str\n Time in ISO format."}, "ValidationError": {"properties": {"loc": {"items": {"anyOf": [{"type": "string"}, {"type": "integer"}]}, "type": "array", "title": "Location"}, "msg": {"type": "string", "title": "Message"}, "type": {"type": "string", "title": "Error Type"}}, "type": "object", "required": ["loc", "msg", "type"], "title": "ValidationError"}}, "securitySchemes": {"OptionalHTTPBearer": {"type": "http", "scheme": "bearer"}}}} \ No newline at end of file