From a8060ee8334789a6d1dee1e6966fecd90f7193ff Mon Sep 17 00:00:00 2001 From: mike0sv Date: Fri, 29 Dec 2023 18:14:30 +0400 Subject: [PATCH 1/2] Add custom callable metric --- src/evidently/metrics/custom_metric.py | 51 ++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) create mode 100644 src/evidently/metrics/custom_metric.py diff --git a/src/evidently/metrics/custom_metric.py b/src/evidently/metrics/custom_metric.py new file mode 100644 index 0000000000..88ce0d2ef5 --- /dev/null +++ b/src/evidently/metrics/custom_metric.py @@ -0,0 +1,51 @@ +from typing import Callable +from typing import List +from typing import Optional +from typing import Union + +from pydantic import PrivateAttr + +from evidently.base_metric import InputData +from evidently.base_metric import Metric +from evidently.base_metric import MetricResult +from evidently.model.widget import BaseWidgetInfo +from evidently.options.base import AnyOptions +from evidently.renderers.base_renderer import MetricRenderer +from evidently.renderers.base_renderer import default_renderer +from evidently.renderers.html_widgets import CounterData +from evidently.renderers.html_widgets import counter + + +class CustomCallableMetricResult(MetricResult): + value: float + + +CustomCallableType = Callable[[InputData], float] + + +class CustomCallableMetric(Metric[CustomCallableMetricResult]): + func: str + title: Optional[str] = None + + _func: Optional[CustomCallableType] = PrivateAttr(None) + + def __init__(self, func: Union[CustomCallableType, str], title: str = None, options: AnyOptions = None, **data): + if callable(func): + self._func = func + self.func = f"{func.__module__}.{func.__name__}" + else: + self._func = None + self.func = func + self.title = title + super().__init__(options, **data) + + def calculate(self, data: InputData) -> CustomCallableMetricResult: + if self._func is None: + raise ValueError("CustomCallableMetric is not configured with callable func") + return 
CustomCallableMetricResult(value=self._func(data)) + + +@default_renderer(wrap_type=CustomCallableMetric) +class CustomCallableMetricRenderer(MetricRenderer): + def render_html(self, obj: CustomCallableMetric) -> List[BaseWidgetInfo]: + return [counter(counters=[CounterData.float("", obj.get_result().value, 2)], title=obj.title or "")] From 58f16deb9e933ffef965a29055962aabe13476f6 Mon Sep 17 00:00:00 2001 From: mike0sv Date: Fri, 29 Dec 2023 19:09:05 +0400 Subject: [PATCH 2/2] Add custom tests --- tests/multitest/metrics/conftest.py | 2 +- tests/multitest/metrics/custom.py | 26 ++++++++++++++++++++++++++ 2 files changed, 27 insertions(+), 1 deletion(-) create mode 100644 tests/multitest/metrics/custom.py diff --git a/tests/multitest/metrics/conftest.py b/tests/multitest/metrics/conftest.py index 3126d6c0bc..3b56add10b 100644 --- a/tests/multitest/metrics/conftest.py +++ b/tests/multitest/metrics/conftest.py @@ -102,7 +102,7 @@ def generate_dataset_outcome(m: TestMetric): def load_test_metrics(): - for module in ["classification", "data_integrity", "data_drift", "data_quality", "recsys", "regression"]: + for module in ["classification", "data_integrity", "data_drift", "data_quality", "recsys", "regression", "custom"]: import_module(f"tests.multitest.metrics.{module}") diff --git a/tests/multitest/metrics/custom.py b/tests/multitest/metrics/custom.py new file mode 100644 index 0000000000..e0ba272442 --- /dev/null +++ b/tests/multitest/metrics/custom.py @@ -0,0 +1,26 @@ +import pandas as pd + +from evidently.base_metric import InputData +from evidently.metrics.custom_metric import CustomCallableMetric +from tests.multitest.conftest import AssertResultFields +from tests.multitest.datasets import TestDataset +from tests.multitest.metrics.conftest import TestMetric +from tests.multitest.metrics.conftest import metric + + +def custom_func(data: InputData) -> float: + return 0.3 + + +@metric +def custom_callable_metric(): + reference_data = current_data = 
pd.DataFrame({"text": [1, 2, 3]}) + + return TestMetric( + "custom_callable_metric", + CustomCallableMetric(func=custom_func, title="aaa"), + AssertResultFields({"value": 0.3}), + datasets=[ + TestDataset("custom_callable_metric_data", current=current_data, reference=reference_data, tags=[]), + ], + )