From 3a9e199349d25f8bd276b0eb9791817672bccf09 Mon Sep 17 00:00:00 2001
From: Mikhail Sveshnikov
Date: Wed, 18 Sep 2024 16:50:28 +0400
Subject: [PATCH] ALIES INVASION (#1301)

* add alias_required

* remove abc

* fix tests

* alias test WIP

* alias test WIP

* alias test WIP

* alias test WIP

* alias test WIP

* alias test WIP

* alias test WIP

* alias test WIP

* add aliases (#1302)

* alias registries and tests

* test classpaths exist

* fix mypy

* transitive aliases

* fix lint

* better error

---
 src/evidently/base_metric.py | 14 +
 src/evidently/calculations/data_drift.py | 5 +
 src/evidently/collector/config.py | 16 +
 src/evidently/collector/storage.py | 6 +
 src/evidently/descriptors/__init__.py | 2 +
 src/evidently/descriptors/_registry.py | 102 +++
 .../descriptors/custom_descriptor.py | 6 +
 src/evidently/descriptors/hf_descriptor.py | 6 +
 src/evidently/descriptors/llm_judges.py | 24 +
 ..._letter_character_percentage_descriptor.py | 3 +
 .../oov_words_percentage_descriptor.py | 3 +
 .../descriptors/openai_descriptor.py | 3 +
 .../descriptors/regexp_descriptor.py | 3 +
 .../descriptors/semantic_similarity.py | 3 +
 .../descriptors/sentence_count_descriptor.py | 3 +
 .../descriptors/sentiment_descriptor.py | 3 +
 .../descriptors/text_contains_descriptor.py | 6 +
 .../descriptors/text_length_descriptor.py | 3 +
 .../descriptors/text_part_descriptor.py | 6 +
 .../trigger_words_presence_descriptor.py | 3 +
 .../descriptors/word_count_descriptor.py | 3 +
 src/evidently/descriptors/words_descriptor.py | 6 +
 .../features/OOV_words_percentage_feature.py | 3 +
 src/evidently/features/__init__.py | 3 +
 src/evidently/features/_registry.py | 76 ++
 src/evidently/features/custom_feature.py | 9 +
 src/evidently/features/generated_features.py | 11 +
 src/evidently/features/hf_feature.py | 6 +
 src/evidently/features/llm_judge.py | 8 +
 ...non_letter_character_percentage_feature.py | 3 +
 src/evidently/features/openai_feature.py | 3 +
 src/evidently/features/regexp_feature.py | 3 +
 .../features/semantic_similarity_feature.py | 3 +
 .../features/sentence_count_feature.py | 3 +
 src/evidently/features/sentiment_feature.py | 3 +
 .../features/text_contains_feature.py | 6 +
 src/evidently/features/text_length_feature.py | 3 +
 src/evidently/features/text_part_feature.py | 6 +
 .../trigger_words_presence_feature.py | 3 +
 src/evidently/features/word_count_feature.py | 3 +
 src/evidently/features/words_feature.py | 9 +
 src/evidently/metric_preset/__init__.py | 2 +
 src/evidently/metric_preset/_registry.py | 24 +
 .../classification_performance.py | 3 +
 src/evidently/metric_preset/data_drift.py | 3 +
 src/evidently/metric_preset/data_quality.py | 3 +
 src/evidently/metric_preset/metric_preset.py | 3 +
 src/evidently/metric_preset/recsys.py | 3 +
 .../metric_preset/regression_performance.py | 3 +
 src/evidently/metric_preset/target_drift.py | 3 +
 src/evidently/metric_preset/text_evals.py | 3 +
 src/evidently/metric_results.py | 24 +
 src/evidently/metrics/__init__.py | 2 +
 src/evidently/metrics/_registry.py | 826 ++++++++++++++++++
 .../class_balance_metric.py | 4 +
 .../class_separation_metric.py | 4 +
 .../classification_dummy_metric.py | 4 +
 .../classification_quality_metric.py | 4 +
 .../confusion_matrix_metric.py | 4 +
 .../lift_curve_metric.py | 290 +++---
 .../lift_table_metric.py | 396 ++++-----
 .../classification_performance/objects.py | 6 +
 .../pr_curve_metric.py | 4 +
 .../pr_table_metric.py | 4 +
 .../probability_distribution_metric.py | 4 +
 .../quality_by_class_metric.py | 7 +
 .../quality_by_feature_table.py | 4 +
 .../roc_curve_metric.py | 4 +
 src/evidently/metrics/custom_metric.py | 6 +
 .../metrics/data_drift/column_drift_metric.py | 3 +
 .../data_drift/column_interaction_plot.py | 4 +
 .../metrics/data_drift/column_value_plot.py | 4 +
 .../metrics/data_drift/data_drift_table.py | 4 +
 .../data_drift/dataset_drift_metric.py | 6 +
 .../data_drift/embedding_drift_methods.py | 20 +
 .../metrics/data_drift/embeddings_drift.py | 4 +
 .../metrics/data_drift/feature_importance.py | 4 +
 .../data_drift/target_by_features_table.py | 4 +
 .../text_descriptors_drift_metric.py | 6 +
 .../text_domain_classifier_drift_metric.py | 7 +
 .../metrics/data_drift/text_metric.py | 4 +
 .../column_missing_values_metric.py | 5 +
 .../data_integrity/column_regexp_metric.py | 5 +
 .../data_integrity/column_summary_metric.py | 25 +
 .../dataset_missing_values_metric.py | 5 +
 .../data_integrity/dataset_summary_metric.py | 5 +
 .../data_quality/column_category_metric.py | 6 +
 .../column_correlations_metric.py | 4 +
 .../column_distribution_metric.py | 4 +
 .../data_quality/column_quantile_metric.py | 7 +
 .../data_quality/column_value_list_metric.py | 5 +
 .../data_quality/column_value_range_metric.py | 5 +
 .../conflict_prediction_metric.py | 7 +
 .../data_quality/conflict_target_metric.py | 4 +
 .../dataset_correlations_metric.py | 6 +
 .../metrics/data_quality/stability_metric.py | 6 +
 .../text_descriptors_correlation_metric.py | 4 +
 .../text_descriptors_distribution.py | 4 +
 src/evidently/metrics/recsys/base_top_k.py | 1 +
 src/evidently/metrics/recsys/diversity.py | 4 +
 src/evidently/metrics/recsys/f_beta_top_k.py | 3 +
 src/evidently/metrics/recsys/hit_rate_k.py | 4 +
 src/evidently/metrics/recsys/item_bias.py | 4 +
 src/evidently/metrics/recsys/map_k.py | 3 +
 src/evidently/metrics/recsys/mar_k.py | 3 +
 src/evidently/metrics/recsys/mrr.py | 4 +
 src/evidently/metrics/recsys/ndcg_k.py | 3 +
 src/evidently/metrics/recsys/novelty.py | 4 +
 .../metrics/recsys/pairwise_distance.py | 4 +
 .../metrics/recsys/personalisation.py | 4 +
 .../metrics/recsys/popularity_bias.py | 4 +
 .../metrics/recsys/precision_recall_k.py | 4 +
 .../metrics/recsys/precision_top_k.py | 3 +
 src/evidently/metrics/recsys/rec_examples.py | 4 +
 src/evidently/metrics/recsys/recall_top_k.py | 3 +
 .../metrics/recsys/scores_distribution.py | 4 +
 src/evidently/metrics/recsys/serendipity.py | 4 +
 src/evidently/metrics/recsys/train_stats.py | 4 +
 src/evidently/metrics/recsys/user_bias.py | 4 +
 .../abs_perc_error_in_time.py | 3 +
 .../error_bias_table.py | 4 +
 .../error_distribution.py | 4 +
 .../regression_performance/error_in_time.py | 3 +
 .../regression_performance/error_normality.py | 4 +
 .../metrics/regression_performance/objects.py | 11 +
 .../predicted_and_actual_in_time.py | 3 +
 .../predicted_vs_actual.py | 5 +
 .../regression_dummy_metric.py | 6 +
 .../regression_performance_metrics.py | 5 +
 .../regression_quality.py | 5 +
 .../regression_performance/top_error.py | 10 +
 src/evidently/pydantic_utils.py | 54 +-
 src/evidently/test_preset/__init__.py | 2 +
 src/evidently/test_preset/_registry.py | 42 +
 .../test_preset/classification_binary.py | 3 +
 .../test_preset/classification_binary_topk.py | 3 +
 .../test_preset/classification_multiclass.py | 3 +
 src/evidently/test_preset/data_drift.py | 3 +
 src/evidently/test_preset/data_quality.py | 3 +
 src/evidently/test_preset/data_stability.py | 3 +
 .../test_preset/no_target_performance.py | 3 +
 src/evidently/test_preset/recsys.py | 3 +
 src/evidently/test_preset/regression.py | 3 +
 src/evidently/test_preset/test_preset.py | 3 +
 src/evidently/tests/_registry.py | 268 ++++++
 src/evidently/tests/base_test.py | 17 +
 .../tests/classification_performance_tests.py | 42 +
 src/evidently/tests/custom_test.py | 3 +
 src/evidently/tests/data_drift_tests.py | 18 +
 src/evidently/tests/data_integrity_tests.py | 66 ++
 src/evidently/tests/data_quality_tests.py | 87 ++
 src/evidently/tests/recsys_tests.py | 48 +
 .../tests/regression_performance_tests.py | 18 +
 src/evidently/ui/components/base.py | 2 +
 src/evidently/ui/dashboards/base.py | 1 +
 src/evidently/ui/dashboards/reports.py | 13 +
 src/evidently/ui/dashboards/test_suites.py | 9 +
 .../calculation_engine/test_python_engine.py | 6 +
 tests/features/test_multicolumn.py | 3 +
 tests/metrics/test_base_metric.py | 24 +
 tests/multitest/conftest.py | 3 +
 tests/report/test_report.py | 6 +-
 tests/test_metric_results.py | 30 +-
 tests/test_pydantic_aliases.py | 66 +-
 tests/test_suite/test_test_suite.py | 3 +
 tests/ui/test_app.py | 6 +
 tests/ui/test_dashboards.py | 18 +
 tests/utils/test_pydantic_utils.py | 75 +-
 168 files changed, 2967 insertions(+), 348 deletions(-)
 create mode 100644 src/evidently/descriptors/_registry.py
 create mode 100644 src/evidently/features/_registry.py
 create mode 100644 src/evidently/metric_preset/_registry.py
 create mode 100644 src/evidently/metrics/_registry.py
 create mode 100644 src/evidently/test_preset/_registry.py
 create mode 100644 src/evidently/tests/_registry.py

diff --git a/src/evidently/base_metric.py b/src/evidently/base_metric.py
index 83c42584ac..29d559078b 100644
--- a/src/evidently/base_metric.py
+++ b/src/evidently/base_metric.py
@@ -33,6 +33,7 @@
 from evidently.pydantic_utils import FrozenBaseMeta
 from evidently.pydantic_utils import PolymorphicModel
 from evidently.pydantic_utils import WithTestAndMetricDependencies
+from evidently.pydantic_utils import autoregister
 from evidently.pydantic_utils import get_value_fingerprint
 from evidently.utils.data_preprocessing import DataDefinition

@@ -49,7 +50,10 @@ def fields(cls) -> FieldPath:
 class MetricResult(PolymorphicModel, BaseResult, metaclass=WithFieldsPathMetaclass):  # type: ignore[misc]
     # pydantic Config
     class Config:
+        type_alias = "evidently:metric_result:MetricResult"
         field_tags = {"type": {IncludeTags.TypeField}}
+        is_base_type = True
+        alias_required = True


 class ErrorResult(BaseResult):
@@ -72,7 +76,11 @@ class DatasetType(Enum):
     ADDITIONAL = "additional"


+@autoregister
 class ColumnName(EnumValueMixin, EvidentlyBaseModel):
+    class Config:
+        type_alias = "evidently:base:ColumnName"
+
     name: str
     display_name: str
     dataset: DatasetType
@@ -217,10 +225,15 @@ def result_type(cls) -> Type[MetricResult]:
 class BasePreset(EvidentlyBaseModel):
     class Config:
+        type_alias = "evidently:base:BasePreset"
+        transitive_aliases = True
         is_base_type = True


 class Metric(WithTestAndMetricDependencies, Generic[TResult], metaclass=WithResultFieldPathMetaclass):
+    class Config:
+        is_base_type = True
+
     _context: Optional["Context"] = None
     options: Options
@@ -310,6 +323,7 @@ def get_options_fingerprint(self) -> FingerprintPart:
 class ColumnMetricResult(MetricResult):
     class Config:
+        type_alias = "evidently:metric_result:ColumnMetricResult"
         field_tags = {
             "column_name": {IncludeTags.Parameter},
             "column_type": {IncludeTags.Parameter},
diff --git a/src/evidently/calculations/data_drift.py b/src/evidently/calculations/data_drift.py
index deb49ea39c..21384d3969 100644
--- a/src/evidently/calculations/data_drift.py
+++ b/src/evidently/calculations/data_drift.py
@@ -35,6 +35,7 @@
 class DriftStatsField(MetricResult):
     class Config:
+        type_alias = "evidently:metric_result:DriftStatsField"
"evidently:metric_result:DriftStatsField" dict_exclude_fields = {"characteristic_examples", "characteristic_words", "correlations"} # todo: after tests PR field_tags = { @@ -55,6 +56,7 @@ class Config: class ColumnDataDriftMetrics(ColumnMetricResult): class Config: # todo: change to field_tags: render + type_alias = "evidently:metric_result:ColumnDataDriftMetrics" dict_exclude_fields = {"scatter"} pd_exclude_fields = {"scatter"} field_tags = { @@ -85,6 +87,9 @@ class DatasetDrift: class DatasetDriftMetrics(MetricResult): + class Config: + type_alias = "evidently:metric_result:DatasetDriftMetrics" + number_of_columns: int number_of_drifted_columns: int share_of_drifted_columns: float diff --git a/src/evidently/collector/config.py b/src/evidently/collector/config.py index 33aa69e789..79de610c97 100644 --- a/src/evidently/collector/config.py +++ b/src/evidently/collector/config.py @@ -18,6 +18,7 @@ from evidently.collector.storage import InMemoryStorage from evidently.options.base import Options from evidently.pydantic_utils import PolymorphicModel +from evidently.pydantic_utils import autoregister from evidently.report import Report from evidently.suite.base_suite import MetadataValueType from evidently.test_suite import TestSuite @@ -42,12 +43,19 @@ def save(self, path: str): class CollectorTrigger(PolymorphicModel): + class Config: + is_base_type = True + @abc.abstractmethod def is_ready(self, config: "CollectorConfig", storage: "CollectorStorage") -> bool: raise NotImplementedError +@autoregister class IntervalTrigger(CollectorTrigger): + class Config: + type_alias = "evidently:collector_trigger:IntervalTrigger" + interval: float = Field(gt=0) last_triggered: float = 0 @@ -59,7 +67,11 @@ def is_ready(self, config: "CollectorConfig", storage: "CollectorStorage") -> bo return is_ready +@autoregister class RowsCountTrigger(CollectorTrigger): + class Config: + type_alias = "evidently:collector_trigger:RowsCountTrigger" + rows_count: int = Field(default=1, gt=0) def is_ready(self, config: "CollectorConfig", storage: "CollectorStorage") -> bool: @@ -67,7 +79,11 @@ def is_ready(self, config: "CollectorConfig", storage: "CollectorStorage") -> bo return buffer_size > 0 and buffer_size >= self.rows_count +@autoregister class RowsCountOrIntervalTrigger(CollectorTrigger): + class Config: + type_alias = "evidently:collector_trigger:RowsCountOrIntervalTrigger" + rows_count_trigger: RowsCountTrigger interval_trigger: IntervalTrigger diff --git a/src/evidently/collector/storage.py b/src/evidently/collector/storage.py index be4e04fe63..1e1b290f0c 100644 --- a/src/evidently/collector/storage.py +++ b/src/evidently/collector/storage.py @@ -9,6 +9,7 @@ from evidently._pydantic_compat import BaseModel from evidently.pydantic_utils import PolymorphicModel +from evidently.pydantic_utils import autoregister from evidently.suite.base_suite import ReportBase @@ -43,6 +44,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): class CollectorStorage(PolymorphicModel): class Config: underscore_attrs_are_private = True + is_base_type = True _locks: Dict[str, Lock] = {} @@ -85,7 +87,11 @@ def take_reports(self, id: str) -> Sequence[ReportPopper]: raise NotImplementedError +@autoregister class InMemoryStorage(CollectorStorage): + class Config: + type_alias = "evidently:collector_storage:InMemoryStorage" + max_log_events: int = 10 _buffers: Dict[str, List[Any]] = {} diff --git a/src/evidently/descriptors/__init__.py b/src/evidently/descriptors/__init__.py index 47d0f273c4..2520abdbe3 100644 --- 
diff --git a/src/evidently/descriptors/__init__.py b/src/evidently/descriptors/__init__.py
index 47d0f273c4..2520abdbe3 100644
--- a/src/evidently/descriptors/__init__.py
+++ b/src/evidently/descriptors/__init__.py
@@ -1,3 +1,4 @@
+from . import _registry
 from .custom_descriptor import CustomColumnEval
 from .custom_descriptor import CustomPairColumnEval
 from .hf_descriptor import HuggingFaceModel
@@ -54,4 +55,5 @@
     "SentenceCount",
     "Sentiment",
     "RegExp",
+    "_registry",
 ]
diff --git a/src/evidently/descriptors/_registry.py b/src/evidently/descriptors/_registry.py
new file mode 100644
index 0000000000..0f912a86fe
--- /dev/null
+++ b/src/evidently/descriptors/_registry.py
@@ -0,0 +1,102 @@
+from evidently.features.generated_features import FeatureDescriptor
+from evidently.features.generated_features import GeneralDescriptor
+from evidently.pydantic_utils import register_type_alias
+
+register_type_alias(
+    FeatureDescriptor,
+    "evidently.descriptors.custom_descriptor.CustomColumnEval",
+    "evidently:descriptor:CustomColumnEval",
+)
+register_type_alias(
+    FeatureDescriptor, "evidently.descriptors.hf_descriptor.HuggingFaceModel", "evidently:descriptor:HuggingFaceModel"
+)
+register_type_alias(
+    FeatureDescriptor,
+    "evidently.descriptors.hf_descriptor.HuggingFaceToxicityModel",
+    "evidently:descriptor:HuggingFaceToxicityModel",
+)
+register_type_alias(
+    FeatureDescriptor, "evidently.descriptors.llm_judges.BiasLLMEval", "evidently:descriptor:BiasLLMEval"
+)
+register_type_alias(
+    FeatureDescriptor,
+    "evidently.descriptors.llm_judges.BinaryClassificationLLMEval",
+    "evidently:descriptor:BinaryClassificationLLMEval",
+)
+register_type_alias(
+    FeatureDescriptor,
+    "evidently.descriptors.llm_judges.ContextQualityLLMEval",
+    "evidently:descriptor:ContextQualityLLMEval",
+)
+register_type_alias(
+    FeatureDescriptor, "evidently.descriptors.llm_judges.DeclineLLMEval", "evidently:descriptor:DeclineLLMEval"
+)
+register_type_alias(FeatureDescriptor, "evidently.descriptors.llm_judges.LLMEval", "evidently:descriptor:LLMEval")
+register_type_alias(
+    FeatureDescriptor, "evidently.descriptors.llm_judges.NegativityLLMEval", "evidently:descriptor:NegativityLLMEval"
+)
+register_type_alias(FeatureDescriptor, "evidently.descriptors.llm_judges.PIILLMEval", "evidently:descriptor:PIILLMEval")
+register_type_alias(
+    FeatureDescriptor, "evidently.descriptors.llm_judges.ToxicityLLMEval", "evidently:descriptor:ToxicityLLMEval"
+)
+register_type_alias(
+    FeatureDescriptor,
+    "evidently.descriptors.non_letter_character_percentage_descriptor.NonLetterCharacterPercentage",
+    "evidently:descriptor:NonLetterCharacterPercentage",
+)
+register_type_alias(
+    FeatureDescriptor, "evidently.descriptors.oov_words_percentage_descriptor.OOV", "evidently:descriptor:OOV"
+)
+register_type_alias(
+    FeatureDescriptor, "evidently.descriptors.openai_descriptor.OpenAIPrompting", "evidently:descriptor:OpenAIPrompting"
+)
+register_type_alias(FeatureDescriptor, "evidently.descriptors.regexp_descriptor.RegExp", "evidently:descriptor:RegExp")
+register_type_alias(
+    FeatureDescriptor,
+    "evidently.descriptors.semantic_similarity.SemanticSimilarity",
+    "evidently:descriptor:SemanticSimilarity",
+)
+register_type_alias(
+    FeatureDescriptor,
+    "evidently.descriptors.sentence_count_descriptor.SentenceCount",
+    "evidently:descriptor:SentenceCount",
+)
+register_type_alias(
+    FeatureDescriptor, "evidently.descriptors.sentiment_descriptor.Sentiment", "evidently:descriptor:Sentiment"
+)
+register_type_alias(
+    FeatureDescriptor, "evidently.descriptors.text_contains_descriptor.Contains", "evidently:descriptor:Contains"
+)
+register_type_alias(
+    FeatureDescriptor,
"evidently.descriptors.text_contains_descriptor.DoesNotContain", + "evidently:descriptor:DoesNotContain", +) +register_type_alias( + FeatureDescriptor, "evidently.descriptors.text_length_descriptor.TextLength", "evidently:descriptor:TextLength" +) +register_type_alias( + FeatureDescriptor, "evidently.descriptors.text_part_descriptor.BeginsWith", "evidently:descriptor:BeginsWith" +) +register_type_alias( + FeatureDescriptor, "evidently.descriptors.text_part_descriptor.EndsWith", "evidently:descriptor:EndsWith" +) +register_type_alias( + FeatureDescriptor, + "evidently.descriptors.trigger_words_presence_descriptor.TriggerWordsPresence", + "evidently:descriptor:TriggerWordsPresence", +) +register_type_alias( + FeatureDescriptor, "evidently.descriptors.word_count_descriptor.WordCount", "evidently:descriptor:WordCount" +) +register_type_alias( + FeatureDescriptor, "evidently.descriptors.words_descriptor.ExcludesWords", "evidently:descriptor:ExcludesWords" +) +register_type_alias( + FeatureDescriptor, "evidently.descriptors.words_descriptor.IncludesWords", "evidently:descriptor:IncludesWords" +) +register_type_alias( + GeneralDescriptor, + "evidently.descriptors.custom_descriptor.CustomPairColumnEval", + "evidently:descriptor:CustomPairColumnEval", +) diff --git a/src/evidently/descriptors/custom_descriptor.py b/src/evidently/descriptors/custom_descriptor.py index b05d1fb8b7..04920b7eb4 100644 --- a/src/evidently/descriptors/custom_descriptor.py +++ b/src/evidently/descriptors/custom_descriptor.py @@ -13,6 +13,9 @@ class CustomColumnEval(FeatureDescriptor): + class Config: + type_alias = "evidently:descriptor:CustomColumnEval" + func: Callable[[pd.Series], pd.Series] display_name: str feature_type: Union[str, ColumnType] @@ -29,6 +32,9 @@ def feature(self, column_name: str) -> GeneratedFeature: class CustomPairColumnEval(GeneralDescriptor): + class Config: + type_alias = "evidently:descriptor:CustomPairColumnEval" + func: Callable[[pd.Series, pd.Series], pd.Series] display_name: str first_column: str diff --git a/src/evidently/descriptors/hf_descriptor.py b/src/evidently/descriptors/hf_descriptor.py index 4faf5c965d..f0c080a438 100644 --- a/src/evidently/descriptors/hf_descriptor.py +++ b/src/evidently/descriptors/hf_descriptor.py @@ -7,6 +7,9 @@ class HuggingFaceModel(FeatureDescriptor): + class Config: + type_alias = "evidently:descriptor:HuggingFaceModel" + model: str params: Optional[dict] = None @@ -20,6 +23,9 @@ def feature(self, column_name: str) -> GeneratedFeature: class HuggingFaceToxicityModel(FeatureDescriptor): + class Config: + type_alias = "evidently:descriptor:HuggingFaceToxicityModel" + model: Optional[str] = None toxic_label: Optional[str] = None diff --git a/src/evidently/descriptors/llm_judges.py b/src/evidently/descriptors/llm_judges.py index 7f93416c5f..5ab90663c3 100644 --- a/src/evidently/descriptors/llm_judges.py +++ b/src/evidently/descriptors/llm_judges.py @@ -48,6 +48,9 @@ def get_input_columns(self, column_name: str) -> Dict[str, str]: class LLMEval(BaseLLMEval): + class Config: + type_alias = "evidently:descriptor:LLMEval" + name: ClassVar = "LLMEval" template: BaseLLMPromptTemplate @@ -61,6 +64,9 @@ def get_subcolumn(self) -> Optional[str]: class BinaryClassificationLLMEval(BaseLLMEval): + class Config: + type_alias = "evidently:descriptor:BinaryClassificationLLMEval" + template: ClassVar[BinaryClassificationPromptTemplate] include_category: Optional[bool] = None include_score: Optional[bool] = None @@ -81,6 +87,9 @@ def get_subcolumn(self) -> Optional[str]: class 
 class NegativityLLMEval(BinaryClassificationLLMEval):
+    class Config:
+        type_alias = "evidently:descriptor:NegativityLLMEval"
+
     name: ClassVar = "Negativity"
     template: ClassVar = BinaryClassificationPromptTemplate(
         criteria="""A "NEGATIVE" typically refers to a tendency to be overly critical, pessimistic, or cynical in attitude or tone.
@@ -98,6 +107,9 @@ class NegativityLLMEval(BinaryClassificationLLMEval):


 class PIILLMEval(BinaryClassificationLLMEval):
+    class Config:
+        type_alias = "evidently:descriptor:PIILLMEval"
+
     name: ClassVar = "PII"
     template: ClassVar = BinaryClassificationPromptTemplate(
         criteria="""Personally identifiable information (PII) is information that, when used alone or with other relevant data, can identify an individual.
@@ -116,6 +128,9 @@ class PIILLMEval(BinaryClassificationLLMEval):


 class DeclineLLMEval(BinaryClassificationLLMEval):
+    class Config:
+        type_alias = "evidently:descriptor:DeclineLLMEval"
+
     name: ClassVar = "Decline"
     template: ClassVar = BinaryClassificationPromptTemplate(
         criteria="""A "DECLINE" typically refers to a refusal or a polite rejection to do something.
@@ -131,6 +146,9 @@ class DeclineLLMEval(BinaryClassificationLLMEval):


 class ContextQualityLLMEval(BinaryClassificationLLMEval):
+    class Config:
+        type_alias = "evidently:descriptor:ContextQualityLLMEval"
+
     name: ClassVar = "ContextQuality"

     template: ClassVar = BinaryClassificationPromptTemplate(
@@ -164,6 +182,9 @@ def get_input_columns(self, column_name: str) -> Dict[str, str]:


 class BiasLLMEval(BinaryClassificationLLMEval):
+    class Config:
+        type_alias = "evidently:descriptor:BiasLLMEval"
+
     name: ClassVar = "Bias"
     template: ClassVar = BinaryClassificationPromptTemplate(
         criteria="""A "BIAS" typically refers to an inclination or prejudice for or against one person or group, especially in a way considered to be unfair.
@@ -185,6 +206,9 @@ class BiasLLMEval(BinaryClassificationLLMEval):


 class ToxicityLLMEval(BinaryClassificationLLMEval):
+    class Config:
+        type_alias = "evidently:descriptor:ToxicityLLMEval"
+
     name: ClassVar = "Toxicity"
     template: ClassVar = BinaryClassificationPromptTemplate(
         criteria="""A TOXICITY typically refers to content that includes harmful, offensive, or derogatory language.
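The _registry.py modules, like the descriptors one above, are the lazy half of the pattern: register_type_alias records a dotted classpath against an alias for a given base type, so the module holding the class is not imported until a payload actually references that alias. A sketch of registering a hypothetical third-party descriptor through the same call (the plugin names below are illustrative, not part of this patch):

    from evidently.features.generated_features import FeatureDescriptor
    from evidently.pydantic_utils import register_type_alias

    # Only the classpath string is stored here; the module is imported
    # lazily, when something deserializes this alias.
    register_type_alias(
        FeatureDescriptor,
        "my_plugin.descriptors.ProfanityScore",
        "my_plugin:descriptor:ProfanityScore",
    )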
diff --git a/src/evidently/descriptors/non_letter_character_percentage_descriptor.py b/src/evidently/descriptors/non_letter_character_percentage_descriptor.py
index 7b558a7298..2105f804f5 100644
--- a/src/evidently/descriptors/non_letter_character_percentage_descriptor.py
+++ b/src/evidently/descriptors/non_letter_character_percentage_descriptor.py
@@ -4,5 +4,8 @@
 class NonLetterCharacterPercentage(FeatureDescriptor):
+    class Config:
+        type_alias = "evidently:descriptor:NonLetterCharacterPercentage"
+
     def feature(self, column_name: str) -> GeneratedFeature:
         return non_letter_character_percentage_feature.NonLetterCharacterPercentage(column_name, self.display_name)
diff --git a/src/evidently/descriptors/oov_words_percentage_descriptor.py b/src/evidently/descriptors/oov_words_percentage_descriptor.py
index e45fcd4ebe..e61f07c831 100644
--- a/src/evidently/descriptors/oov_words_percentage_descriptor.py
+++ b/src/evidently/descriptors/oov_words_percentage_descriptor.py
@@ -6,6 +6,9 @@
 class OOV(FeatureDescriptor):
+    class Config:
+        type_alias = "evidently:descriptor:OOV"
+
     ignore_words: Tuple = ()

     def feature(self, column_name: str) -> GeneratedFeature:
diff --git a/src/evidently/descriptors/openai_descriptor.py b/src/evidently/descriptors/openai_descriptor.py
index 5286d6271e..89d40d1aa3 100644
--- a/src/evidently/descriptors/openai_descriptor.py
+++ b/src/evidently/descriptors/openai_descriptor.py
@@ -7,6 +7,9 @@
 class OpenAIPrompting(FeatureDescriptor):
+    class Config:
+        type_alias = "evidently:descriptor:OpenAIPrompting"
+
     prompt: str
     prompt_replace_string: str
     context: Optional[str]
diff --git a/src/evidently/descriptors/regexp_descriptor.py b/src/evidently/descriptors/regexp_descriptor.py
index c7c1f1df23..ec0ecf62dc 100644
--- a/src/evidently/descriptors/regexp_descriptor.py
+++ b/src/evidently/descriptors/regexp_descriptor.py
@@ -4,6 +4,9 @@
 class RegExp(FeatureDescriptor):
+    class Config:
+        type_alias = "evidently:descriptor:RegExp"
+
     reg_exp: str

     def feature(self, column_name: str) -> GeneratedFeature:
diff --git a/src/evidently/descriptors/semantic_similarity.py b/src/evidently/descriptors/semantic_similarity.py
index 95065d015f..72a34d78ce 100644
--- a/src/evidently/descriptors/semantic_similarity.py
+++ b/src/evidently/descriptors/semantic_similarity.py
@@ -4,6 +4,9 @@
 class SemanticSimilarity(FeatureDescriptor):
+    class Config:
+        type_alias = "evidently:descriptor:SemanticSimilarity"
+
     with_column: str

     def feature(self, column_name: str) -> GeneratedFeatures:
diff --git a/src/evidently/descriptors/sentence_count_descriptor.py b/src/evidently/descriptors/sentence_count_descriptor.py
index 96788cb18a..0d7d256fb3 100644
--- a/src/evidently/descriptors/sentence_count_descriptor.py
+++ b/src/evidently/descriptors/sentence_count_descriptor.py
@@ -4,5 +4,8 @@
 class SentenceCount(FeatureDescriptor):
+    class Config:
+        type_alias = "evidently:descriptor:SentenceCount"
+
     def feature(self, column_name: str) -> GeneratedFeature:
         return sentence_count_feature.SentenceCount(column_name, self.display_name)
diff --git a/src/evidently/descriptors/sentiment_descriptor.py b/src/evidently/descriptors/sentiment_descriptor.py
index 7884b2c8e5..026fb317cc 100644
--- a/src/evidently/descriptors/sentiment_descriptor.py
+++ b/src/evidently/descriptors/sentiment_descriptor.py
@@ -4,5 +4,8 @@
 class Sentiment(FeatureDescriptor):
+    class Config:
+        type_alias = "evidently:descriptor:Sentiment"
+
     def feature(self, column_name: str) -> GeneratedFeature:
         return sentiment_feature.Sentiment(column_name, self.display_name)
diff --git a/src/evidently/descriptors/text_contains_descriptor.py b/src/evidently/descriptors/text_contains_descriptor.py
index 125fb55aaa..7e069970d5 100644
--- a/src/evidently/descriptors/text_contains_descriptor.py
+++ b/src/evidently/descriptors/text_contains_descriptor.py
@@ -6,6 +6,9 @@
 class Contains(FeatureDescriptor):
+    class Config:
+        type_alias = "evidently:descriptor:Contains"
+
     items: List[str]
     mode: str = "any"
     case_sensitive: bool = True
@@ -21,6 +24,9 @@ def feature(self, column_name: str) -> GeneratedFeature:
 class DoesNotContain(FeatureDescriptor):
+    class Config:
+        type_alias = "evidently:descriptor:DoesNotContain"
+
     items: List[str]
     mode: str = "all"
     case_sensitive: bool = True
diff --git a/src/evidently/descriptors/text_length_descriptor.py b/src/evidently/descriptors/text_length_descriptor.py
index 59dadc0209..e42e6f98df 100644
--- a/src/evidently/descriptors/text_length_descriptor.py
+++ b/src/evidently/descriptors/text_length_descriptor.py
@@ -4,5 +4,8 @@
 class TextLength(FeatureDescriptor):
+    class Config:
+        type_alias = "evidently:descriptor:TextLength"
+
     def feature(self, column_name: str) -> GeneratedFeature:
         return text_length_feature.TextLength(column_name, self.display_name)
diff --git a/src/evidently/descriptors/text_part_descriptor.py b/src/evidently/descriptors/text_part_descriptor.py
index 9cfd2e64c6..913b849580 100644
--- a/src/evidently/descriptors/text_part_descriptor.py
+++ b/src/evidently/descriptors/text_part_descriptor.py
@@ -4,6 +4,9 @@
 class BeginsWith(FeatureDescriptor):
+    class Config:
+        type_alias = "evidently:descriptor:BeginsWith"
+
     prefix: str
     case_sensitive: bool = True
@@ -17,6 +20,9 @@ def feature(self, column_name: str) -> GeneratedFeature:
 class EndsWith(FeatureDescriptor):
+    class Config:
+        type_alias = "evidently:descriptor:EndsWith"
+
     suffix: str
     case_sensitive: bool = True
diff --git a/src/evidently/descriptors/trigger_words_presence_descriptor.py b/src/evidently/descriptors/trigger_words_presence_descriptor.py
index c4f667ebab..1e9eada7a5 100644
--- a/src/evidently/descriptors/trigger_words_presence_descriptor.py
+++ b/src/evidently/descriptors/trigger_words_presence_descriptor.py
@@ -6,6 +6,9 @@
 class TriggerWordsPresence(FeatureDescriptor):
+    class Config:
+        type_alias = "evidently:descriptor:TriggerWordsPresence"
+
     words_list: List[str]
     lemmatize: bool = True
diff --git a/src/evidently/descriptors/word_count_descriptor.py b/src/evidently/descriptors/word_count_descriptor.py
index 8a725edc73..7671764a95 100644
--- a/src/evidently/descriptors/word_count_descriptor.py
+++ b/src/evidently/descriptors/word_count_descriptor.py
@@ -4,5 +4,8 @@
 class WordCount(FeatureDescriptor):
+    class Config:
+        type_alias = "evidently:descriptor:WordCount"
+
     def feature(self, column_name: str) -> GeneratedFeature:
         return word_count_feature.WordCount(column_name, self.display_name)
diff --git a/src/evidently/descriptors/words_descriptor.py b/src/evidently/descriptors/words_descriptor.py
index a337300979..14e30060e5 100644
--- a/src/evidently/descriptors/words_descriptor.py
+++ b/src/evidently/descriptors/words_descriptor.py
@@ -6,6 +6,9 @@
 class ExcludesWords(FeatureDescriptor):
+    class Config:
+        type_alias = "evidently:descriptor:ExcludesWords"
+
     words_list: List[str]
     mode: str = "all"
     lemmatize: bool = True
@@ -21,6 +24,9 @@ def feature(self, column_name: str) -> GeneratedFeature:
 class IncludesWords(FeatureDescriptor):
+    class Config:
+        type_alias = "evidently:descriptor:IncludesWords"
+
     words_list: List[str]
     mode: str = "any"
     lemmatize: bool = True
diff --git a/src/evidently/features/OOV_words_percentage_feature.py b/src/evidently/features/OOV_words_percentage_feature.py
index 78c60d4a8c..6ab72e6d9a 100644
--- a/src/evidently/features/OOV_words_percentage_feature.py
+++ b/src/evidently/features/OOV_words_percentage_feature.py
@@ -16,6 +16,9 @@
 class OOVWordsPercentage(ApplyColumnGeneratedFeature):
+    class Config:
+        type_alias = "evidently:feature:OOVWordsPercentage"
+
     __feature_type__: ClassVar = ColumnType.Numerical
     display_name_template: ClassVar = "OOV Words % for {column_name}"
     ignore_words: Tuple = ()
diff --git a/src/evidently/features/__init__.py b/src/evidently/features/__init__.py
index e69de29bb2..4bfe1f7c80 100644
--- a/src/evidently/features/__init__.py
+++ b/src/evidently/features/__init__.py
@@ -0,0 +1,3 @@
+from . import _registry
+
+__all__ = ["_registry"]
diff --git a/src/evidently/features/_registry.py b/src/evidently/features/_registry.py
new file mode 100644
index 0000000000..ba2e101f5f
--- /dev/null
+++ b/src/evidently/features/_registry.py
@@ -0,0 +1,76 @@
+from evidently.features.generated_features import GeneratedFeatures
+from evidently.pydantic_utils import register_type_alias
+
+register_type_alias(
+    GeneratedFeatures,
+    "evidently.features.OOV_words_percentage_feature.OOVWordsPercentage",
+    "evidently:feature:OOVWordsPercentage",
+)
+register_type_alias(
+    GeneratedFeatures, "evidently.features.custom_feature.CustomFeature", "evidently:feature:CustomFeature"
+)
+register_type_alias(
+    GeneratedFeatures,
+    "evidently.features.custom_feature.CustomPairColumnFeature",
+    "evidently:feature:CustomPairColumnFeature",
+)
+register_type_alias(
+    GeneratedFeatures,
+    "evidently.features.custom_feature.CustomSingleColumnFeature",
+    "evidently:feature:CustomSingleColumnFeature",
+)
+register_type_alias(
+    GeneratedFeatures, "evidently.features.hf_feature.HuggingFaceFeature", "evidently:feature:HuggingFaceFeature"
+)
+register_type_alias(
+    GeneratedFeatures,
+    "evidently.features.hf_feature.HuggingFaceToxicityFeature",
+    "evidently:feature:HuggingFaceToxicityFeature",
+)
+register_type_alias(GeneratedFeatures, "evidently.features.llm_judge.LLMJudge", "evidently:feature:LLMJudge")
+register_type_alias(
+    GeneratedFeatures,
+    "evidently.features.non_letter_character_percentage_feature.NonLetterCharacterPercentage",
+    "evidently:feature:NonLetterCharacterPercentage",
+)
+register_type_alias(
+    GeneratedFeatures, "evidently.features.openai_feature.OpenAIFeature", "evidently:feature:OpenAIFeature"
+)
+register_type_alias(GeneratedFeatures, "evidently.features.regexp_feature.RegExp", "evidently:feature:RegExp")
+register_type_alias(
+    GeneratedFeatures,
+    "evidently.features.semantic_similarity_feature.SemanticSimilarityFeature",
+    "evidently:feature:SemanticSimilarityFeature",
+)
+register_type_alias(
+    GeneratedFeatures, "evidently.features.sentence_count_feature.SentenceCount", "evidently:feature:SentenceCount"
+)
+register_type_alias(GeneratedFeatures, "evidently.features.sentiment_feature.Sentiment", "evidently:feature:Sentiment")
+register_type_alias(
+    GeneratedFeatures, "evidently.features.text_contains_feature.Contains", "evidently:feature:Contains"
+)
+register_type_alias(
+    GeneratedFeatures, "evidently.features.text_contains_feature.DoesNotContain", "evidently:feature:DoesNotContain"
+)
+register_type_alias(
+    GeneratedFeatures, "evidently.features.text_length_feature.TextLength", "evidently:feature:TextLength"
+)
+register_type_alias(
"evidently.features.text_part_feature.BeginsWith", "evidently:feature:BeginsWith" +) +register_type_alias(GeneratedFeatures, "evidently.features.text_part_feature.EndsWith", "evidently:feature:EndsWith") +register_type_alias( + GeneratedFeatures, + "evidently.features.trigger_words_presence_feature.TriggerWordsPresent", + "evidently:feature:TriggerWordsPresent", +) +register_type_alias(GeneratedFeatures, "evidently.features.word_count_feature.WordCount", "evidently:feature:WordCount") +register_type_alias( + GeneratedFeatures, "evidently.features.words_feature.ExcludesWords", "evidently:feature:ExcludesWords" +) +register_type_alias( + GeneratedFeatures, "evidently.features.words_feature.IncludesWords", "evidently:feature:IncludesWords" +) +register_type_alias( + GeneratedFeatures, "evidently.features.words_feature.WordsPresence", "evidently:feature:WordsPresence" +) diff --git a/src/evidently/features/custom_feature.py b/src/evidently/features/custom_feature.py index 5bd405c48d..dbae8393ee 100644 --- a/src/evidently/features/custom_feature.py +++ b/src/evidently/features/custom_feature.py @@ -14,6 +14,9 @@ class CustomFeature(FeatureTypeFieldMixin, GeneratedFeature): + class Config: + type_alias = "evidently:feature:CustomFeature" + display_name: str name: str = Field(default_factory=lambda: str(new_id())) func: Callable[[pd.DataFrame, DataDefinition], pd.Series] @@ -28,6 +31,9 @@ def _as_column(self) -> "ColumnName": class CustomSingleColumnFeature(FeatureTypeFieldMixin, GeneratedFeature): + class Config: + type_alias = "evidently:feature:CustomSingleColumnFeature" + display_name: str func: Callable[[pd.Series], pd.Series] name: str = Field(default_factory=lambda: str(new_id())) @@ -50,6 +56,9 @@ def get_fingerprint_parts(self) -> Tuple[FingerprintPart, ...]: class CustomPairColumnFeature(FeatureTypeFieldMixin, GeneratedFeature): + class Config: + type_alias = "evidently:feature:CustomPairColumnFeature" + display_name: str func: Callable[[pd.Series, pd.Series], pd.Series] name: str = Field(default_factory=lambda: str(new_id())) diff --git a/src/evidently/features/generated_features.py b/src/evidently/features/generated_features.py index fe17e8d253..f8ebd96f4c 100644 --- a/src/evidently/features/generated_features.py +++ b/src/evidently/features/generated_features.py @@ -28,6 +28,9 @@ class FeatureResult(Generic[TEngineDataType]): class GeneratedFeatures(EvidentlyBaseModel): + class Config: + is_base_type = True + display_name: Optional[str] = None """ Class for computation of additional features. 
@@ -188,12 +191,16 @@ def _as_column(self) -> "ColumnName":
 class BaseDescriptor(EvidentlyBaseModel):
     class Config:
+        type_alias = "evidently:descriptor:BaseDescriptor"
         is_base_type = True

     display_name: Optional[str] = None


 class GeneralDescriptor(BaseDescriptor):
+    class Config:
+        is_base_type = True
+
     @abc.abstractmethod
     def feature(self) -> GeneratedFeatures:
         raise NotImplementedError()
@@ -203,6 +210,10 @@ def as_column(self) -> "ColumnName":
 class MultiColumnFeatureDescriptor(BaseDescriptor):
+    class Config:
+        type_alias = "evidently:descriptor:MultiColumnFeatureDescriptor"
+        is_base_type = True
+
     def feature(self, columns: List[str]) -> GeneratedFeature:
         raise NotImplementedError()
diff --git a/src/evidently/features/hf_feature.py b/src/evidently/features/hf_feature.py
index 3ccb1b03e9..e002015458 100644
--- a/src/evidently/features/hf_feature.py
+++ b/src/evidently/features/hf_feature.py
@@ -15,6 +15,9 @@
 class HuggingFaceFeature(FeatureTypeFieldMixin, DataFeature):
+    class Config:
+        type_alias = "evidently:feature:HuggingFaceFeature"
+
     column_name: str
     model: str
     params: dict
@@ -40,6 +43,9 @@ def __hash__(self):
 class HuggingFaceToxicityFeature(DataFeature):
+    class Config:
+        type_alias = "evidently:feature:HuggingFaceToxicityFeature"
+
     __feature_type__: ClassVar = ColumnType.Numerical
     column_name: str
     model: Optional[str]
diff --git a/src/evidently/features/llm_judge.py b/src/evidently/features/llm_judge.py
index 35f2f39669..5a193c0853 100644
--- a/src/evidently/features/llm_judge.py
+++ b/src/evidently/features/llm_judge.py
@@ -25,6 +25,7 @@
 from evidently.options.option import Option
 from evidently.pydantic_utils import EnumValueMixin
 from evidently.pydantic_utils import EvidentlyBaseModel
+from evidently.pydantic_utils import autoregister
 from evidently.utils.data_preprocessing import DataDefinition

 LLMMessage = Tuple[str, str]
@@ -113,7 +114,11 @@ class Uncertainty(str, Enum):
     NON_TARGET = "non_target"


+@autoregister
 class BinaryClassificationPromptTemplate(BaseLLMPromptTemplate, EnumValueMixin):
+    class Config:
+        type_alias = "evidently:prompt_template:BinaryClassificationPromptTemplate"
+
     template: str = (
         """{__criteria__}\n{__task__}\n\n{__as__}\n{{input}}\n{__ae__}\n\n{__instructions__}\n\n{__output_format__}"""
     )
@@ -247,6 +252,9 @@ def get_system_prompts(self) -> List[LLMMessage]:
 class LLMJudge(GeneratedFeatures):
+    class Config:
+        type_alias = "evidently:feature:LLMJudge"
+
     """Generic LLM judge generated features"""

     DEFAULT_INPUT_COLUMN: ClassVar = "input"
diff --git a/src/evidently/features/non_letter_character_percentage_feature.py b/src/evidently/features/non_letter_character_percentage_feature.py
index c11086d569..4ce38ced52 100644
--- a/src/evidently/features/non_letter_character_percentage_feature.py
+++ b/src/evidently/features/non_letter_character_percentage_feature.py
@@ -9,6 +9,9 @@
 class NonLetterCharacterPercentage(ApplyColumnGeneratedFeature):
+    class Config:
+        type_alias = "evidently:feature:NonLetterCharacterPercentage"
+
     __feature_type__: ClassVar = ColumnType.Numerical
     display_name_template: ClassVar = "Non Letter Character % for {column_name}"
     column_name: str
diff --git a/src/evidently/features/openai_feature.py b/src/evidently/features/openai_feature.py
index 1ba27348f9..11100a4ac7 100644
--- a/src/evidently/features/openai_feature.py
+++ b/src/evidently/features/openai_feature.py
@@ -16,6 +16,9 @@
 class OpenAIFeature(FeatureTypeFieldMixin, GeneratedFeature):
+    class Config:
+        type_alias = "evidently:feature:OpenAIFeature"
+
     column_name: str
     feature_id: str
     prompt: str
diff --git a/src/evidently/features/regexp_feature.py b/src/evidently/features/regexp_feature.py
index 8816316371..af6a8a3e1a 100644
--- a/src/evidently/features/regexp_feature.py
+++ b/src/evidently/features/regexp_feature.py
@@ -10,6 +10,9 @@
 class RegExp(GeneratedFeature):
+    class Config:
+        type_alias = "evidently:feature:RegExp"
+
     __feature_type__: ClassVar = ColumnType.Categorical
     column_name: str
     reg_exp: str
diff --git a/src/evidently/features/semantic_similarity_feature.py b/src/evidently/features/semantic_similarity_feature.py
index cb18e19162..5680bbfff6 100644
--- a/src/evidently/features/semantic_similarity_feature.py
+++ b/src/evidently/features/semantic_similarity_feature.py
@@ -11,6 +11,9 @@
 class SemanticSimilarityFeature(GeneratedFeature):
+    class Config:
+        type_alias = "evidently:feature:SemanticSimilarityFeature"
+
     __feature_type__: ClassVar = ColumnType.Numerical
     columns: List[str]
     model: str = "all-MiniLM-L6-v2"
diff --git a/src/evidently/features/sentence_count_feature.py b/src/evidently/features/sentence_count_feature.py
index 1c51b45c96..6b4215cae8 100644
--- a/src/evidently/features/sentence_count_feature.py
+++ b/src/evidently/features/sentence_count_feature.py
@@ -10,6 +10,9 @@
 class SentenceCount(ApplyColumnGeneratedFeature):
+    class Config:
+        type_alias = "evidently:feature:SentenceCount"
+
     __feature_type__: ClassVar = ColumnType.Numerical
     _reg: ClassVar[re.Pattern] = re.compile(r"(? ColumnName:
 class EndsWith(GeneratedFeature):
+    class Config:
+        type_alias = "evidently:feature:EndsWith"
+
     __feature_type__: ClassVar = ColumnType.Categorical
     column_name: str
     case_sensitive: bool
diff --git a/src/evidently/features/trigger_words_presence_feature.py b/src/evidently/features/trigger_words_presence_feature.py
index 9385266824..933cabcb55 100644
--- a/src/evidently/features/trigger_words_presence_feature.py
+++ b/src/evidently/features/trigger_words_presence_feature.py
@@ -13,6 +13,9 @@
 class TriggerWordsPresent(ApplyColumnGeneratedFeature):
+    class Config:
+        type_alias = "evidently:feature:TriggerWordsPresent"
+
     __feature_type__: ClassVar = ColumnType.Categorical
     column_name: str
     words_list: List[str]
diff --git a/src/evidently/features/word_count_feature.py b/src/evidently/features/word_count_feature.py
index 6181ffe0c0..9154412fed 100644
--- a/src/evidently/features/word_count_feature.py
+++ b/src/evidently/features/word_count_feature.py
@@ -10,6 +10,9 @@
 class WordCount(ApplyColumnGeneratedFeature):
+    class Config:
+        type_alias = "evidently:feature:WordCount"
+
     __feature_type__: ClassVar = ColumnType.Numerical
     _reg: ClassVar[re.Pattern] = re.compile(r"[^a-zA-Z ]+")
     display_name_template: ClassVar = "Word Count for {column_name}"
diff --git a/src/evidently/features/words_feature.py b/src/evidently/features/words_feature.py
index 576d999c0e..a85131585c 100644
--- a/src/evidently/features/words_feature.py
+++ b/src/evidently/features/words_feature.py
@@ -41,6 +41,9 @@ def _listed_words_present(
 class WordsPresence(ApplyColumnGeneratedFeature):
+    class Config:
+        type_alias = "evidently:feature:WordsPresence"
+
     __feature_type__: ClassVar = ColumnType.Categorical
     column_name: str
     words_list: List[str]
@@ -79,6 +82,9 @@ def apply(self, value: Any):
 class IncludesWords(WordsPresence):
+    class Config:
+        type_alias = "evidently:feature:IncludesWords"
+
     def __init__(
         self,
         column_name: str,
@@ -100,6 +106,9 @@ def _feature_display_name(self):
 class ExcludesWords(WordsPresence):
+    class Config:
+        type_alias = "evidently:feature:ExcludesWords"
+
     def __init__(
         self,
         column_name: str,
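For context on what the aliases buy: PolymorphicModel writes a type discriminator into serialized payloads, and with this patch that discriminator is the registered alias rather than the class's import path. A hedged round-trip sketch (expected behavior as I read the tests this PR touches, not a documented API guarantee):

    from evidently.descriptors import TextLength

    descriptor = TextLength(display_name="Response length")
    payload = descriptor.dict()
    # Expected: the discriminator carries the alias, not the import path.
    assert payload["type"] == "evidently:descriptor:TextLength"

    # Parsing resolves the alias back to the registered class.
    restored = TextLength.parse_obj(payload)
    assert restored == descriptor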
diff --git a/src/evidently/metric_preset/__init__.py b/src/evidently/metric_preset/__init__.py
index aea7a159b9..26b1c80db6 100644
--- a/src/evidently/metric_preset/__init__.py
+++ b/src/evidently/metric_preset/__init__.py
@@ -1,3 +1,4 @@
+from . import _registry
 from .classification_performance import ClassificationPreset
 from .data_drift import DataDriftPreset
 from .data_quality import DataQualityPreset
@@ -14,4 +15,5 @@
     "TargetDriftPreset",
     "TextEvals",
     "RecsysPreset",
+    "_registry",
 ]
diff --git a/src/evidently/metric_preset/_registry.py b/src/evidently/metric_preset/_registry.py
new file mode 100644
index 0000000000..4aeca8c50e
--- /dev/null
+++ b/src/evidently/metric_preset/_registry.py
@@ -0,0 +1,24 @@
+from evidently.metric_preset.metric_preset import MetricPreset
+from evidently.pydantic_utils import register_type_alias
+
+register_type_alias(
+    MetricPreset,
+    "evidently.metric_preset.classification_performance.ClassificationPreset",
+    "evidently:metric_preset:ClassificationPreset",
+)
+register_type_alias(
+    MetricPreset, "evidently.metric_preset.data_drift.DataDriftPreset", "evidently:metric_preset:DataDriftPreset"
+)
+register_type_alias(
+    MetricPreset, "evidently.metric_preset.data_quality.DataQualityPreset", "evidently:metric_preset:DataQualityPreset"
+)
+register_type_alias(MetricPreset, "evidently.metric_preset.recsys.RecsysPreset", "evidently:metric_preset:RecsysPreset")
+register_type_alias(
+    MetricPreset,
+    "evidently.metric_preset.regression_performance.RegressionPreset",
+    "evidently:metric_preset:RegressionPreset",
+)
+register_type_alias(
+    MetricPreset, "evidently.metric_preset.target_drift.TargetDriftPreset", "evidently:metric_preset:TargetDriftPreset"
+)
+register_type_alias(MetricPreset, "evidently.metric_preset.text_evals.TextEvals", "evidently:metric_preset:TextEvals")
diff --git a/src/evidently/metric_preset/classification_performance.py b/src/evidently/metric_preset/classification_performance.py
index 4e1b79c25d..30bbaa9e3b 100644
--- a/src/evidently/metric_preset/classification_performance.py
+++ b/src/evidently/metric_preset/classification_performance.py
@@ -19,6 +19,9 @@
 class ClassificationPreset(MetricPreset):
+    class Config:
+        type_alias = "evidently:metric_preset:ClassificationPreset"
+
     """
     Metrics preset for classification performance.
diff --git a/src/evidently/metric_preset/data_drift.py b/src/evidently/metric_preset/data_drift.py
index fbb577e950..c0bc5ed5f4 100644
--- a/src/evidently/metric_preset/data_drift.py
+++ b/src/evidently/metric_preset/data_drift.py
@@ -15,6 +15,9 @@
 class DataDriftPreset(MetricPreset):
+    class Config:
+        type_alias = "evidently:metric_preset:DataDriftPreset"
+
     """Metric Preset for Data Drift analysis.

     Contains metrics:
diff --git a/src/evidently/metric_preset/data_quality.py b/src/evidently/metric_preset/data_quality.py
index c536b3de44..92e0ad37be 100644
--- a/src/evidently/metric_preset/data_quality.py
+++ b/src/evidently/metric_preset/data_quality.py
@@ -13,6 +13,9 @@
 class DataQualityPreset(MetricPreset):
+    class Config:
+        type_alias = "evidently:metric_preset:DataQualityPreset"
+
     """Metric preset for Data Quality analysis.

     Contains metrics:
diff --git a/src/evidently/metric_preset/metric_preset.py b/src/evidently/metric_preset/metric_preset.py
index d939c8bd87..35859ba870 100644
--- a/src/evidently/metric_preset/metric_preset.py
+++ b/src/evidently/metric_preset/metric_preset.py
@@ -16,6 +16,9 @@
 class MetricPreset(BasePreset):
     """Base class for metric presets"""

+    class Config:
+        is_base_type = True
+
     @abc.abstractmethod
     def generate_metrics(
         self, data_definition: DataDefinition, additional_data: Optional[Dict[str, Any]]
diff --git a/src/evidently/metric_preset/recsys.py b/src/evidently/metric_preset/recsys.py
index b11f364507..7c58d404ca 100644
--- a/src/evidently/metric_preset/recsys.py
+++ b/src/evidently/metric_preset/recsys.py
@@ -27,6 +27,9 @@
 class RecsysPreset(MetricPreset):
+    class Config:
+        type_alias = "evidently:metric_preset:RecsysPreset"
+
     """Metric preset for recsys performance analysis.

     Contains metrics:
diff --git a/src/evidently/metric_preset/regression_performance.py b/src/evidently/metric_preset/regression_performance.py
index 4414cacc67..154d4e510c 100644
--- a/src/evidently/metric_preset/regression_performance.py
+++ b/src/evidently/metric_preset/regression_performance.py
@@ -18,6 +18,9 @@
 class RegressionPreset(MetricPreset):
+    class Config:
+        type_alias = "evidently:metric_preset:RegressionPreset"
+
     """Metric preset for Regression performance analysis.

     Contains metrics:
diff --git a/src/evidently/metric_preset/target_drift.py b/src/evidently/metric_preset/target_drift.py
index 713951400c..3e73c7568e 100644
--- a/src/evidently/metric_preset/target_drift.py
+++ b/src/evidently/metric_preset/target_drift.py
@@ -16,6 +16,9 @@
 class TargetDriftPreset(MetricPreset):
+    class Config:
+        type_alias = "evidently:metric_preset:TargetDriftPreset"
+
     """Metric preset for Target Drift analysis.

     Contains metrics:
diff --git a/src/evidently/metric_preset/text_evals.py b/src/evidently/metric_preset/text_evals.py
index 9bc134f474..8750baadc4 100644
--- a/src/evidently/metric_preset/text_evals.py
+++ b/src/evidently/metric_preset/text_evals.py
@@ -16,6 +16,9 @@
 class TextEvals(MetricPreset):
+    class Config:
+        type_alias = "evidently:metric_preset:TextEvals"
+
     column_name: str
     descriptors: Optional[List[FeatureDescriptor]] = None
diff --git a/src/evidently/metric_results.py b/src/evidently/metric_results.py
index e8e5084400..fa614e48ed 100644
--- a/src/evidently/metric_results.py
+++ b/src/evidently/metric_results.py
@@ -74,6 +74,7 @@ def column_scatter_valudator(value):
 class Distribution(MetricResult):
     class Config:
+        type_alias = "evidently:metric_result:Distribution"
         pd_include = False
         tags = {IncludeTags.Render}
         smart_union = True
@@ -85,6 +86,7 @@ class Config:
 class ConfusionMatrix(MetricResult):
     class Config:
+        type_alias = "evidently:metric_result:ConfusionMatrix"
         smart_union = True
         field_tags = {"labels": {IncludeTags.Parameter}}
@@ -95,6 +97,7 @@ class Config:
 class PredictionData(MetricResult):
     class Config:
+        type_alias = "evidently:metric_result:PredictionData"
         dict_include = False

     predictions: pd.Series
@@ -123,6 +126,7 @@ def validate_prediction_probas(cls, value: pd.DataFrame, values):
 class StatsByFeature(MetricResult):
     class Config:
+        type_alias = "evidently:metric_result:StatsByFeature"
         dict_include = False
         pd_include = False
         tags = {IncludeTags.Render}
@@ -132,6 +136,9 @@ class Config:
 class DatasetUtilityColumns(MetricResult):
+    class Config:
+        type_alias = "evidently:metric_result:DatasetUtilityColumns"
+
     date: Optional[str]
     id: Optional[str]
     target: Optional[str]
@@ -140,6 +147,7 @@
 class DatasetColumns(MetricResult):
     class Config:
+        type_alias = "evidently:metric_result:DatasetColumns"
         dict_exclude_fields = {"task", "target_type"}
         pd_include = False
         tags = {IncludeTags.Parameter}
@@ -236,6 +244,7 @@ def column_scatter_from_df(df: Optional[pd.DataFrame], with_index: bool) -> Opti
 class ScatterAggField(MetricResult):
     class Config:
+        type_alias = "evidently:metric_result:ScatterAggField"
         smart_union = True
         dict_include = False
         pd_include = False
@@ -249,6 +258,7 @@ class Config:
 class ScatterField(MetricResult):
     class Config:
+        type_alias = "evidently:metric_result:ScatterField"
         smart_union = True
         dict_include = False
         pd_include = False
@@ -262,6 +272,7 @@ class Config:
 class ColumnScatterResult(MetricResult):
     class Config:
+        type_alias = "evidently:metric_result:ColumnScatterResult"
         smart_union = True
         dict_include = False
         pd_include = False
@@ -277,6 +288,7 @@ class Config:
 class ColumnAggScatterResult(ColumnScatterResult):
     class Config:
+        type_alias = "evidently:metric_result:ColumnAggScatterResult"
         field_tags = {"current": {IncludeTags.Current}, "reference": {IncludeTags.Reference}}

     current: ColumnAggScatter
@@ -288,6 +300,7 @@ class Config:
 class Boxes(MetricResult):
     class Config:
+        type_alias = "evidently:metric_result:Boxes"
         dict_include = False
         tags = {IncludeTags.Render}
@@ -300,6 +313,7 @@ class Config:
 class RatesPlotData(MetricResult):
     class Config:
+        type_alias = "evidently:metric_result:RatesPlotData"
         dict_include = False
         tags = {IncludeTags.Render}
@@ -312,6 +326,7 @@ class Config:
 class PRCurveData(MetricResult):
     class Config:
+        type_alias = "evidently:metric_result:PRCurveData"
         dict_include = False
         tags = {IncludeTags.Render}
@@ -325,6 +340,7 @@ class Config:
 class ROCCurveData(MetricResult):
     class Config:
+ type_alias = "evidently:metric_result:ROCCurveData" dict_include = False tags = {IncludeTags.Render} @@ -338,6 +354,7 @@ class Config: class LiftCurveData(MetricResult): class Config: + type_alias = "evidently:metric_result:LiftCurveData" dict_include = False tags = {IncludeTags.Render} @@ -360,6 +377,7 @@ class Config: class HistogramData(MetricResult): class Config: + type_alias = "evidently:metric_result:HistogramData" dict_include = False tags = {IncludeTags.Render} extract_as_obj = True @@ -384,6 +402,7 @@ def to_df(self): class Histogram(MetricResult): class Config: + type_alias = "evidently:metric_result:Histogram" dict_include = False tags = {IncludeTags.Render} field_tags = { @@ -403,6 +422,7 @@ class Config: # todo need better config overriding logic in metricresult class DistributionIncluded(Distribution): class Config: + type_alias = "evidently:metric_result:DistributionIncluded" tags: Set[IncludeTags] = set() dict_include = True field_tags = {"x": {IncludeTags.Extra}} @@ -410,6 +430,7 @@ class Config: class ColumnCorrelations(MetricResult): class Config: + type_alias = "evidently:metric_result:ColumnCorrelations" field_tags = {"column_name": {IncludeTags.Parameter}, "kind": {IncludeTags.Parameter}} column_name: str @@ -426,6 +447,9 @@ def get_pandas(self) -> pd.DataFrame: class DatasetClassificationQuality(MetricResult): + class Config: + type_alias = "evidently:metric_result:DatasetClassificationQuality" + accuracy: float precision: float recall: float diff --git a/src/evidently/metrics/__init__.py b/src/evidently/metrics/__init__.py index b40fb31435..c88a28babf 100644 --- a/src/evidently/metrics/__init__.py +++ b/src/evidently/metrics/__init__.py @@ -4,6 +4,7 @@ For specific group see module documentation. """ +from . import _registry from .classification_performance.class_balance_metric import ClassificationClassBalance from .classification_performance.class_separation_metric import ClassificationClassSeparationPlot from .classification_performance.classification_dummy_metric import ClassificationDummyMetric @@ -140,4 +141,5 @@ "ScoreDistribution", "MRRKMetric", "RecCasesTable", + "_registry", ] diff --git a/src/evidently/metrics/_registry.py b/src/evidently/metrics/_registry.py new file mode 100644 index 0000000000..1ed0ce8345 --- /dev/null +++ b/src/evidently/metrics/_registry.py @@ -0,0 +1,826 @@ +from evidently.base_metric import Metric +from evidently.base_metric import MetricResult +from evidently.pydantic_utils import register_type_alias + +register_type_alias( + Metric, + "evidently.metrics.classification_performance.class_balance_metric.ClassificationClassBalance", + "evidently:metric:ClassificationClassBalance", +) +register_type_alias( + Metric, + "evidently.metrics.classification_performance.class_separation_metric.ClassificationClassSeparationPlot", + "evidently:metric:ClassificationClassSeparationPlot", +) +register_type_alias( + Metric, + "evidently.metrics.classification_performance.classification_dummy_metric.ClassificationDummyMetric", + "evidently:metric:ClassificationDummyMetric", +) +register_type_alias( + Metric, + "evidently.metrics.classification_performance.classification_quality_metric.ClassificationQualityMetric", + "evidently:metric:ClassificationQualityMetric", +) +register_type_alias( + Metric, + "evidently.metrics.classification_performance.confusion_matrix_metric.ClassificationConfusionMatrix", + "evidently:metric:ClassificationConfusionMatrix", +) +register_type_alias( + Metric, + 
"evidently.metrics.classification_performance.lift_curve_metric.ClassificationLiftCurve", + "evidently:metric:ClassificationLiftCurve", +) +register_type_alias( + Metric, + "evidently.metrics.classification_performance.lift_table_metric.ClassificationLiftTable", + "evidently:metric:ClassificationLiftTable", +) +register_type_alias( + Metric, + "evidently.metrics.classification_performance.pr_curve_metric.ClassificationPRCurve", + "evidently:metric:ClassificationPRCurve", +) +register_type_alias( + Metric, + "evidently.metrics.classification_performance.pr_table_metric.ClassificationPRTable", + "evidently:metric:ClassificationPRTable", +) +register_type_alias( + Metric, + "evidently.metrics.classification_performance.probability_distribution_metric.ClassificationProbDistribution", + "evidently:metric:ClassificationProbDistribution", +) +register_type_alias( + Metric, + "evidently.metrics.classification_performance.quality_by_class_metric.ClassificationQualityByClass", + "evidently:metric:ClassificationQualityByClass", +) +register_type_alias( + Metric, + "evidently.metrics.classification_performance.quality_by_feature_table.ClassificationQualityByFeatureTable", + "evidently:metric:ClassificationQualityByFeatureTable", +) +register_type_alias( + Metric, + "evidently.metrics.classification_performance.roc_curve_metric.ClassificationRocCurve", + "evidently:metric:ClassificationRocCurve", +) +register_type_alias(Metric, "evidently.metrics.custom_metric.CustomValueMetric", "evidently:metric:CustomValueMetric") +register_type_alias( + Metric, "evidently.metrics.data_drift.column_drift_metric.ColumnDriftMetric", "evidently:metric:ColumnDriftMetric" +) +register_type_alias( + Metric, + "evidently.metrics.data_drift.column_interaction_plot.ColumnInteractionPlot", + "evidently:metric:ColumnInteractionPlot", +) +register_type_alias( + Metric, "evidently.metrics.data_drift.column_value_plot.ColumnValuePlot", "evidently:metric:ColumnValuePlot" +) +register_type_alias( + Metric, "evidently.metrics.data_drift.data_drift_table.DataDriftTable", "evidently:metric:DataDriftTable" +) +register_type_alias( + Metric, + "evidently.metrics.data_drift.dataset_drift_metric.DatasetDriftMetric", + "evidently:metric:DatasetDriftMetric", +) +register_type_alias( + Metric, + "evidently.metrics.data_drift.embeddings_drift.EmbeddingsDriftMetric", + "evidently:metric:EmbeddingsDriftMetric", +) +register_type_alias( + Metric, + "evidently.metrics.data_drift.feature_importance.FeatureImportanceMetric", + "evidently:metric:FeatureImportanceMetric", +) +register_type_alias( + Metric, + "evidently.metrics.data_drift.target_by_features_table.TargetByFeaturesTable", + "evidently:metric:TargetByFeaturesTable", +) +register_type_alias( + Metric, + "evidently.metrics.data_drift.text_descriptors_drift_metric.TextDescriptorsDriftMetric", + "evidently:metric:TextDescriptorsDriftMetric", +) +register_type_alias( + Metric, + "evidently.metrics.data_drift.text_domain_classifier_drift_metric.TextDomainClassifierDriftMetric", + "evidently:metric:TextDomainClassifierDriftMetric", +) +register_type_alias(Metric, "evidently.metrics.data_drift.text_metric.Comment", "evidently:metric:Comment") +register_type_alias( + Metric, + "evidently.metrics.data_integrity.column_missing_values_metric.ColumnMissingValuesMetric", + "evidently:metric:ColumnMissingValuesMetric", +) +register_type_alias( + Metric, + "evidently.metrics.data_integrity.column_regexp_metric.ColumnRegExpMetric", + "evidently:metric:ColumnRegExpMetric", +) +register_type_alias( + 
Metric, + "evidently.metrics.data_integrity.column_summary_metric.ColumnSummaryMetric", + "evidently:metric:ColumnSummaryMetric", +) +register_type_alias( + Metric, + "evidently.metrics.data_integrity.dataset_missing_values_metric.DatasetMissingValuesMetric", + "evidently:metric:DatasetMissingValuesMetric", +) +register_type_alias( + Metric, + "evidently.metrics.data_integrity.dataset_summary_metric.DatasetSummaryMetric", + "evidently:metric:DatasetSummaryMetric", +) +register_type_alias( + Metric, + "evidently.metrics.data_quality.column_category_metric.ColumnCategoryMetric", + "evidently:metric:ColumnCategoryMetric", +) +register_type_alias( + Metric, + "evidently.metrics.data_quality.column_correlations_metric.ColumnCorrelationsMetric", + "evidently:metric:ColumnCorrelationsMetric", +) +register_type_alias( + Metric, + "evidently.metrics.data_quality.column_distribution_metric.ColumnDistributionMetric", + "evidently:metric:ColumnDistributionMetric", +) +register_type_alias( + Metric, + "evidently.metrics.data_quality.column_quantile_metric.ColumnQuantileMetric", + "evidently:metric:ColumnQuantileMetric", +) +register_type_alias( + Metric, + "evidently.metrics.data_quality.column_value_list_metric.ColumnValueListMetric", + "evidently:metric:ColumnValueListMetric", +) +register_type_alias( + Metric, + "evidently.metrics.data_quality.column_value_range_metric.ColumnValueRangeMetric", + "evidently:metric:ColumnValueRangeMetric", +) +register_type_alias( + Metric, + "evidently.metrics.data_quality.conflict_prediction_metric.ConflictPredictionMetric", + "evidently:metric:ConflictPredictionMetric", +) +register_type_alias( + Metric, + "evidently.metrics.data_quality.conflict_target_metric.ConflictTargetMetric", + "evidently:metric:ConflictTargetMetric", +) +register_type_alias( + Metric, + "evidently.metrics.data_quality.dataset_correlations_metric.DatasetCorrelationsMetric", + "evidently:metric:DatasetCorrelationsMetric", +) +register_type_alias( + Metric, + "evidently.metrics.data_quality.stability_metric.DataQualityStabilityMetric", + "evidently:metric:DataQualityStabilityMetric", +) +register_type_alias( + Metric, + "evidently.metrics.data_quality.text_descriptors_correlation_metric.TextDescriptorsCorrelationMetric", + "evidently:metric:TextDescriptorsCorrelationMetric", +) +register_type_alias( + Metric, + "evidently.metrics.data_quality.text_descriptors_distribution.TextDescriptorsDistribution", + "evidently:metric:TextDescriptorsDistribution", +) +register_type_alias(Metric, "evidently.metrics.recsys.diversity.DiversityMetric", "evidently:metric:DiversityMetric") +register_type_alias(Metric, "evidently.metrics.recsys.f_beta_top_k.FBetaTopKMetric", "evidently:metric:FBetaTopKMetric") +register_type_alias(Metric, "evidently.metrics.recsys.hit_rate_k.HitRateKMetric", "evidently:metric:HitRateKMetric") +register_type_alias(Metric, "evidently.metrics.recsys.item_bias.ItemBiasMetric", "evidently:metric:ItemBiasMetric") +register_type_alias(Metric, "evidently.metrics.recsys.map_k.MAPKMetric", "evidently:metric:MAPKMetric") +register_type_alias(Metric, "evidently.metrics.recsys.mar_k.MARKMetric", "evidently:metric:MARKMetric") +register_type_alias(Metric, "evidently.metrics.recsys.mrr.MRRKMetric", "evidently:metric:MRRKMetric") +register_type_alias(Metric, "evidently.metrics.recsys.ndcg_k.NDCGKMetric", "evidently:metric:NDCGKMetric") +register_type_alias(Metric, "evidently.metrics.recsys.novelty.NoveltyMetric", "evidently:metric:NoveltyMetric") +register_type_alias( + Metric, 
"evidently.metrics.recsys.pairwise_distance.PairwiseDistance", "evidently:metric:PairwiseDistance" +) +register_type_alias( + Metric, "evidently.metrics.recsys.personalisation.PersonalizationMetric", "evidently:metric:PersonalizationMetric" +) +register_type_alias( + Metric, "evidently.metrics.recsys.popularity_bias.PopularityBias", "evidently:metric:PopularityBias" +) +register_type_alias( + Metric, + "evidently.metrics.recsys.precision_recall_k.PrecisionRecallCalculation", + "evidently:metric:PrecisionRecallCalculation", +) +register_type_alias( + Metric, "evidently.metrics.recsys.precision_top_k.PrecisionTopKMetric", "evidently:metric:PrecisionTopKMetric" +) +register_type_alias(Metric, "evidently.metrics.recsys.rec_examples.RecCasesTable", "evidently:metric:RecCasesTable") +register_type_alias( + Metric, "evidently.metrics.recsys.recall_top_k.RecallTopKMetric", "evidently:metric:RecallTopKMetric" +) +register_type_alias( + Metric, "evidently.metrics.recsys.scores_distribution.ScoreDistribution", "evidently:metric:ScoreDistribution" +) +register_type_alias( + Metric, "evidently.metrics.recsys.serendipity.SerendipityMetric", "evidently:metric:SerendipityMetric" +) +register_type_alias(Metric, "evidently.metrics.recsys.train_stats.TrainStats", "evidently:metric:TrainStats") +register_type_alias(Metric, "evidently.metrics.recsys.user_bias.UserBiasMetric", "evidently:metric:UserBiasMetric") +register_type_alias( + Metric, + "evidently.metrics.regression_performance.abs_perc_error_in_time.RegressionAbsPercentageErrorPlot", + "evidently:metric:RegressionAbsPercentageErrorPlot", +) +register_type_alias( + Metric, + "evidently.metrics.regression_performance.error_bias_table.RegressionErrorBiasTable", + "evidently:metric:RegressionErrorBiasTable", +) +register_type_alias( + Metric, + "evidently.metrics.regression_performance.error_distribution.RegressionErrorDistribution", + "evidently:metric:RegressionErrorDistribution", +) +register_type_alias( + Metric, + "evidently.metrics.regression_performance.error_in_time.RegressionErrorPlot", + "evidently:metric:RegressionErrorPlot", +) +register_type_alias( + Metric, + "evidently.metrics.regression_performance.error_normality.RegressionErrorNormality", + "evidently:metric:RegressionErrorNormality", +) +register_type_alias( + Metric, + "evidently.metrics.regression_performance.predicted_and_actual_in_time.RegressionPredictedVsActualPlot", + "evidently:metric:RegressionPredictedVsActualPlot", +) +register_type_alias( + Metric, + "evidently.metrics.regression_performance.predicted_vs_actual.RegressionPredictedVsActualScatter", + "evidently:metric:RegressionPredictedVsActualScatter", +) +register_type_alias( + Metric, + "evidently.metrics.regression_performance.regression_dummy_metric.RegressionDummyMetric", + "evidently:metric:RegressionDummyMetric", +) +register_type_alias( + Metric, + "evidently.metrics.regression_performance.regression_performance_metrics.RegressionPerformanceMetrics", + "evidently:metric:RegressionPerformanceMetrics", +) +register_type_alias( + Metric, + "evidently.metrics.regression_performance.regression_quality.RegressionQualityMetric", + "evidently:metric:RegressionQualityMetric", +) +register_type_alias( + Metric, + "evidently.metrics.regression_performance.top_error.RegressionTopErrorMetric", + "evidently:metric:RegressionTopErrorMetric", +) + + +register_type_alias( + MetricResult, "evidently.base_metric.ColumnMetricResult", "evidently:metric_result:ColumnMetricResult" +) +register_type_alias(MetricResult, 
"evidently.base_metric.MetricResult", "evidently:metric_result:MetricResult") +register_type_alias( + MetricResult, + "evidently.calculations.data_drift.ColumnDataDriftMetrics", + "evidently:metric_result:ColumnDataDriftMetrics", +) +register_type_alias( + MetricResult, "evidently.calculations.data_drift.DatasetDriftMetrics", "evidently:metric_result:DatasetDriftMetrics" +) +register_type_alias( + MetricResult, "evidently.calculations.data_drift.DriftStatsField", "evidently:metric_result:DriftStatsField" +) +register_type_alias(MetricResult, "evidently.metric_results.Boxes", "evidently:metric_result:Boxes") +register_type_alias( + MetricResult, "evidently.metric_results.ColumnAggScatterResult", "evidently:metric_result:ColumnAggScatterResult" +) +register_type_alias( + MetricResult, "evidently.metric_results.ColumnCorrelations", "evidently:metric_result:ColumnCorrelations" +) +register_type_alias( + MetricResult, "evidently.metric_results.ColumnScatterResult", "evidently:metric_result:ColumnScatterResult" +) +register_type_alias(MetricResult, "evidently.metric_results.ConfusionMatrix", "evidently:metric_result:ConfusionMatrix") +register_type_alias( + MetricResult, + "evidently.metric_results.DatasetClassificationQuality", + "evidently:metric_result:DatasetClassificationQuality", +) +register_type_alias(MetricResult, "evidently.metric_results.DatasetColumns", "evidently:metric_result:DatasetColumns") +register_type_alias( + MetricResult, "evidently.metric_results.DatasetUtilityColumns", "evidently:metric_result:DatasetUtilityColumns" +) +register_type_alias(MetricResult, "evidently.metric_results.Distribution", "evidently:metric_result:Distribution") +register_type_alias( + MetricResult, "evidently.metric_results.DistributionIncluded", "evidently:metric_result:DistributionIncluded" +) +register_type_alias(MetricResult, "evidently.metric_results.Histogram", "evidently:metric_result:Histogram") +register_type_alias(MetricResult, "evidently.metric_results.HistogramData", "evidently:metric_result:HistogramData") +register_type_alias(MetricResult, "evidently.metric_results.LiftCurveData", "evidently:metric_result:LiftCurveData") +register_type_alias(MetricResult, "evidently.metric_results.PRCurveData", "evidently:metric_result:PRCurveData") +register_type_alias(MetricResult, "evidently.metric_results.PredictionData", "evidently:metric_result:PredictionData") +register_type_alias(MetricResult, "evidently.metric_results.ROCCurveData", "evidently:metric_result:ROCCurveData") +register_type_alias(MetricResult, "evidently.metric_results.RatesPlotData", "evidently:metric_result:RatesPlotData") +register_type_alias(MetricResult, "evidently.metric_results.ScatterAggField", "evidently:metric_result:ScatterAggField") +register_type_alias(MetricResult, "evidently.metric_results.ScatterField", "evidently:metric_result:ScatterField") +register_type_alias(MetricResult, "evidently.metric_results.StatsByFeature", "evidently:metric_result:StatsByFeature") +register_type_alias( + MetricResult, + "evidently.metrics.classification_performance.class_balance_metric.ClassificationClassBalanceResult", + "evidently:metric_result:ClassificationClassBalanceResult", +) +register_type_alias( + MetricResult, + "evidently.metrics.classification_performance.class_separation_metric.ClassificationClassSeparationPlotResults", + "evidently:metric_result:ClassificationClassSeparationPlotResults", +) +register_type_alias( + MetricResult, + 
"evidently.metrics.classification_performance.classification_dummy_metric.ClassificationDummyMetricResults", + "evidently:metric_result:ClassificationDummyMetricResults", +) +register_type_alias( + MetricResult, + "evidently.metrics.classification_performance.classification_quality_metric.ClassificationQualityMetricResult", + "evidently:metric_result:ClassificationQualityMetricResult", +) +register_type_alias( + MetricResult, + "evidently.metrics.classification_performance.confusion_matrix_metric.ClassificationConfusionMatrixResult", + "evidently:metric_result:ClassificationConfusionMatrixResult", +) +register_type_alias( + MetricResult, + "evidently.metrics.classification_performance.lift_curve_metric.ClassificationLiftCurveResults", + "evidently:metric_result:ClassificationLiftCurveResults", +) +register_type_alias( + MetricResult, + "evidently.metrics.classification_performance.lift_table_metric.ClassificationLiftTableResults", + "evidently:metric_result:ClassificationLiftTableResults", +) +register_type_alias( + MetricResult, + "evidently.metrics.classification_performance.objects.ClassMetric", + "evidently:metric_result:ClassMetric", +) +register_type_alias( + MetricResult, + "evidently.metrics.classification_performance.objects.ClassificationReport", + "evidently:metric_result:ClassificationReport", +) +register_type_alias( + MetricResult, + "evidently.metrics.classification_performance.pr_curve_metric.ClassificationPRCurveResults", + "evidently:metric_result:ClassificationPRCurveResults", +) +register_type_alias( + MetricResult, + "evidently.metrics.classification_performance.pr_table_metric.ClassificationPRTableResults", + "evidently:metric_result:ClassificationPRTableResults", +) +register_type_alias( + MetricResult, + "evidently.metrics.classification_performance.probability_distribution_metric.ClassificationProbDistributionResults", + "evidently:metric_result:ClassificationProbDistributionResults", +) +register_type_alias( + MetricResult, + "evidently.metrics.classification_performance.quality_by_class_metric.ClassificationQuality", + "evidently:metric_result:ClassificationQuality", +) +register_type_alias( + MetricResult, + "evidently.metrics.classification_performance.quality_by_class_metric.ClassificationQualityByClassResult", + "evidently:metric_result:ClassificationQualityByClassResult", +) +register_type_alias( + MetricResult, + "evidently.metrics.classification_performance.quality_by_feature_table.ClassificationQualityByFeatureTableResults", + "evidently:metric_result:ClassificationQualityByFeatureTableResults", +) +register_type_alias( + MetricResult, + "evidently.metrics.classification_performance.roc_curve_metric.ClassificationRocCurveResults", + "evidently:metric_result:ClassificationRocCurveResults", +) +register_type_alias( + MetricResult, + "evidently.metrics.custom_metric.CustomCallableMetricResult", + "evidently:metric_result:CustomCallableMetricResult", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_drift.column_interaction_plot.ColumnInteractionPlotResults", + "evidently:metric_result:ColumnInteractionPlotResults", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_drift.column_value_plot.ColumnValuePlotResults", + "evidently:metric_result:ColumnValuePlotResults", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_drift.data_drift_table.DataDriftTableResults", + "evidently:metric_result:DataDriftTableResults", +) +register_type_alias( + MetricResult, + 
"evidently.metrics.data_drift.dataset_drift_metric.DatasetDriftMetricResults", + "evidently:metric_result:DatasetDriftMetricResults", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_drift.embeddings_drift.EmbeddingsDriftMetricResults", + "evidently:metric_result:EmbeddingsDriftMetricResults", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_drift.feature_importance.FeatureImportanceMetricResult", + "evidently:metric_result:FeatureImportanceMetricResult", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_drift.target_by_features_table.TargetByFeaturesTableResults", + "evidently:metric_result:TargetByFeaturesTableResults", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_drift.text_descriptors_drift_metric.TextDescriptorsDriftMetricResults", + "evidently:metric_result:TextDescriptorsDriftMetricResults", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_drift.text_domain_classifier_drift_metric.TextDomainClassifierDriftResult", + "evidently:metric_result:TextDomainClassifierDriftResult", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_drift.text_domain_classifier_drift_metric.TextDomainField", + "evidently:metric_result:TextDomainField", +) +register_type_alias( + MetricResult, "evidently.metrics.data_drift.text_metric.CommentResults", "evidently:metric_result:CommentResults" +) +register_type_alias( + MetricResult, + "evidently.metrics.data_integrity.column_missing_values_metric.ColumnMissingValues", + "evidently:metric_result:ColumnMissingValues", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_integrity.column_missing_values_metric.ColumnMissingValuesMetricResult", + "evidently:metric_result:ColumnMissingValuesMetricResult", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_integrity.column_regexp_metric.DataIntegrityValueByRegexpMetricResult", + "evidently:metric_result:DataIntegrityValueByRegexpMetricResult", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_integrity.column_regexp_metric.DataIntegrityValueByRegexpStat", + "evidently:metric_result:DataIntegrityValueByRegexpStat", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_integrity.column_summary_metric.CategoricalCharacteristics", + "evidently:metric_result:CategoricalCharacteristics", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_integrity.column_summary_metric.ColumnCharacteristics", + "evidently:metric_result:ColumnCharacteristics", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_integrity.column_summary_metric.ColumnSummaryResult", + "evidently:metric_result:ColumnSummaryResult", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_integrity.column_summary_metric.DataByTarget", + "evidently:metric_result:DataByTarget", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_integrity.column_summary_metric.DataInTime", + "evidently:metric_result:DataInTime", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_integrity.column_summary_metric.DataInTimePlots", + "evidently:metric_result:DataInTimePlots", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_integrity.column_summary_metric.DataQualityPlot", + "evidently:metric_result:DataQualityPlot", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_integrity.column_summary_metric.DatetimeCharacteristics", + "evidently:metric_result:DatetimeCharacteristics", +) +register_type_alias( + 
MetricResult, + "evidently.metrics.data_integrity.column_summary_metric.NumericCharacteristics", + "evidently:metric_result:NumericCharacteristics", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_integrity.column_summary_metric.TextCharacteristics", + "evidently:metric_result:TextCharacteristics", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_integrity.dataset_missing_values_metric.DatasetMissingValues", + "evidently:metric_result:DatasetMissingValues", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_integrity.dataset_missing_values_metric.DatasetMissingValuesMetricResult", + "evidently:metric_result:DatasetMissingValuesMetricResult", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_integrity.dataset_summary_metric.DatasetSummary", + "evidently:metric_result:DatasetSummary", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_integrity.dataset_summary_metric.DatasetSummaryMetricResult", + "evidently:metric_result:DatasetSummaryMetricResult", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_quality.column_category_metric.CategoryStat", + "evidently:metric_result:CategoryStat", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_quality.column_category_metric.ColumnCategoryMetricResult", + "evidently:metric_result:ColumnCategoryMetricResult", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_quality.column_category_metric.CountOfValues", + "evidently:metric_result:CountOfValues", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_quality.column_correlations_metric.ColumnCorrelationsMetricResult", + "evidently:metric_result:ColumnCorrelationsMetricResult", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_quality.column_distribution_metric.ColumnDistributionMetricResult", + "evidently:metric_result:ColumnDistributionMetricResult", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_quality.column_quantile_metric.ColumnQuantileMetricResult", + "evidently:metric_result:ColumnQuantileMetricResult", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_quality.column_quantile_metric.QuantileStats", + "evidently:metric_result:QuantileStats", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_quality.column_value_list_metric.ColumnValueListMetricResult", + "evidently:metric_result:ColumnValueListMetricResult", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_quality.column_value_list_metric.ValueListStat", + "evidently:metric_result:ValueListStat", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_quality.column_value_range_metric.ColumnValueRangeMetricResult", + "evidently:metric_result:ColumnValueRangeMetricResult", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_quality.column_value_range_metric.ValuesInRangeStat", + "evidently:metric_result:ValuesInRangeStat", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_quality.conflict_prediction_metric.ConflictPredictionData", + "evidently:metric_result:ConflictPredictionData", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_quality.conflict_prediction_metric.ConflictPredictionMetricResults", + "evidently:metric_result:ConflictPredictionMetricResults", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_quality.conflict_target_metric.ConflictTargetMetricResults", + "evidently:metric_result:ConflictTargetMetricResults", +) 
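+# NOTE (editor's illustration, not part of the generated registry): each call in this
+# module binds a fully qualified class path to a stable "evidently:<kind>:<Name>" alias
+# on a base type, so serialized payloads can reference the alias instead of the import
+# path. A minimal sketch of the intended usage, assuming a custom MetricResult subclass
+# (my_pkg.results.MyResult is a hypothetical name used only for this example):
+#
+#   from evidently.base_metric import MetricResult
+#   from evidently.pydantic_utils import register_type_alias
+#
+#   register_type_alias(MetricResult, "my_pkg.results.MyResult", "evidently:metric_result:MyResult")
+#   # payloads may now carry "evidently:metric_result:MyResult" as their type tag
+#   # and still resolve to my_pkg.results.MyResult when deserialized.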
+register_type_alias( + MetricResult, + "evidently.metrics.data_quality.dataset_correlations_metric.CorrelationStats", + "evidently:metric_result:CorrelationStats", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_quality.dataset_correlations_metric.DatasetCorrelation", + "evidently:metric_result:DatasetCorrelation", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_quality.dataset_correlations_metric.DatasetCorrelationsMetricResult", + "evidently:metric_result:DatasetCorrelationsMetricResult", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_quality.stability_metric.DataQualityStabilityMetricResult", + "evidently:metric_result:DataQualityStabilityMetricResult", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_quality.text_descriptors_correlation_metric.TextDescriptorsCorrelationMetricResult", + "evidently:metric_result:TextDescriptorsCorrelationMetricResult", +) +register_type_alias( + MetricResult, + "evidently.metrics.data_quality.text_descriptors_distribution.TextDescriptorsDistributionResult", + "evidently:metric_result:TextDescriptorsDistributionResult", +) +register_type_alias( + MetricResult, "evidently.metrics.recsys.base_top_k.TopKMetricResult", "evidently:metric_result:TopKMetricResult" +) +register_type_alias( + MetricResult, + "evidently.metrics.recsys.diversity.DiversityMetricResult", + "evidently:metric_result:DiversityMetricResult", +) +register_type_alias( + MetricResult, + "evidently.metrics.recsys.hit_rate_k.HitRateKMetricResult", + "evidently:metric_result:HitRateKMetricResult", +) +register_type_alias( + MetricResult, + "evidently.metrics.recsys.item_bias.ItemBiasMetricResult", + "evidently:metric_result:ItemBiasMetricResult", +) +register_type_alias( + MetricResult, "evidently.metrics.recsys.mrr.MRRKMetricResult", "evidently:metric_result:MRRKMetricResult" +) +register_type_alias( + MetricResult, "evidently.metrics.recsys.novelty.NoveltyMetricResult", "evidently:metric_result:NoveltyMetricResult" +) +register_type_alias( + MetricResult, + "evidently.metrics.recsys.pairwise_distance.PairwiseDistanceResult", + "evidently:metric_result:PairwiseDistanceResult", +) +register_type_alias( + MetricResult, + "evidently.metrics.recsys.personalisation.PersonalizationMetricResult", + "evidently:metric_result:PersonalizationMetricResult", +) +register_type_alias( + MetricResult, + "evidently.metrics.recsys.popularity_bias.PopularityBiasResult", + "evidently:metric_result:PopularityBiasResult", +) +register_type_alias( + MetricResult, + "evidently.metrics.recsys.precision_recall_k.PrecisionRecallCalculationResult", + "evidently:metric_result:PrecisionRecallCalculationResult", +) +register_type_alias( + MetricResult, + "evidently.metrics.recsys.rec_examples.RecCasesTableResults", + "evidently:metric_result:RecCasesTableResults", +) +register_type_alias( + MetricResult, + "evidently.metrics.recsys.scores_distribution.ScoreDistributionResult", + "evidently:metric_result:ScoreDistributionResult", +) +register_type_alias( + MetricResult, + "evidently.metrics.recsys.serendipity.SerendipityMetricResult", + "evidently:metric_result:SerendipityMetricResult", +) +register_type_alias( + MetricResult, "evidently.metrics.recsys.train_stats.TrainStatsResult", "evidently:metric_result:TrainStatsResult" +) +register_type_alias( + MetricResult, + "evidently.metrics.recsys.user_bias.UserBiasMetricResult", + "evidently:metric_result:UserBiasMetricResult", +) +register_type_alias( + MetricResult, + 
"evidently.metrics.regression_performance.error_bias_table.RegressionErrorBiasTableResults", + "evidently:metric_result:RegressionErrorBiasTableResults", +) +register_type_alias( + MetricResult, + "evidently.metrics.regression_performance.error_distribution.RegressionErrorDistributionResults", + "evidently:metric_result:RegressionErrorDistributionResults", +) +register_type_alias( + MetricResult, + "evidently.metrics.regression_performance.error_normality.RegressionErrorNormalityResults", + "evidently:metric_result:RegressionErrorNormalityResults", +) +register_type_alias( + MetricResult, + "evidently.metrics.regression_performance.objects.IntervalSeries", + "evidently:metric_result:IntervalSeries", +) +register_type_alias( + MetricResult, + "evidently.metrics.regression_performance.objects.PredActualScatter", + "evidently:metric_result:PredActualScatter", +) +register_type_alias( + MetricResult, + "evidently.metrics.regression_performance.objects.RegressionMetricScatter", + "evidently:metric_result:RegressionMetricScatter", +) +register_type_alias( + MetricResult, + "evidently.metrics.regression_performance.objects.RegressionMetricsScatter", + "evidently:metric_result:RegressionMetricsScatter", +) +register_type_alias( + MetricResult, + "evidently.metrics.regression_performance.objects.RegressionScatter", + "evidently:metric_result:RegressionScatter", +) +register_type_alias( + MetricResult, + "evidently.metrics.regression_performance.predicted_vs_actual.AggPredActualScatter", + "evidently:metric_result:AggPredActualScatter", +) +register_type_alias( + MetricResult, + "evidently.metrics.regression_performance.predicted_vs_actual.RegressionPredictedVsActualScatterResults", + "evidently:metric_result:RegressionPredictedVsActualScatterResults", +) +register_type_alias( + MetricResult, + "evidently.metrics.regression_performance.regression_dummy_metric.RegressionDummyMetricResults", + "evidently:metric_result:RegressionDummyMetricResults", +) +register_type_alias( + MetricResult, + "evidently.metrics.regression_performance.regression_performance_metrics.RegressionMetrics", + "evidently:metric_result:RegressionMetrics", +) +register_type_alias( + MetricResult, + "evidently.metrics.regression_performance.regression_performance_metrics.RegressionPerformanceMetricsResults", + "evidently:metric_result:RegressionPerformanceMetricsResults", +) +register_type_alias( + MetricResult, + "evidently.metrics.regression_performance.regression_quality.MoreRegressionMetrics", + "evidently:metric_result:MoreRegressionMetrics", +) +register_type_alias( + MetricResult, + "evidently.metrics.regression_performance.regression_quality.RegressionQualityMetricResults", + "evidently:metric_result:RegressionQualityMetricResults", +) +register_type_alias( + MetricResult, "evidently.metrics.regression_performance.top_error.AggTopData", "evidently:metric_result:AggTopData" +) +register_type_alias( + MetricResult, + "evidently.metrics.regression_performance.top_error.RegressionTopErrorMetricResults", + "evidently:metric_result:RegressionTopErrorMetricResults", +) +register_type_alias( + MetricResult, "evidently.metrics.regression_performance.top_error.TopData", "evidently:metric_result:TopData" +) +register_type_alias(MetricResult, "evidently.tests.base_test.TestResult", "evidently:metric_result:TestResult") diff --git a/src/evidently/metrics/classification_performance/class_balance_metric.py b/src/evidently/metrics/classification_performance/class_balance_metric.py index 872e68cd19..9ad56f8d63 100644 --- 
a/src/evidently/metrics/classification_performance/class_balance_metric.py +++ b/src/evidently/metrics/classification_performance/class_balance_metric.py @@ -18,6 +18,7 @@ class ClassificationClassBalanceResult(MetricResult): class Config: + type_alias = "evidently:metric_result:ClassificationClassBalanceResult" dict_exclude_fields = {"plot_data"} pd_exclude_fields = {"plot_data"} @@ -25,6 +26,9 @@ class Config: class ClassificationClassBalance(Metric[ClassificationClassBalanceResult]): + class Config: + type_alias = "evidently:metric:ClassificationClassBalance" + def calculate(self, data: InputData) -> ClassificationClassBalanceResult: dataset_columns = process_columns(data.current_data, data.column_mapping) target_name = dataset_columns.utility_columns.target diff --git a/src/evidently/metrics/classification_performance/class_separation_metric.py b/src/evidently/metrics/classification_performance/class_separation_metric.py index d9722d6d79..485989bd0d 100644 --- a/src/evidently/metrics/classification_performance/class_separation_metric.py +++ b/src/evidently/metrics/classification_performance/class_separation_metric.py @@ -30,6 +30,7 @@ class ClassificationClassSeparationPlotResults(MetricResult): class Config: + type_alias = "evidently:metric_result:ClassificationClassSeparationPlotResults" dict_exclude_fields = {"current", "reference"} pd_exclude_fields = {"current", "reference"} field_tags = { @@ -72,6 +73,9 @@ def _quantiles(qdf, value): class ClassificationClassSeparationPlot(UsesRawDataMixin, Metric[ClassificationClassSeparationPlotResults]): + class Config: + type_alias = "evidently:metric:ClassificationClassSeparationPlot" + def __init__(self, options: AnyOptions = None): super().__init__(options=options) diff --git a/src/evidently/metrics/classification_performance/classification_dummy_metric.py b/src/evidently/metrics/classification_performance/classification_dummy_metric.py index 29d352b10a..a8ca44f0cb 100644 --- a/src/evidently/metrics/classification_performance/classification_dummy_metric.py +++ b/src/evidently/metrics/classification_performance/classification_dummy_metric.py @@ -31,6 +31,7 @@ class ClassificationDummyMetricResults(MetricResult): class Config: + type_alias = "evidently:metric_result:ClassificationDummyMetricResults" dict_exclude_fields = {"metrics_matrix"} pd_exclude_fields = {"metrics_matrix"} @@ -43,6 +44,9 @@ class Config: class ClassificationDummyMetric(ThresholdClassificationMetric[ClassificationDummyMetricResults]): + class Config: + type_alias = "evidently:metric:ClassificationDummyMetric" + _quality_metric: ClassificationQualityMetric def __init__( diff --git a/src/evidently/metrics/classification_performance/classification_quality_metric.py b/src/evidently/metrics/classification_performance/classification_quality_metric.py index 791ba4f1e5..0e4f9e9ecc 100644 --- a/src/evidently/metrics/classification_performance/classification_quality_metric.py +++ b/src/evidently/metrics/classification_performance/classification_quality_metric.py @@ -20,6 +20,7 @@ class ClassificationQualityMetricResult(MetricResult): class Config: + type_alias = "evidently:metric_result:ClassificationQualityMetricResult" field_tags = { "current": {IncludeTags.Current}, "reference": {IncludeTags.Reference}, @@ -32,6 +33,9 @@ class Config: class ClassificationQualityMetric(ThresholdClassificationMetric[ClassificationQualityMetricResult]): + class Config: + type_alias = "evidently:metric:ClassificationQualityMetric" + _confusion_matrix_metric: ClassificationConfusionMatrix def 
__init__( diff --git a/src/evidently/metrics/classification_performance/confusion_matrix_metric.py b/src/evidently/metrics/classification_performance/confusion_matrix_metric.py index 7d92133a31..8d0512f8fb 100644 --- a/src/evidently/metrics/classification_performance/confusion_matrix_metric.py +++ b/src/evidently/metrics/classification_performance/confusion_matrix_metric.py @@ -24,6 +24,7 @@ class ClassificationConfusionMatrixResult(MetricResult): class Config: + type_alias = "evidently:metric_result:ClassificationConfusionMatrixResult" field_tags = { "current_matrix": {IncludeTags.Current}, "reference_matrix": {IncludeTags.Reference}, @@ -46,6 +47,9 @@ def confusion_matric_metric(self): class ClassificationConfusionMatrix( ThresholdClassificationMetric[ClassificationConfusionMatrixResult], ClassificationConfusionMatrixParameters ): + class Config: + type_alias = "evidently:metric:ClassificationConfusionMatrix" + def __init__( self, probas_threshold: Optional[float] = None, diff --git a/src/evidently/metrics/classification_performance/lift_curve_metric.py b/src/evidently/metrics/classification_performance/lift_curve_metric.py index 9d729ac924..6c17a3a136 100644 --- a/src/evidently/metrics/classification_performance/lift_curve_metric.py +++ b/src/evidently/metrics/classification_performance/lift_curve_metric.py @@ -1,143 +1,147 @@ -from typing import List -from typing import Optional - -import pandas as pd - -from evidently.base_metric import InputData -from evidently.base_metric import Metric -from evidently.base_metric import MetricResult -from evidently.calculations.classification_performance import calculate_lift_table -from evidently.calculations.classification_performance import get_prediction_data -from evidently.core import IncludeTags -from evidently.metric_results import LiftCurve -from evidently.metric_results import LiftCurveData -from evidently.metric_results import PredictionData -from evidently.model.widget import BaseWidgetInfo -from evidently.renderers.base_renderer import MetricRenderer -from evidently.renderers.base_renderer import default_renderer -from evidently.renderers.html_widgets import TabData -from evidently.renderers.html_widgets import get_lift_plot_data -from evidently.renderers.html_widgets import header_text -from evidently.renderers.html_widgets import widget_tabs -from evidently.utils.data_operations import process_columns - - -class ClassificationLiftCurveResults(MetricResult): - class Config: - pd_include = False - - field_tags = {"current_lift_curve": {IncludeTags.Current}, "reference_lift_curve": {IncludeTags.Reference}} - - current_lift_curve: Optional[LiftCurve] = None - reference_lift_curve: Optional[LiftCurve] = None - - -class ClassificationLiftCurve(Metric[ClassificationLiftCurveResults]): - def calculate(self, data: InputData) -> ClassificationLiftCurveResults: - dataset_columns = process_columns(data.current_data, data.column_mapping) - target_name = dataset_columns.utility_columns.target - prediction_name = dataset_columns.utility_columns.prediction - if target_name is None or prediction_name is None: - raise ValueError("The columns 'target' and 'prediction' " "columns should be present") - curr_predictions = get_prediction_data(data.current_data, dataset_columns, data.column_mapping.pos_label) - curr_lift_curve = self.calculate_metrics(data.current_data[target_name], curr_predictions) - ref_lift_curve = None - if data.reference_data is not None: - ref_predictions = get_prediction_data( - data.reference_data, - dataset_columns, - 
data.column_mapping.pos_label, - ) - ref_lift_curve = self.calculate_metrics(data.reference_data[target_name], ref_predictions) - return ClassificationLiftCurveResults( - current_lift_curve=curr_lift_curve, - reference_lift_curve=ref_lift_curve, - ) - - def calculate_metrics(self, target_data: pd.Series, prediction: PredictionData) -> LiftCurve: - labels = prediction.labels - if prediction.prediction_probas is None: - raise ValueError("Lift Curve can be calculated only " "on binary probabilistic predictions") - binaraized_target = (target_data.values.reshape(-1, 1) == labels).astype(int) - lift_curve = {} - lift_table = {} - if len(labels) <= 2: - binaraized_target = pd.DataFrame(binaraized_target[:, 0]) - binaraized_target.columns = ["target"] - - binded = list( - zip( - binaraized_target["target"].tolist(), - prediction.prediction_probas.iloc[:, 0].tolist(), - ) - ) - lift_table[prediction.prediction_probas.columns[0]] = calculate_lift_table(binded) - - lift_curve[prediction.prediction_probas.columns[0]] = LiftCurveData( - lift=[i[8] for i in lift_table[prediction.prediction_probas.columns[0]]], - top=[i[0] for i in lift_table[prediction.prediction_probas.columns[0]]], - count=[i[1] for i in lift_table[prediction.prediction_probas.columns[0]]], - prob=[i[2] for i in lift_table[prediction.prediction_probas.columns[0]]], - tp=[i[3] for i in lift_table[prediction.prediction_probas.columns[0]]], - fp=[i[4] for i in lift_table[prediction.prediction_probas.columns[0]]], - precision=[i[5] for i in lift_table[prediction.prediction_probas.columns[0]]], - recall=[i[6] for i in lift_table[prediction.prediction_probas.columns[0]]], - f1_score=[i[7] for i in lift_table[prediction.prediction_probas.columns[0]]], - max_lift=[i[9] for i in lift_table[prediction.prediction_probas.columns[0]]], - relative_lift=[i[10] for i in lift_table[prediction.prediction_probas.columns[0]]], - percent=[i[11] for i in lift_table[prediction.prediction_probas.columns[0]]], - # percent = lift_table[prediction.prediction_probas.columns[0]][0][11], - ) - else: - binaraized_target = pd.DataFrame(binaraized_target) - binaraized_target.columns = labels - - for label in labels: - binded = list( - zip( - binaraized_target[label].tolist(), - prediction.prediction_probas[label], - ) - ) - lift_table[label] = calculate_lift_table(binded) - - for label in labels: - # lift_curve[int(prediction.prediction_probas.columns[0])] = LiftCurveData( - lift_curve[label] = LiftCurveData( - lift=[i[8] for i in lift_table[prediction.prediction_probas.columns[0]]], - top=[i[0] for i in lift_table[prediction.prediction_probas.columns[0]]], - count=[i[1] for i in lift_table[prediction.prediction_probas.columns[0]]], - prob=[i[2] for i in lift_table[prediction.prediction_probas.columns[0]]], - tp=[i[3] for i in lift_table[prediction.prediction_probas.columns[0]]], - fp=[i[4] for i in lift_table[prediction.prediction_probas.columns[0]]], - precision=[i[5] for i in lift_table[prediction.prediction_probas.columns[0]]], - recall=[i[6] for i in lift_table[prediction.prediction_probas.columns[0]]], - f1_score=[i[7] for i in lift_table[prediction.prediction_probas.columns[0]]], - max_lift=[i[9] for i in lift_table[prediction.prediction_probas.columns[0]]], - relative_lift=[i[10] for i in lift_table[prediction.prediction_probas.columns[0]]], - percent=[i[11] for i in lift_table[prediction.prediction_probas.columns[0]]], - # percent = lift_table[prediction.prediction_probas.columns[0]][0][11], - ) - return lift_curve - - 
-@default_renderer(wrap_type=ClassificationLiftCurve) -class ClassificationLiftCurveRenderer(MetricRenderer): - def render_html(self, obj: ClassificationLiftCurve) -> List[BaseWidgetInfo]: - current_lift_curve: Optional[LiftCurve] = obj.get_result().current_lift_curve - reference_lift_curve: Optional[LiftCurve] = obj.get_result().reference_lift_curve - if current_lift_curve is None: - return [] - - tab_data = get_lift_plot_data( - current_lift_curve, - reference_lift_curve, - color_options=self.color_options, - ) - if len(tab_data) == 1: - return [header_text(label="Lift Curve"), tab_data[0][1]] - tabs = [TabData(name, widget) for name, widget in tab_data] - return [ - header_text(label="Lift Curve"), - widget_tabs(title="", tabs=tabs), - ] +from typing import List +from typing import Optional + +import pandas as pd + +from evidently.base_metric import InputData +from evidently.base_metric import Metric +from evidently.base_metric import MetricResult +from evidently.calculations.classification_performance import calculate_lift_table +from evidently.calculations.classification_performance import get_prediction_data +from evidently.core import IncludeTags +from evidently.metric_results import LiftCurve +from evidently.metric_results import LiftCurveData +from evidently.metric_results import PredictionData +from evidently.model.widget import BaseWidgetInfo +from evidently.renderers.base_renderer import MetricRenderer +from evidently.renderers.base_renderer import default_renderer +from evidently.renderers.html_widgets import TabData +from evidently.renderers.html_widgets import get_lift_plot_data +from evidently.renderers.html_widgets import header_text +from evidently.renderers.html_widgets import widget_tabs +from evidently.utils.data_operations import process_columns + + +class ClassificationLiftCurveResults(MetricResult): + class Config: + type_alias = "evidently:metric_result:ClassificationLiftCurveResults" + pd_include = False + + field_tags = {"current_lift_curve": {IncludeTags.Current}, "reference_lift_curve": {IncludeTags.Reference}} + + current_lift_curve: Optional[LiftCurve] = None + reference_lift_curve: Optional[LiftCurve] = None + + +class ClassificationLiftCurve(Metric[ClassificationLiftCurveResults]): + class Config: + type_alias = "evidently:metric:ClassificationLiftCurve" + + def calculate(self, data: InputData) -> ClassificationLiftCurveResults: + dataset_columns = process_columns(data.current_data, data.column_mapping) + target_name = dataset_columns.utility_columns.target + prediction_name = dataset_columns.utility_columns.prediction + if target_name is None or prediction_name is None: + raise ValueError("The columns 'target' and 'prediction' should be present") + curr_predictions = get_prediction_data(data.current_data, dataset_columns, data.column_mapping.pos_label) + curr_lift_curve = self.calculate_metrics(data.current_data[target_name], curr_predictions) + ref_lift_curve = None + if data.reference_data is not None: + ref_predictions = get_prediction_data( + data.reference_data, + dataset_columns, + data.column_mapping.pos_label, + ) + ref_lift_curve = self.calculate_metrics(data.reference_data[target_name], ref_predictions) + return ClassificationLiftCurveResults( + current_lift_curve=curr_lift_curve, + reference_lift_curve=ref_lift_curve, + ) + + def calculate_metrics(self, target_data: pd.Series, prediction: PredictionData) -> LiftCurve: + labels = prediction.labels + if prediction.prediction_probas is None: + raise ValueError("Lift Curve can be
calculated only on binary probabilistic predictions") + binaraized_target = (target_data.values.reshape(-1, 1) == labels).astype(int) + lift_curve = {} + lift_table = {} + if len(labels) <= 2: + binaraized_target = pd.DataFrame(binaraized_target[:, 0]) + binaraized_target.columns = ["target"] + + binded = list( + zip( + binaraized_target["target"].tolist(), + prediction.prediction_probas.iloc[:, 0].tolist(), + ) + ) + lift_table[prediction.prediction_probas.columns[0]] = calculate_lift_table(binded) + + lift_curve[prediction.prediction_probas.columns[0]] = LiftCurveData( + lift=[i[8] for i in lift_table[prediction.prediction_probas.columns[0]]], + top=[i[0] for i in lift_table[prediction.prediction_probas.columns[0]]], + count=[i[1] for i in lift_table[prediction.prediction_probas.columns[0]]], + prob=[i[2] for i in lift_table[prediction.prediction_probas.columns[0]]], + tp=[i[3] for i in lift_table[prediction.prediction_probas.columns[0]]], + fp=[i[4] for i in lift_table[prediction.prediction_probas.columns[0]]], + precision=[i[5] for i in lift_table[prediction.prediction_probas.columns[0]]], + recall=[i[6] for i in lift_table[prediction.prediction_probas.columns[0]]], + f1_score=[i[7] for i in lift_table[prediction.prediction_probas.columns[0]]], + max_lift=[i[9] for i in lift_table[prediction.prediction_probas.columns[0]]], + relative_lift=[i[10] for i in lift_table[prediction.prediction_probas.columns[0]]], + percent=[i[11] for i in lift_table[prediction.prediction_probas.columns[0]]], + # percent = lift_table[prediction.prediction_probas.columns[0]][0][11], + ) + else: + binaraized_target = pd.DataFrame(binaraized_target) + binaraized_target.columns = labels + + for label in labels: + binded = list( + zip( + binaraized_target[label].tolist(), + prediction.prediction_probas[label], + ) + ) + lift_table[label] = calculate_lift_table(binded) + + for label in labels: + # build each label's curve from its own lift table (was erroneously indexed by columns[0]) + lift_curve[label] = LiftCurveData( + lift=[i[8] for i in lift_table[label]], + top=[i[0] for i in lift_table[label]], + count=[i[1] for i in lift_table[label]], + prob=[i[2] for i in lift_table[label]], + tp=[i[3] for i in lift_table[label]], + fp=[i[4] for i in lift_table[label]], + precision=[i[5] for i in lift_table[label]], + recall=[i[6] for i in lift_table[label]], + f1_score=[i[7] for i in lift_table[label]], + max_lift=[i[9] for i in lift_table[label]], + relative_lift=[i[10] for i in lift_table[label]], + percent=[i[11] for i in lift_table[label]], + # percent = lift_table[label][0][11], + ) + return lift_curve + + +@default_renderer(wrap_type=ClassificationLiftCurve) +class ClassificationLiftCurveRenderer(MetricRenderer): + def render_html(self, obj: ClassificationLiftCurve) -> List[BaseWidgetInfo]: + current_lift_curve: Optional[LiftCurve] = obj.get_result().current_lift_curve + reference_lift_curve: Optional[LiftCurve] = obj.get_result().reference_lift_curve + if current_lift_curve is None: + return [] + + tab_data = get_lift_plot_data( + current_lift_curve, + reference_lift_curve, + 
color_options=self.color_options, + ) + if len(tab_data) == 1: + return [header_text(label="Lift Curve"), tab_data[0][1]] + tabs = [TabData(name, widget) for name, widget in tab_data] + return [ + header_text(label="Lift Curve"), + widget_tabs(title="", tabs=tabs), + ] diff --git a/src/evidently/metrics/classification_performance/lift_table_metric.py b/src/evidently/metrics/classification_performance/lift_table_metric.py index 2cc3b709d6..e9dc86391c 100644 --- a/src/evidently/metrics/classification_performance/lift_table_metric.py +++ b/src/evidently/metrics/classification_performance/lift_table_metric.py @@ -1,196 +1,200 @@ -from typing import TYPE_CHECKING -from typing import Any -from typing import Dict -from typing import List -from typing import Optional -from typing import Type -from typing import Union - -import pandas as pd - -from evidently._pydantic_compat import BaseModel -from evidently.base_metric import InputData -from evidently.base_metric import Metric -from evidently.base_metric import MetricResult -from evidently.calculations.classification_performance import calculate_lift_table -from evidently.calculations.classification_performance import get_prediction_data -from evidently.core import IncludeTags -from evidently.metric_results import Label -from evidently.metric_results import PredictionData -from evidently.model.widget import BaseWidgetInfo -from evidently.options.base import AnyOptions -from evidently.renderers.base_renderer import MetricRenderer -from evidently.renderers.base_renderer import default_renderer -from evidently.renderers.html_widgets import TabData -from evidently.renderers.html_widgets import WidgetSize -from evidently.renderers.html_widgets import table_data -from evidently.renderers.html_widgets import widget_tabs -from evidently.utils.data_operations import process_columns - -if TYPE_CHECKING: - from evidently._pydantic_compat import Model - - -class LabelModel(BaseModel): - __root__: Union[int, str] - - def validate(cls: Type["Model"], value: Any): # type: ignore[override, misc] - try: - return int(value) - except TypeError: - return value - - -LiftTable = Dict[Union[LabelModel, Label], List[List[Union[float, int]]]] - - -class ClassificationLiftTableResults(MetricResult): - class Config: - pd_include = False - field_tags = { - "current_lift_table": {IncludeTags.Current}, - "reference_lift_table": {IncludeTags.Reference}, - "top": {IncludeTags.Parameter}, - } - - current_lift_table: Optional[LiftTable] = None - reference_lift_table: Optional[LiftTable] = None - top: Optional[int] = 10 - - -class ClassificationLiftTable(Metric[ClassificationLiftTableResults]): - """ - Evidently metric with inherited behaviour, provides data for lift analysis - - Parameters - ---------- - top: Optional[dict] = 10 - Limit top percentiles for displaying in report - - """ - - top: int - - def __init__(self, top: int = 10, options: AnyOptions = None) -> None: - self.top = top - super().__init__(options=options) - - def calculate(self, data: InputData) -> ClassificationLiftTableResults: - dataset_columns = process_columns(data.current_data, data.column_mapping) - target_name = dataset_columns.utility_columns.target - prediction_name = dataset_columns.utility_columns.prediction - if target_name is None or prediction_name is None: - raise ValueError(("The columns 'target' and 'prediction' " "columns should be present")) - curr_prediction = get_prediction_data(data.current_data, dataset_columns, data.column_mapping.pos_label) - curr_lift_table = 
self.calculate_metrics(data.current_data[target_name], curr_prediction) - ref_lift_table = None - if data.reference_data is not None: - ref_prediction = get_prediction_data( - data.reference_data, - dataset_columns, - data.column_mapping.pos_label, - ) - ref_lift_table = self.calculate_metrics(data.reference_data[target_name], ref_prediction) - return ClassificationLiftTableResults( - current_lift_table=curr_lift_table, - reference_lift_table=ref_lift_table, - top=self.top, - ) - - def calculate_metrics(self, target_data: pd.Series, prediction: PredictionData): - labels = prediction.labels - if prediction.prediction_probas is None: - raise ValueError("Lift Table can be calculated only on " "binary probabilistic predictions") - binaraized_target = (target_data.values.reshape(-1, 1) == labels).astype(int) - lift_table = {} - if len(labels) <= 2: - binaraized_target = pd.DataFrame(binaraized_target[:, 0]) - binaraized_target.columns = ["target"] - - binded = list( - zip( - binaraized_target["target"].tolist(), - prediction.prediction_probas.iloc[:, 0].tolist(), - ) - ) - lift_table[int(prediction.prediction_probas.columns[0])] = calculate_lift_table(binded) - else: - binaraized_target = pd.DataFrame(binaraized_target) - binaraized_target.columns = labels - - for label in labels: - binded = list( - zip( - binaraized_target[label].tolist(), - prediction.prediction_probas[label], - ) - ) - lift_table[int(label)] = calculate_lift_table(binded) - return lift_table - - -@default_renderer(wrap_type=ClassificationLiftTable) -class ClassificationLiftTableRenderer(MetricRenderer): - def render_html(self, obj: ClassificationLiftTable) -> List[BaseWidgetInfo]: - reference_lift_table = obj.get_result().reference_lift_table - current_lift_table = obj.get_result().current_lift_table - top = obj.get_result().top - columns = [ - "Top(%)", - "Count", - "Prob", - "TP", - "FP", - "Precision", - "Recall", - "F1 score", - "Lift", - "Max lift", - "Relative lift", - "Percent", - ] - result = [] - size = WidgetSize.FULL - if current_lift_table is not None: - if len(current_lift_table.keys()) == 1: - result.append( - table_data( - column_names=columns, - data=current_lift_table[list(current_lift_table.keys())[0]][:top], - title="Current: Lift Table", - size=size, - ) - ) - else: - tab_data = [] - for label in current_lift_table.keys(): - table = table_data( - column_names=columns, - data=current_lift_table[label], - title="", - size=size, - ) - tab_data.append(TabData(str(label), table)) - result.append(widget_tabs(title="Current: Lift Table", tabs=tab_data)) - if reference_lift_table is not None: - if len(reference_lift_table.keys()) == 1: - result.append( - table_data( - column_names=columns, - data=reference_lift_table[list(reference_lift_table.keys())[0]][:top], - title="Reference: Lift Table", - size=size, - ) - ) - else: - tab_data = [] - for label in reference_lift_table.keys(): - table = table_data( - column_names=columns, - data=reference_lift_table[label], - title="", - size=size, - ) - tab_data.append(TabData(str(label), table)) - result.append(widget_tabs(title="Reference: Lift Table", tabs=tab_data)) - return result +from typing import TYPE_CHECKING +from typing import Any +from typing import Dict +from typing import List +from typing import Optional +from typing import Type +from typing import Union + +import pandas as pd + +from evidently._pydantic_compat import BaseModel +from evidently.base_metric import InputData +from evidently.base_metric import Metric +from evidently.base_metric import 
MetricResult +from evidently.calculations.classification_performance import calculate_lift_table +from evidently.calculations.classification_performance import get_prediction_data +from evidently.core import IncludeTags +from evidently.metric_results import Label +from evidently.metric_results import PredictionData +from evidently.model.widget import BaseWidgetInfo +from evidently.options.base import AnyOptions +from evidently.renderers.base_renderer import MetricRenderer +from evidently.renderers.base_renderer import default_renderer +from evidently.renderers.html_widgets import TabData +from evidently.renderers.html_widgets import WidgetSize +from evidently.renderers.html_widgets import table_data +from evidently.renderers.html_widgets import widget_tabs +from evidently.utils.data_operations import process_columns + +if TYPE_CHECKING: + from evidently._pydantic_compat import Model + + +class LabelModel(BaseModel): + __root__: Union[int, str] + + def validate(cls: Type["Model"], value: Any):  # type: ignore[override, misc] + try: + return int(value) + except (TypeError, ValueError):  # non-numeric labels are kept as-is + return value + + +LiftTable = Dict[Union[LabelModel, Label], List[List[Union[float, int]]]] + + +class ClassificationLiftTableResults(MetricResult): + class Config: + type_alias = "evidently:metric_result:ClassificationLiftTableResults" + pd_include = False + field_tags = { + "current_lift_table": {IncludeTags.Current}, + "reference_lift_table": {IncludeTags.Reference}, + "top": {IncludeTags.Parameter}, + } + + current_lift_table: Optional[LiftTable] = None + reference_lift_table: Optional[LiftTable] = None + top: Optional[int] = 10 + + +class ClassificationLiftTable(Metric[ClassificationLiftTableResults]): + """ + Evidently metric that provides data for lift analysis. + + Parameters + ---------- + top : int, default 10 + Number of top percentile rows to display in the report. + """ + + class Config: + type_alias = "evidently:metric:ClassificationLiftTable" + + top: int + + def __init__(self, top: int = 10, options: AnyOptions = None) -> None: + self.top = top + super().__init__(options=options) + + def calculate(self, data: InputData) -> ClassificationLiftTableResults: + dataset_columns = process_columns(data.current_data, data.column_mapping) + target_name = dataset_columns.utility_columns.target + prediction_name = dataset_columns.utility_columns.prediction + if target_name is None or prediction_name is None: + raise ValueError("The columns 'target' and 'prediction' should be present") + curr_prediction = get_prediction_data(data.current_data, dataset_columns, data.column_mapping.pos_label) + curr_lift_table = self.calculate_metrics(data.current_data[target_name], curr_prediction) + ref_lift_table = None + if data.reference_data is not None: + ref_prediction = get_prediction_data( + data.reference_data, + dataset_columns, + data.column_mapping.pos_label, + ) + ref_lift_table = self.calculate_metrics(data.reference_data[target_name], ref_prediction) + return ClassificationLiftTableResults( + current_lift_table=curr_lift_table, + reference_lift_table=ref_lift_table, + top=self.top, + ) + + def calculate_metrics(self, target_data: pd.Series, prediction: PredictionData): + labels = prediction.labels + if prediction.prediction_probas is None: + raise ValueError("Lift Table can be calculated only on binary probabilistic predictions") + binaraized_target = (target_data.values.reshape(-1, 1) == labels).astype(int) + lift_table = {} + if len(labels) <= 2: + binaraized_target = 
pd.DataFrame(binaraized_target[:, 0]) + binaraized_target.columns = ["target"] + + binded = list( + zip( + binaraized_target["target"].tolist(), + prediction.prediction_probas.iloc[:, 0].tolist(), + ) + ) + lift_table[int(prediction.prediction_probas.columns[0])] = calculate_lift_table(binded) + else: + binaraized_target = pd.DataFrame(binaraized_target) + binaraized_target.columns = labels + + for label in labels: + binded = list( + zip( + binaraized_target[label].tolist(), + prediction.prediction_probas[label], + ) + ) + lift_table[int(label)] = calculate_lift_table(binded) + return lift_table + + +@default_renderer(wrap_type=ClassificationLiftTable) +class ClassificationLiftTableRenderer(MetricRenderer): + def render_html(self, obj: ClassificationLiftTable) -> List[BaseWidgetInfo]: + reference_lift_table = obj.get_result().reference_lift_table + current_lift_table = obj.get_result().current_lift_table + top = obj.get_result().top + columns = [ + "Top(%)", + "Count", + "Prob", + "TP", + "FP", + "Precision", + "Recall", + "F1 score", + "Lift", + "Max lift", + "Relative lift", + "Percent", + ] + result = [] + size = WidgetSize.FULL + if current_lift_table is not None: + if len(current_lift_table.keys()) == 1: + result.append( + table_data( + column_names=columns, + data=current_lift_table[list(current_lift_table.keys())[0]][:top], + title="Current: Lift Table", + size=size, + ) + ) + else: + tab_data = [] + for label in current_lift_table.keys(): + table = table_data( + column_names=columns, + data=current_lift_table[label], + title="", + size=size, + ) + tab_data.append(TabData(str(label), table)) + result.append(widget_tabs(title="Current: Lift Table", tabs=tab_data)) + if reference_lift_table is not None: + if len(reference_lift_table.keys()) == 1: + result.append( + table_data( + column_names=columns, + data=reference_lift_table[list(reference_lift_table.keys())[0]][:top], + title="Reference: Lift Table", + size=size, + ) + ) + else: + tab_data = [] + for label in reference_lift_table.keys(): + table = table_data( + column_names=columns, + data=reference_lift_table[label], + title="", + size=size, + ) + tab_data.append(TabData(str(label), table)) + result.append(widget_tabs(title="Reference: Lift Table", tabs=tab_data)) + return result diff --git a/src/evidently/metrics/classification_performance/objects.py b/src/evidently/metrics/classification_performance/objects.py index d21ebcdaed..e2bdaebf4e 100644 --- a/src/evidently/metrics/classification_performance/objects.py +++ b/src/evidently/metrics/classification_performance/objects.py @@ -10,6 +10,9 @@ class ClassMetric(MetricResult): + class Config: + type_alias = "evidently:metric_result:ClassMetric" + precision: float recall: float f1: float @@ -20,6 +23,9 @@ class ClassMetric(MetricResult): class ClassificationReport(MetricResult): + class Config: + type_alias = "evidently:metric_result:ClassificationReport" + classes: ClassesMetrics accuracy: float macro_avg: ClassMetric = Field(alias="macro avg") diff --git a/src/evidently/metrics/classification_performance/pr_curve_metric.py b/src/evidently/metrics/classification_performance/pr_curve_metric.py index 58ab185fca..b2537bc6fe 100644 --- a/src/evidently/metrics/classification_performance/pr_curve_metric.py +++ b/src/evidently/metrics/classification_performance/pr_curve_metric.py @@ -24,6 +24,7 @@ class ClassificationPRCurveResults(MetricResult): class Config: + type_alias = "evidently:metric_result:ClassificationPRCurveResults" pd_include = False field_tags = {"current_pr_curve": 
{IncludeTags.Current}, "reference_pr_curve": {IncludeTags.Reference}} @@ -33,6 +34,9 @@ class Config: class ClassificationPRCurve(Metric[ClassificationPRCurveResults]): + class Config: + type_alias = "evidently:metric:ClassificationPRCurve" + def calculate(self, data: InputData) -> ClassificationPRCurveResults: dataset_columns = process_columns(data.current_data, data.column_mapping) target_name = dataset_columns.utility_columns.target diff --git a/src/evidently/metrics/classification_performance/pr_table_metric.py b/src/evidently/metrics/classification_performance/pr_table_metric.py index a0e2cdaf1d..a12e26eed0 100644 --- a/src/evidently/metrics/classification_performance/pr_table_metric.py +++ b/src/evidently/metrics/classification_performance/pr_table_metric.py @@ -45,6 +45,7 @@ def validate(cls: Type["Model"], value: Any): # type: ignore[override, misc] class ClassificationPRTableResults(MetricResult): class Config: + type_alias = "evidently:metric_result:ClassificationPRTableResults" pd_include = False field_tags = {"current": {IncludeTags.Current}, "reference": {IncludeTags.Reference}} @@ -53,6 +54,9 @@ class Config: class ClassificationPRTable(Metric[ClassificationPRTableResults]): + class Config: + type_alias = "evidently:metric:ClassificationPRTable" + def calculate(self, data: InputData) -> ClassificationPRTableResults: dataset_columns = process_columns(data.current_data, data.column_mapping) target_name = dataset_columns.utility_columns.target diff --git a/src/evidently/metrics/classification_performance/probability_distribution_metric.py b/src/evidently/metrics/classification_performance/probability_distribution_metric.py index c060a52382..1a1f014207 100644 --- a/src/evidently/metrics/classification_performance/probability_distribution_metric.py +++ b/src/evidently/metrics/classification_performance/probability_distribution_metric.py @@ -23,6 +23,7 @@ class ClassificationProbDistributionResults(MetricResult): class Config: + type_alias = "evidently:metric_result:ClassificationProbDistributionResults" dict_include = False pd_include = False tags = {IncludeTags.Render} @@ -34,6 +35,9 @@ class Config: class ClassificationProbDistribution(Metric[ClassificationProbDistributionResults]): + class Config: + type_alias = "evidently:metric:ClassificationProbDistribution" + @staticmethod def get_distribution(dataset: pd.DataFrame, target_name: str, prediction_labels: Iterable) -> Dict[str, list]: result = {} diff --git a/src/evidently/metrics/classification_performance/quality_by_class_metric.py b/src/evidently/metrics/classification_performance/quality_by_class_metric.py index 7e63ee8480..9bf73c2fe2 100644 --- a/src/evidently/metrics/classification_performance/quality_by_class_metric.py +++ b/src/evidently/metrics/classification_performance/quality_by_class_metric.py @@ -27,6 +27,9 @@ class ClassificationQuality(MetricResult): + class Config: + type_alias = "evidently:metric_result:ClassificationQuality" + metrics: ClassesMetrics roc_aucs: Optional[List[float]] @@ -37,6 +40,7 @@ def metrics_dict(self): class ClassificationQualityByClassResult(MetricResult): class Config: + type_alias = "evidently:metric_result:ClassificationQualityByClassResult" field_tags = { "current": {IncludeTags.Current}, "reference": {IncludeTags.Reference}, @@ -62,6 +66,9 @@ def get_pandas(self) -> pd.DataFrame: class ClassificationQualityByClass(ThresholdClassificationMetric[ClassificationQualityByClassResult]): + class Config: + type_alias = "evidently:metric:ClassificationQualityByClass" + def __init__( self, 
probas_threshold: Optional[float] = None, diff --git a/src/evidently/metrics/classification_performance/quality_by_feature_table.py b/src/evidently/metrics/classification_performance/quality_by_feature_table.py index 7ee66529cf..c89235cff4 100644 --- a/src/evidently/metrics/classification_performance/quality_by_feature_table.py +++ b/src/evidently/metrics/classification_performance/quality_by_feature_table.py @@ -34,6 +34,7 @@ class ClassificationQualityByFeatureTableResults(MetricResult): class Config: + type_alias = "evidently:metric_result:ClassificationQualityByFeatureTableResults" field_tags = { "current": {IncludeTags.Current}, "reference": {IncludeTags.Reference}, @@ -49,6 +50,9 @@ class Config: class ClassificationQualityByFeatureTable(UsesRawDataMixin, Metric[ClassificationQualityByFeatureTableResults]): + class Config: + type_alias = "evidently:metric:ClassificationQualityByFeatureTable" + columns: Optional[List[str]] descriptors: Optional[Dict[str, Dict[str, FeatureDescriptor]]] _text_features_gen: Optional[Dict[str, Dict[str, GeneratedFeature]]] diff --git a/src/evidently/metrics/classification_performance/roc_curve_metric.py b/src/evidently/metrics/classification_performance/roc_curve_metric.py index 16ebab6477..4850a7aae7 100644 --- a/src/evidently/metrics/classification_performance/roc_curve_metric.py +++ b/src/evidently/metrics/classification_performance/roc_curve_metric.py @@ -24,6 +24,7 @@ class ClassificationRocCurveResults(MetricResult): class Config: + type_alias = "evidently:metric_result:ClassificationRocCurveResults" pd_include = False field_tags = {"current_roc_curve": {IncludeTags.Current}, "reference_roc_curve": {IncludeTags.Reference}} @@ -33,6 +34,9 @@ class Config: class ClassificationRocCurve(Metric[ClassificationRocCurveResults]): + class Config: + type_alias = "evidently:metric:ClassificationRocCurve" + def calculate(self, data: InputData) -> ClassificationRocCurveResults: dataset_columns = process_columns(data.current_data, data.column_mapping) target_name = dataset_columns.utility_columns.target diff --git a/src/evidently/metrics/custom_metric.py b/src/evidently/metrics/custom_metric.py index 135e108e66..5dd4ebd2b3 100644 --- a/src/evidently/metrics/custom_metric.py +++ b/src/evidently/metrics/custom_metric.py @@ -17,6 +17,9 @@ class CustomCallableMetricResult(MetricResult): + class Config: + type_alias = "evidently:metric_result:CustomCallableMetricResult" + value: float @@ -24,6 +27,9 @@ class CustomCallableMetricResult(MetricResult): class CustomValueMetric(Metric[CustomCallableMetricResult]): + class Config: + type_alias = "evidently:metric:CustomValueMetric" + func: str title: Optional[str] = None size: Optional[WidgetSize] = None diff --git a/src/evidently/metrics/data_drift/column_drift_metric.py b/src/evidently/metrics/data_drift/column_drift_metric.py index bfd6bfeca7..097ad41cfc 100644 --- a/src/evidently/metrics/data_drift/column_drift_metric.py +++ b/src/evidently/metrics/data_drift/column_drift_metric.py @@ -247,6 +247,9 @@ def get_one_column_drift( class ColumnDriftMetric(UsesRawDataMixin, ColumnMetric[ColumnDataDriftMetrics]): + class Config: + type_alias = "evidently:metric:ColumnDriftMetric" + """Calculate drift metric for a column""" stattest: Optional[PossibleStatTestType] diff --git a/src/evidently/metrics/data_drift/column_interaction_plot.py b/src/evidently/metrics/data_drift/column_interaction_plot.py index cd20c81fbb..b04eb439e5 100644 --- a/src/evidently/metrics/data_drift/column_interaction_plot.py +++ 
b/src/evidently/metrics/data_drift/column_interaction_plot.py @@ -37,6 +37,7 @@ class ColumnInteractionPlotResults(MetricResult): class Config: + type_alias = "evidently:metric_result:ColumnInteractionPlotResults" dict_include = False pd_include = False tags = {IncludeTags.Render} @@ -65,6 +66,9 @@ class Config: class ColumnInteractionPlot(UsesRawDataMixin, Metric[ColumnInteractionPlotResults]): + class Config: + type_alias = "evidently:metric:ColumnInteractionPlot" + x_column: str y_column: str diff --git a/src/evidently/metrics/data_drift/column_value_plot.py b/src/evidently/metrics/data_drift/column_value_plot.py index 03e5537122..883e6b0349 100644 --- a/src/evidently/metrics/data_drift/column_value_plot.py +++ b/src/evidently/metrics/data_drift/column_value_plot.py @@ -32,6 +32,7 @@ class ColumnValuePlotResults(MetricResult): class Config: + type_alias = "evidently:metric_result:ColumnValuePlotResults" dict_include = False pd_include = False tags = {IncludeTags.Render} @@ -48,6 +49,9 @@ class Config: class ColumnValuePlot(UsesRawDataMixin, Metric[ColumnValuePlotResults]): + class Config: + type_alias = "evidently:metric:ColumnValuePlot" + column_name: str def __init__(self, column_name: str, options: AnyOptions = None): diff --git a/src/evidently/metrics/data_drift/data_drift_table.py b/src/evidently/metrics/data_drift/data_drift_table.py index 964d020040..f611bdebc7 100644 --- a/src/evidently/metrics/data_drift/data_drift_table.py +++ b/src/evidently/metrics/data_drift/data_drift_table.py @@ -36,6 +36,7 @@ class DataDriftTableResults(MetricResult): class Config: + type_alias = "evidently:metric_result:DataDriftTableResults" dict_exclude_fields = {"dataset_columns"} field_tags = { "current_fi": {IncludeTags.Extra, IncludeTags.Current}, @@ -53,6 +54,9 @@ class Config: class DataDriftTable(UsesRawDataMixin, WithDriftOptions[DataDriftTableResults]): + class Config: + type_alias = "evidently:metric:DataDriftTable" + columns: Optional[List[str]] feature_importance: Optional[bool] _feature_importance_metric: Optional[FeatureImportanceMetric] diff --git a/src/evidently/metrics/data_drift/dataset_drift_metric.py b/src/evidently/metrics/data_drift/dataset_drift_metric.py index a4e0f1cfd1..c1fe1be46e 100644 --- a/src/evidently/metrics/data_drift/dataset_drift_metric.py +++ b/src/evidently/metrics/data_drift/dataset_drift_metric.py @@ -18,6 +18,9 @@ class DatasetDriftMetricResults(MetricResult): + class Config: + type_alias = "evidently:metric_result:DatasetDriftMetricResults" + drift_share: float number_of_columns: int number_of_drifted_columns: int @@ -28,6 +31,9 @@ class DatasetDriftMetricResults(MetricResult): class DatasetDriftMetric( WithDriftOptions[DatasetDriftMetricResults], ): + class Config: + type_alias = "evidently:metric:DatasetDriftMetric" + columns: Optional[List[str]] drift_share: float diff --git a/src/evidently/metrics/data_drift/embedding_drift_methods.py b/src/evidently/metrics/data_drift/embedding_drift_methods.py index 3b7bf97253..7383f6b4bd 100644 --- a/src/evidently/metrics/data_drift/embedding_drift_methods.py +++ b/src/evidently/metrics/data_drift/embedding_drift_methods.py @@ -19,6 +19,7 @@ from evidently.calculations.stattests import get_stattest from evidently.core import ColumnType from evidently.pydantic_utils import EvidentlyBaseModel +from evidently.pydantic_utils import autoregister DISTANCE_DICT = { "euclidean": euclidean, @@ -46,12 +47,19 @@ def get_pca_df( class DriftMethod(EvidentlyBaseModel): + class Config: + is_base_type = True + @abc.abstractmethod def 
__call__(self, current_emb: pd.DataFrame, reference_emb: pd.DataFrame) -> Tuple[float, bool, str]: raise NotImplementedError +@autoregister class DistanceDriftMethod(DriftMethod): + class Config: + type_alias = "evidently:drift_method:DistanceDriftMethod" + dist: str = "euclidean" threshold: float = 0.2 bootstrap: Optional[bool] = None @@ -119,7 +127,11 @@ def calc_roc_auc_random(y_test, i): return roc_auc_random +@autoregister class ModelDriftMethod(DriftMethod): + class Config: + type_alias = "evidently:drift_method:ModelDriftMethod" + threshold: float = 0.55 bootstrap: Optional[bool] = None quantile_probability: float = 0.95 @@ -174,7 +186,11 @@ def model( ) +@autoregister class RatioDriftMethod(DriftMethod): + class Config: + type_alias = "evidently:drift_method:RatioDriftMethod" + component_stattest: str = "wasserstein" component_stattest_threshold: float = 0.1 threshold: float = 0.2 @@ -249,7 +265,11 @@ def MMD2u_bstrp(K, m, n, x_idx, y_idx): ) +@autoregister class MMDDriftMethod(DriftMethod): + class Config: + type_alias = "evidently:drift_method:MMDDriftMethod" + threshold: float = 0.015 bootstrap: Optional[bool] = None quantile_probability: float = 0.05 diff --git a/src/evidently/metrics/data_drift/embeddings_drift.py b/src/evidently/metrics/data_drift/embeddings_drift.py index 99dd354537..345a6eb0ca 100644 --- a/src/evidently/metrics/data_drift/embeddings_drift.py +++ b/src/evidently/metrics/data_drift/embeddings_drift.py @@ -27,6 +27,7 @@ class EmbeddingsDriftMetricResults(MetricResult): class Config: + type_alias = "evidently:metric_result:EmbeddingsDriftMetricResults" dict_exclude_fields = { "reference", "current", @@ -48,6 +49,9 @@ class Config: class EmbeddingsDriftMetric(Metric[EmbeddingsDriftMetricResults]): + class Config: + type_alias = "evidently:metric:EmbeddingsDriftMetric" + embeddings_name: str drift_method: Optional[DriftMethod] diff --git a/src/evidently/metrics/data_drift/feature_importance.py b/src/evidently/metrics/data_drift/feature_importance.py index 26b4a1ae6a..028881fe99 100644 --- a/src/evidently/metrics/data_drift/feature_importance.py +++ b/src/evidently/metrics/data_drift/feature_importance.py @@ -23,6 +23,7 @@ class FeatureImportanceMetricResult(MetricResult): class Config: + type_alias = "evidently:metric_result:FeatureImportanceMetricResult" field_tags = {"current": {IncludeTags.Current}, "reference": {IncludeTags.Reference}} current: Optional[Dict[str, float]] = None @@ -41,6 +42,9 @@ def get_pandas(self) -> pd.DataFrame: class FeatureImportanceMetric(Metric[FeatureImportanceMetricResult]): + class Config: + type_alias = "evidently:metric:FeatureImportanceMetric" + def calculate(self, data: InputData) -> FeatureImportanceMetricResult: if data.additional_data.get("current_feature_importance") is not None: return FeatureImportanceMetricResult( diff --git a/src/evidently/metrics/data_drift/target_by_features_table.py b/src/evidently/metrics/data_drift/target_by_features_table.py index efc0c04fea..af8d7cd9c4 100644 --- a/src/evidently/metrics/data_drift/target_by_features_table.py +++ b/src/evidently/metrics/data_drift/target_by_features_table.py @@ -34,6 +34,7 @@ class TargetByFeaturesTableResults(MetricResult): class Config: + type_alias = "evidently:metric_result:TargetByFeaturesTableResults" dict_include = False field_tags = { "current": {IncludeTags.Current}, @@ -51,6 +52,9 @@ class Config: class TargetByFeaturesTable(UsesRawDataMixin, Metric[TargetByFeaturesTableResults]): + class Config: + type_alias = "evidently:metric:TargetByFeaturesTable" + 
columns: Optional[List[str]] _text_features_gen: Optional[ Dict[ diff --git a/src/evidently/metrics/data_drift/text_descriptors_drift_metric.py b/src/evidently/metrics/data_drift/text_descriptors_drift_metric.py index 722ea5f036..5bd2c38135 100644 --- a/src/evidently/metrics/data_drift/text_descriptors_drift_metric.py +++ b/src/evidently/metrics/data_drift/text_descriptors_drift_metric.py @@ -42,6 +42,9 @@ class TextDescriptorsDriftMetricResults(MetricResult): + class Config: + type_alias = "evidently:metric_result:TextDescriptorsDriftMetricResults" + number_of_columns: int number_of_drifted_columns: int share_of_drifted_columns: float @@ -51,6 +54,9 @@ class TextDescriptorsDriftMetricResults(MetricResult): class TextDescriptorsDriftMetric(UsesRawDataMixin, Metric[TextDescriptorsDriftMetricResults]): + class Config: + type_alias = "evidently:metric:TextDescriptorsDriftMetric" + column_name: str stattest: Optional[PossibleStatTestType] = None stattest_threshold: Optional[float] = None diff --git a/src/evidently/metrics/data_drift/text_domain_classifier_drift_metric.py b/src/evidently/metrics/data_drift/text_domain_classifier_drift_metric.py index 77846c6964..01c7eb7a9c 100644 --- a/src/evidently/metrics/data_drift/text_domain_classifier_drift_metric.py +++ b/src/evidently/metrics/data_drift/text_domain_classifier_drift_metric.py @@ -26,12 +26,16 @@ class TextDomainField(MetricResult): + class Config: + type_alias = "evidently:metric_result:TextDomainField" + characteristic_examples: Optional[List[str]] characteristic_words: Optional[List[str]] class TextDomainClassifierDriftResult(MetricResult): class Config: + type_alias = "evidently:metric_result:TextDomainClassifierDriftResult" field_tags = { "current": {IncludeTags.Current, IncludeTags.Extra}, "reference": {IncludeTags.Reference, IncludeTags.Extra}, @@ -47,6 +51,9 @@ class Config: class TextDomainClassifierDriftMetric(Metric[TextDomainClassifierDriftResult]): + class Config: + type_alias = "evidently:metric:TextDomainClassifierDriftMetric" + text_column_name: str def __init__(self, text_column_name: str, options: AnyOptions = None) -> None: diff --git a/src/evidently/metrics/data_drift/text_metric.py b/src/evidently/metrics/data_drift/text_metric.py index e886b541e6..24a7f7b5d8 100644 --- a/src/evidently/metrics/data_drift/text_metric.py +++ b/src/evidently/metrics/data_drift/text_metric.py @@ -13,6 +13,7 @@ class CommentResults(MetricResult): class Config: + type_alias = "evidently:metric_result:CommentResults" dict_include = False tags = {IncludeTags.Render} @@ -20,6 +21,9 @@ class Config: class Comment(Metric[CommentResults]): + class Config: + type_alias = "evidently:metric:Comment" + text: str def __init__(self, text: str, options: AnyOptions = None): diff --git a/src/evidently/metrics/data_integrity/column_missing_values_metric.py b/src/evidently/metrics/data_integrity/column_missing_values_metric.py index 10dafbac3f..431cae96bf 100644 --- a/src/evidently/metrics/data_integrity/column_missing_values_metric.py +++ b/src/evidently/metrics/data_integrity/column_missing_values_metric.py @@ -27,6 +27,7 @@ class ColumnMissingValues(MetricResult): """Statistics about missing values in a column""" class Config: + type_alias = "evidently:metric_result:ColumnMissingValues" pd_exclude_fields = {"different_missing_values"} field_tags = {"number_of_rows": {IncludeTags.Extra}, "different_missing_values": {IncludeTags.Extra}} @@ -44,6 +45,7 @@ class Config: class ColumnMissingValuesMetricResult(MetricResult): class Config: + type_alias = 
"evidently:metric_result:ColumnMissingValuesMetricResult" field_tags = { "current": {IncludeTags.Current}, "reference": {IncludeTags.Reference}, @@ -56,6 +58,9 @@ class Config: class ColumnMissingValuesMetric(Metric[ColumnMissingValuesMetricResult]): + class Config: + type_alias = "evidently:metric:ColumnMissingValuesMetric" + """Count missing values in a column. Missing value is a null or NaN value. diff --git a/src/evidently/metrics/data_integrity/column_regexp_metric.py b/src/evidently/metrics/data_integrity/column_regexp_metric.py index c112898265..697188a83e 100644 --- a/src/evidently/metrics/data_integrity/column_regexp_metric.py +++ b/src/evidently/metrics/data_integrity/column_regexp_metric.py @@ -28,6 +28,7 @@ class DataIntegrityValueByRegexpStat(MetricResult): """Statistics about matched by a regular expression values in a column for one dataset""" class Config: + type_alias = "evidently:metric_result:DataIntegrityValueByRegexpStat" pd_exclude_fields = {"table_of_matched", "table_of_not_matched"} field_tags = { @@ -50,6 +51,7 @@ class Config: class DataIntegrityValueByRegexpMetricResult(MetricResult): class Config: + type_alias = "evidently:metric_result:DataIntegrityValueByRegexpMetricResult" field_tags = { "current": {IncludeTags.Current}, "reference": {IncludeTags.Reference}, @@ -70,6 +72,9 @@ class Config: class ColumnRegExpMetric(Metric[DataIntegrityValueByRegexpMetricResult]): + class Config: + type_alias = "evidently:metric:ColumnRegExpMetric" + """Count number of values in a column matched or not by a regular expression (regexp)""" # name of the column that we check diff --git a/src/evidently/metrics/data_integrity/column_summary_metric.py b/src/evidently/metrics/data_integrity/column_summary_metric.py index 08cf1b45bc..eaf286be8f 100644 --- a/src/evidently/metrics/data_integrity/column_summary_metric.py +++ b/src/evidently/metrics/data_integrity/column_summary_metric.py @@ -54,6 +54,9 @@ class ColumnCharacteristics(MetricResult): + class Config: + type_alias = "evidently:metric_result:ColumnCharacteristics" + number_of_rows: int count: int missing: Optional[int] @@ -61,6 +64,9 @@ class ColumnCharacteristics(MetricResult): class NumericCharacteristics(ColumnCharacteristics): + class Config: + type_alias = "evidently:metric_result:NumericCharacteristics" + mean: Optional[Numeric] std: Optional[Numeric] min: Optional[Numeric] @@ -77,6 +83,9 @@ class NumericCharacteristics(ColumnCharacteristics): class CategoricalCharacteristics(ColumnCharacteristics): + class Config: + type_alias = "evidently:metric_result:CategoricalCharacteristics" + unique: Optional[int] unique_percentage: Optional[float] most_common: Optional[object] @@ -86,6 +95,9 @@ class CategoricalCharacteristics(ColumnCharacteristics): class DatetimeCharacteristics(ColumnCharacteristics): + class Config: + type_alias = "evidently:metric_result:DatetimeCharacteristics" + unique: Optional[int] unique_percentage: Optional[float] most_common: Optional[object] @@ -95,6 +107,9 @@ class DatetimeCharacteristics(ColumnCharacteristics): class TextCharacteristics(ColumnCharacteristics): + class Config: + type_alias = "evidently:metric_result:TextCharacteristics" + text_length_min: Optional[float] text_length_mean: Optional[float] text_length_max: Optional[float] @@ -108,6 +123,7 @@ class TextCharacteristics(ColumnCharacteristics): class DataInTimePlots(MetricResult): class Config: + type_alias = "evidently:metric_result:DataInTimePlots" field_tags = {"current": {IncludeTags.Current}, "reference": {IncludeTags.Reference}} 
current: pd.DataFrame @@ -115,6 +131,9 @@ class Config: class DataInTime(MetricResult): + class Config: + type_alias = "evidently:metric_result:DataInTime" + data_for_plots: DataInTimePlots freq: str datetime_name: str @@ -122,6 +141,7 @@ class DataInTime(MetricResult): class DataByTarget(MetricResult): class Config: + type_alias = "evidently:metric_result:DataByTarget" smart_union = True box_data: Optional[Dict[str, dict]] @@ -134,6 +154,7 @@ class Config: class DataQualityPlot(MetricResult): class Config: + type_alias = "evidently:metric_result:DataQualityPlot" dict_include = False pd_include = False tags = {IncludeTags.Render} @@ -308,6 +329,7 @@ def _split_periods(curr_data: pd.DataFrame, ref_data: pd.DataFrame, feature_name class ColumnSummaryResult(ColumnMetricResult): class Config: + type_alias = "evidently:metric_result:ColumnSummaryResult" pd_name_mapping = { "reference_characteristics": "reference", "current_characteristics": "current", @@ -324,6 +346,9 @@ class Config: class ColumnSummaryMetric(UsesRawDataMixin, ColumnMetric[ColumnSummaryResult]): + class Config: + type_alias = "evidently:metric:ColumnSummaryMetric" + _generated_text_features: Optional[Dict[str, Union[TextLength, NonLetterCharacterPercentage, OOVWordsPercentage]]] def __init__(self, column_name: Union[str, ColumnName], options: AnyOptions = None): diff --git a/src/evidently/metrics/data_integrity/dataset_missing_values_metric.py b/src/evidently/metrics/data_integrity/dataset_missing_values_metric.py index 6b922d025f..27c0aa094a 100644 --- a/src/evidently/metrics/data_integrity/dataset_missing_values_metric.py +++ b/src/evidently/metrics/data_integrity/dataset_missing_values_metric.py @@ -45,6 +45,7 @@ class DatasetMissingValues(MetricResult): """Statistics about missed values in a dataset""" class Config: + type_alias = "evidently:metric_result:DatasetMissingValues" pd_exclude_fields = { "different_missing_values_by_column", "different_missing_values", @@ -99,6 +100,7 @@ class Config: class DatasetMissingValuesMetricResult(MetricResult): class Config: + type_alias = "evidently:metric_result:DatasetMissingValuesMetricResult" field_tags = {"current": {IncludeTags.Current}, "reference": {IncludeTags.Reference}} current: DatasetMissingValues @@ -106,6 +108,9 @@ class Config: class DatasetMissingValuesMetric(Metric[DatasetMissingValuesMetricResult]): + class Config: + type_alias = "evidently:metric:DatasetMissingValuesMetric" + """Count missing values in a dataset. Missing value is a null or NaN value. 
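The same registration pattern repeats through the rest of the patch: every concrete Metric and MetricResult declares a Config.type_alias, base types opt in with is_base_type, and subclasses that live alongside their base register eagerly via @autoregister. As a rough sketch of how a class plugs into this registry, the snippet below defines a drift method against the DriftMethod base changed above; the class name, alias string, and scoring logic are illustrative assumptions, not part of this change set:

from typing import Tuple

import pandas as pd

from evidently.metrics.data_drift.embedding_drift_methods import DriftMethod
from evidently.pydantic_utils import autoregister


@autoregister  # records (DriftMethod, alias) -> classpath in the alias map at import time
class MeanShiftDriftMethod(DriftMethod):  # hypothetical subclass, not in this patch
    class Config:
        # assumed to follow the "evidently:drift_method:<Name>" naming used above
        type_alias = "evidently:drift_method:MeanShiftDriftMethod"

    threshold: float = 0.1

    def __call__(self, current_emb: pd.DataFrame, reference_emb: pd.DataFrame) -> Tuple[float, bool, str]:
        # toy drift score: mean absolute shift of the per-dimension means
        score = float((current_emb.mean() - reference_emb.mean()).abs().mean())
        return score, score > self.threshold, "mean shift"

Since alias_required defaults to True, a concrete subclass without a type_alias now fails in __get_type__ instead of silently falling back to its classpath, which is what makes the per-class Config additions throughout this patch load-bearing rather than cosmetic.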
diff --git a/src/evidently/metrics/data_integrity/dataset_summary_metric.py b/src/evidently/metrics/data_integrity/dataset_summary_metric.py index 373d86a532..dc121385dc 100644 --- a/src/evidently/metrics/data_integrity/dataset_summary_metric.py +++ b/src/evidently/metrics/data_integrity/dataset_summary_metric.py @@ -54,6 +54,7 @@ class DatasetSummary(MetricResult): """Columns information in a dataset""" class Config: + type_alias = "evidently:metric_result:DatasetSummary" dict_exclude_fields = {"columns_type_data"} pd_exclude_fields = {"columns_type_data", "nans_by_columns", "number_uniques_by_columns"} @@ -96,6 +97,7 @@ def columns_type(self) -> Dict[Label, np.dtype]: class DatasetSummaryMetricResult(MetricResult): class Config: + type_alias = "evidently:metric_result:DatasetSummaryMetricResult" field_tags = { "almost_duplicated_threshold": {IncludeTags.Parameter}, "current": {IncludeTags.Current}, @@ -108,6 +110,9 @@ class Config: class DatasetSummaryMetric(Metric[DatasetSummaryMetricResult]): + class Config: + type_alias = "evidently:metric:DatasetSummaryMetric" + """Common dataset(s) columns/features characteristics""" # threshold for calculating the number of almost duplicated columns diff --git a/src/evidently/metrics/data_quality/column_category_metric.py b/src/evidently/metrics/data_quality/column_category_metric.py index 9425461a7b..380db52257 100644 --- a/src/evidently/metrics/data_quality/column_category_metric.py +++ b/src/evidently/metrics/data_quality/column_category_metric.py @@ -22,6 +22,7 @@ class CategoryStat(MetricResult): class Config: + type_alias = "evidently:metric_result:CategoryStat" field_tags = {"all_num": {IncludeTags.Extra}} all_num: int @@ -30,12 +31,16 @@ class Config: class CountOfValues(MetricResult): + class Config: + type_alias = "evidently:metric_result:CountOfValues" + current: HistogramData reference: Optional[HistogramData] = None class ColumnCategoryMetricResult(MetricResult): class Config: + type_alias = "evidently:metric_result:ColumnCategoryMetricResult" pd_exclude_fields = {"counts"} field_tags = { "current": {IncludeTags.Current}, @@ -78,6 +83,7 @@ class ColumnCategoryMetric(Metric[ColumnCategoryMetricResult]): """Calculates count and shares of values in the predefined values list""" class Config: + type_alias = "evidently:metric:ColumnCategoryMetric" smart_union = True column_name: ColumnName diff --git a/src/evidently/metrics/data_quality/column_correlations_metric.py b/src/evidently/metrics/data_quality/column_correlations_metric.py index cb89afd9c4..31ce9a9c10 100644 --- a/src/evidently/metrics/data_quality/column_correlations_metric.py +++ b/src/evidently/metrics/data_quality/column_correlations_metric.py @@ -27,6 +27,7 @@ class ColumnCorrelationsMetricResult(MetricResult): class Config: + type_alias = "evidently:metric_result:ColumnCorrelationsMetricResult" field_tags = { "current": {IncludeTags.Current}, "reference": {IncludeTags.Reference}, @@ -54,6 +55,9 @@ def get_pandas(self) -> pd.DataFrame: class ColumnCorrelationsMetric(Metric[ColumnCorrelationsMetricResult]): + class Config: + type_alias = "evidently:metric:ColumnCorrelationsMetric" + """Calculates correlations between the selected column and all the other columns. 
In the current and reference (if present) datasets""" diff --git a/src/evidently/metrics/data_quality/column_distribution_metric.py b/src/evidently/metrics/data_quality/column_distribution_metric.py index ccca1c2639..eb8f9ab32f 100644 --- a/src/evidently/metrics/data_quality/column_distribution_metric.py +++ b/src/evidently/metrics/data_quality/column_distribution_metric.py @@ -25,6 +25,7 @@ class ColumnDistributionMetricResult(MetricResult): class Config: + type_alias = "evidently:metric_result:ColumnDistributionMetricResult" field_tags = { "current": {IncludeTags.Current}, "reference": {IncludeTags.Reference}, @@ -37,6 +38,9 @@ class Config: class ColumnDistributionMetric(Metric[ColumnDistributionMetricResult]): + class Config: + type_alias = "evidently:metric:ColumnDistributionMetric" + """Calculates distribution for the column""" column_name: ColumnName diff --git a/src/evidently/metrics/data_quality/column_quantile_metric.py b/src/evidently/metrics/data_quality/column_quantile_metric.py index b2f5f00dd8..af0ec30af3 100644 --- a/src/evidently/metrics/data_quality/column_quantile_metric.py +++ b/src/evidently/metrics/data_quality/column_quantile_metric.py @@ -27,6 +27,9 @@ class QuantileStats(MetricResult): + class Config: + type_alias = "evidently:metric_result:QuantileStats" + value: float # calculated value of the quantile distribution: Distribution @@ -35,6 +38,7 @@ class QuantileStats(MetricResult): class ColumnQuantileMetricResult(ColumnMetricResult): class Config: + type_alias = "evidently:metric_result:ColumnQuantileMetricResult" field_tags = { "current": {IncludeTags.Current}, "reference": {IncludeTags.Reference}, @@ -48,6 +52,9 @@ class Config: class ColumnQuantileMetric(Metric[ColumnQuantileMetricResult]): + class Config: + type_alias = "evidently:metric:ColumnQuantileMetric" + """Calculates the specified quantile for the column""" column_name: ColumnName diff --git a/src/evidently/metrics/data_quality/column_value_list_metric.py b/src/evidently/metrics/data_quality/column_value_list_metric.py index 0c99d3877b..840934b4af 100644 --- a/src/evidently/metrics/data_quality/column_value_list_metric.py +++ b/src/evidently/metrics/data_quality/column_value_list_metric.py @@ -25,6 +25,7 @@ class ValueListStat(MetricResult): class Config: + type_alias = "evidently:metric_result:ValueListStat" field_tags = { "values_in_list_dist": {IncludeTags.Extra}, "values_not_in_list_dist": {IncludeTags.Extra}, @@ -64,6 +65,7 @@ def values_not_in_list(self) -> List[Tuple[Any, int]]: class ColumnValueListMetricResult(MetricResult): class Config: + type_alias = "evidently:metric_result:ColumnValueListMetricResult" field_tags = { "current": {IncludeTags.Current}, "reference": {IncludeTags.Reference}, @@ -78,6 +80,9 @@ class Config: class ColumnValueListMetric(Metric[ColumnValueListMetricResult]): + class Config: + type_alias = "evidently:metric:ColumnValueListMetric" + """Calculates count and shares of values in the predefined values list""" column_name: str diff --git a/src/evidently/metrics/data_quality/column_value_range_metric.py b/src/evidently/metrics/data_quality/column_value_range_metric.py index 7f09c10c89..6aa1817b78 100644 --- a/src/evidently/metrics/data_quality/column_value_range_metric.py +++ b/src/evidently/metrics/data_quality/column_value_range_metric.py @@ -32,6 +32,7 @@ class ValuesInRangeStat(MetricResult): class Config: + type_alias = "evidently:metric_result:ValuesInRangeStat" field_tags = {"number_of_values": {IncludeTags.Extra}} number_in_range: int @@ -45,6 +46,7 @@ class Config: 
class ColumnValueRangeMetricResult(MetricResult): class Config: + type_alias = "evidently:metric_result:ColumnValueRangeMetricResult" field_tags = { "current": {IncludeTags.Current}, "reference": {IncludeTags.Reference}, @@ -61,6 +63,9 @@ class Config: class ColumnValueRangeMetric(Metric[ColumnValueRangeMetricResult]): + class Config: + type_alias = "evidently:metric:ColumnValueRangeMetric" + """Calculates count and shares of values in the predefined values range""" column_name: ColumnName diff --git a/src/evidently/metrics/data_quality/conflict_prediction_metric.py b/src/evidently/metrics/data_quality/conflict_prediction_metric.py index 987a437ed9..ff096bc614 100644 --- a/src/evidently/metrics/data_quality/conflict_prediction_metric.py +++ b/src/evidently/metrics/data_quality/conflict_prediction_metric.py @@ -15,12 +15,16 @@ class ConflictPredictionData(MetricResult): + class Config: + type_alias = "evidently:metric_result:ConflictPredictionData" + number_not_stable_prediction: int share_not_stable_prediction: float class ConflictPredictionMetricResults(MetricResult): class Config: + type_alias = "evidently:metric_result:ConflictPredictionMetricResults" field_tags = {"current": {IncludeTags.Current}, "reference": {IncludeTags.Reference}} current: ConflictPredictionData @@ -28,6 +32,9 @@ class Config: class ConflictPredictionMetric(Metric[ConflictPredictionMetricResults]): + class Config: + type_alias = "evidently:metric:ConflictPredictionMetric" + def calculate(self, data: InputData) -> ConflictPredictionMetricResults: dataset_columns = process_columns(data.current_data, data.column_mapping) prediction_name = dataset_columns.utility_columns.prediction diff --git a/src/evidently/metrics/data_quality/conflict_target_metric.py b/src/evidently/metrics/data_quality/conflict_target_metric.py index 1f57452f9c..cf30121e98 100644 --- a/src/evidently/metrics/data_quality/conflict_target_metric.py +++ b/src/evidently/metrics/data_quality/conflict_target_metric.py @@ -16,6 +16,7 @@ class ConflictTargetMetricResults(MetricResult): class Config: + type_alias = "evidently:metric_result:ConflictTargetMetricResults" field_tags = { "number_not_stable_target": {IncludeTags.Current}, "share_not_stable_target": {IncludeTags.Current}, @@ -30,6 +31,9 @@ class Config: class ConflictTargetMetric(Metric[ConflictTargetMetricResults]): + class Config: + type_alias = "evidently:metric:ConflictTargetMetric" + def calculate(self, data: InputData) -> ConflictTargetMetricResults: dataset_columns = process_columns(data.current_data, data.column_mapping) target_name = dataset_columns.utility_columns.target diff --git a/src/evidently/metrics/data_quality/dataset_correlations_metric.py b/src/evidently/metrics/data_quality/dataset_correlations_metric.py index 6abb76306b..ffaa3f5038 100644 --- a/src/evidently/metrics/data_quality/dataset_correlations_metric.py +++ b/src/evidently/metrics/data_quality/dataset_correlations_metric.py @@ -34,6 +34,7 @@ class CorrelationStats(MetricResult): class Config: + type_alias = "evidently:metric_result:CorrelationStats" field_tags = { "abs_max_target_features_correlation": {IncludeTags.Extra}, "abs_max_prediction_features_correlation": {IncludeTags.Extra}, @@ -50,6 +51,7 @@ class Config: class DatasetCorrelation(MetricResult): class Config: + type_alias = "evidently:metric_result:DatasetCorrelation" dict_exclude_fields = {"correlation", "correlations_calculate"} pd_include = False pd_exclude_fields = {"correlation", "correlations_calculate"} @@ -63,6 +65,7 @@ class Config: class 
DatasetCorrelationsMetricResult(MetricResult): class Config: + type_alias = "evidently:metric_result:DatasetCorrelationsMetricResult" dict_exclude_fields = {"target_correlation"} pd_exclude_fields = {"target_correlation"} field_tags = { @@ -77,6 +80,9 @@ class Config: class DatasetCorrelationsMetric(Metric[DatasetCorrelationsMetricResult]): + class Config: + type_alias = "evidently:metric:DatasetCorrelationsMetric" + """Calculate different correlations with target, predictions and features""" _text_features_gen: Optional[ diff --git a/src/evidently/metrics/data_quality/stability_metric.py b/src/evidently/metrics/data_quality/stability_metric.py index 7d1042dccd..2ee4a8b4df 100644 --- a/src/evidently/metrics/data_quality/stability_metric.py +++ b/src/evidently/metrics/data_quality/stability_metric.py @@ -13,11 +13,17 @@ class DataQualityStabilityMetricResult(MetricResult): + class Config: + type_alias = "evidently:metric_result:DataQualityStabilityMetricResult" + number_not_stable_target: Optional[int] = None number_not_stable_prediction: Optional[int] = None class DataQualityStabilityMetric(Metric[DataQualityStabilityMetricResult]): + class Config: + type_alias = "evidently:metric:DataQualityStabilityMetric" + """Calculates stability by target and prediction""" def calculate(self, data: InputData) -> DataQualityStabilityMetricResult: diff --git a/src/evidently/metrics/data_quality/text_descriptors_correlation_metric.py b/src/evidently/metrics/data_quality/text_descriptors_correlation_metric.py index c7863345cf..ce8b7bf785 100644 --- a/src/evidently/metrics/data_quality/text_descriptors_correlation_metric.py +++ b/src/evidently/metrics/data_quality/text_descriptors_correlation_metric.py @@ -30,6 +30,7 @@ class TextDescriptorsCorrelationMetricResult(MetricResult): class Config: + type_alias = "evidently:metric_result:TextDescriptorsCorrelationMetricResult" pd_include = False field_tags = { "current": {IncludeTags.Current}, @@ -43,6 +44,9 @@ class Config: class TextDescriptorsCorrelationMetric(Metric[TextDescriptorsCorrelationMetricResult]): + class Config: + type_alias = "evidently:metric:TextDescriptorsCorrelationMetric" + """Calculates correlations between each auto-generated text feature for column_name and other dataset columns""" column_name: str diff --git a/src/evidently/metrics/data_quality/text_descriptors_distribution.py b/src/evidently/metrics/data_quality/text_descriptors_distribution.py index d569f0c52d..891f6a7f1c 100644 --- a/src/evidently/metrics/data_quality/text_descriptors_distribution.py +++ b/src/evidently/metrics/data_quality/text_descriptors_distribution.py @@ -30,6 +30,7 @@ class TextDescriptorsDistributionResult(MetricResult): class Config: + type_alias = "evidently:metric_result:TextDescriptorsDistributionResult" pd_include = False field_tags = { "current": {IncludeTags.Current}, @@ -43,6 +44,9 @@ class Config: class TextDescriptorsDistribution(Metric[TextDescriptorsDistributionResult]): + class Config: + type_alias = "evidently:metric:TextDescriptorsDistribution" + """Calculates distribution for the column""" column_name: str diff --git a/src/evidently/metrics/recsys/base_top_k.py b/src/evidently/metrics/recsys/base_top_k.py index ca66d31719..24c4f6f666 100644 --- a/src/evidently/metrics/recsys/base_top_k.py +++ b/src/evidently/metrics/recsys/base_top_k.py @@ -22,6 +22,7 @@ class TopKMetricResult(MetricResult): class Config: + type_alias = "evidently:metric_result:TopKMetricResult" field_tags = { "current": {IncludeTags.Current}, "reference": 
{IncludeTags.Reference}, diff --git a/src/evidently/metrics/recsys/diversity.py b/src/evidently/metrics/recsys/diversity.py index 0dd886b8a8..342ecde007 100644 --- a/src/evidently/metrics/recsys/diversity.py +++ b/src/evidently/metrics/recsys/diversity.py @@ -29,6 +29,7 @@ class DiversityMetricResult(MetricResult): class Config: + type_alias = "evidently:metric_result:DiversityMetricResult" field_tags = { "k": {IncludeTags.Parameter}, "current_value": {IncludeTags.Current}, @@ -45,6 +46,9 @@ class Config: class DiversityMetric(Metric[DiversityMetricResult]): + class Config: + type_alias = "evidently:metric:DiversityMetric" + """Intra list diversity""" _pairwise_distance: PairwiseDistance diff --git a/src/evidently/metrics/recsys/f_beta_top_k.py b/src/evidently/metrics/recsys/f_beta_top_k.py index 28c99f29ea..b6a32c4d51 100644 --- a/src/evidently/metrics/recsys/f_beta_top_k.py +++ b/src/evidently/metrics/recsys/f_beta_top_k.py @@ -13,6 +13,9 @@ class FBetaTopKMetric(TopKMetric): + class Config: + type_alias = "evidently:metric:FBetaTopKMetric" + k: int beta: Optional[float] min_rel_score: Optional[int] diff --git a/src/evidently/metrics/recsys/hit_rate_k.py b/src/evidently/metrics/recsys/hit_rate_k.py index e5a7292c61..1eb8065f57 100644 --- a/src/evidently/metrics/recsys/hit_rate_k.py +++ b/src/evidently/metrics/recsys/hit_rate_k.py @@ -14,6 +14,7 @@ class HitRateKMetricResult(MetricResult): class Config: + type_alias = "evidently:metric_result:HitRateKMetricResult" field_tags = { "current": {IncludeTags.Current}, "reference": {IncludeTags.Reference}, @@ -26,6 +27,9 @@ class Config: class HitRateKMetric(Metric[HitRateKMetricResult]): + class Config: + type_alias = "evidently:metric:HitRateKMetric" + k: int min_rel_score: Optional[int] no_feedback_users: bool diff --git a/src/evidently/metrics/recsys/item_bias.py b/src/evidently/metrics/recsys/item_bias.py index cd3ba68c36..67ccecc38d 100644 --- a/src/evidently/metrics/recsys/item_bias.py +++ b/src/evidently/metrics/recsys/item_bias.py @@ -22,6 +22,7 @@ class ItemBiasMetricResult(MetricResult): class Config: + type_alias = "evidently:metric_result:ItemBiasMetricResult" field_tags = { "k": {IncludeTags.Parameter}, "column_name": {IncludeTags.Parameter}, @@ -40,6 +41,9 @@ class Config: class ItemBiasMetric(Metric[ItemBiasMetricResult]): + class Config: + type_alias = "evidently:metric:ItemBiasMetric" + k: int column_name: str diff --git a/src/evidently/metrics/recsys/map_k.py b/src/evidently/metrics/recsys/map_k.py index 54d6beb713..9ffc38d9e1 100644 --- a/src/evidently/metrics/recsys/map_k.py +++ b/src/evidently/metrics/recsys/map_k.py @@ -4,6 +4,9 @@ class MAPKMetric(TopKMetric): + class Config: + type_alias = "evidently:metric:MAPKMetric" + def key(self): return "map" diff --git a/src/evidently/metrics/recsys/mar_k.py b/src/evidently/metrics/recsys/mar_k.py index 724eaaead8..425613a2b3 100644 --- a/src/evidently/metrics/recsys/mar_k.py +++ b/src/evidently/metrics/recsys/mar_k.py @@ -4,6 +4,9 @@ class MARKMetric(TopKMetric): + class Config: + type_alias = "evidently:metric:MARKMetric" + def key(self): return "mar" diff --git a/src/evidently/metrics/recsys/mrr.py b/src/evidently/metrics/recsys/mrr.py index ac39d39b05..53cd05f9d2 100644 --- a/src/evidently/metrics/recsys/mrr.py +++ b/src/evidently/metrics/recsys/mrr.py @@ -14,6 +14,7 @@ class MRRKMetricResult(MetricResult): class Config: + type_alias = "evidently:metric_result:MRRKMetricResult" field_tags = { "k": {IncludeTags.Parameter}, "current": {IncludeTags.Current}, @@ -26,6 +27,9 @@ 
class Config: class MRRKMetric(Metric[MRRKMetricResult]): + class Config: + type_alias = "evidently:metric:MRRKMetric" + k: int min_rel_score: Optional[int] no_feedback_users: bool diff --git a/src/evidently/metrics/recsys/ndcg_k.py b/src/evidently/metrics/recsys/ndcg_k.py index efdd5a8895..e4d0cc940e 100644 --- a/src/evidently/metrics/recsys/ndcg_k.py +++ b/src/evidently/metrics/recsys/ndcg_k.py @@ -13,6 +13,9 @@ class NDCGKMetric(Metric[TopKMetricResult]): + class Config: + type_alias = "evidently:metric:NDCGKMetric" + k: int min_rel_score: Optional[int] no_feedback_users: bool diff --git a/src/evidently/metrics/recsys/novelty.py b/src/evidently/metrics/recsys/novelty.py index 228d4bc66d..666678b799 100644 --- a/src/evidently/metrics/recsys/novelty.py +++ b/src/evidently/metrics/recsys/novelty.py @@ -27,6 +27,7 @@ class NoveltyMetricResult(MetricResult): class Config: + type_alias = "evidently:metric_result:NoveltyMetricResult" field_tags = { "k": {IncludeTags.Parameter}, "current_value": {IncludeTags.Current}, @@ -43,6 +44,9 @@ class Config: class NoveltyMetric(Metric[NoveltyMetricResult]): + class Config: + type_alias = "evidently:metric:NoveltyMetric" + """Mean Inverse User Frequency""" k: int diff --git a/src/evidently/metrics/recsys/pairwise_distance.py b/src/evidently/metrics/recsys/pairwise_distance.py index 90d48151fa..6b5d354608 100644 --- a/src/evidently/metrics/recsys/pairwise_distance.py +++ b/src/evidently/metrics/recsys/pairwise_distance.py @@ -20,6 +20,7 @@ class PairwiseDistanceResult(MetricResult): class Config: + type_alias = "evidently:metric_result:PairwiseDistanceResult" pd_include = False field_tags = {"dist_matrix": {IncludeTags.Extra}} @@ -28,6 +29,9 @@ class Config: class PairwiseDistance(Metric[PairwiseDistanceResult]): + class Config: + type_alias = "evidently:metric:PairwiseDistance" + k: int item_features: List[str] diff --git a/src/evidently/metrics/recsys/personalisation.py b/src/evidently/metrics/recsys/personalisation.py index 2b13946f24..3c6b2dc56e 100644 --- a/src/evidently/metrics/recsys/personalisation.py +++ b/src/evidently/metrics/recsys/personalisation.py @@ -26,6 +26,7 @@ class PersonalizationMetricResult(MetricResult): class Config: + type_alias = "evidently:metric_result:PersonalizationMetricResult" pd_include = False field_tags = { @@ -44,6 +45,9 @@ class Config: class PersonalizationMetric(Metric[PersonalizationMetricResult]): + class Config: + type_alias = "evidently:metric:PersonalizationMetric" + """Mean Inter List""" k: int diff --git a/src/evidently/metrics/recsys/popularity_bias.py b/src/evidently/metrics/recsys/popularity_bias.py index 1cc791f796..07698438cb 100644 --- a/src/evidently/metrics/recsys/popularity_bias.py +++ b/src/evidently/metrics/recsys/popularity_bias.py @@ -27,6 +27,7 @@ class PopularityBiasResult(MetricResult): class Config: + type_alias = "evidently:metric_result:PopularityBiasResult" field_tags = { "k": {IncludeTags.Parameter}, "normalize_arp": {IncludeTags.Parameter}, @@ -53,6 +54,9 @@ class Config: class PopularityBias(Metric[PopularityBiasResult]): + class Config: + type_alias = "evidently:metric:PopularityBias" + """ Average Recommendation Popularity Aggregate Diversity diff --git a/src/evidently/metrics/recsys/precision_recall_k.py b/src/evidently/metrics/recsys/precision_recall_k.py index fb872bb1cb..e40802ee4d 100644 --- a/src/evidently/metrics/recsys/precision_recall_k.py +++ b/src/evidently/metrics/recsys/precision_recall_k.py @@ -18,6 +18,7 @@ class PrecisionRecallCalculationResult(MetricResult): class 
Config: + type_alias = "evidently:metric_result:PrecisionRecallCalculationResult" pd_include = False field_tags = {"current": {IncludeTags.Current}, "reference": {IncludeTags.Reference}} @@ -26,6 +27,9 @@ class Config: class PrecisionRecallCalculation(Metric[PrecisionRecallCalculationResult]): + class Config: + type_alias = "evidently:metric:PrecisionRecallCalculation" + max_k: int min_rel_score: Optional[int] diff --git a/src/evidently/metrics/recsys/precision_top_k.py b/src/evidently/metrics/recsys/precision_top_k.py index 0769c853c2..701db255b5 100644 --- a/src/evidently/metrics/recsys/precision_top_k.py +++ b/src/evidently/metrics/recsys/precision_top_k.py @@ -4,6 +4,9 @@ class PrecisionTopKMetric(TopKMetric): + class Config: + type_alias = "evidently:metric:PrecisionTopKMetric" + def key(self): return "precision" diff --git a/src/evidently/metrics/recsys/rec_examples.py b/src/evidently/metrics/recsys/rec_examples.py index d65879fa86..96c9739d16 100644 --- a/src/evidently/metrics/recsys/rec_examples.py +++ b/src/evidently/metrics/recsys/rec_examples.py @@ -27,6 +27,7 @@ class RecCasesTableResults(MetricResult): class Config: + type_alias = "evidently:metric_result:RecCasesTableResults" pd_include = False field_tags = { "user_ids": {IncludeTags.Extra}, @@ -44,6 +45,9 @@ class Config: class RecCasesTable(Metric[RecCasesTableResults]): + class Config: + type_alias = "evidently:metric:RecCasesTable" + user_ids: Optional[List[Union[int, str]]] display_features: Optional[List[str]] item_num: Optional[int] diff --git a/src/evidently/metrics/recsys/recall_top_k.py b/src/evidently/metrics/recsys/recall_top_k.py index b3d6e45cee..eb8fb685ca 100644 --- a/src/evidently/metrics/recsys/recall_top_k.py +++ b/src/evidently/metrics/recsys/recall_top_k.py @@ -4,6 +4,9 @@ class RecallTopKMetric(TopKMetric): + class Config: + type_alias = "evidently:metric:RecallTopKMetric" + def key(self): return "recall" diff --git a/src/evidently/metrics/recsys/scores_distribution.py b/src/evidently/metrics/recsys/scores_distribution.py index f1a63b23ac..90cdc2e267 100644 --- a/src/evidently/metrics/recsys/scores_distribution.py +++ b/src/evidently/metrics/recsys/scores_distribution.py @@ -27,6 +27,7 @@ class ScoreDistributionResult(MetricResult): class Config: + type_alias = "evidently:metric_result:ScoreDistributionResult" field_tags = { "k": {IncludeTags.Parameter}, "current_top_k_distr": {IncludeTags.Current}, @@ -47,6 +48,9 @@ class Config: class ScoreDistribution(Metric[ScoreDistributionResult]): + class Config: + type_alias = "evidently:metric:ScoreDistribution" + k: int def __init__(self, k: int, options: AnyOptions = None) -> None: diff --git a/src/evidently/metrics/recsys/serendipity.py b/src/evidently/metrics/recsys/serendipity.py index 07bed495cd..f655849904 100644 --- a/src/evidently/metrics/recsys/serendipity.py +++ b/src/evidently/metrics/recsys/serendipity.py @@ -29,6 +29,7 @@ class SerendipityMetricResult(MetricResult): class Config: + type_alias = "evidently:metric_result:SerendipityMetricResult" field_tags = { "k": {IncludeTags.Parameter}, "current_value": {IncludeTags.Current}, @@ -45,6 +46,9 @@ class Config: class SerendipityMetric(Metric[SerendipityMetricResult]): + class Config: + type_alias = "evidently:metric:SerendipityMetric" + """unusualness * relevance""" _pairwise_distance: PairwiseDistance diff --git a/src/evidently/metrics/recsys/train_stats.py b/src/evidently/metrics/recsys/train_stats.py index 6242d1aa00..7dcb35b01d 100644 --- a/src/evidently/metrics/recsys/train_stats.py +++ 
b/src/evidently/metrics/recsys/train_stats.py @@ -15,6 +15,7 @@ class TrainStatsResult(MetricResult): class Config: + type_alias = "evidently:metric_result:TrainStatsResult" field_tags = { "current": {IncludeTags.Current}, "current_n_users": {IncludeTags.Current}, @@ -29,6 +30,9 @@ class Config: class TrainStats(Metric[TrainStatsResult]): + class Config: + type_alias = "evidently:metric:TrainStats" + """Calculates the number of times each item has been rated in the training set""" def __init__(self, options: AnyOptions = None) -> None: diff --git a/src/evidently/metrics/recsys/user_bias.py b/src/evidently/metrics/recsys/user_bias.py index f95a0a1599..9e66477e27 100644 --- a/src/evidently/metrics/recsys/user_bias.py +++ b/src/evidently/metrics/recsys/user_bias.py @@ -20,6 +20,7 @@ class UserBiasMetricResult(MetricResult): class Config: + type_alias = "evidently:metric_result:UserBiasMetricResult" field_tags = { "column_name": {IncludeTags.Parameter}, "current_train_distr": {IncludeTags.Current}, @@ -36,6 +37,9 @@ class Config: class UserBiasMetric(Metric[UserBiasMetricResult]): + class Config: + type_alias = "evidently:metric:UserBiasMetric" + column_name: str def __init__(self, column_name: str, options: AnyOptions = None) -> None: diff --git a/src/evidently/metrics/regression_performance/abs_perc_error_in_time.py b/src/evidently/metrics/regression_performance/abs_perc_error_in_time.py index cc22df57ac..0a3922ec49 100644 --- a/src/evidently/metrics/regression_performance/abs_perc_error_in_time.py +++ b/src/evidently/metrics/regression_performance/abs_perc_error_in_time.py @@ -22,6 +22,9 @@ class RegressionAbsPercentageErrorPlot(UsesRawDataMixin, Metric[ColumnScatterResult]): + class Config: + type_alias = "evidently:metric:RegressionAbsPercentageErrorPlot" + def __init__(self, options: AnyOptions = None): super().__init__(options=options) diff --git a/src/evidently/metrics/regression_performance/error_bias_table.py b/src/evidently/metrics/regression_performance/error_bias_table.py index 1f1474fc31..d8875a026d 100644 --- a/src/evidently/metrics/regression_performance/error_bias_table.py +++ b/src/evidently/metrics/regression_performance/error_bias_table.py @@ -36,6 +36,7 @@ class RegressionErrorBiasTableResults(MetricResult): class Config: + type_alias = "evidently:metric_result:RegressionErrorBiasTableResults" dict_exclude_fields = {"current_plot_data", "reference_plot_data"} pd_exclude_fields = { "current_plot_data", @@ -70,6 +71,9 @@ class Config: class RegressionErrorBiasTable(UsesRawDataMixin, Metric[RegressionErrorBiasTableResults]): # by default, we get 5% values for the error bias calculations + class Config: + type_alias = "evidently:metric:RegressionErrorBiasTable" + TOP_ERROR_DEFAULT: ClassVar[float] = 0.05 TOP_ERROR_MIN: ClassVar[float] = 0 TOP_ERROR_MAX: ClassVar[float] = 0.5 diff --git a/src/evidently/metrics/regression_performance/error_distribution.py b/src/evidently/metrics/regression_performance/error_distribution.py index aa4a17b559..1c3caa0332 100644 --- a/src/evidently/metrics/regression_performance/error_distribution.py +++ b/src/evidently/metrics/regression_performance/error_distribution.py @@ -19,6 +19,7 @@ class RegressionErrorDistributionResults(MetricResult): class Config: + type_alias = "evidently:metric_result:RegressionErrorDistributionResults" dict_exclude_fields = {"current_bins", "reference_bins"} pd_exclude_fields = {"current_bins", "reference_bins"} @@ -29,6 +30,9 @@ class Config: class 
RegressionErrorDistribution(Metric[RegressionErrorDistributionResults]): + class Config: + type_alias = "evidently:metric:RegressionErrorDistribution" + def calculate(self, data: InputData) -> RegressionErrorDistributionResults: dataset_columns = process_columns(data.current_data, data.column_mapping) target_name = dataset_columns.utility_columns.target diff --git a/src/evidently/metrics/regression_performance/error_in_time.py b/src/evidently/metrics/regression_performance/error_in_time.py index 0ae6681eea..dbea78ce57 100644 --- a/src/evidently/metrics/regression_performance/error_in_time.py +++ b/src/evidently/metrics/regression_performance/error_in_time.py @@ -23,6 +23,9 @@ class RegressionErrorPlot(UsesRawDataMixin, Metric[ColumnScatterResult]): + class Config: + type_alias = "evidently:metric:RegressionErrorPlot" + def __init__(self, options: AnyOptions = None): super().__init__(options=options) diff --git a/src/evidently/metrics/regression_performance/error_normality.py b/src/evidently/metrics/regression_performance/error_normality.py index 6468264964..cd2a713624 100644 --- a/src/evidently/metrics/regression_performance/error_normality.py +++ b/src/evidently/metrics/regression_performance/error_normality.py @@ -25,6 +25,7 @@ class RegressionErrorNormalityResults(MetricResult): class Config: + type_alias = "evidently:metric_result:RegressionErrorNormalityResults" dict_exclude_fields = {"current_plot", "current_theoretical", "reference_plot", "reference_theoretical"} pd_exclude_fields = {"current_plot", "current_theoretical", "reference_plot", "reference_theoretical"} @@ -42,6 +43,9 @@ class Config: class RegressionErrorNormality(UsesRawDataMixin, Metric[RegressionErrorNormalityResults]): + class Config: + type_alias = "evidently:metric:RegressionErrorNormality" + def __init__(self, options: AnyOptions = None): super().__init__(options=options) diff --git a/src/evidently/metrics/regression_performance/objects.py b/src/evidently/metrics/regression_performance/objects.py index b0fc2d431a..0764dd8559 100644 --- a/src/evidently/metrics/regression_performance/objects.py +++ b/src/evidently/metrics/regression_performance/objects.py @@ -12,6 +12,9 @@ class PredActualScatter(MetricResult): + class Config: + type_alias = "evidently:metric_result:PredActualScatter" + predicted: ScatterData actual: ScatterData @@ -31,6 +34,9 @@ def scatter_as_dict(scatter: Optional[PredActualScatter]) -> Optional[Dict[str, class RegressionScatter(MetricResult): + class Config: + type_alias = "evidently:metric_result:RegressionScatter" + underestimation: PredActualScatter majority: PredActualScatter overestimation: PredActualScatter @@ -38,6 +44,7 @@ class RegressionScatter(MetricResult): class IntervalSeries(MetricResult): class Config: + type_alias = "evidently:metric_result:IntervalSeries" underscore_attrs_are_private = True bins: List[float] @@ -69,6 +76,7 @@ def __mul__(self, other: float): class RegressionMetricScatter(MetricResult): class Config: + type_alias = "evidently:metric_result:RegressionMetricScatter" smart_union = True field_tags = {"current": {IncludeTags.Current}, "reference": {IncludeTags.Reference}} @@ -82,6 +90,9 @@ def __mul__(self, other: float): class RegressionMetricsScatter(MetricResult): + class Config: + type_alias = "evidently:metric_result:RegressionMetricsScatter" + r2_score: RegressionMetricScatter rmse: RegressionMetricScatter mean_abs_error: RegressionMetricScatter diff --git a/src/evidently/metrics/regression_performance/predicted_and_actual_in_time.py 
b/src/evidently/metrics/regression_performance/predicted_and_actual_in_time.py index f60a132423..d3772f3fda 100644 --- a/src/evidently/metrics/regression_performance/predicted_and_actual_in_time.py +++ b/src/evidently/metrics/regression_performance/predicted_and_actual_in_time.py @@ -22,6 +22,9 @@ class RegressionPredictedVsActualPlot(UsesRawDataMixin, Metric[ColumnScatterResult]): + class Config: + type_alias = "evidently:metric:RegressionPredictedVsActualPlot" + def __init__(self, options: AnyOptions = None): super().__init__(options=options) diff --git a/src/evidently/metrics/regression_performance/predicted_vs_actual.py b/src/evidently/metrics/regression_performance/predicted_vs_actual.py index 6c8dbbe882..c11b959e63 100644 --- a/src/evidently/metrics/regression_performance/predicted_vs_actual.py +++ b/src/evidently/metrics/regression_performance/predicted_vs_actual.py @@ -29,6 +29,7 @@ class AggPredActualScatter(MetricResult): class Config: + type_alias = "evidently:metric_result:AggPredActualScatter" dict_include = False tags = {IncludeTags.Render} @@ -37,6 +38,7 @@ class Config: class RegressionPredictedVsActualScatterResults(MetricResult): class Config: + type_alias = "evidently:metric_result:RegressionPredictedVsActualScatterResults" dict_include = False tags = {IncludeTags.Render} field_tags = {"current": {IncludeTags.Current}, "reference": {IncludeTags.Reference}} @@ -50,6 +52,9 @@ class Config: class RegressionPredictedVsActualScatter(UsesRawDataMixin, Metric[RegressionPredictedVsActualScatterResults]): + class Config: + type_alias = "evidently:metric:RegressionPredictedVsActualScatter" + def __init__(self, options: AnyOptions = None): super().__init__(options=options) diff --git a/src/evidently/metrics/regression_performance/regression_dummy_metric.py b/src/evidently/metrics/regression_performance/regression_dummy_metric.py index 3dcde1bb09..e7355e37e9 100644 --- a/src/evidently/metrics/regression_performance/regression_dummy_metric.py +++ b/src/evidently/metrics/regression_performance/regression_dummy_metric.py @@ -21,6 +21,9 @@ class RegressionDummyMetricResults(MetricResult): + class Config: + type_alias = "evidently:metric_result:RegressionDummyMetricResults" + rmse_default: float mean_abs_error_default: float mean_abs_perc_error_default: float @@ -36,6 +39,9 @@ class RegressionDummyMetricResults(MetricResult): class RegressionDummyMetric(Metric[RegressionDummyMetricResults]): + class Config: + type_alias = "evidently:metric:RegressionDummyMetric" + _quality_metric: RegressionQualityMetric def __init__(self, options: AnyOptions = None): diff --git a/src/evidently/metrics/regression_performance/regression_performance_metrics.py b/src/evidently/metrics/regression_performance/regression_performance_metrics.py index 171a90fac7..0a33bcdf3e 100644 --- a/src/evidently/metrics/regression_performance/regression_performance_metrics.py +++ b/src/evidently/metrics/regression_performance/regression_performance_metrics.py @@ -32,6 +32,7 @@ class RegressionMetrics(MetricResult): class Config: + type_alias = "evidently:metric_result:RegressionMetrics" pd_exclude_fields = {"underperformance"} field_tags = {"underperformance": {IncludeTags.Extra}} @@ -46,6 +47,7 @@ class Config: class RegressionPerformanceMetricsResults(MetricResult): class Config: + type_alias = "evidently:metric_result:RegressionPerformanceMetricsResults" dict_exclude_fields = {"hist_for_plot", "vals_for_plots", "me_hist_for_plot"} pd_exclude_fields = {"hist_for_plot", "vals_for_plots", "me_hist_for_plot", "error_bias", 
"error_normality"} field_tags = { @@ -86,6 +88,9 @@ class Config: class RegressionPerformanceMetrics(Metric[RegressionPerformanceMetricsResults]): + class Config: + type_alias = "evidently:metric:RegressionPerformanceMetrics" + def get_parameters(self) -> tuple: return () diff --git a/src/evidently/metrics/regression_performance/regression_quality.py b/src/evidently/metrics/regression_performance/regression_quality.py index 23f3fa679a..992d80973c 100644 --- a/src/evidently/metrics/regression_performance/regression_quality.py +++ b/src/evidently/metrics/regression_performance/regression_quality.py @@ -34,6 +34,7 @@ class MoreRegressionMetrics(RegressionMetrics): class Config: + type_alias = "evidently:metric_result:MoreRegressionMetrics" field_tags: Dict[str, Set[IncludeTags]] = {"underperformance": set()} error_std: float @@ -43,6 +44,7 @@ class Config: class RegressionQualityMetricResults(MetricResult): class Config: + type_alias = "evidently:metric_result:RegressionQualityMetricResults" dict_exclude_fields = {"hist_for_plot", "vals_for_plots", "me_hist_for_plot"} pd_exclude_fields = {"hist_for_plot", "vals_for_plots", "me_hist_for_plot", "error_normality", "error_bias"} field_tags = { @@ -74,6 +76,9 @@ class Config: class RegressionQualityMetric(Metric[RegressionQualityMetricResults]): + class Config: + type_alias = "evidently:metric:RegressionQualityMetric" + def calculate(self, data: InputData) -> RegressionQualityMetricResults: dataset_columns = process_columns(data.current_data, data.column_mapping) target_name = dataset_columns.utility_columns.target diff --git a/src/evidently/metrics/regression_performance/top_error.py b/src/evidently/metrics/regression_performance/top_error.py index e0cc1f45d6..243ef1c566 100644 --- a/src/evidently/metrics/regression_performance/top_error.py +++ b/src/evidently/metrics/regression_performance/top_error.py @@ -30,17 +30,24 @@ class TopData(MetricResult): + class Config: + type_alias = "evidently:metric_result:TopData" + mean_err_per_group: Dict[str, Dict[str, float]] scatter: RegressionScatter class AggTopData(MetricResult): + class Config: + type_alias = "evidently:metric_result:AggTopData" + mean_err_per_group: Dict[str, Dict[str, float]] contour: Dict[str, ContourData] class RegressionTopErrorMetricResults(MetricResult): class Config: + type_alias = "evidently:metric_result:RegressionTopErrorMetricResults" dict_include = False pd_include = False tags = {IncludeTags.Render} @@ -55,6 +62,9 @@ class Config: class RegressionTopErrorMetric(UsesRawDataMixin, Metric[RegressionTopErrorMetricResults]): + class Config: + type_alias = "evidently:metric:RegressionTopErrorMetric" + def calculate(self, data: InputData) -> RegressionTopErrorMetricResults: dataset_columns = process_columns(data.current_data, data.column_mapping) target_name = dataset_columns.utility_columns.target diff --git a/src/evidently/pydantic_utils.py b/src/evidently/pydantic_utils.py index e746a613e5..983330107d 100644 --- a/src/evidently/pydantic_utils.py +++ b/src/evidently/pydantic_utils.py @@ -1,9 +1,11 @@ import dataclasses import hashlib +import inspect import itertools import json import os import warnings +from abc import ABC from enum import Enum from functools import lru_cache from typing import TYPE_CHECKING @@ -126,11 +128,27 @@ def all_subclasses(cls: Type[T]) -> Set[Type[T]]: def register_type_alias(base_class: Type["PolymorphicModel"], classpath: str, alias: str): - key = (base_class, alias) + while True: + key = (base_class, alias) - if key in TYPE_ALIASES and 
TYPE_ALIASES[key] != classpath and "PYTEST_CURRENT_TEST" not in os.environ: - warnings.warn(f"Duplicate key {key} in alias map") - TYPE_ALIASES[key] = classpath + if key in TYPE_ALIASES and TYPE_ALIASES[key] != classpath and "PYTEST_CURRENT_TEST" not in os.environ: + warnings.warn(f"Duplicate key {key} in alias map") + TYPE_ALIASES[key] = classpath + + if base_class is PolymorphicModel: + break + base_class = get_base_class(base_class, ensure_parent=True) # type: ignore[arg-type] + if not base_class.__config__.transitive_aliases: + break + + +def autoregister(cls: Type["PolymorphicModel"]): + """Decorator that automatically registers a subclass. + Can only be used on subclasses that are defined in the same file as the base class + (or if the import of this subclass is guaranteed when the base class is imported). + """ + register_type_alias(get_base_class(cls), get_classpath(cls), cls.__get_type__()) # type: ignore[arg-type] + return cls def register_loaded_alias(base_class: Type["PolymorphicModel"], cls: Type["PolymorphicModel"], alias: str): @@ -144,8 +162,10 @@ def register_loaded_alias(base_class: Type["PolymorphicModel"], cls: Type["Polym @lru_cache() -def get_base_class(cls: Type["PolymorphicModel"]) -> Type["PolymorphicModel"]: +def get_base_class(cls: Type["PolymorphicModel"], ensure_parent: bool = False) -> Type["PolymorphicModel"]: for cls_ in cls.mro(): + if ensure_parent and cls_ is cls: + continue if not issubclass(cls_, PolymorphicModel): continue config = cls_.__dict__.get("Config") @@ -164,9 +184,20 @@ def get_classpath(cls: Type) -> str: FingerprintPart = Union[None, int, str, float, bool, bytes, Tuple["FingerprintPart", ...]] +def is_not_abstract(cls): + return not (inspect.isabstract(cls) or ABC in cls.__bases__) + + class PolymorphicModel(BaseModel): class Config(BaseModel.Config): + # value to put into the "type" field type_alias: ClassVar[Optional[str]] = None + # flag to mark the alias as required. If not required, the classpath is used by default + alias_required: ClassVar[bool] = True + # flag to register aliases for grand-parent base types, + # e.g. PolymorphicModel -> A -> B -> C, where A and B are base types. Only if A has this flag can C be parsed as both A and B. + transitive_aliases: ClassVar[bool] = False + # flag to mark the type as a base type. This means it will be possible to parse all subclasses of it as this type is_base_type: ClassVar[bool] = False __config__: ClassVar[Config] @@ -176,6 +207,8 @@ def __get_type__(cls): config = cls.__dict__.get("Config") if config is not None and config.__dict__.get("type_alias") is not None: return config.type_alias + if cls.__config__.alias_required and is_not_abstract(cls): + raise ValueError(f"Alias is required for {cls.__name__}") return cls.__get_classpath__() @classmethod @@ -230,6 +263,8 @@ def validate(cls: Type[TPM], value: Any) -> TPM: if key in TYPE_ALIASES: classpath = TYPE_ALIASES[key] else: + if "." 
not in typename: + raise ValueError(f'Unknown alias "{typename}"') classpath = typename if not any(classpath.startswith(p) for p in ALLOWED_TYPE_PREFIXES): raise ValueError(f"{classpath} does not match any allowed prefixes") @@ -269,6 +304,11 @@ def get_value_fingerprint(value: Any) -> FingerprintPart: class EvidentlyBaseModel(FrozenBaseModel, PolymorphicModel): + class Config: + type_alias = "evidently:base:EvidentlyBaseModel" + alias_required = True + is_base_type = True + def get_fingerprint(self) -> Fingerprint: return hashlib.md5((self.__get_classpath__() + str(self.get_fingerprint_parts())).encode("utf8")).hexdigest() @@ -289,7 +329,11 @@ def update(self: EBM, **kwargs) -> EBM: return self.__class__(**data) +@autoregister class WithTestAndMetricDependencies(EvidentlyBaseModel): + class Config: + type_alias = "evidently:test:WithTestAndMetricDependencies" + def __evidently_dependencies__(self): from evidently.base_metric import Metric from evidently.tests.base_test import Test diff --git a/src/evidently/test_preset/__init__.py b/src/evidently/test_preset/__init__.py index 348bc5e27c..ea70861fc9 100644 --- a/src/evidently/test_preset/__init__.py +++ b/src/evidently/test_preset/__init__.py @@ -1,5 +1,6 @@ """Predefined Test Presets for Test Suite""" +from . import _registry from .classification_binary import BinaryClassificationTestPreset from .classification_binary_topk import BinaryClassificationTopKTestPreset from .classification_multiclass import MulticlassClassificationTestPreset @@ -20,4 +21,5 @@ "NoTargetPerformanceTestPreset", "RegressionTestPreset", "RecsysTestPreset", + "_registry", ] diff --git a/src/evidently/test_preset/_registry.py b/src/evidently/test_preset/_registry.py new file mode 100644 index 0000000000..39c922ea3c --- /dev/null +++ b/src/evidently/test_preset/_registry.py @@ -0,0 +1,42 @@ +from evidently.pydantic_utils import register_type_alias +from evidently.test_preset.test_preset import TestPreset + +register_type_alias( + TestPreset, + "evidently.test_preset.classification_binary.BinaryClassificationTestPreset", + "evidently:test_preset:BinaryClassificationTestPreset", +) +register_type_alias( + TestPreset, + "evidently.test_preset.classification_binary_topk.BinaryClassificationTopKTestPreset", + "evidently:test_preset:BinaryClassificationTopKTestPreset", +) +register_type_alias( + TestPreset, + "evidently.test_preset.classification_multiclass.MulticlassClassificationTestPreset", + "evidently:test_preset:MulticlassClassificationTestPreset", +) +register_type_alias( + TestPreset, "evidently.test_preset.data_drift.DataDriftTestPreset", "evidently:test_preset:DataDriftTestPreset" +) +register_type_alias( + TestPreset, + "evidently.test_preset.data_quality.DataQualityTestPreset", + "evidently:test_preset:DataQualityTestPreset", +) +register_type_alias( + TestPreset, + "evidently.test_preset.data_stability.DataStabilityTestPreset", + "evidently:test_preset:DataStabilityTestPreset", +) +register_type_alias( + TestPreset, + "evidently.test_preset.no_target_performance.NoTargetPerformanceTestPreset", + "evidently:test_preset:NoTargetPerformanceTestPreset", +) +register_type_alias( + TestPreset, "evidently.test_preset.recsys.RecsysTestPreset", "evidently:test_preset:RecsysTestPreset" +) +register_type_alias( + TestPreset, "evidently.test_preset.regression.RegressionTestPreset", "evidently:test_preset:RegressionTestPreset" +) diff --git a/src/evidently/test_preset/classification_binary.py b/src/evidently/test_preset/classification_binary.py index 928c6a0d77..d08a974d8a 
100644 --- a/src/evidently/test_preset/classification_binary.py +++ b/src/evidently/test_preset/classification_binary.py @@ -16,6 +16,9 @@ class BinaryClassificationTestPreset(TestPreset): + class Config: + type_alias = "evidently:test_preset:BinaryClassificationTestPreset" + """ Binary Classification Tests. Args: diff --git a/src/evidently/test_preset/classification_binary_topk.py b/src/evidently/test_preset/classification_binary_topk.py index f5ff3a4523..0576ef02ac 100644 --- a/src/evidently/test_preset/classification_binary_topk.py +++ b/src/evidently/test_preset/classification_binary_topk.py @@ -17,6 +17,9 @@ class BinaryClassificationTopKTestPreset(TestPreset): + class Config: + type_alias = "evidently:test_preset:BinaryClassificationTopKTestPreset" + """ Binary Classification Tests for Top K threshold. Args: diff --git a/src/evidently/test_preset/classification_multiclass.py b/src/evidently/test_preset/classification_multiclass.py index 45ccb34aed..85d9a26410 100644 --- a/src/evidently/test_preset/classification_multiclass.py +++ b/src/evidently/test_preset/classification_multiclass.py @@ -18,6 +18,9 @@ class MulticlassClassificationTestPreset(TestPreset): + class Config: + type_alias = "evidently:test_preset:MulticlassClassificationTestPreset" + """ Multiclass Classification tests. diff --git a/src/evidently/test_preset/data_drift.py b/src/evidently/test_preset/data_drift.py index bccc0b5b25..dfec627696 100644 --- a/src/evidently/test_preset/data_drift.py +++ b/src/evidently/test_preset/data_drift.py @@ -20,6 +20,9 @@ class DataDriftTestPreset(TestPreset): + class Config: + type_alias = "evidently:test_preset:DataDriftTestPreset" + """ Data Drift tests. diff --git a/src/evidently/test_preset/data_quality.py b/src/evidently/test_preset/data_quality.py index 1e03eddd37..b289934377 100644 --- a/src/evidently/test_preset/data_quality.py +++ b/src/evidently/test_preset/data_quality.py @@ -14,6 +14,9 @@ class DataQualityTestPreset(TestPreset): + class Config: + type_alias = "evidently:test_preset:DataQualityTestPreset" + """ Data Quality tests. diff --git a/src/evidently/test_preset/data_stability.py b/src/evidently/test_preset/data_stability.py index 5c4e384778..e5074df86b 100644 --- a/src/evidently/test_preset/data_stability.py +++ b/src/evidently/test_preset/data_stability.py @@ -16,6 +16,9 @@ class DataStabilityTestPreset(TestPreset): + class Config: + type_alias = "evidently:test_preset:DataStabilityTestPreset" + """ Data Stability tests. diff --git a/src/evidently/test_preset/no_target_performance.py b/src/evidently/test_preset/no_target_performance.py index e88bc58106..92cc6888a1 100644 --- a/src/evidently/test_preset/no_target_performance.py +++ b/src/evidently/test_preset/no_target_performance.py @@ -24,6 +24,9 @@ class NoTargetPerformanceTestPreset(TestPreset): + class Config: + type_alias = "evidently:test_preset:NoTargetPerformanceTestPreset" + """ No Target Performance tests. diff --git a/src/evidently/test_preset/recsys.py b/src/evidently/test_preset/recsys.py index e5ebd78fff..cf595f2b76 100644 --- a/src/evidently/test_preset/recsys.py +++ b/src/evidently/test_preset/recsys.py @@ -14,6 +14,9 @@ class RecsysTestPreset(TestPreset): + class Config: + type_alias = "evidently:test_preset:RecsysTestPreset" + """ Recsys performance tests. 
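The per-preset Config blocks above and the string-based entries in test_preset/_registry.py are two halves of one mechanism: the registry maps each alias to a classpath without importing the preset module, while Config.type_alias makes the class emit that same alias as its "type" value when serialized. A minimal sketch of the intended round-trip, assuming a hypothetical MyTestPreset subclass with a no-op generate_tests (neither is part of this patch), and assuming dict() emits the "type" field as for other polymorphic models:

    from evidently.pydantic_utils import autoregister
    from evidently.test_preset.test_preset import TestPreset

    @autoregister  # registers (TestPreset, alias) -> classpath at class-definition time
    class MyTestPreset(TestPreset):  # hypothetical subclass, for illustration only
        class Config:
            type_alias = "evidently:test_preset:MyTestPreset"

        def generate_tests(self, data_definition, additional_data=None):
            return []  # no-op body, just to satisfy the abstract method

    payload = MyTestPreset().dict()           # "type" carries the alias, not the import path
    restored = TestPreset.parse_obj(payload)  # is_base_type=True lets the base class parse any subclass
    assert isinstance(restored, MyTestPreset)

Because register_type_alias takes the classpath as a plain string, the _registry modules can register every alias at import time without importing the preset classes themselves; @autoregister is the shortcut for classes whose import is already guaranteed alongside the base class, per its docstring. With alias_required=True inherited from EvidentlyBaseModel, a concrete subclass that omits type_alias now raises "Alias is required" instead of silently falling back to its classpath, and an unregistered "type" value containing no dot fails fast with the "Unknown alias" error added to validate.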
diff --git a/src/evidently/test_preset/regression.py b/src/evidently/test_preset/regression.py index 518900ac65..6c68179297 100644 --- a/src/evidently/test_preset/regression.py +++ b/src/evidently/test_preset/regression.py @@ -13,6 +13,9 @@ class RegressionTestPreset(TestPreset): + class Config: + type_alias = "evidently:test_preset:RegressionTestPreset" + """ Regression performance tests. diff --git a/src/evidently/test_preset/test_preset.py b/src/evidently/test_preset/test_preset.py index 7ece7f5eef..4afa282c5c 100644 --- a/src/evidently/test_preset/test_preset.py +++ b/src/evidently/test_preset/test_preset.py @@ -14,6 +14,9 @@ class TestPreset(BasePreset): + class Config: + is_base_type = True + @abc.abstractmethod def generate_tests( self, data_definition: DataDefinition, additional_data: Optional[Dict[str, Any]] diff --git a/src/evidently/tests/_registry.py b/src/evidently/tests/_registry.py new file mode 100644 index 0000000000..799cd86e36 --- /dev/null +++ b/src/evidently/tests/_registry.py @@ -0,0 +1,268 @@ +from evidently.pydantic_utils import register_type_alias +from evidently.tests.base_test import Test +from evidently.tests.base_test import TestParameters + +register_type_alias( + Test, "evidently.tests.classification_performance_tests.TestAccuracyScore", "evidently:test:TestAccuracyScore" +) +register_type_alias( + Test, "evidently.tests.classification_performance_tests.TestF1ByClass", "evidently:test:TestF1ByClass" +) +register_type_alias(Test, "evidently.tests.classification_performance_tests.TestF1Score", "evidently:test:TestF1Score") +register_type_alias(Test, "evidently.tests.classification_performance_tests.TestFNR", "evidently:test:TestFNR") +register_type_alias(Test, "evidently.tests.classification_performance_tests.TestFPR", "evidently:test:TestFPR") +register_type_alias(Test, "evidently.tests.classification_performance_tests.TestLogLoss", "evidently:test:TestLogLoss") +register_type_alias( + Test, "evidently.tests.classification_performance_tests.TestPrecisionByClass", "evidently:test:TestPrecisionByClass" +) +register_type_alias( + Test, "evidently.tests.classification_performance_tests.TestPrecisionScore", "evidently:test:TestPrecisionScore" +) +register_type_alias( + Test, "evidently.tests.classification_performance_tests.TestRecallByClass", "evidently:test:TestRecallByClass" +) +register_type_alias( + Test, "evidently.tests.classification_performance_tests.TestRecallScore", "evidently:test:TestRecallScore" +) +register_type_alias(Test, "evidently.tests.classification_performance_tests.TestRocAuc", "evidently:test:TestRocAuc") +register_type_alias(Test, "evidently.tests.classification_performance_tests.TestTNR", "evidently:test:TestTNR") +register_type_alias(Test, "evidently.tests.classification_performance_tests.TestTPR", "evidently:test:TestTPR") +register_type_alias(Test, "evidently.tests.custom_test.CustomValueTest", "evidently:test:CustomValueTest") +register_type_alias(Test, "evidently.tests.data_drift_tests.TestColumnDrift", "evidently:test:TestColumnDrift") +register_type_alias(Test, "evidently.tests.data_drift_tests.TestEmbeddingsDrift", "evidently:test:TestEmbeddingsDrift") +register_type_alias( + Test, "evidently.tests.data_drift_tests.TestNumberOfDriftedColumns", "evidently:test:TestNumberOfDriftedColumns" +) +register_type_alias( + Test, "evidently.tests.data_drift_tests.TestShareOfDriftedColumns", "evidently:test:TestShareOfDriftedColumns" +) +register_type_alias( + Test, + "evidently.tests.data_integrity_tests.TestColumnAllConstantValues", + 
"evidently:test:TestColumnAllConstantValues", +) +register_type_alias( + Test, "evidently.tests.data_integrity_tests.TestColumnAllUniqueValues", "evidently:test:TestColumnAllUniqueValues" +) +register_type_alias( + Test, + "evidently.tests.data_integrity_tests.TestColumnNumberOfDifferentMissingValues", + "evidently:test:TestColumnNumberOfDifferentMissingValues", +) +register_type_alias( + Test, + "evidently.tests.data_integrity_tests.TestColumnNumberOfMissingValues", + "evidently:test:TestColumnNumberOfMissingValues", +) +register_type_alias( + Test, + "evidently.tests.data_integrity_tests.TestColumnShareOfMissingValues", + "evidently:test:TestColumnShareOfMissingValues", +) +register_type_alias(Test, "evidently.tests.data_integrity_tests.TestColumnsType", "evidently:test:TestColumnsType") +register_type_alias( + Test, "evidently.tests.data_integrity_tests.TestNumberOfColumns", "evidently:test:TestNumberOfColumns" +) +register_type_alias( + Test, + "evidently.tests.data_integrity_tests.TestNumberOfColumnsWithMissingValues", + "evidently:test:TestNumberOfColumnsWithMissingValues", +) +register_type_alias( + Test, + "evidently.tests.data_integrity_tests.TestNumberOfConstantColumns", + "evidently:test:TestNumberOfConstantColumns", +) +register_type_alias( + Test, + "evidently.tests.data_integrity_tests.TestNumberOfDifferentMissingValues", + "evidently:test:TestNumberOfDifferentMissingValues", +) +register_type_alias( + Test, + "evidently.tests.data_integrity_tests.TestNumberOfDuplicatedColumns", + "evidently:test:TestNumberOfDuplicatedColumns", +) +register_type_alias( + Test, "evidently.tests.data_integrity_tests.TestNumberOfDuplicatedRows", "evidently:test:TestNumberOfDuplicatedRows" +) +register_type_alias( + Test, "evidently.tests.data_integrity_tests.TestNumberOfEmptyColumns", "evidently:test:TestNumberOfEmptyColumns" +) +register_type_alias( + Test, "evidently.tests.data_integrity_tests.TestNumberOfEmptyRows", "evidently:test:TestNumberOfEmptyRows" +) +register_type_alias( + Test, "evidently.tests.data_integrity_tests.TestNumberOfMissingValues", "evidently:test:TestNumberOfMissingValues" +) +register_type_alias(Test, "evidently.tests.data_integrity_tests.TestNumberOfRows", "evidently:test:TestNumberOfRows") +register_type_alias( + Test, + "evidently.tests.data_integrity_tests.TestNumberOfRowsWithMissingValues", + "evidently:test:TestNumberOfRowsWithMissingValues", +) +register_type_alias( + Test, + "evidently.tests.data_integrity_tests.TestShareOfColumnsWithMissingValues", + "evidently:test:TestShareOfColumnsWithMissingValues", +) +register_type_alias( + Test, "evidently.tests.data_integrity_tests.TestShareOfMissingValues", "evidently:test:TestShareOfMissingValues" +) +register_type_alias( + Test, + "evidently.tests.data_integrity_tests.TestShareOfRowsWithMissingValues", + "evidently:test:TestShareOfRowsWithMissingValues", +) +register_type_alias(Test, "evidently.tests.data_quality_tests.TestCategoryCount", "evidently:test:TestCategoryCount") +register_type_alias(Test, "evidently.tests.data_quality_tests.TestCategoryShare", "evidently:test:TestCategoryShare") +register_type_alias(Test, "evidently.tests.data_quality_tests.TestColumnQuantile", "evidently:test:TestColumnQuantile") +register_type_alias(Test, "evidently.tests.data_quality_tests.TestColumnValueMax", "evidently:test:TestColumnValueMax") +register_type_alias( + Test, "evidently.tests.data_quality_tests.TestColumnValueMean", "evidently:test:TestColumnValueMean" +) +register_type_alias( + Test, 
"evidently.tests.data_quality_tests.TestColumnValueMedian", "evidently:test:TestColumnValueMedian" +) +register_type_alias(Test, "evidently.tests.data_quality_tests.TestColumnValueMin", "evidently:test:TestColumnValueMin") +register_type_alias(Test, "evidently.tests.data_quality_tests.TestColumnValueStd", "evidently:test:TestColumnValueStd") +register_type_alias( + Test, "evidently.tests.data_quality_tests.TestConflictPrediction", "evidently:test:TestConflictPrediction" +) +register_type_alias(Test, "evidently.tests.data_quality_tests.TestConflictTarget", "evidently:test:TestConflictTarget") +register_type_alias( + Test, "evidently.tests.data_quality_tests.TestCorrelationChanges", "evidently:test:TestCorrelationChanges" +) +register_type_alias( + Test, "evidently.tests.data_quality_tests.TestHighlyCorrelatedColumns", "evidently:test:TestHighlyCorrelatedColumns" +) +register_type_alias(Test, "evidently.tests.data_quality_tests.TestMeanInNSigmas", "evidently:test:TestMeanInNSigmas") +register_type_alias( + Test, "evidently.tests.data_quality_tests.TestMostCommonValueShare", "evidently:test:TestMostCommonValueShare" +) +register_type_alias( + Test, "evidently.tests.data_quality_tests.TestNumberOfOutListValues", "evidently:test:TestNumberOfOutListValues" +) +register_type_alias( + Test, "evidently.tests.data_quality_tests.TestNumberOfOutRangeValues", "evidently:test:TestNumberOfOutRangeValues" +) +register_type_alias( + Test, "evidently.tests.data_quality_tests.TestNumberOfUniqueValues", "evidently:test:TestNumberOfUniqueValues" +) +register_type_alias( + Test, + "evidently.tests.data_quality_tests.TestPredictionFeaturesCorrelations", + "evidently:test:TestPredictionFeaturesCorrelations", +) +register_type_alias( + Test, "evidently.tests.data_quality_tests.TestShareOfOutListValues", "evidently:test:TestShareOfOutListValues" +) +register_type_alias( + Test, "evidently.tests.data_quality_tests.TestShareOfOutRangeValues", "evidently:test:TestShareOfOutRangeValues" +) +register_type_alias( + Test, + "evidently.tests.data_quality_tests.TestTargetFeaturesCorrelations", + "evidently:test:TestTargetFeaturesCorrelations", +) +register_type_alias( + Test, + "evidently.tests.data_quality_tests.TestTargetPredictionCorrelation", + "evidently:test:TestTargetPredictionCorrelation", +) +register_type_alias( + Test, "evidently.tests.data_quality_tests.TestUniqueValuesShare", "evidently:test:TestUniqueValuesShare" +) +register_type_alias(Test, "evidently.tests.data_quality_tests.TestValueList", "evidently:test:TestValueList") +register_type_alias(Test, "evidently.tests.data_quality_tests.TestValueRange", "evidently:test:TestValueRange") +register_type_alias(Test, "evidently.tests.recsys_tests.TestARP", "evidently:test:TestARP") +register_type_alias(Test, "evidently.tests.recsys_tests.TestCoverage", "evidently:test:TestCoverage") +register_type_alias(Test, "evidently.tests.recsys_tests.TestDiversity", "evidently:test:TestDiversity") +register_type_alias(Test, "evidently.tests.recsys_tests.TestFBetaTopK", "evidently:test:TestFBetaTopK") +register_type_alias(Test, "evidently.tests.recsys_tests.TestGiniIndex", "evidently:test:TestGiniIndex") +register_type_alias(Test, "evidently.tests.recsys_tests.TestHitRateK", "evidently:test:TestHitRateK") +register_type_alias(Test, "evidently.tests.recsys_tests.TestMAPK", "evidently:test:TestMAPK") +register_type_alias(Test, "evidently.tests.recsys_tests.TestMARK", "evidently:test:TestMARK") +register_type_alias(Test, "evidently.tests.recsys_tests.TestMRRK", 
"evidently:test:TestMRRK") +register_type_alias(Test, "evidently.tests.recsys_tests.TestNDCGK", "evidently:test:TestNDCGK") +register_type_alias(Test, "evidently.tests.recsys_tests.TestNovelty", "evidently:test:TestNovelty") +register_type_alias(Test, "evidently.tests.recsys_tests.TestPersonalization", "evidently:test:TestPersonalization") +register_type_alias(Test, "evidently.tests.recsys_tests.TestPrecisionTopK", "evidently:test:TestPrecisionTopK") +register_type_alias(Test, "evidently.tests.recsys_tests.TestRecallTopK", "evidently:test:TestRecallTopK") +register_type_alias(Test, "evidently.tests.recsys_tests.TestScoreEntropy", "evidently:test:TestScoreEntropy") +register_type_alias(Test, "evidently.tests.recsys_tests.TestSerendipity", "evidently:test:TestSerendipity") +register_type_alias( + Test, "evidently.tests.regression_performance_tests.TestValueAbsMaxError", "evidently:test:TestValueAbsMaxError" +) +register_type_alias(Test, "evidently.tests.regression_performance_tests.TestValueMAE", "evidently:test:TestValueMAE") +register_type_alias(Test, "evidently.tests.regression_performance_tests.TestValueMAPE", "evidently:test:TestValueMAPE") +register_type_alias( + Test, "evidently.tests.regression_performance_tests.TestValueMeanError", "evidently:test:TestValueMeanError" +) +register_type_alias( + Test, "evidently.tests.regression_performance_tests.TestValueR2Score", "evidently:test:TestValueR2Score" +) +register_type_alias(Test, "evidently.tests.regression_performance_tests.TestValueRMSE", "evidently:test:TestValueRMSE") + + +register_type_alias( + TestParameters, "evidently.tests.base_test.CheckValueParameters", "evidently:test_parameters:CheckValueParameters" +) +register_type_alias( + TestParameters, + "evidently.tests.base_test.ColumnCheckValueParameters", + "evidently:test_parameters:ColumnCheckValueParameters", +) +register_type_alias( + TestParameters, + "evidently.tests.base_test.ConditionTestParameters", + "evidently:test_parameters:ConditionTestParameters", +) +register_type_alias( + TestParameters, "evidently.tests.base_test.TestParameters", "evidently:test_parameters:TestParameters" +) +register_type_alias( + TestParameters, + "evidently.tests.classification_performance_tests.ByClassParameters", + "evidently:test_parameters:ByClassParameters", +) +register_type_alias( + TestParameters, + "evidently.tests.data_drift_tests.ColumnDriftParameter", + "evidently:test_parameters:ColumnDriftParameter", +) +register_type_alias( + TestParameters, + "evidently.tests.data_drift_tests.ColumnsDriftParameters", + "evidently:test_parameters:ColumnsDriftParameters", +) +register_type_alias( + TestParameters, + "evidently.tests.data_integrity_tests.ColumnTypeParameter", + "evidently:test_parameters:ColumnTypeParameter", +) +register_type_alias( + TestParameters, + "evidently.tests.data_integrity_tests.ColumnTypesParameter", + "evidently:test_parameters:ColumnTypesParameter", +) +register_type_alias( + TestParameters, + "evidently.tests.data_quality_tests.ColumnValueListParameters", + "evidently:test_parameters:ColumnValueListParameters", +) +register_type_alias( + TestParameters, + "evidently.tests.data_quality_tests.MeanInNSigmasParameter", + "evidently:test_parameters:MeanInNSigmasParameter", +) +register_type_alias( + TestParameters, + "evidently.tests.data_quality_tests.ShareOfOutRangeParameters", + "evidently:test_parameters:ShareOfOutRangeParameters", +) +register_type_alias( + TestParameters, + "evidently.tests.data_quality_tests.ValueListParameters", + 
"evidently:test_parameters:ValueListParameters", +) diff --git a/src/evidently/tests/base_test.py b/src/evidently/tests/base_test.py index 051eb4da23..bc9f10d556 100644 --- a/src/evidently/tests/base_test.py +++ b/src/evidently/tests/base_test.py @@ -97,11 +97,16 @@ class TestStatus(Enum): class TestParameters(EvidentlyBaseModel, BaseResult): # type: ignore[misc] # pydantic Config class Config: + type_alias = "evidently:test_parameters:TestParameters" field_tags = {"type": {IncludeTags.TypeField}} + is_base_type = True class TestResult(EnumValueMixin, MetricResult): # todo: create common base class # short name/title from the test class + class Config: + type_alias = "evidently:metric_result:TestResult" + name: str # what was checked, what threshold (current value 13 is not ok with condition less than 5) description: str @@ -139,6 +144,9 @@ def is_passed(self): class Test(WithTestAndMetricDependencies): + class Config: + is_base_type = True + """ all fields in test class with type that is subclass of Metric would be used as dependencies of test. """ @@ -271,6 +279,9 @@ def __str__(self) -> str: class ConditionTestParameters(TestParameters): + class Config: + type_alias = "evidently:test_parameters:ConditionTestParameters" + condition: TestValueCondition @@ -302,10 +313,16 @@ def condition(self) -> TestValueCondition: class CheckValueParameters(ConditionTestParameters): + class Config: + type_alias = "evidently:test_parameters:CheckValueParameters" + value: Optional[Numeric] class ColumnCheckValueParameters(CheckValueParameters): + class Config: + type_alias = "evidently:test_parameters:ColumnCheckValueParameters" + column_name: str diff --git a/src/evidently/tests/classification_performance_tests.py b/src/evidently/tests/classification_performance_tests.py index 5fbe9ffd3c..d35d77d6ac 100644 --- a/src/evidently/tests/classification_performance_tests.py +++ b/src/evidently/tests/classification_performance_tests.py @@ -148,6 +148,9 @@ def conf_matrix(self): class TestAccuracyScore(SimpleClassificationTestTopK): + class Config: + type_alias = "evidently:test:TestAccuracyScore" + name = "Accuracy Score" def get_value(self, result: DatasetClassificationQuality): @@ -169,6 +172,9 @@ def render_html(self, obj: TestAccuracyScore) -> TestHtmlInfo: class TestPrecisionScore(SimpleClassificationTestTopK): + class Config: + type_alias = "evidently:test:TestPrecisionScore" + name = "Precision Score" def get_value(self, result: DatasetClassificationQuality): @@ -190,6 +196,9 @@ def render_html(self, obj: TestPrecisionScore) -> TestHtmlInfo: class TestF1Score(SimpleClassificationTestTopK): + class Config: + type_alias = "evidently:test:TestF1Score" + name: ClassVar = "F1 Score" def get_value(self, result: DatasetClassificationQuality): @@ -211,6 +220,9 @@ def render_html(self, obj: TestF1Score) -> TestHtmlInfo: class TestRecallScore(SimpleClassificationTestTopK): + class Config: + type_alias = "evidently:test:TestRecallScore" + name = "Recall Score" def get_value(self, result: DatasetClassificationQuality): @@ -232,6 +244,9 @@ def render_html(self, obj: TestRecallScore) -> TestHtmlInfo: class TestRocAuc(SimpleClassificationTest): + class Config: + type_alias = "evidently:test:TestRocAuc" + name: ClassVar = "ROC AUC Score" _roc_curve: ClassificationRocCurve @@ -291,6 +306,9 @@ def render_html(self, obj: TestRocAuc) -> TestHtmlInfo: class TestLogLoss(SimpleClassificationTest): + class Config: + type_alias = "evidently:test:TestLogLoss" + condition_arg = "lt" name = "Logarithmic Loss" @@ -326,6 +344,9 @@ def 
render_html(self, obj: TestLogLoss) -> TestHtmlInfo: class TestTPR(SimpleClassificationTestTopK): + class Config: + type_alias = "evidently:test:TestTPR" + name = "True Positive Rate" def get_value(self, result: DatasetClassificationQuality): @@ -362,6 +383,9 @@ def render_html(self, obj: TestF1Score) -> TestHtmlInfo: class TestTNR(SimpleClassificationTestTopK): + class Config: + type_alias = "evidently:test:TestTNR" + name = "True Negative Rate" def get_value(self, result: DatasetClassificationQuality): @@ -398,6 +422,9 @@ def render_html(self, obj: TestF1Score) -> TestHtmlInfo: class TestFPR(SimpleClassificationTestTopK): + class Config: + type_alias = "evidently:test:TestFPR" + condition_arg: ClassVar = "lt" name = "False Positive Rate" @@ -435,6 +462,9 @@ def render_html(self, obj: TestF1Score) -> TestHtmlInfo: class TestFNR(SimpleClassificationTestTopK): + class Config: + type_alias = "evidently:test:TestFNR" + condition_arg: ClassVar = "lt" name = "False Negative Rate" @@ -472,6 +502,9 @@ def render_html(self, obj: TestF1Score) -> TestHtmlInfo: class ByClassParameters(CheckValueParameters): + class Config: + type_alias = "evidently:test_parameters:ByClassParameters" + label: Label @@ -568,6 +601,9 @@ def get_parameters(self) -> ByClassParameters: class TestPrecisionByClass(ByClassClassificationTest): + class Config: + type_alias = "evidently:test:TestPrecisionByClass" + name: ClassVar[str] = "Precision Score by Class" def get_value(self, result: ClassMetric): @@ -592,6 +628,9 @@ def render_html(self, obj: TestPrecisionByClass) -> TestHtmlInfo: class TestRecallByClass(ByClassClassificationTest): + class Config: + type_alias = "evidently:test:TestRecallByClass" + name: ClassVar[str] = "Recall Score by Class" def get_value(self, result: ClassMetric): @@ -616,6 +655,9 @@ def render_html(self, obj: TestRecallByClass) -> TestHtmlInfo: class TestF1ByClass(ByClassClassificationTest): + class Config: + type_alias = "evidently:test:TestF1ByClass" + name: ClassVar[str] = "F1 Score by Class" def get_value(self, result: ClassMetric): diff --git a/src/evidently/tests/custom_test.py b/src/evidently/tests/custom_test.py index 920ee22663..e531d89401 100644 --- a/src/evidently/tests/custom_test.py +++ b/src/evidently/tests/custom_test.py @@ -17,6 +17,9 @@ class CustomValueTest(BaseCheckValueTest): + class Config: + type_alias = "evidently:test:CustomValueTest" + name: ClassVar = "Custom Value test" group = CUSTOM_GROUP.id diff --git a/src/evidently/tests/data_drift_tests.py b/src/evidently/tests/data_drift_tests.py index 4b69ead792..485e1dba5d 100644 --- a/src/evidently/tests/data_drift_tests.py +++ b/src/evidently/tests/data_drift_tests.py @@ -48,6 +48,9 @@ class ColumnDriftParameter(ExcludeNoneMixin, TestParameters): # type: ignore[misc] # pydantic Config + class Config: + type_alias = "evidently:test_parameters:ColumnDriftParameter" + stattest: str score: float threshold: float @@ -67,6 +70,9 @@ def from_metric(cls, data: ColumnDataDriftMetrics, column_name: str = None): class ColumnsDriftParameters(ConditionTestParameters): # todo: rename to columns? 
+ class Config: + type_alias = "evidently:test_parameters:ColumnsDriftParameters" + features: Dict[str, ColumnDriftParameter] @classmethod @@ -179,6 +185,9 @@ def check(self): class TestNumberOfDriftedColumns(BaseDataDriftMetricsTest): + class Config: + type_alias = "evidently:test:TestNumberOfDriftedColumns" + name: ClassVar = "Number of Drifted Features" def get_condition(self) -> TestValueCondition: @@ -199,6 +208,9 @@ def get_description(self, value: Numeric) -> str: class TestShareOfDriftedColumns(BaseDataDriftMetricsTest): + class Config: + type_alias = "evidently:test:TestShareOfDriftedColumns" + name: ClassVar = "Share of Drifted Columns" def get_condition(self) -> TestValueCondition: @@ -220,6 +232,9 @@ def get_description(self, value: Numeric) -> str: class TestColumnDrift(Test): + class Config: + type_alias = "evidently:test:TestColumnDrift" + name: ClassVar = "Drift per Column" group: ClassVar = DATA_DRIFT_GROUP.id _metric: ColumnDriftMetric @@ -604,6 +619,9 @@ def render_html(self, obj: TestColumnDrift) -> TestHtmlInfo: class TestEmbeddingsDrift(Test): + class Config: + type_alias = "evidently:test:TestEmbeddingsDrift" + name: ClassVar = "Drift for embeddings" group: ClassVar = DATA_DRIFT_GROUP.id embeddings_name: str diff --git a/src/evidently/tests/data_integrity_tests.py b/src/evidently/tests/data_integrity_tests.py index c5016eff6d..301785d9c5 100644 --- a/src/evidently/tests/data_integrity_tests.py +++ b/src/evidently/tests/data_integrity_tests.py @@ -76,6 +76,9 @@ def __init__( class TestNumberOfColumns(BaseIntegrityValueTest): + class Config: + type_alias = "evidently:test:TestNumberOfColumns" + """Number of all columns in the data, including utility columns (id/index, datetime, target, predictions)""" name: ClassVar = "Number of Columns" @@ -111,6 +114,9 @@ def render_html(self, obj: TestNumberOfColumns) -> TestHtmlInfo: class TestNumberOfRows(BaseIntegrityValueTest): + class Config: + type_alias = "evidently:test:TestNumberOfRows" + """Number of rows in the data""" name: ClassVar = "Number of Rows" @@ -240,6 +246,9 @@ def get_table_with_number_of_missing_values_by_one_missing_value( class TestNumberOfDifferentMissingValues(BaseIntegrityMissingValuesValuesTest): + class Config: + type_alias = "evidently:test:TestNumberOfDifferentMissingValues" + """Check a number of different encoded missing values.""" name: ClassVar = "Different Types of Missing Values" @@ -283,6 +292,9 @@ def render_html(self, obj: TestNumberOfDifferentMissingValues) -> TestHtmlInfo: class TestNumberOfMissingValues(BaseIntegrityMissingValuesValuesTest): + class Config: + type_alias = "evidently:test:TestNumberOfMissingValues" + """Check a number of missing values.""" name: ClassVar = "The Number of Missing Values" @@ -319,6 +331,9 @@ def render_html(self, obj: TestNumberOfMissingValues) -> TestHtmlInfo: class TestShareOfMissingValues(BaseIntegrityMissingValuesValuesTest): + class Config: + type_alias = "evidently:test:TestShareOfMissingValues" + """Check a share of missing values.""" name: ClassVar = "Share of Missing Values" @@ -345,6 +360,9 @@ def render_html(self, obj: TestNumberOfMissingValues) -> TestHtmlInfo: class TestNumberOfColumnsWithMissingValues(BaseIntegrityMissingValuesValuesTest): + class Config: + type_alias = "evidently:test:TestNumberOfColumnsWithMissingValues" + """Check a number of columns with a missing value.""" name: ClassVar = "The Number of Columns With Missing Values" @@ -375,6 +393,9 @@ def render_html(self, obj: TestNumberOfMissingValues) -> TestHtmlInfo: class 
TestShareOfColumnsWithMissingValues(BaseIntegrityMissingValuesValuesTest): + class Config: + type_alias = "evidently:test:TestShareOfColumnsWithMissingValues" + """Check a share of columns with a missing value.""" name: ClassVar = "The Share of Columns With Missing Values" @@ -406,6 +427,9 @@ def render_html(self, obj: TestNumberOfMissingValues) -> TestHtmlInfo: class TestNumberOfRowsWithMissingValues(BaseIntegrityMissingValuesValuesTest): + class Config: + type_alias = "evidently:test:TestNumberOfRowsWithMissingValues" + """Check a number of rows with a missing value.""" name: ClassVar = "The Number Of Rows With Missing Values" @@ -429,6 +453,9 @@ def get_description(self, value: Numeric) -> str: class TestShareOfRowsWithMissingValues(BaseIntegrityMissingValuesValuesTest): + class Config: + type_alias = "evidently:test:TestShareOfRowsWithMissingValues" + """Check a share of rows with a missing value.""" name: ClassVar = "The Share of Rows With Missing Values" @@ -488,6 +515,9 @@ def __init__( class TestColumnNumberOfDifferentMissingValues(BaseIntegrityColumnMissingValuesTest): + class Config: + type_alias = "evidently:test:TestColumnNumberOfDifferentMissingValues" + """Check a number of differently encoded missing values in one column.""" name: ClassVar = "Different Types of Missing Values in a Column" @@ -538,6 +568,9 @@ def render_html(self, obj: TestColumnNumberOfDifferentMissingValues) -> TestHtml class TestColumnNumberOfMissingValues(BaseIntegrityColumnMissingValuesTest): + class Config: + type_alias = "evidently:test:TestColumnNumberOfMissingValues" + """Check a number of missing values in one column.""" name: ClassVar = "The Number of Missing Values in a Column" @@ -563,6 +596,9 @@ def get_description(self, value: Numeric) -> str: class TestColumnShareOfMissingValues(BaseIntegrityColumnMissingValuesTest): + class Config: + type_alias = "evidently:test:TestColumnShareOfMissingValues" + """Check a share of missing values in one column.""" name: ClassVar = "The Share of Missing Values in a Column" @@ -613,6 +649,9 @@ def generate(self, data_definition: DataDefinition) -> List[TestColumnShareOfMis class TestNumberOfConstantColumns(BaseIntegrityValueTest): + class Config: + type_alias = "evidently:test:TestNumberOfConstantColumns" + """Number of columns contained only one unique value""" name: ClassVar = "Number of Constant Columns" @@ -650,6 +689,9 @@ def render_html(self, obj: TestNumberOfConstantColumns) -> TestHtmlInfo: class TestNumberOfEmptyRows(BaseIntegrityValueTest): + class Config: + type_alias = "evidently:test:TestNumberOfEmptyRows" + """Number of rows contained all NAN values""" name: ClassVar = "Number of Empty Rows" @@ -672,6 +714,9 @@ def get_description(self, value: Numeric) -> str: class TestNumberOfEmptyColumns(BaseIntegrityValueTest): + class Config: + type_alias = "evidently:test:TestNumberOfEmptyColumns" + """Number of columns contained all NAN values""" name: ClassVar = "Number of Empty Columns" @@ -708,6 +753,9 @@ def render_html(self, obj: TestNumberOfEmptyColumns) -> TestHtmlInfo: class TestNumberOfDuplicatedRows(BaseIntegrityValueTest): + class Config: + type_alias = "evidently:test:TestNumberOfDuplicatedRows" + """How many rows have duplicates in the dataset""" name: ClassVar = "Number of Duplicate Rows" @@ -730,6 +778,9 @@ def get_description(self, value: Numeric) -> str: class TestNumberOfDuplicatedColumns(BaseIntegrityValueTest): + class Config: + type_alias = "evidently:test:TestNumberOfDuplicatedColumns" + """How many columns have duplicates in the 
dataset""" name: ClassVar = "Number of Duplicate Columns" @@ -805,6 +856,9 @@ def groups(self) -> Dict[str, str]: class TestColumnAllConstantValues(BaseIntegrityOneColumnTest): + class Config: + type_alias = "evidently:test:TestColumnAllConstantValues" + """Test that there is only one unique value in a column""" name: ClassVar = "All Constant Values in a Column" @@ -848,6 +902,9 @@ def render_html(self, obj: TestColumnAllConstantValues) -> TestHtmlInfo: class TestColumnAllUniqueValues(BaseIntegrityOneColumnTest): + class Config: + type_alias = "evidently:test:TestColumnAllUniqueValues" + """Test that there is only uniques values in a column""" name: ClassVar = "All Unique Values in a Column" @@ -891,16 +948,25 @@ def render_html(self, obj: TestColumnAllUniqueValues) -> TestHtmlInfo: class ColumnTypeParameter(TestParameters): + class Config: + type_alias = "evidently:test_parameters:ColumnTypeParameter" + actual_type: str column_name: str expected_type: str class ColumnTypesParameter(TestParameters): + class Config: + type_alias = "evidently:test_parameters:ColumnTypesParameter" + columns: List[ColumnTypeParameter] class TestColumnsType(Test): + class Config: + type_alias = "evidently:test:TestColumnsType" + """This test compares columns type against the specified ones or a reference dataframe""" group: ClassVar = DATA_INTEGRITY_GROUP.id diff --git a/src/evidently/tests/data_quality_tests.py b/src/evidently/tests/data_quality_tests.py index 086b95f2ce..a57eb7c127 100644 --- a/src/evidently/tests/data_quality_tests.py +++ b/src/evidently/tests/data_quality_tests.py @@ -94,6 +94,9 @@ def __init__( class TestConflictTarget(Test): + class Config: + type_alias = "evidently:test:TestConflictTarget" + group: ClassVar = DATA_QUALITY_GROUP.id name: ClassVar = "Test number of conflicts in target" _metric: ConflictTargetMetric @@ -128,6 +131,9 @@ def groups(self) -> Dict[str, str]: class TestConflictPrediction(Test): + class Config: + type_alias = "evidently:test:TestConflictPrediction" + group: ClassVar = DATA_QUALITY_GROUP.id name: ClassVar = "Test number of conflicts in prediction" _metric: ConflictPredictionMetric @@ -195,6 +201,9 @@ def __init__( class TestTargetPredictionCorrelation(BaseDataQualityCorrelationsMetricsValueTest): + class Config: + type_alias = "evidently:test:TestTargetPredictionCorrelation" + name: ClassVar = "Correlation between Target and Prediction" def get_condition_from_reference(self, reference: Optional[DatasetCorrelation]) -> TestValueCondition: @@ -221,6 +230,9 @@ def get_description(self, value: Numeric) -> str: class TestHighlyCorrelatedColumns(BaseDataQualityCorrelationsMetricsValueTest): + class Config: + type_alias = "evidently:test:TestHighlyCorrelatedColumns" + name: ClassVar = "Highly Correlated Columns" def get_condition_from_reference(self, reference: Optional[DatasetCorrelation]) -> TestValueCondition: @@ -259,6 +271,9 @@ def render_html(self, obj: TestHighlyCorrelatedColumns) -> TestHtmlInfo: class TestTargetFeaturesCorrelations(BaseDataQualityCorrelationsMetricsValueTest): + class Config: + type_alias = "evidently:test:TestTargetFeaturesCorrelations" + name: ClassVar = "Correlation between Target and Features" def get_condition_from_reference(self, reference: Optional[DatasetCorrelation]) -> TestValueCondition: @@ -284,6 +299,9 @@ def get_description(self, value: Numeric) -> str: class TestPredictionFeaturesCorrelations(BaseDataQualityCorrelationsMetricsValueTest): + class Config: + type_alias = "evidently:test:TestPredictionFeaturesCorrelations" + name: 
ClassVar = "Correlation between Prediction and Features" def get_condition_from_reference(self, reference: Optional[DatasetCorrelation]) -> TestValueCondition: @@ -329,6 +347,9 @@ def render_html(self, obj: TestTargetFeaturesCorrelations) -> TestHtmlInfo: class TestCorrelationChanges(BaseDataQualityCorrelationsMetricsValueTest): + class Config: + type_alias = "evidently:test:TestCorrelationChanges" + group: ClassVar = DATA_QUALITY_GROUP.id name: ClassVar = "Change in Correlation" _metric: DatasetCorrelationsMetric @@ -466,6 +487,9 @@ def get_stat(self, current: NumericCharacteristics): class TestColumnValueMin(BaseFeatureDataQualityMetricsTest): + class Config: + type_alias = "evidently:test:TestColumnValueMin" + name: ClassVar = "Min Value" def get_stat(self, current: NumericCharacteristics): @@ -491,6 +515,9 @@ def get_description(self, value: Numeric) -> str: class TestColumnValueMax(BaseFeatureDataQualityMetricsTest): + class Config: + type_alias = "evidently:test:TestColumnValueMax" + name: ClassVar = "Max Value" def get_stat(self, current: NumericCharacteristics): @@ -518,6 +545,9 @@ def get_description(self, value: Numeric) -> str: class TestColumnValueMean(BaseFeatureDataQualityMetricsTest): + class Config: + type_alias = "evidently:test:TestColumnValueMean" + name: ClassVar = "Mean Value" def get_stat(self, current: NumericCharacteristics): @@ -541,6 +571,9 @@ def get_description(self, value: Numeric) -> str: class TestColumnValueMedian(BaseFeatureDataQualityMetricsTest): + class Config: + type_alias = "evidently:test:TestColumnValueMedian" + name: ClassVar = "Median Value" def get_stat(self, current: NumericCharacteristics): @@ -611,6 +644,9 @@ def _feature_render_html(self, obj): class TestColumnValueStd(BaseFeatureDataQualityMetricsTest): + class Config: + type_alias = "evidently:test:TestColumnValueStd" + name: ClassVar = "Standard Deviation (SD)" def get_stat(self, current: NumericCharacteristics): @@ -655,6 +691,9 @@ def render_html(self, obj: BaseFeatureDataQualityMetricsTest) -> TestHtmlInfo: class TestNumberOfUniqueValues(BaseFeatureDataQualityMetricsTest): + class Config: + type_alias = "evidently:test:TestNumberOfUniqueValues" + name: ClassVar = "Number of Unique Values" def get_stat(self, current: NumericCharacteristics): @@ -699,6 +738,9 @@ def render_html(self, obj: TestNumberOfUniqueValues) -> TestHtmlInfo: class TestUniqueValuesShare(BaseFeatureDataQualityMetricsTest): + class Config: + type_alias = "evidently:test:TestUniqueValuesShare" + name: ClassVar = "Share of Unique Values" def get_stat(self, current: NumericCharacteristics): @@ -750,6 +792,9 @@ def render_html(self, obj: TestUniqueValuesShare) -> TestHtmlInfo: class TestMostCommonValueShare(BaseFeatureDataQualityMetricsTest): + class Config: + type_alias = "evidently:test:TestMostCommonValueShare" + name: ClassVar = "Share of the Most Common Value" def get_stat(self, current: NumericCharacteristics): @@ -842,6 +887,9 @@ def generate(self, data_definition: DataDefinition) -> List[TestMostCommonValueS class MeanInNSigmasParameter(TestParameters): + class Config: + type_alias = "evidently:test_parameters:MeanInNSigmasParameter" + column_name: str current_mean: float n_sigmas: int # ? 
float @@ -850,6 +898,9 @@ class MeanInNSigmasParameter(TestParameters): class TestMeanInNSigmas(Test): + class Config: + type_alias = "evidently:test:TestMeanInNSigmas" + group: ClassVar = DATA_QUALITY_GROUP.id name: ClassVar = "Mean Value Stability" _metric: ColumnSummaryMetric @@ -1001,6 +1052,9 @@ def generate(self, data_definition: DataDefinition) -> List[TestMeanInNSigmas]: class TestValueRange(Test): + class Config: + type_alias = "evidently:test:TestValueRange" + group: ClassVar = DATA_QUALITY_GROUP.id name: ClassVar = "Value Range" _metric: ColumnValueRangeMetric @@ -1129,6 +1183,9 @@ def metric(self): class TestNumberOfOutRangeValues(BaseDataQualityValueRangeMetricsTest): + class Config: + type_alias = "evidently:test:TestNumberOfOutRangeValues" + name: ClassVar = "Number of Out-of-Range Values " def calculate_value_for_test(self) -> Numeric: @@ -1142,11 +1199,17 @@ def get_description(self, value: Numeric) -> str: class ShareOfOutRangeParameters(CheckValueParameters): + class Config: + type_alias = "evidently:test_parameters:ShareOfOutRangeParameters" + left: Optional[float] right: Optional[float] class TestShareOfOutRangeValues(BaseDataQualityValueRangeMetricsTest): + class Config: + type_alias = "evidently:test:TestShareOfOutRangeValues" + name: ClassVar = "Share of Out-of-Range Values" def calculate_value_for_test(self) -> Numeric: @@ -1228,12 +1291,18 @@ def generate(self, data_definition: DataDefinition) -> List[TestShareOfOutRangeV class ColumnValueListParameters(TestParameters): + class Config: + type_alias = "evidently:test_parameters:ColumnValueListParameters" + value: Numeric column_name: str values: Optional[List[Any]] = None class TestValueList(Test): + class Config: + type_alias = "evidently:test:TestValueList" + group: ClassVar = DATA_QUALITY_GROUP.id name: ClassVar = "Out-of-List Values" alias: ClassVar = "value_list" @@ -1326,6 +1395,9 @@ def get_condition(self) -> TestValueCondition: class TestNumberOfOutListValues(BaseDataQualityValueListMetricsTest): + class Config: + type_alias = "evidently:test:TestNumberOfOutListValues" + name: ClassVar = "Number Out-of-List Values" alias: ClassVar = "number_value_list" @@ -1341,10 +1413,16 @@ def get_description(self, value: Numeric) -> str: class ValueListParameters(CheckValueParameters): # todo: typing + class Config: + type_alias = "evidently:test_parameters:ValueListParameters" + values: Optional[List[Any]] = None class TestShareOfOutListValues(BaseDataQualityValueListMetricsTest): + class Config: + type_alias = "evidently:test:TestShareOfOutListValues" + name: ClassVar = "Share of Out-of-List Values" alias: ClassVar = "share_value_list" @@ -1391,6 +1469,9 @@ def generate(self, data_definition: DataDefinition) -> List[TestShareOfOutListVa class TestColumnQuantile(BaseCheckValueTest): + class Config: + type_alias = "evidently:test:TestColumnQuantile" + group: ClassVar = DATA_QUALITY_GROUP.id name: ClassVar = "Quantile Value" _metric: ColumnQuantileMetric @@ -1570,6 +1651,9 @@ def get_condition(self) -> TestValueCondition: class TestCategoryShare(BaseDataQualityCategoryMetricsTest): + class Config: + type_alias = "evidently:test:TestCategoryShare" + name: ClassVar = "Share of category" alias: ClassVar = "share_category" @@ -1594,6 +1678,9 @@ def get_parameters(self) -> CheckValueParameters: class TestCategoryCount(BaseDataQualityCategoryMetricsTest): + class Config: + type_alias = "evidently:test:TestCategoryCount" + name: ClassVar = "Count of category" alias: ClassVar = "count_category" diff --git 
a/src/evidently/tests/recsys_tests.py b/src/evidently/tests/recsys_tests.py index 156c5deb20..bc9b27671f 100644 --- a/src/evidently/tests/recsys_tests.py +++ b/src/evidently/tests/recsys_tests.py @@ -132,6 +132,9 @@ def render_html(self, obj: BaseTopkRecsysTest) -> TestHtmlInfo: class TestPrecisionTopK(BaseTopkRecsysTest): + class Config: + type_alias = "evidently:test:TestPrecisionTopK" + name: ClassVar = "Precision (top-k)" header: str = "Precision" @@ -145,6 +148,9 @@ class TestPrecisionTopKRenderer(BaseTopkRecsysRenderer): class TestRecallTopK(BaseTopkRecsysTest): + class Config: + type_alias = "evidently:test:TestRecallTopK" + name: ClassVar = "Recall (top-k)" header: str = "Recall" @@ -158,6 +164,9 @@ class TestRecallTopKRenderer(BaseTopkRecsysRenderer): class TestFBetaTopK(BaseTopkRecsysTest): + class Config: + type_alias = "evidently:test:TestFBetaTopK" + name: ClassVar = "F_beta (top-k)" header: str = "F_beta" @@ -171,6 +180,9 @@ class TestFBetaTopKRenderer(BaseTopkRecsysRenderer): class TestMAPK(BaseTopkRecsysTest): + class Config: + type_alias = "evidently:test:TestMAPK" + name: ClassVar = "MAP (top-k)" header: str = "MAP" @@ -184,6 +196,9 @@ class TestMAPKRenderer(BaseTopkRecsysRenderer): class TestMARK(BaseTopkRecsysTest): + class Config: + type_alias = "evidently:test:TestMARK" + name: ClassVar = "MAR (top-k)" header: str = "MAR" @@ -197,6 +212,9 @@ class TestMARKRenderer(BaseTopkRecsysRenderer): class TestNDCGK(BaseTopkRecsysTest): + class Config: + type_alias = "evidently:test:TestNDCGK" + name: ClassVar = "NDCG (top-k)" header: str = "NDCG" @@ -210,6 +228,9 @@ class TestNDCGKRenderer(BaseTopkRecsysRenderer): class TestHitRateK(BaseTopkRecsysTest): + class Config: + type_alias = "evidently:test:TestHitRateK" + name: ClassVar = "Hit Rate (top-k)" header: str = "Hit Rate" @@ -223,6 +244,9 @@ class TestHitRateKRenderer(BaseTopkRecsysRenderer): class TestMRRK(BaseTopkRecsysTest): + class Config: + type_alias = "evidently:test:TestMRRK" + name: ClassVar = "MRR (top-k)" header: str = "MRR" @@ -330,6 +354,9 @@ def render_html(self, obj: BaseNotRankRecsysTest) -> TestHtmlInfo: class TestNovelty(BaseNotRankRecsysTest): + class Config: + type_alias = "evidently:test:TestNovelty" + name: ClassVar = "Novelty (top-k)" header: str = "Novelty" @@ -343,6 +370,9 @@ class TestNoveltyRenderer(BaseNotRankRecsysTestRenderer): class TestDiversity(BaseNotRankRecsysTest): + class Config: + type_alias = "evidently:test:TestDiversity" + name: ClassVar = "Diversity (top-k)" header: str = "Diversity" @@ -356,6 +386,9 @@ class TestDiversityRenderer(BaseNotRankRecsysTestRenderer): class TestSerendipity(BaseNotRankRecsysTest): + class Config: + type_alias = "evidently:test:TestSerendipity" + name: ClassVar = "Serendipity (top-k)" header: str = "Serendipity" @@ -369,6 +402,9 @@ class TestSerendipityRenderer(BaseNotRankRecsysTestRenderer): class TestPersonalization(BaseNotRankRecsysTest): + class Config: + type_alias = "evidently:test:TestPersonalization" + name: ClassVar = "Personalization (top-k)" header: str = "Personalization" @@ -412,6 +448,9 @@ def render_html(self, obj: BaseNotRankRecsysTest) -> TestHtmlInfo: class TestARP(BaseCheckValueTest): + class Config: + type_alias = "evidently:test:TestARP" + group: ClassVar = RECSYS_GROUP.id name: ClassVar = "ARP (top-k)" k: int @@ -468,6 +507,9 @@ def metric(self): class TestGiniIndex(BaseCheckValueTest): + class Config: + type_alias = "evidently:test:TestGiniIndex" + group: ClassVar = RECSYS_GROUP.id name: ClassVar = "Gini Index (top-k)" k: int @@ -521,6 
+563,9 @@ def metric(self): class TestCoverage(BaseCheckValueTest): + class Config: + type_alias = "evidently:test:TestCoverage" + group: ClassVar = RECSYS_GROUP.id name: ClassVar = "Coverage (top-k)" k: int @@ -599,6 +644,9 @@ def render_html(self, obj: Union[TestARP, TestGiniIndex, TestCoverage]) -> TestH class TestScoreEntropy(BaseCheckValueTest): + class Config: + type_alias = "evidently:test:TestScoreEntropy" + group: ClassVar = RECSYS_GROUP.id name: ClassVar = "Score Entropy (top-k)" k: int diff --git a/src/evidently/tests/regression_performance_tests.py b/src/evidently/tests/regression_performance_tests.py index 0d4a5d975e..7791a82503 100644 --- a/src/evidently/tests/regression_performance_tests.py +++ b/src/evidently/tests/regression_performance_tests.py @@ -65,6 +65,9 @@ def dummy_metric(self): class TestValueMAE(BaseRegressionPerformanceMetricsTest): + class Config: + type_alias = "evidently:test:TestValueMAE" + name: ClassVar = "Mean Absolute Error (MAE)" def get_condition(self) -> TestValueCondition: @@ -101,6 +104,9 @@ def render_html(self, obj: TestValueMAE) -> TestHtmlInfo: class TestValueMAPE(BaseRegressionPerformanceMetricsTest): + class Config: + type_alias = "evidently:test:TestValueMAPE" + name: ClassVar = "Mean Absolute Percentage Error (MAPE)" def get_condition(self) -> TestValueCondition: @@ -139,6 +145,9 @@ def render_html(self, obj: TestValueMAPE) -> TestHtmlInfo: class TestValueRMSE(BaseRegressionPerformanceMetricsTest): + class Config: + type_alias = "evidently:test:TestValueRMSE" + name: ClassVar = "Root Mean Square Error (RMSE)" def get_condition(self) -> TestValueCondition: @@ -175,6 +184,9 @@ def render_html(self, obj: TestValueRMSE) -> TestHtmlInfo: class TestValueMeanError(BaseRegressionPerformanceMetricsTest): + class Config: + type_alias = "evidently:test:TestValueMeanError" + name: ClassVar = "Mean Error (ME)" def get_condition(self) -> TestValueCondition: @@ -218,6 +230,9 @@ def render_html(self, obj: TestValueMeanError) -> TestHtmlInfo: class TestValueAbsMaxError(BaseRegressionPerformanceMetricsTest): + class Config: + type_alias = "evidently:test:TestValueAbsMaxError" + name: ClassVar = "Max Absolute Error" def get_condition(self) -> TestValueCondition: @@ -250,6 +265,9 @@ def render_html(self, obj: TestValueAbsMaxError) -> TestHtmlInfo: class TestValueR2Score(BaseRegressionPerformanceMetricsTest): + class Config: + type_alias = "evidently:test:TestValueR2Score" + name: ClassVar = "R2 Score" def get_condition(self) -> TestValueCondition: diff --git a/src/evidently/ui/components/base.py b/src/evidently/ui/components/base.py index 4cbb24db8c..38f5b1d21d 100644 --- a/src/evidently/ui/components/base.py +++ b/src/evidently/ui/components/base.py @@ -65,6 +65,8 @@ def get_requirements(self) -> List[Type["Component"]]: class Config: extra = Extra.forbid + alias_required = False + is_base_type = True def __init_subclass__(cls): super().__init_subclass__() diff --git a/src/evidently/ui/dashboards/base.py b/src/evidently/ui/dashboards/base.py index 43d6e73c58..2fdf96229a 100644 --- a/src/evidently/ui/dashboards/base.py +++ b/src/evidently/ui/dashboards/base.py @@ -133,6 +133,7 @@ def inner(self: "DashboardPanel", *args, **kwargs) -> BaseWidgetInfo: class DashboardPanel(EnumValueMixin, PolymorphicModel): class Config: + type_alias = "evidently:dashboard_panel:DashboardPanel" is_base_type = True id: PanelID = Field(default_factory=new_id) diff --git a/src/evidently/ui/dashboards/reports.py b/src/evidently/ui/dashboards/reports.py index 75243387d6..e39a49d391 
diff --git a/src/evidently/ui/dashboards/reports.py b/src/evidently/ui/dashboards/reports.py
index 75243387d6..e39a49d391 100644
--- a/src/evidently/ui/dashboards/reports.py
+++ b/src/evidently/ui/dashboards/reports.py
@@ -14,6 +14,7 @@
 from evidently.metric_results import Distribution
 from evidently.metric_results import HistogramData
 from evidently.model.widget import BaseWidgetInfo
+from evidently.pydantic_utils import autoregister
 from evidently.renderers.html_widgets import CounterData
 from evidently.renderers.html_widgets import counter
 from evidently.renderers.html_widgets import plotly_figure
@@ -31,7 +32,11 @@
 from evidently.ui.base import DataStorage
 
 
+@autoregister
 class DashboardPanelPlot(DashboardPanel):
+    class Config:
+        type_alias = "evidently:dashboard_panel:DashboardPanelPlot"
+
     values: List[PanelValue]
     plot_type: PlotType
 
@@ -89,7 +94,11 @@ def plot_type_cls(self):
         raise ValueError(f"Unsupported plot type {self.plot_type}")
 
 
+@autoregister
 class DashboardPanelCounter(DashboardPanel):
+    class Config:
+        type_alias = "evidently:dashboard_panel:DashboardPanelCounter"
+
     agg: CounterAgg
     value: Optional[PanelValue] = None
     text: Optional[str] = None
@@ -129,7 +138,11 @@ def _get_counter_value(self, points: Dict[Metric, List[Tuple[datetime.datetime,
         raise ValueError(f"Unknown agg type {self.agg}")
 
 
+@autoregister
 class DashboardPanelDistribution(DashboardPanel):
+    class Config:
+        type_alias = "evidently:dashboard_panel:DashboardPanelDistribution"
+
     value: PanelValue
     barmode: HistBarMode = HistBarMode.STACK
 
diff --git a/src/evidently/ui/dashboards/test_suites.py b/src/evidently/ui/dashboards/test_suites.py
index f9d06b7c2f..248bcd07fc 100644
--- a/src/evidently/ui/dashboards/test_suites.py
+++ b/src/evidently/ui/dashboards/test_suites.py
@@ -16,6 +16,7 @@
 from evidently._pydantic_compat import BaseModel
 from evidently.model.widget import BaseWidgetInfo
 from evidently.pydantic_utils import EvidentlyBaseModel
+from evidently.pydantic_utils import autoregister
 from evidently.renderers.html_widgets import CounterData
 from evidently.renderers.html_widgets import counter
 from evidently.renderers.html_widgets import plotly_figure
@@ -86,7 +87,11 @@ def get(self, test_suite: TestSuite) -> Dict[Test, TestInfo]:
 descr_re = re.compile(r"\.\s+([A-Z])")
 
 
+@autoregister
 class DashboardPanelTestSuite(DashboardPanel):
+    class Config:
+        type_alias = "evidently:dashboard_panel:DashboardPanelTestSuite"
+
     test_filters: List[TestFilter] = []
     filter: ReportFilter = ReportFilter(metadata_values={}, tag_values=[], include_test_suites=True)
     panel_type: TestSuitePanelType = TestSuitePanelType.AGGREGATE
@@ -184,7 +189,11 @@ def to_period(time_agg: Optional[str], timestamp: datetime.datetime) -> datetime
     return pd.Series([timestamp], name="dt").dt.to_period(time_agg)[0]
 
 
+@autoregister
 class DashboardPanelTestSuiteCounter(DashboardPanel):
+    class Config:
+        type_alias = "evidently:dashboard_panel:DashboardPanelTestSuiteCounter"
+
     agg: CounterAgg = CounterAgg.NONE
     filter: ReportFilter = ReportFilter(metadata_values={}, tag_values=[], include_test_suites=True)
     test_filters: List[TestFilter] = []
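
# The dashboard panels above pair the in-class alias with an @autoregister
# decorator (imported from evidently.pydantic_utils) so that registration
# happens as a side effect of defining the class. A hedged sketch of that idea
# against a toy registry; the real decorator also records the base type and
# classpath:
from typing import Dict, Type

REGISTRY: Dict[str, Type] = {}


def autoregister(cls: Type) -> Type:
    # Read the alias off the class's Config and file it, returning the class
    # unchanged so it composes as a plain decorator.
    REGISTRY[cls.Config.type_alias] = cls
    return cls


@autoregister
class DashboardPanelPlot:  # stand-in for the real panel class
    class Config:
        type_alias = "evidently:dashboard_panel:DashboardPanelPlot"


assert REGISTRY["evidently:dashboard_panel:DashboardPanelPlot"] is DashboardPanelPlot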
diff --git a/tests/calculation_engine/test_python_engine.py b/tests/calculation_engine/test_python_engine.py
index c6a94e761e..39a531302c 100644
--- a/tests/calculation_engine/test_python_engine.py
+++ b/tests/calculation_engine/test_python_engine.py
@@ -13,6 +13,9 @@
 
 
 class OldTypeSimpleMetric(Metric[int]):
+    class Config:
+        alias_required = False
+
     value: int
 
     def __init__(self, value: int):
@@ -24,6 +27,9 @@ def calculate(self, data: InputData) -> int:
 
 
 class SimpleMetric(Metric[int]):
+    class Config:
+        alias_required = False
+
     value: int
 
     def __init__(self, value: int):
diff --git a/tests/features/test_multicolumn.py b/tests/features/test_multicolumn.py
index 4ac5934b8d..bafe049525 100644
--- a/tests/features/test_multicolumn.py
+++ b/tests/features/test_multicolumn.py
@@ -15,6 +15,9 @@
 
 
 class MultiColumnFeature(GeneratedFeatures):
+    class Config:
+        alias_required = False
+
     source_column: str
     _called_count: int = PrivateAttr(0)
 
diff --git a/tests/metrics/test_base_metric.py b/tests/metrics/test_base_metric.py
index 3751bb2c9f..be1693bf9d 100644
--- a/tests/metrics/test_base_metric.py
+++ b/tests/metrics/test_base_metric.py
@@ -49,6 +49,9 @@ def test_metric_generator():
 
 
 class SimpleMetric(Metric[int]):
+    class Config:
+        alias_required = False
+
     column_name: ColumnName
 
     def __init__(self, column_name: ColumnName):
@@ -60,6 +63,9 @@ def calculate(self, data: InputData) -> int:
 
 
 class SimpleMetric2(Metric[int]):
+    class Config:
+        alias_required = False
+
     column_name: ColumnName
 
     def __init__(self, column_name: ColumnName):
@@ -71,6 +77,9 @@ def calculate(self, data: InputData) -> int:
 
 
 class SimpleMetricWithFeatures(Metric[int]):
+    class Config:
+        alias_required = False
+
     column_name: str
     _feature: Optional[GeneratedFeature]
 
@@ -93,6 +102,9 @@ def required_features(self, data_definition: DataDefinition):
 
 
 class MetricWithAllTextFeatures(Metric[Dict[str, int]]):
+    class Config:
+        alias_required = False
+
     _features: Dict[str, "LengthFeature"]
 
     def calculate(self, data: InputData):
@@ -107,6 +119,9 @@ def required_features(self, data_definition: DataDefinition):
 
 
 class SimpleGeneratedFeature(GeneratedFeature):
+    class Config:
+        alias_required = False
+
     __feature_type__: ClassVar = ColumnType.Numerical
     column_name: str
 
@@ -123,6 +138,9 @@ def _as_column(self) -> ColumnName:
 
 
 class LengthFeature(GeneratedFeature):
+    class Config:
+        alias_required = False
+
     __feature_type__: ClassVar = ColumnType.Numerical
     column_name: str
     max_length: Optional[int] = None
@@ -215,6 +233,9 @@ class MyOption(Option):
     field: str
 
 class MockMetric(Metric[MetricResult]):
+    class Config:
+        alias_required = False
+
     def calculate(self, data: InputData):
         return MetricResult()
 
@@ -235,6 +256,9 @@ def get_options_fingerprint(self) -> FingerprintPart:
         return get_value_fingerprint(self.options.get(MyOption).field)
 
 class MockMetricWithOption(UsesMyOptionMixin, Metric[MetricResult]):
+    class Config:
+        alias_required = False
+
     def calculate(self, data: InputData):
         return MetricResult()
 
diff --git a/tests/multitest/conftest.py b/tests/multitest/conftest.py
index 6c166d5a86..60b3a478c0 100644
--- a/tests/multitest/conftest.py
+++ b/tests/multitest/conftest.py
@@ -109,6 +109,9 @@ def find_all_subclasses(
 
 def make_approx_type(cls: Type[T], ignore_not_set: bool = False) -> Type[T]:
     class ApproxFields(cls):
+        class Config:
+            alias_required = False
+
         __ignore_not_set__ = ignore_not_set
         __annotations__ = {
             k: Union[ApproxValue, f.type_]
diff --git a/tests/report/test_report.py b/tests/report/test_report.py
index ec41780603..bd07172c3e 100644
--- a/tests/report/test_report.py
+++ b/tests/report/test_report.py
@@ -15,6 +15,7 @@
 
 class MockMetricResult(MetricResult):
     class Config:
+        alias_required = False
         dict_exclude_fields = {"series"}
 
     value: str
@@ -23,6 +24,9 @@ class Config:
 
 
 class MockMetric(Metric[MockMetricResult]):
+    class Config:
+        alias_required = False
+
    def calculate(self, data: InputData) -> MockMetricResult:
        return MockMetricResult(value="a", series=pd.Series([0]), distribution=Distribution(x=[1, 1], y=[0, 0]))
 
@@ -31,7 +35,7 @@ def calculate(self, data: InputData) -> MockMetricResult:
 class MockMetricRenderer(MetricRenderer):
     def render_html(self, obj) -> List[BaseWidgetInfo]:
         # todo?
-        raise NotImplementedError
+        return []
 
 
 def test_as_dict():
diff --git a/tests/test_metric_results.py b/tests/test_metric_results.py
index 49f8f1ff99..fb9e79eca1 100644
--- a/tests/test_metric_results.py
+++ b/tests/test_metric_results.py
@@ -56,11 +56,15 @@ def test_metric_result_fields_config(all_metric_results: Set[Type[MetricResult]]):
 
 
 class SimpleField(MetricResult):
+    class Config:
+        alias_required = False
+
     f1: str
 
 
 class ExcludeModel(MetricResult):
     class Config:
+        alias_required = False
         dict_exclude_fields = {"simple"}
 
     simple: SimpleField
@@ -73,6 +77,7 @@ def test_default_json(obj: MetricResult, expected):
 
 class FieldExclude(MetricResult):
     class Config:
+        alias_required = False
         dict_exclude_fields = {"f2"}
 
     f1: str
@@ -81,6 +86,7 @@ class Config:
 
 class FieldInclude(MetricResult):
     class Config:
+        alias_required = False
         dict_include_fields = {"f1"}
 
     f1: str
@@ -89,6 +95,7 @@ class Config:
 
 class DictExclude(MetricResult):
     class Config:
+        alias_required = False
         dict_include = False
 
     f1: List[int]
@@ -96,11 +103,17 @@ class Config:
 
 
 class NestedExclude(MetricResult):
+    class Config:
+        alias_required = False
+
     f: str
     nested: DictExclude
 
 
 class Model(MetricResult):
+    class Config:
+        alias_required = False
+
     no: NestedExclude = Field(..., include={"nested": {"f1"}})
     fe: FieldExclude
     feo: FieldExclude = Field(include={"f1", "f2"})
@@ -146,6 +159,9 @@ def test_include_exclude(model: Model, include, exclude, expected):
 
 
 class DictModel(MetricResult):
+    class Config:
+        alias_required = False
+
     de: Dict[str, DictExclude]
     deo: Dict[str, DictExclude] = Field(..., include=True)
     # fe: Dict[str, FieldExclude]
@@ -181,11 +197,13 @@ def test_include_exclude_dict(dict_model: DictModel, include, exclude, expected):
 
 def test_polymorphic():
     class Parent(MetricResult):
-        pass
+        class Config:
+            alias_required = False
 
     class A(Parent):
         class Config:
             dict_include_fields = {"f1"}
+            alias_required = False
 
         f1: str
         f2: str
@@ -193,11 +211,15 @@ class Config:
     class B(Parent):
         class Config:
             dict_exclude_fields = {"b"}
+            alias_required = False
 
         a: str
         b: str
 
     class PModel(MetricResult):
+        class Config:
+            alias_required = False
+
         vals: Dict[str, Parent]
 
     assert PModel(vals={"a": A(f1="a", f2="b"), "b": B(a="a", b="b")}).get_dict() == {
@@ -224,10 +246,16 @@ class Container(EnumValueMixin, BaseModel):
 
 def test_model_list():
     class SimpleField(MetricResult):
+        class Config:
+            alias_required = False
+
         field: str
         field2: str
 
     class Container(MetricResult):
+        class Config:
+            alias_required = False
+
         field: List[SimpleField]
 
     obj = Container(field=[SimpleField(field="a", field2="b")])
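
# The test_metric_results.py fixtures above thread alias_required = False
# through classes whose real job is to exercise the dict_include/dict_exclude
# Config options. A rough, self-contained model of that filtering, to make the
# fixtures easier to read -- evidently's actual MetricResult.get_dict() is
# considerably richer (nested includes, field tags, pydantic integration):
from typing import Any, Dict


class ToyResult:
    class Config:
        dict_include_fields: set = set()  # when non-empty, acts as an allowlist
        dict_exclude_fields: set = set()  # always acts as a denylist

    def get_dict(self) -> Dict[str, Any]:
        include = self.Config.dict_include_fields
        exclude = self.Config.dict_exclude_fields
        return {
            k: v
            for k, v in vars(self).items()
            if (not include or k in include) and k not in exclude
        }


class FieldExcludeToy(ToyResult):
    class Config(ToyResult.Config):
        dict_exclude_fields = {"f2"}

    def __init__(self, f1: str, f2: str):
        self.f1, self.f2 = f1, f2


assert FieldExcludeToy(f1="a", f2="b").get_dict() == {"f1": "a"}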
diff --git a/tests/test_pydantic_aliases.py b/tests/test_pydantic_aliases.py
index e6b04dc5fa..488322edd3 100644
--- a/tests/test_pydantic_aliases.py
+++ b/tests/test_pydantic_aliases.py
@@ -6,10 +6,32 @@
 from typing import Type
 from typing import TypeVar
 
+import pytest
+
 import evidently
+from evidently._pydantic_compat import import_string
+from evidently.base_metric import BasePreset
+from evidently.base_metric import ColumnName
+from evidently.base_metric import Metric
+from evidently.base_metric import MetricResult
+from evidently.collector.config import CollectorTrigger
+from evidently.collector.storage import CollectorStorage
+from evidently.features.generated_features import BaseDescriptor
+from evidently.features.generated_features import GeneratedFeatures
+from evidently.features.llm_judge import BaseLLMPromptTemplate
+from evidently.metric_preset.metric_preset import MetricPreset
+from evidently.metrics.data_drift.embedding_drift_methods import DriftMethod
 from evidently.pydantic_utils import TYPE_ALIASES
+from evidently.pydantic_utils import EvidentlyBaseModel
 from evidently.pydantic_utils import PolymorphicModel
+from evidently.pydantic_utils import WithTestAndMetricDependencies
 from evidently.pydantic_utils import get_base_class
+from evidently.pydantic_utils import is_not_abstract
+from evidently.test_preset.test_preset import TestPreset
+from evidently.tests.base_test import Test
+from evidently.tests.base_test import TestParameters
+from evidently.ui.components.base import Component
+from evidently.ui.dashboards.base import DashboardPanel
 
 T = TypeVar("T")
@@ -40,6 +62,8 @@ def test_all_aliases_registered():
     not_registered = []
 
     for cls in find_all_subclasses(PolymorphicModel, include_abstract=True):
+        if cls.__is_base_type__():
+            continue
         classpath = cls.__get_classpath__()
         typename = cls.__get_type__()
         if classpath == typename:
@@ -51,7 +75,47 @@ def test_all_aliases_registered():
 
     msg = "\n".join(
         f'register_type_alias({get_base_class(cls).__name__}, "{cls.__get_classpath__()}", "{cls.__get_type__()}")'
-        for cls in not_registered
+        for cls in sorted(not_registered, key=lambda c: get_base_class(c).__name__ + " " + c.__get_classpath__())
     )
     print(msg)
     assert len(not_registered) == 0, "Not all aliases registered"
+
+
+@pytest.mark.parametrize("classpath", list(TYPE_ALIASES.values()))
+def test_all_registered_classpath_exist(classpath):
+    try:
+        import_string(classpath)
+    except ImportError:
+        assert False, f"wrong classpath registered '{classpath}'"
+
+
+def test_all_aliases_correct():
+    base_class_type_mapping = {
+        Metric: "metric",
+        Test: "test",
+        GeneratedFeatures: "feature",
+        BaseDescriptor: "descriptor",
+        MetricPreset: "metric_preset",
+        TestPreset: "test_preset",
+        MetricResult: "metric_result",
+        DriftMethod: "drift_method",
+        TestParameters: "test_parameters",
+        ColumnName: "base",
+        CollectorTrigger: "collector_trigger",
+        CollectorStorage: "collector_storage",
+        BaseLLMPromptTemplate: "prompt_template",
+        DashboardPanel: "dashboard_panel",
+    }
+    skip = [Component]
+    skip_literal = [EvidentlyBaseModel, WithTestAndMetricDependencies, BasePreset]
+    for cls in find_all_subclasses(PolymorphicModel, include_abstract=True):
+        if cls in skip_literal or any(issubclass(cls, s) for s in skip) or not is_not_abstract(cls):
+            continue
+        for base_class, base_type in base_class_type_mapping.items():
+            if issubclass(cls, base_class):
+                alias = getattr(cls.__config__, "type_alias")
+                assert alias is not None, f"{cls.__name__} has no alias ({alias})"
+                assert alias == f"evidently:{base_type}:{cls.__name__}", f"wrong alias for {cls.__name__}"
+                break
+        else:
+            assert False, f"No base class type mapping for {cls}"
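
# test_all_aliases_registered above prints ready-to-paste
# register_type_alias(...) lines for any class that is missing one, and
# test_all_aliases_correct pins the naming convention those lines must follow.
# The convention itself is mechanical; a small sketch (expected_alias is an
# invented helper, the format string is taken from the test above):
def expected_alias(base_type: str, cls: type) -> str:
    # e.g. ("test", TestValueMAE) -> "evidently:test:TestValueMAE"
    return f"evidently:{base_type}:{cls.__name__}"


class TestValueMAE:  # stand-in; the real class is patched earlier in this diff
    pass


assert expected_alias("test", TestValueMAE) == "evidently:test:TestValueMAE"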
diff --git a/tests/test_suite/test_test_suite.py b/tests/test_suite/test_test_suite.py
index 2ba343ba3d..4279e46c3a 100644
--- a/tests/test_suite/test_test_suite.py
+++ b/tests/test_suite/test_test_suite.py
@@ -47,6 +47,9 @@
 
 
 class ErrorTest(Test):
+    class Config:
+        alias_required = False
+
     name = "Error Test"
     group = "example"
 
diff --git a/tests/ui/test_app.py b/tests/ui/test_app.py
index daf4e74edc..b581c466ee 100644
--- a/tests/ui/test_app.py
+++ b/tests/ui/test_app.py
@@ -114,6 +114,9 @@ def test_delete_project(test_client: TestClient, project_manager: ProjectManager):
 
 
 class MockMetricResult(MetricResult):
+    class Config:
+        alias_required = False
+
     value: float
 
     @classmethod
@@ -122,6 +125,9 @@ def create(cls, value: float):
 
 
 class MockMetric(Metric[MockMetricResult]):
+    class Config:
+        alias_required = False
+
     def calculate(self, data: InputData) -> MockMetricResult:
         return MockMetricResult.create(1)
 
diff --git a/tests/ui/test_dashboards.py b/tests/ui/test_dashboards.py
index cd199ef13e..6942cef4a5 100644
--- a/tests/ui/test_dashboards.py
+++ b/tests/ui/test_dashboards.py
@@ -17,10 +17,16 @@
 
 
 class A(MetricResult):
+    class Config:
+        alias_required = False
+
     f: str
 
 
 class B(MetricResult):
+    class Config:
+        alias_required = False
+
     f: Dict[str, A]
     f1: A
 
@@ -44,6 +50,9 @@ def test_panel_value_metric_args_ser():
 
 def test_panel_value_methic_hash_filter():
     class MyMetric(Metric[A]):
+        class Config:
+            alias_required = False
+
         arg: str
 
         def calculate(self, data: InputData) -> TResult:
@@ -59,9 +68,15 @@ def calculate(self, data: InputData) -> TResult:
 
 def test_metric_hover_template():
     class Nested(EvidentlyBaseModel):
+        class Config:
+            alias_required = False
+
         f: str
 
     class MyMetric(Metric[A]):
+        class Config:
+            alias_required = False
+
         arg: str
         n: Nested
 
@@ -93,6 +108,9 @@ def calculate(self, data: InputData) -> TResult:
 
 def test_metric_hover_template_column_name():
     class MyMetric(Metric[A]):
+        class Config:
+            alias_required = False
+
         column_name: ColumnName
 
         def calculate(self, data: InputData) -> TResult:
diff --git a/tests/utils/test_pydantic_utils.py b/tests/utils/test_pydantic_utils.py
index e3d7d3f743..2751a8bdf6 100644
--- a/tests/utils/test_pydantic_utils.py
+++ b/tests/utils/test_pydantic_utils.py
@@ -1,3 +1,4 @@
+from abc import ABC
 from typing import Dict
 from typing import Optional
 from typing import Union
@@ -17,20 +18,32 @@
 
 
 class MockMetricResultField(MetricResult):
+    class Config:
+        alias_required = False
+
     nested_field: str
 
 
 class ExtendedMockMetricResultField(MockMetricResultField):
+    class Config:
+        alias_required = False
+
     additional_field: str
 
 
 class MockMetricResult(MetricResult):
+    class Config:
+        alias_required = False
+
     field1: MockMetricResultField
     field2: int
 
 
 def _metric_with_result(result: MetricResult):
     class MockMetric(Metric):
+        class Config:
+            alias_required = False
+
         def get_result(self):
             return result
@@ -72,6 +85,9 @@ def test_field_path():
 
 
 class MockMetricResultWithDict(MetricResult):
+    class Config:
+        alias_required = False
+
     d: Dict[str, MockMetricResultField]
 
 
@@ -112,7 +128,8 @@ def test_field_path_with_dict():
 
 def test_not_allowed_prefix():
     class SomeModel(PolymorphicModel):
-        pass
+        class Config:
+            alias_required = False
 
     with pytest.raises(ValueError):
         parse_obj_as(SomeModel, {"type": "external.Class"})
@@ -122,6 +139,7 @@ def test_type_alias():
     class SomeModel(PolymorphicModel):
         class Config:
             type_alias = "somemodel"
+            alias_required = False
 
     class SomeModelSubclass(SomeModel):
         pass
@@ -144,6 +162,7 @@ def test_include_exclude():
     class SomeModel(MetricResult):
         class Config:
             field_tags = {"f1": {IncludeTags.Render}}
+            alias_required = False
 
         f1: str
         f2: str
@@ -155,10 +174,14 @@ class Config:
     class SomeNestedModel(MetricResult):
         class Config:
             tags = {IncludeTags.Render}
+            alias_required = False
 
         f1: str
 
     class SomeOtherModel(MetricResult):
+        class Config:
+            alias_required = False
+
         f1: str
         f2: SomeNestedModel
         f3: SomeModel
@@ -174,6 +197,7 @@ def test_get_field_tags():
     class SomeModel(MetricResult):
         class Config:
             field_tags = {"f1": {IncludeTags.Render}}
+            alias_required = False
 
         f1: str
         f2: str
@@ -185,10 +209,14 @@ class Config:
     class SomeNestedModel(MetricResult):
         class Config:
             tags = {IncludeTags.Render}
+            alias_required = False
 
         f1: str
 
     class SomeOtherModel(MetricResult):
+        class Config:
+            alias_required = False
+
         f1: str
         f2: SomeNestedModel
         f3: SomeModel
@@ -206,6 +234,7 @@ def test_list_with_tags():
     class SomeModel(MetricResult):
         class Config:
             field_tags = {"f1": {IncludeTags.Render}}
+            alias_required = False
 
         f1: str
         f2: str
@@ -219,10 +248,14 @@ class Config:
     class SomeNestedModel(MetricResult):
         class Config:
             tags = {IncludeTags.Render}
+            alias_required = False
 
         f1: str
 
     class SomeOtherModel(MetricResult):
+        class Config:
+            alias_required = False
+
         f1: str
         f2: SomeNestedModel
         f3: SomeModel
@@ -242,12 +275,14 @@ def test_list_with_tags_with_union():
     class A(MetricResult):
         class Config:
             tags = {IncludeTags.Render}
+            alias_required = False
 
         f1: str
 
     class B(MetricResult):
         class Config:
             tags = {IncludeTags.Render}
+            alias_required = False
 
         f1: str
 
@@ -256,6 +291,9 @@ class Config:
     assert fp._cls == A
 
     class SomeModel(MetricResult):
+        class Config:
+            alias_required = False
+
         f2: Union[A, B]
         f1: str
 
@@ -271,6 +309,7 @@ def test_get_field_tags_no_overwrite():
     class A(MetricResult):
         class Config:
             field_tags = {"f": {IncludeTags.Current}}
+            alias_required = False
 
         f: str
 
@@ -281,6 +320,7 @@ class Config:
     class C(MetricResult):
         class Config:
             field_tags = {"f": {IncludeTags.Reference}}
+            alias_required = False
 
         f: A
 
@@ -296,11 +336,17 @@ class Config:
 
 def test_fingerprint_add_new_default_field():
     class A(EvidentlyBaseModel):
+        class Config:
+            alias_required = False
+
         field1: str
 
     f1 = A(field1="123").get_fingerprint()
 
     class A(EvidentlyBaseModel):
+        class Config:
+            alias_required = False
+
         field1: str
         field2: str = "321"
@@ -312,12 +358,18 @@ class A(EvidentlyBaseModel):
 
 def test_fingerprint_reorder_fields():
     class A(EvidentlyBaseModel):
+        class Config:
+            alias_required = False
+
         field1: str
         field2: str
 
     f1 = A(field1="123", field2="321").get_fingerprint()
 
     class A(EvidentlyBaseModel):
+        class Config:
+            alias_required = False
+
         field2: str
         field1: str
@@ -329,6 +381,9 @@ class A(EvidentlyBaseModel):
 
 def test_fingerprint_default_collision():
     class A(EvidentlyBaseModel):
+        class Config:
+            alias_required = False
+
         field1: Optional[str] = None
         field2: Optional[str] = None
 
@@ -337,6 +392,9 @@ def test_wrong_classpath():
     class A(EvidentlyBaseModel):
+        class Config:
+            alias_required = False
+
         f: str
 
     ALLOWED_TYPE_PREFIXES.append("tests.")
@@ -346,3 +404,18 @@ class A(EvidentlyBaseModel):
     d["type"] += "_"
     with pytest.raises(ValidationError):
         parse_obj_as(A, d)
+
+
+def test_alias_required():
+    class RequiredAlias(PolymorphicModel, ABC):
+        class Config:
+            alias_required = True
+
+    with pytest.raises(ValueError):
+
+        class NoAlias(RequiredAlias):
+            pass
+
+    class Alias(RequiredAlias):
+        class Config:
+            type_alias = "alias"
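
# The new test_alias_required above pins the behaviour this patch enables:
# once a base class opts in with alias_required = True, defining a concrete
# subclass without a type_alias fails immediately, while abstract
# intermediates (like RequiredAlias, which mixes in ABC) are exempt. A
# plain-Python sketch of such a check; evidently wires the real one into its
# pydantic Config handling, so everything below is illustrative only:
from abc import ABC


def _config_attr(cls: type, name: str, default):
    # Approximate pydantic's Config inheritance: the nearest class in the MRO
    # whose own Config defines the attribute wins.
    for klass in cls.__mro__:
        cfg = klass.__dict__.get("Config")
        if cfg is not None and hasattr(cfg, name):
            return getattr(cfg, name)
    return default


class Polymorphic:
    def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)
        if ABC in cls.__bases__:
            return  # abstract intermediates may defer declaring an alias
        if _config_attr(cls, "alias_required", False) and _config_attr(cls, "type_alias", None) is None:
            raise ValueError(f"{cls.__name__} must define Config.type_alias")


class RequiredAlias(Polymorphic, ABC):
    class Config:
        alias_required = True


class Alias(RequiredAlias):
    class Config:
        type_alias = "alias"


try:

    class NoAlias(RequiredAlias):
        pass

except ValueError:
    pass  # the same failure test_alias_required asserts with pytest.raises
else:
    raise AssertionError("expected ValueError for a subclass without an alias")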