Skip to content

Commit

Permalink
Merge pull request #2770 from axsaucedo/outlier_added_metrics
Browse files Browse the repository at this point in the history
Extending Alibi Detect Server to expose prometheus metrics for outliers
  • Loading branch information
axsaucedo authored Jan 11, 2021
2 parents c9fe796 + 730ea86 commit ff3816e
Show file tree
Hide file tree
Showing 2 changed files with 39 additions and 3 deletions.
2 changes: 2 additions & 0 deletions components/alibi-detect-server/adserver/constants.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
# HTTP request headers a caller sets to "true" to ask the outlier-detect
# server to include the corresponding scores in its response.
HEADER_RETURN_INSTANCE_SCORE = "Alibi-Detect-Return-Instance-Score"
HEADER_RETURN_FEATURE_SCORE = "Alibi-Detect-Return-Feature-Score"
# Environment variables that enable the same behaviour server-wide,
# without requiring the header on each request.
ENV_RETURN_INSTANCE_SCORE = "ALIBI_DETECT_RETURN_INSTANCE_SCORE"
ENV_RETURN_FEATURE_SCORE = "ALIBI_DETECT_RETURN_FEATURE_SCORE"
# Header selecting/reporting the outlier type (e.g. instance-level).
HEADER_OUTLIER_TYPE = "Alibi-Detect-Outlier-Type"

# Request-id header name; the "Ce-" prefix suggests CloudEvents — TODO confirm.
REQUEST_ID_HEADER_NAME = "Ce-Requestid"
Expand Down
40 changes: 37 additions & 3 deletions components/alibi-detect-server/adserver/od_model.py
Original file line number Diff line number Diff line change
@@ -1,17 +1,43 @@
import json
from typing import List, Dict, Optional, Union
import os
import logging
import kfserving
import numpy as np
from .numpy_encoder import NumpyEncoder
from adserver.base import CEModel
from alibi_detect.utils.saving import load_detector, Data
from seldon_core.user_model import SeldonResponse
from adserver.constants import (
HEADER_RETURN_INSTANCE_SCORE,
HEADER_RETURN_FEATURE_SCORE,
ENV_RETURN_INSTANCE_SCORE,
ENV_RETURN_FEATURE_SCORE,
HEADER_OUTLIER_TYPE,
)

# Server-wide defaults, read from the environment once at import time.
# Only a case-insensitive "true" enables the flag; unset or any other
# value (including empty string) yields False.
RETURN_INSTANCE_SCORE = os.environ.get(ENV_RETURN_INSTANCE_SCORE, "").upper() == "TRUE"
RETURN_FEATURE_SCORE = os.environ.get(ENV_RETURN_FEATURE_SCORE, "").upper() == "TRUE"


def _append_outlier_metrcs(metrics, outlier, name, is_count=True):
metric_found = outlier.get("data", {}).get(name)

# Assumes metric_found is always float/int or list/np.array when not none
if metric_found is not None:
if not isinstance(metric_found, (list, np.ndarray)):
metric_found = [metric_found]

for i, instance in enumerate(metric_found):
metrics.append(
{
"key": f"seldon_metric_outlier_{name}",
"value": instance,
"type": "COUNTER" if is_count else "GAUGE",
"tags": {"index": str(i)},
}
)


class AlibiDetectOutlierModel(CEModel): # pylint:disable=c-extension-no-member
def __init__(self, name: str, storage_uri: str, model: Optional[Data] = None):
Expand Down Expand Up @@ -70,7 +96,7 @@ def process_event(self, inputs: Union[List, Dict], headers: Dict) -> Dict:
if (
HEADER_RETURN_INSTANCE_SCORE in headers
and headers[HEADER_RETURN_INSTANCE_SCORE] == "true"
):
) or RETURN_INSTANCE_SCORE:
ret_instance_score = True

outlier_type = "instance"
Expand All @@ -80,7 +106,7 @@ def process_event(self, inputs: Union[List, Dict], headers: Dict) -> Dict:
if (
HEADER_RETURN_FEATURE_SCORE in headers
and headers[HEADER_RETURN_FEATURE_SCORE] == "true"
):
) or RETURN_FEATURE_SCORE:
ret_feature_score = True
od_preds = {}
name = self.model.meta["name"]
Expand All @@ -105,6 +131,12 @@ def process_event(self, inputs: Union[List, Dict], headers: Dict) -> Dict:
# scores used to determine outliers
return_instance_score=ret_instance_score,
)

# Register metrics
metrics = []
_append_outlier_metrcs(metrics, od_preds, "is_outlier")
_append_outlier_metrcs(metrics, od_preds, "instance_score", is_count=False)

# clean result
if (
"data" in od_preds
Expand All @@ -119,4 +151,6 @@ def process_event(self, inputs: Union[List, Dict], headers: Dict) -> Dict:
):
del od_preds["data"]["feature_score"]

return json.loads(json.dumps(od_preds, cls=NumpyEncoder))
resp_data = json.loads(json.dumps(od_preds, cls=NumpyEncoder))

return SeldonResponse(resp_data, None, metrics)

0 comments on commit ff3816e

Please sign in to comment.