Skip to content

Commit

Permalink
Merge pull request #864 from cliveseldon/explainers
Browse files Browse the repository at this point in the history
Create explainer notebook and fix tfserving proxy
  • Loading branch information
seldondev authored Sep 16, 2019
2 parents ec41863 + 08a3a52 commit 6747a21
Show file tree
Hide file tree
Showing 16 changed files with 1,129 additions and 25 deletions.
Binary file added doc/source/analytics/cat.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added doc/source/analytics/cat_explanation.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
10 changes: 10 additions & 0 deletions doc/source/analytics/explainers.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
# Model Explainers

![cat](cat.png)
![explanation](cat_explanation.png)

Seldon provides model explanations using its [Alibi](https://github.com/SeldonIO/alibi) Open Source library.

We provide [an example notebook](../examples/explainer_examples.html) showing how to deploy an explainer for Tabular, Text and Image models.


3 changes: 3 additions & 0 deletions doc/source/examples/explainer_examples.nblink
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
{
"path": "../../../notebooks/explainer_examples.ipynb"
}
1 change: 1 addition & 0 deletions doc/source/examples/notebooks.rst
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ Notebooks
Custom Endpoints <custom_endpoints>
Example Helm Deployments <helm_examples>
Explainer Alibi Anchor Tabular <alibi_anchor_tabular>
Tabular, Text and Image Model Explainers <explainer_examples>
Go Model <go_example>
GPU Tensorflow Deep MNIST <gpu_tensorflow_deep_mnist>
H2O Java MoJo <h2o_mojo>
Expand Down
1 change: 1 addition & 0 deletions doc/source/index.rst
Original file line number Diff line number Diff line change
Expand Up @@ -88,6 +88,7 @@ Seldon Core is an open source platform for deploying machine learning models on
:maxdepth: 1
:caption: ML Compliance and Governance

Model Explanations <analytics/explainers.md>
Outlier Detection <analytics/outlier_detection.md>
Routers (incl. Multi Armed Bandits) <analytics/routers.md>

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -247,6 +247,7 @@ else if (state.type == PredictiveUnitType.MODEL)

public SeldonMessage transformInput(SeldonMessage input, PredictiveUnitState state) throws InvalidProtocolBufferException
{
logger.info("Calling grpc for transform-input");
final Endpoint endpoint = state.endpoint;
switch (endpoint.getType()){
case REST:
Expand All @@ -267,11 +268,20 @@ public SeldonMessage transformInput(SeldonMessage input, PredictiveUnitState sta
.withMaxOutboundMessageSize(grpcMaxMessageSize);
return genStub.transformInput(input);
case MODEL:
ModelBlockingStub modelStub = ModelGrpc.newBlockingStub(grpcChannelHandler.get(endpoint))
.withDeadlineAfter(grpcReadTimeout, TimeUnit.MILLISECONDS)
.withMaxInboundMessageSize(grpcMaxMessageSize)
.withMaxOutboundMessageSize(grpcMaxMessageSize);
return modelStub.predict(input);
try
{
ModelBlockingStub modelStub = ModelGrpc.newBlockingStub(grpcChannelHandler.get(endpoint))
.withDeadlineAfter(grpcReadTimeout, TimeUnit.MILLISECONDS)
.withMaxInboundMessageSize(grpcMaxMessageSize)
.withMaxOutboundMessageSize(grpcMaxMessageSize);
logger.info(modelStub.getCallOptions().toString());
return modelStub.predict(input);
}
catch (Exception e)
{
logger.error("grpc exception ",e);
throw e;
}
case TRANSFORMER:
TransformerBlockingStub transformerStub = TransformerGrpc.newBlockingStub(grpcChannelHandler.get(endpoint))
.withDeadlineAfter(grpcReadTimeout, TimeUnit.MILLISECONDS)
Expand Down
6 changes: 3 additions & 3 deletions integrations/tfserving/Makefile
Original file line number Diff line number Diff line change
@@ -1,15 +1,15 @@
IMAGE_VERSION=0.6
IMAGE_VERSION=0.7
IMAGE_NAME = docker.io/seldonio/tfserving-proxy

SELDON_CORE_DIR=../../..

.PHONY: build_rest
build_rest:
s2i build -E environment_rest . seldonio/seldon-core-s2i-python3:0.11-SNAPSHOT $(IMAGE_NAME)_rest:$(IMAGE_VERSION)
s2i build -E environment_rest . seldonio/seldon-core-s2i-python3:0.12-SNAPSHOT $(IMAGE_NAME)_rest:$(IMAGE_VERSION)

.PHONY: build_grpc
build_grpc:
s2i build -E environment_grpc . seldonio/seldon-core-s2i-python3:0.11-SNAPSHOT $(IMAGE_NAME)_grpc:$(IMAGE_VERSION)
s2i build -E environment_grpc . seldonio/seldon-core-s2i-python3:0.12-SNAPSHOT $(IMAGE_NAME)_grpc:$(IMAGE_VERSION)


push_to_dockerhub_rest:
Expand Down
17 changes: 9 additions & 8 deletions integrations/tfserving/TfServingProxy.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,11 @@ def __init__(
log.debug("grpc_endpoint:",grpc_endpoint)
if not grpc_endpoint is None:
self.grpc = True
channel = grpc.insecure_channel(grpc_endpoint)
max_msg = 1000000000
options = [('grpc.max_message_length', max_msg),
('grpc.max_send_message_length', max_msg),
('grpc.max_receive_message_length', max_msg)]
channel = grpc.insecure_channel(grpc_endpoint,options)
self.stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
else:
self.grpc = False
Expand Down Expand Up @@ -80,13 +84,10 @@ def predict_grpc(self,request):
data_arr.tolist(),
shape=data_arr.shape))
result = self.stub.Predict(tfrequest)
result_arr = numpy.array(result.outputs[self.model_output].float_val)
if len(result_arr.shape) == 1:
result_arr = numpy.expand_dims(result_arr, axis=0)
class_names = []
data = array_to_grpc_datadef(
default_data_type, result_arr, class_names)
return prediction_pb2.SeldonMessage(data=data)
datadef = prediction_pb2.DefaultData(
tftensor=result.outputs[self.model_output]
)
return prediction_pb2.SeldonMessage(data=datadef)

def predict(self, X, features_names=[]):
"""
Expand Down
Loading

0 comments on commit 6747a21

Please sign in to comment.