diff --git a/.lighthouse/jenkins-x/release.yaml b/.lighthouse/jenkins-x/release.yaml index af4db7e8f2..3f9fd448c2 100644 --- a/.lighthouse/jenkins-x/release.yaml +++ b/.lighthouse/jenkins-x/release.yaml @@ -33,11 +33,11 @@ spec: resources: requests: cpu: 1 - memory: 2000Mi + memory: 3000Mi ephemeral-storage: "60Gi" limits: cpu: 1 - memory: 2000Mi + memory: 3000Mi ephemeral-storage: "60Gi" securityContext: privileged: true diff --git a/README.md b/README.md index dd4462d19a..f0a7b5dd8b 100644 --- a/README.md +++ b/README.md @@ -60,7 +60,7 @@ We provide optimized model servers for some of the most popular Deep Learning an You only have to upload your model binaries into your preferred object store, in this case we have a trained scikit-learn iris model in a Google bucket: ```console -gs://seldon-models/sklearn/iris/model.pickle +gs://seldon-models/sklearn/iris/model.joblib ``` Create a namespace to run your model in: diff --git a/components/alibi-explain-server/Dockerfile.gpu b/components/alibi-explain-server/Dockerfile.gpu new file mode 100644 index 0000000000..cea7ebaa86 --- /dev/null +++ b/components/alibi-explain-server/Dockerfile.gpu @@ -0,0 +1,28 @@ +FROM nvidia/cuda:11.2.1-devel-ubuntu18.04 + +RUN \ + apt update && apt install -y git wget unzip bzip2 libgomp1 build-essential + +RUN wget -O Miniconda3.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh && \ + bash Miniconda3.sh -b -p /opt/python + +ENV PATH=/opt/python/bin:$PATH + +#RUN conda install -c conda-forge python=3.7.10 + +WORKDIR /app + +COPY setup.py setup.py +COPY alibiexplainer alibiexplainer +COPY README.md README.md + +# Required for https://github.com/slundberg/shap/issues/1633 +RUN pip install . --no-binary protobuf + +COPY requirements-gpu.txt . + +RUN pip install --force-reinstall -r requirements-gpu.txt + +ENTRYPOINT ["python", "-m", "alibiexplainer"] + + diff --git a/components/alibi-explain-server/Makefile b/components/alibi-explain-server/Makefile index 9cbc48c649..ef99182b6d 100644 --- a/components/alibi-explain-server/Makefile +++ b/components/alibi-explain-server/Makefile @@ -39,9 +39,15 @@ type_check: docker-build: build_apis docker build --file=Dockerfile -t seldonio/${IMAGE}:${VERSION} . +docker-build-gpu: + docker build --file=Dockerfile.gpu -t seldonio/${IMAGE}-gpu:${VERSION} . + docker-push: docker push seldonio/${IMAGE}:${VERSION} +docker-push-gpu: + docker push seldonio/${IMAGE}-gpu:${VERSION} + kind_load: docker-build kind load docker-image seldonio/${IMAGE}:${VERSION} @@ -214,3 +220,4 @@ curl_explain_adult_treeshap: cleanup_treeshap: docker rm -f explainer + diff --git a/components/alibi-explain-server/requirements-gpu.txt b/components/alibi-explain-server/requirements-gpu.txt new file mode 100644 index 0000000000..d08bad22ef --- /dev/null +++ b/components/alibi-explain-server/requirements-gpu.txt @@ -0,0 +1,4 @@ +alibi @ git+git://github.com/jklaise/alibi-1@b4b8dc5b5104049d9445cde785bbd8ef1f38529e +numpy==1.20.1 # Version of numpy used to compile shap, need this otherwise RuntimeError will be raised. Ignore warning from tensorflow about incompatibility. +shap @ git+git://github.com/slundberg/shap@fe082e345f7248119c8c0307197df51b839e6a1a +xgboost==1.3.3 diff --git a/doc/source/ingress/istio.md b/doc/source/ingress/istio.md index 18a960c219..57057c3e06 100644 --- a/doc/source/ingress/istio.md +++ b/doc/source/ingress/istio.md @@ -95,8 +95,7 @@ Istio has the capability for fine grained traffic routing to your deployments. 
T * A/B testing * shadow deployments -More information on these can be found in our [example showing canary -updates](../examples/istio_canary.html) and [other examples, including shadow +More information on these can be found in our [other examples, including shadow updates](../examples/istio_examples.html). @@ -108,4 +107,4 @@ You can fix this by changing `defaultUserID=0` in your helm chart, or add the fo ``` securityContext: runAsUser: 0 -``` \ No newline at end of file +``` diff --git a/doc/source/workflow/usage-reporting.md b/doc/source/workflow/usage-reporting.md index 0443fe21ad..35cc60abed 100644 --- a/doc/source/workflow/usage-reporting.md +++ b/doc/source/workflow/usage-reporting.md @@ -9,7 +9,7 @@ We provide an option to use an anonymous metrics collection tool provided by the To help support the development of seldon-core, the voluntary reporting of usage data can be enabled whenever the "seldon-core-operator" helm chart is used by setting the "--set usageMetrics.enabled=true" option. ```bash -helm install seldon-core-operator --name seldon-core \ +helm install seldon-core seldon-core-operator \ --repo https://storage.googleapis.com/seldon-charts --set usageMetrics.enabled=true ``` @@ -62,7 +62,7 @@ An example of what's reported: Reporting of usage data is disabled by default, just use "seldon-core-operator" as normal. ```bash -helm install seldon-core-operator --name seldon-core \ +helm install seldon-core seldon-core-operator \ --repo https://storage.googleapis.com/seldon-charts ``` diff --git a/examples/batch/argo-workflows-batch/helm-charts/seldon-batch-workflow/templates/workflow.yaml b/examples/batch/argo-workflows-batch/helm-charts/seldon-batch-workflow/templates/workflow.yaml index dd587836eb..02b984f550 100644 --- a/examples/batch/argo-workflows-batch/helm-charts/seldon-batch-workflow/templates/workflow.yaml +++ b/examples/batch/argo-workflows-batch/helm-charts/seldon-batch-workflow/templates/workflow.yaml @@ -67,20 +67,20 @@ spec: predictors: - componentSpecs: - spec: - containers: - - name: classifier - env: - - name: GUNICORN_THREADS - value: {{ .Values.seldonDeployment.serverThreads }} - - name: GUNICORN_WORKERS - value: {{ .Values.seldonDeployment.serverWorkers }} - resources: - requests: - cpu: {{ .Values.seldonDeployment.requests.cpu }} - memory: {{ .Values.seldonDeployment.requests.memory }} - limits: - cpu: {{ .Values.seldonDeployment.limits.cpu }} - memory: {{ .Values.seldonDeployment.limits.memory }} + containers: + - name: classifier + env: + - name: GUNICORN_THREADS + value: "{{ .Values.seldonDeployment.serverThreads }}" + - name: GUNICORN_WORKERS + value: "{{ .Values.seldonDeployment.serverWorkers }}" + resources: + requests: + cpu: {{ .Values.seldonDeployment.requests.cpu }} + memory: {{ .Values.seldonDeployment.requests.memory }} + limits: + cpu: {{ .Values.seldonDeployment.limits.cpu }} + memory: {{ .Values.seldonDeployment.limits.memory }} graph: children: [] implementation: {{ .Values.seldonDeployment.server }} diff --git a/examples/cicd/sig-mlops-jenkins-classic/README.md b/examples/cicd/sig-mlops-jenkins-classic/README.md index 20da32c3cb..d9f09df01c 100644 --- a/examples/cicd/sig-mlops-jenkins-classic/README.md +++ b/examples/cicd/sig-mlops-jenkins-classic/README.md @@ -507,7 +507,7 @@ In particular, you will need to run the following: ```bash %%bash helm install \ - --name "jenkins" stable/jenkins \ + jenkins stable/jenkins \ --namespace "jenkins" \ --set "rbac.create=true" \ --set "master.adminUser=admin" \ diff --git 
a/examples/models/aws_eks_deep_mnist/README.md b/examples/models/aws_eks_deep_mnist/README.md index e005155b60..d900a21062 100644 --- a/examples/models/aws_eks_deep_mnist/README.md +++ b/examples/models/aws_eks_deep_mnist/README.md @@ -385,7 +385,7 @@ We first start with the custom resource definitions (CRDs) ```python -!helm install seldon-core-operator --name seldon-core-operator --repo https://storage.googleapis.com/seldon-charts --set usageMetrics.enabled=true --namespace seldon-system +!helm install seldon-core-operator seldon-core-operator --repo https://storage.googleapis.com/seldon-charts --set usageMetrics.enabled=true --namespace seldon-system ``` NAME: seldon-core-operator @@ -467,7 +467,7 @@ In EKS it automatically creates an Elastic Load Balancer, which you can configur ```python -!helm install stable/ambassador --name ambassador --set crds.keep=false +!helm install ambassador stable/ambassador --set crds.keep=false ``` NAME: ambassador diff --git a/examples/models/azure_aks_deep_mnist/README.md b/examples/models/azure_aks_deep_mnist/README.md index ac4f7bc1bc..d27543a85a 100644 --- a/examples/models/azure_aks_deep_mnist/README.md +++ b/examples/models/azure_aks_deep_mnist/README.md @@ -354,7 +354,7 @@ We first start with the custom resource definitions (CRDs) ```python -!helm install seldon-core-operator --name seldon-core-operator --repo https://storage.googleapis.com/seldon-charts +!helm install seldon-core-operator seldon-core-operator --repo https://storage.googleapis.com/seldon-charts ``` NAME: seldon-core-operator @@ -415,7 +415,7 @@ In EKS it automatically creates an Elastic Load Balancer, which you can configur ```python -!helm install stable/ambassador --name ambassador --set crds.keep=false +!helm install ambassador stable/ambassador --set crds.keep=false ``` Error: release ambassador failed: serviceaccounts "ambassador" already exists diff --git a/examples/models/gpu_tensorflow_deep_mnist/README.md b/examples/models/gpu_tensorflow_deep_mnist/README.md index 5631548016..76839fd35d 100644 --- a/examples/models/gpu_tensorflow_deep_mnist/README.md +++ b/examples/models/gpu_tensorflow_deep_mnist/README.md @@ -430,7 +430,7 @@ We first start with the custom resource definitions (CRDs) ```python -!helm install seldon-core-operator --name seldon-core-operator --repo https://storage.googleapis.com/seldon-charts +!helm install seldon-core-operator seldon-core-operator --repo https://storage.googleapis.com/seldon-charts ``` NAME: seldon-core-operator diff --git a/notebooks/explainer_examples.ipynb b/notebooks/explainer_examples.ipynb index 912d43e7ca..d75cdc69c4 100644 --- a/notebooks/explainer_examples.ipynb +++ b/notebooks/explainer_examples.ipynb @@ -79,7 +79,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "Error from server (AlreadyExists): namespaces \"seldon\" already exists\r\n" + "Error from server (AlreadyExists): namespaces \"seldon\" already exists\n" ] } ], @@ -103,7 +103,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "Context \"kind-kind\" modified.\r\n" + "Context \"kind-kind\" modified.\n" ] } ], @@ -111,6 +111,15 @@ "!kubectl config set-context $(kubectl config current-context) --namespace=seldon" ] }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "import json" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -123,7 +132,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 4, "metadata": {}, "outputs": [ { @@ -159,7 +168,7 @@ }, { "cell_type": "code", - 
"execution_count": 4, + "execution_count": 5, "metadata": { "scrolled": true }, @@ -168,7 +177,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "seldondeployment.machinelearning.seldon.io/income created\r\n" + "seldondeployment.machinelearning.seldon.io/income created\n" ] } ], @@ -178,7 +187,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 6, "metadata": {}, "outputs": [ { @@ -196,7 +205,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 7, "metadata": { "scrolled": true }, @@ -216,7 +225,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 8, "metadata": { "scrolled": true }, @@ -236,7 +245,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 9, "metadata": { "scrolled": true }, @@ -245,7 +254,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "{'data': {'names': ['t:0', 't:1'], 'tensor': {'shape': [1, 2], 'values': [0.8585304277244477, 0.14146957227555243]}}, 'meta': {}}\n" + "{'data': {'names': ['t:0', 't:1'], 'tensor': {'shape': [1, 2], 'values': [0.8585304277244477, 0.14146957227555243]}}, 'meta': {'requestPath': {'classifier': 'seldonio/sklearnserver:1.7.0-dev'}}}\n" ] } ], @@ -264,14 +273,14 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 13, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "{\"data\":{\"names\":[\"t:0\",\"t:1\"],\"ndarray\":[[0.8585304277244477,0.14146957227555243]]},\"meta\":{}}\r\n" + "{\"data\":{\"names\":[\"t:0\",\"t:1\"],\"ndarray\":[[0.8585304277244477,0.14146957227555243]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/sklearnserver:1.7.0-dev\"}}}\n" ] } ], @@ -290,7 +299,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 14, "metadata": { "scrolled": true }, @@ -318,7 +327,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 15, "metadata": {}, "outputs": [ { @@ -327,9 +336,10 @@ "text": [ " % Total % Received % Xferd Average Speed Time Time Time Current\n", " Dload Upload Total Spent Left Speed\n", - "100 1715 100 1624 100 91 8837 495 --:--:-- --:--:-- --:--:-- 8826\n", + "100 2269 100 2178 100 91 8817 368 --:--:-- --:--:-- --:--:-- 9149\n", "\u001b[1;39m[\n", - " \u001b[0;32m\"Marital Status = Separated\"\u001b[0m\u001b[1;39m\n", + " \u001b[0;32m\"Marital Status = Separated\"\u001b[0m\u001b[1;39m,\n", + " \u001b[0;32m\"Capital Gain <= 0.00\"\u001b[0m\u001b[1;39m\n", "\u001b[1;39m]\u001b[0m\n" ] } @@ -342,7 +352,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 16, "metadata": { "scrolled": true }, @@ -351,7 +361,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "seldondeployment.machinelearning.seldon.io \"income\" deleted\r\n" + "seldondeployment.machinelearning.seldon.io \"income\" deleted\n" ] } ], @@ -370,7 +380,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 17, "metadata": {}, "outputs": [ { @@ -405,7 +415,7 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 18, "metadata": { "scrolled": true }, @@ -414,7 +424,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "seldondeployment.machinelearning.seldon.io/movie created\r\n" + "seldondeployment.machinelearning.seldon.io/movie created\n" ] } ], @@ -424,7 +434,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 19, "metadata": { "scrolled": true }, @@ -444,14 +454,14 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 20, 
"metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "deployment \"movie-default-explainer\" successfully rolled out\r\n" + "deployment \"movie-default-explainer\" successfully rolled out\n" ] } ], @@ -461,7 +471,7 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 23, "metadata": { "scrolled": true }, @@ -474,14 +484,14 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 24, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "{\"data\":{\"names\":[\"t:0\",\"t:1\"],\"ndarray\":[[0.21266916924914636,0.7873308307508536]]},\"meta\":{}}\r\n" + "{\"data\":{\"names\":[\"t:0\",\"t:1\"],\"ndarray\":[[0.21266916924914636,0.7873308307508536]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/sklearnserver:1.7.0-dev\"}}}\n" ] } ], @@ -493,7 +503,7 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 27, "metadata": { "scrolled": true }, @@ -515,7 +525,7 @@ "}\n", "\n", "Response:\n", - "{'data': {'names': ['t:0', 't:1'], 'ndarray': [[0.21266916924914636, 0.7873308307508536]]}, 'meta': {}}\n" + "{'data': {'names': ['t:0', 't:1'], 'ndarray': [[0.21266916924914636, 0.7873308307508536]]}, 'meta': {'requestPath': {'classifier': 'seldonio/sklearnserver:1.7.0-dev'}}}\n" ] } ], @@ -528,17 +538,17 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 28, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "\u001b[1;39m[\r\n", - " \u001b[0;32m\"emotionally\"\u001b[0m\u001b[1;39m,\r\n", - " \u001b[0;32m\"vapid\"\u001b[0m\u001b[1;39m\r\n", - "\u001b[1;39m]\u001b[0m\r\n" + "\u001b[1;39m[\n", + " \u001b[0;32m\"emotionally\"\u001b[0m\u001b[1;39m,\n", + " \u001b[0;32m\"vapid\"\u001b[0m\u001b[1;39m\n", + "\u001b[1;39m]\u001b[0m\n" ] } ], @@ -550,7 +560,7 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": 29, "metadata": { "scrolled": true }, @@ -571,7 +581,7 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 30, "metadata": { "scrolled": true }, @@ -580,7 +590,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "seldondeployment.machinelearning.seldon.io \"movie\" deleted\r\n" + "seldondeployment.machinelearning.seldon.io \"movie\" deleted\n" ] } ], @@ -599,14 +609,14 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 31, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Overwriting resources/cifar10_explainer.yaml\n" + "Writing resources/cifar10_explainer.yaml\n" ] } ], @@ -636,14 +646,14 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 32, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "seldondeployment.machinelearning.seldon.io/cifar10-classifier created\r\n" + "seldondeployment.machinelearning.seldon.io/cifar10-classifier created\n" ] } ], @@ -653,14 +663,15 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 33, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "deployment \"cifar10-classifier-default-0-cifar10-classifier\" successfully rolled out\r\n" + "Waiting for deployment \"cifar10-classifier-default-0-cifar10-classifier\" rollout to finish: 0 of 1 updated replicas are available...\n", + "deployment \"cifar10-classifier-default-0-cifar10-classifier\" successfully rolled out\n" ] } ], @@ -670,14 +681,15 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 34, 
"metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "deployment \"cifar10-classifier-default-explainer\" successfully rolled out\r\n" + "Waiting for deployment \"cifar10-classifier-default-explainer\" rollout to finish: 0 of 1 updated replicas are available...\n", + "deployment \"cifar10-classifier-default-explainer\" successfully rolled out\n" ] } ], @@ -687,22 +699,9 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 35, "metadata": {}, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING: Logging before flag parsing goes to stderr.\n", - "W1106 08:48:27.877613 139714443523840 deprecation.py:506] From /home/clive/anaconda3/envs/seldon-core/lib/python3.6/site-packages/tensorflow_core/python/keras/initializers.py:143: calling RandomNormal.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.\n", - "Instructions for updating:\n", - "Call initializer instance with the dtype argument instead of passing it to the constructor\n", - "W1106 08:48:27.892694 139714443523840 deprecation.py:506] From /home/clive/anaconda3/envs/seldon-core/lib/python3.6/site-packages/tensorflow_core/python/ops/resource_variable_ops.py:1630: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version.\n", - "Instructions for updating:\n", - "If using Keras pass *_constraint arguments to layers.\n" - ] - }, { "name": "stdout", "output_type": "stream", @@ -734,7 +733,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 36, "metadata": {}, "outputs": [ { @@ -742,7 +741,7 @@ "output_type": "stream", "text": [ "{\n", - " \"predictions\": [[8.98417127e-08, 1.35163679e-12, 5.20754609e-13, 9.01404201e-05, 4.04729e-12, 0.999909759, 9.77382086e-09, 1.30629796e-09, 5.39957488e-12, 3.7917457e-14]\n", + " \"predictions\": [[8.98418833e-08, 1.35163929e-12, 5.20754609e-13, 9.01406747e-05, 4.04729e-12, 0.999909759, 9.77383952e-09, 1.30630051e-09, 5.39958529e-12, 3.7917457e-14]\n", " ]\n", "}\n" ] @@ -794,16 +793,16 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 37, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "" + "" ] }, - "execution_count": 8, + "execution_count": 37, "metadata": {}, "output_type": "execute_result" }, @@ -836,14 +835,14 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 38, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "seldondeployment.machinelearning.seldon.io \"cifar10-classifier\" deleted\r\n" + "seldondeployment.machinelearning.seldon.io \"cifar10-classifier\" deleted\n" ] } ], @@ -864,7 +863,7 @@ }, { "cell_type": "code", - "execution_count": 49, + "execution_count": 39, "metadata": {}, "outputs": [ { @@ -917,7 +916,7 @@ }, { "cell_type": "code", - "execution_count": 50, + "execution_count": 40, "metadata": {}, "outputs": [ { @@ -948,7 +947,7 @@ }, { "cell_type": "code", - "execution_count": 51, + "execution_count": 41, "metadata": {}, "outputs": [], "source": [ @@ -960,14 +959,14 @@ }, { "cell_type": "code", - "execution_count": 52, + "execution_count": 42, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Overwriting resources/wine_explainer.yaml\n" + "Writing resources/wine_explainer.yaml\n" ] } ], @@ -999,7 +998,7 @@ }, { "cell_type": "code", - "execution_count": 53, + "execution_count": 43, 
"metadata": { "scrolled": true }, @@ -1008,7 +1007,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "seldondeployment.machinelearning.seldon.io/wine created\r\n" + "seldondeployment.machinelearning.seldon.io/wine created\n" ] } ], @@ -1018,7 +1017,7 @@ }, { "cell_type": "code", - "execution_count": 54, + "execution_count": 44, "metadata": {}, "outputs": [ { @@ -1036,7 +1035,7 @@ }, { "cell_type": "code", - "execution_count": 55, + "execution_count": 45, "metadata": { "scrolled": true }, @@ -1045,7 +1044,8 @@ "name": "stdout", "output_type": "stream", "text": [ - "deployment \"wine-default-explainer\" successfully rolled out\r\n" + "Waiting for deployment \"wine-default-explainer\" rollout to finish: 0 of 1 updated replicas are available...\n", + "deployment \"wine-default-explainer\" successfully rolled out\n" ] } ], @@ -1055,7 +1055,7 @@ }, { "cell_type": "code", - "execution_count": 56, + "execution_count": 46, "metadata": { "scrolled": true }, @@ -1075,7 +1075,7 @@ }, { "cell_type": "code", - "execution_count": 57, + "execution_count": 47, "metadata": { "scrolled": true }, @@ -1084,7 +1084,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "{'data': {'names': ['t:0', 't:1', 't:2'], 'tensor': {'shape': [1, 3], 'values': [-0.203700284044519, 0.8934751316557469, 2.2237213335499804]}}, 'meta': {}}\n" + "{'data': {'names': ['t:0', 't:1', 't:2'], 'tensor': {'shape': [1, 3], 'values': [-0.203700284044519, 0.8934751316557469, 2.2237213335499804]}}, 'meta': {'requestPath': {'classifier': 'seldonio/sklearnserver:1.7.0-dev'}}}\n" ] } ], @@ -1106,14 +1106,14 @@ }, { "cell_type": "code", - "execution_count": 58, + "execution_count": 48, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "{\"data\":{\"names\":[\"t:0\",\"t:1\",\"t:2\"],\"ndarray\":[[-0.203700284044519,0.8934751316557469,2.2237213335499804]]},\"meta\":{}}\r\n" + "{\"data\":{\"names\":[\"t:0\",\"t:1\",\"t:2\"],\"ndarray\":[[-0.203700284044519,0.8934751316557469,2.2237213335499804]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/sklearnserver:1.7.0-dev\"}}}\n" ] } ], @@ -1132,7 +1132,7 @@ }, { "cell_type": "code", - "execution_count": 59, + "execution_count": 49, "metadata": { "scrolled": true }, @@ -1149,7 +1149,7 @@ }, { "cell_type": "code", - "execution_count": 60, + "execution_count": 50, "metadata": {}, "outputs": [], "source": [ @@ -1159,7 +1159,7 @@ }, { "cell_type": "code", - "execution_count": 61, + "execution_count": 51, "metadata": {}, "outputs": [], "source": [ @@ -1169,14 +1169,14 @@ }, { "cell_type": "code", - "execution_count": 62, + "execution_count": 52, "metadata": {}, "outputs": [ { "data": { "text/html": [ "\n", - "
\n", + "
\n", "
\n", " Visualization omitted, Javascript library not loaded!
\n", " Have you run `initjs()` in this notebook? If this notebook was from another\n", @@ -1186,8 +1186,8 @@ "
\n", " " ], @@ -1195,7 +1195,7 @@ "" ] }, - "execution_count": 62, + "execution_count": 52, "metadata": {}, "output_type": "execute_result" } @@ -1219,7 +1219,7 @@ }, { "cell_type": "code", - "execution_count": 63, + "execution_count": 53, "metadata": {}, "outputs": [ { @@ -1228,7 +1228,7 @@ "text": [ " % Total % Received % Xferd Average Speed Time Time Time Current\n", " Dload Upload Total Spent Left Speed\n", - "100 4129 100 3916 100 213 1267 68 0:00:03 0:00:03 --:--:-- 1267\n", + "100 4141 100 3928 100 213 2189 118 0:00:01 0:00:01 --:--:-- 2308\n", "\u001b[1;39m{\n", " \u001b[0m\u001b[34;1m\"meta\"\u001b[0m\u001b[1;39m: \u001b[0m\u001b[1;39m{\n", " \u001b[0m\u001b[34;1m\"name\"\u001b[0m\u001b[1;39m: \u001b[0m\u001b[0;32m\"KernelShap\"\u001b[0m\u001b[1;39m,\n", @@ -1256,53 +1256,53 @@ " \u001b[0m\u001b[34;1m\"shap_values\"\u001b[0m\u001b[1;39m: \u001b[0m\u001b[1;39m[\n", " \u001b[1;39m[\n", " \u001b[1;39m[\n", - " \u001b[0;39m-0.018454421208892513\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.012763470836013313\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m-0.001740270040221814\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m-0.07633537428284093\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.006251732078452754\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m-0.13734297799429607\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m-0.1184209545879712\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.016528383221730947\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m-0.035244307767622385\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m-0.1230174198090298\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m-0.14524487323369617\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m-0.2507145522127333\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m-0.1309476564266916\u001b[0m\u001b[1;39m\n", + " \u001b[0;39m-0.026564448172395394\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.01474754619811397\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.0029785794627824602\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m-0.06989674989941941\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.0066466495859965335\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m-0.13846708957225218\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m-0.12276753108492278\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.018077515209726447\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m-0.033220065213557526\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m-0.12530431153383026\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m-0.148057466855813\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m-0.24993296604936582\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m-0.13015888350286176\u001b[0m\u001b[1;39m\n", " \u001b[1;39m]\u001b[0m\u001b[1;39m\n", " \u001b[1;39m]\u001b[0m\u001b[1;39m,\n", " \u001b[1;39m[\n", " \u001b[1;39m[\n", - " \u001b[0;39m0.015452322719423872\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m-0.03832005916153608\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m-0.04544081251256327\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m-7.582651671816931e-05\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m-0.04651784436924722\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.01980318229602418\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m-0.05109329519459854\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.0071260827408027305\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m-0.03975300296252354\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m-0.16144118472393876\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m-0.15110853673546232\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m-0.1006346397643918\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.06837621797012206\u001b[0m\u001b[1;39m\n", + " 
\u001b[0;39m0.00815794870767661\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m-0.03941083710746157\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m-0.044818303904592904\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.010477810662623877\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m-0.049213513737447256\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.01974804098011501\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m-0.053256982608956116\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.007843279332888253\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m-0.037433584332097314\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m-0.16615405642879122\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m-0.15230659779480793\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m-0.0943545957610265\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.06709399577727015\u001b[0m\u001b[1;39m\n", " \u001b[1;39m]\u001b[0m\u001b[1;39m\n", " \u001b[1;39m]\u001b[0m\u001b[1;39m,\n", " \u001b[1;39m[\n", " \u001b[1;39m[\n", - " \u001b[0;39m0.005403884749745513\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.020371174031396766\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.04217363001738739\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.07712885770720579\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.03400723775835046\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.1216306489876593\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.16988456143104136\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m-0.02030618995668898\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.07351750311197458\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.2879617844043466\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.29473542234118644\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.3536965903131064\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.06890108397640105\u001b[0m\u001b[1;39m\n", + " \u001b[0;39m0.021654431619118508\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.02028068848580955\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.03706436976364458\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.05831112850130049\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.03633892265257299\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.12322951677316496\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.1758965321885464\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m-0.022874111141031328\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.06871646495343742\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.29626374518120013\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.2991585173893212\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.34574854290539925\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.06931743960062864\u001b[0m\u001b[1;39m\n", " \u001b[1;39m]\u001b[0m\u001b[1;39m\n", " \u001b[1;39m]\u001b[0m\u001b[1;39m\n", " \u001b[1;39m]\u001b[0m\u001b[1;39m,\n", @@ -1358,19 +1358,19 @@ " \u001b[0m\u001b[34;1m\"importances\"\u001b[0m\u001b[1;39m: \u001b[0m\u001b[1;39m{\n", " \u001b[0m\u001b[34;1m\"0\"\u001b[0m\u001b[1;39m: \u001b[0m\u001b[1;39m{\n", " \u001b[0m\u001b[34;1m\"ranked_effect\"\u001b[0m\u001b[1;39m: \u001b[0m\u001b[1;39m[\n", - " \u001b[0;39m0.2507145522127333\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.14524487323369617\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.13734297799429607\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.1309476564266916\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.1230174198090298\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.1184209545879712\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.07633537428284093\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.035244307767622385\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.018454421208892513\u001b[0m\u001b[1;39m,\n", - " 
\u001b[0;39m0.016528383221730947\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.012763470836013313\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.006251732078452754\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.001740270040221814\u001b[0m\u001b[1;39m\n", + " \u001b[0;39m0.24993296604936582\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.148057466855813\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.13846708957225218\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.13015888350286176\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.12530431153383026\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.12276753108492278\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.06989674989941941\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.033220065213557526\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.026564448172395394\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.018077515209726447\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.01474754619811397\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.0066466495859965335\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.0029785794627824602\u001b[0m\u001b[1;39m\n", " \u001b[1;39m]\u001b[0m\u001b[1;39m,\n", " \u001b[0m\u001b[34;1m\"names\"\u001b[0m\u001b[1;39m: \u001b[0m\u001b[1;39m[\n", " \u001b[0;32m\"od280/od315_of_diluted_wines\"\u001b[0m\u001b[1;39m,\n", @@ -1390,19 +1390,19 @@ " \u001b[1;39m}\u001b[0m\u001b[1;39m,\n", " \u001b[0m\u001b[34;1m\"1\"\u001b[0m\u001b[1;39m: \u001b[0m\u001b[1;39m{\n", " \u001b[0m\u001b[34;1m\"ranked_effect\"\u001b[0m\u001b[1;39m: \u001b[0m\u001b[1;39m[\n", - " \u001b[0;39m0.16144118472393876\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.15110853673546232\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.1006346397643918\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.06837621797012206\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.05109329519459854\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.04651784436924722\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.04544081251256327\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.03975300296252354\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.03832005916153608\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.01980318229602418\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.015452322719423872\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.0071260827408027305\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m7.582651671816931e-05\u001b[0m\u001b[1;39m\n", + " \u001b[0;39m0.16615405642879122\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.15230659779480793\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.0943545957610265\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.06709399577727015\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.053256982608956116\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.049213513737447256\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.044818303904592904\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.03941083710746157\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.037433584332097314\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.01974804098011501\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.010477810662623877\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.00815794870767661\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.007843279332888253\u001b[0m\u001b[1;39m\n", " \u001b[1;39m]\u001b[0m\u001b[1;39m,\n", " \u001b[0m\u001b[34;1m\"names\"\u001b[0m\u001b[1;39m: \u001b[0m\u001b[1;39m[\n", " \u001b[0;32m\"color_intensity\"\u001b[0m\u001b[1;39m,\n", @@ -1412,29 +1412,29 @@ " \u001b[0;32m\"flavanoids\"\u001b[0m\u001b[1;39m,\n", " \u001b[0;32m\"magnesium\"\u001b[0m\u001b[1;39m,\n", " \u001b[0;32m\"ash\"\u001b[0m\u001b[1;39m,\n", - " \u001b[0;32m\"proanthocyanins\"\u001b[0m\u001b[1;39m,\n", " 
\u001b[0;32m\"malic_acid\"\u001b[0m\u001b[1;39m,\n", + " \u001b[0;32m\"proanthocyanins\"\u001b[0m\u001b[1;39m,\n", " \u001b[0;32m\"total_phenols\"\u001b[0m\u001b[1;39m,\n", + " \u001b[0;32m\"alcalinity_of_ash\"\u001b[0m\u001b[1;39m,\n", " \u001b[0;32m\"alcohol\"\u001b[0m\u001b[1;39m,\n", - " \u001b[0;32m\"nonflavanoid_phenols\"\u001b[0m\u001b[1;39m,\n", - " \u001b[0;32m\"alcalinity_of_ash\"\u001b[0m\u001b[1;39m\n", + " \u001b[0;32m\"nonflavanoid_phenols\"\u001b[0m\u001b[1;39m\n", " \u001b[1;39m]\u001b[0m\u001b[1;39m\n", " \u001b[1;39m}\u001b[0m\u001b[1;39m,\n", " \u001b[0m\u001b[34;1m\"2\"\u001b[0m\u001b[1;39m: \u001b[0m\u001b[1;39m{\n", " \u001b[0m\u001b[34;1m\"ranked_effect\"\u001b[0m\u001b[1;39m: \u001b[0m\u001b[1;39m[\n", - " \u001b[0;39m0.3536965903131064\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.29473542234118644\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.2879617844043466\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.16988456143104136\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.1216306489876593\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.07712885770720579\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.07351750311197458\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.06890108397640105\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.04217363001738739\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.03400723775835046\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.020371174031396766\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.02030618995668898\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.005403884749745513\u001b[0m\u001b[1;39m\n", + " \u001b[0;39m0.34574854290539925\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.2991585173893212\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.29626374518120013\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.1758965321885464\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.12322951677316496\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.06931743960062864\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.06871646495343742\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.05831112850130049\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.03706436976364458\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.03633892265257299\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.022874111141031328\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.021654431619118508\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.02028068848580955\u001b[0m\u001b[1;39m\n", " \u001b[1;39m]\u001b[0m\u001b[1;39m,\n", " \u001b[0m\u001b[34;1m\"names\"\u001b[0m\u001b[1;39m: \u001b[0m\u001b[1;39m[\n", " \u001b[0;32m\"od280/od315_of_diluted_wines\"\u001b[0m\u001b[1;39m,\n", @@ -1442,31 +1442,31 @@ " \u001b[0;32m\"color_intensity\"\u001b[0m\u001b[1;39m,\n", " \u001b[0;32m\"flavanoids\"\u001b[0m\u001b[1;39m,\n", " \u001b[0;32m\"total_phenols\"\u001b[0m\u001b[1;39m,\n", - " \u001b[0;32m\"alcalinity_of_ash\"\u001b[0m\u001b[1;39m,\n", - " \u001b[0;32m\"proanthocyanins\"\u001b[0m\u001b[1;39m,\n", " \u001b[0;32m\"proline\"\u001b[0m\u001b[1;39m,\n", + " \u001b[0;32m\"proanthocyanins\"\u001b[0m\u001b[1;39m,\n", + " \u001b[0;32m\"alcalinity_of_ash\"\u001b[0m\u001b[1;39m,\n", " \u001b[0;32m\"ash\"\u001b[0m\u001b[1;39m,\n", " \u001b[0;32m\"magnesium\"\u001b[0m\u001b[1;39m,\n", - " \u001b[0;32m\"malic_acid\"\u001b[0m\u001b[1;39m,\n", " \u001b[0;32m\"nonflavanoid_phenols\"\u001b[0m\u001b[1;39m,\n", - " \u001b[0;32m\"alcohol\"\u001b[0m\u001b[1;39m\n", + " \u001b[0;32m\"alcohol\"\u001b[0m\u001b[1;39m,\n", + " \u001b[0;32m\"malic_acid\"\u001b[0m\u001b[1;39m\n", " \u001b[1;39m]\u001b[0m\u001b[1;39m\n", " \u001b[1;39m}\u001b[0m\u001b[1;39m,\n", " 
\u001b[0m\u001b[34;1m\"aggregated\"\u001b[0m\u001b[1;39m: \u001b[0m\u001b[1;39m{\n", " \u001b[0m\u001b[34;1m\"ranked_effect\"\u001b[0m\u001b[1;39m: \u001b[0m\u001b[1;39m[\n", - " \u001b[0;39m0.7050457822902315\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.5910888323103449\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.5724203889373152\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.3393988112136111\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.27877680927797954\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.2682249583732147\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.1535400585067649\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.1485148138421205\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.08935471257017247\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.08677681420605043\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.07145470402894616\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.04396065591922266\u001b[0m\u001b[1;39m,\n", - " \u001b[0;39m0.0393106286780619\u001b[0m\u001b[1;39m\n", + " \u001b[0;39m0.6900361047157916\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.5995225820399421\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.5877221131438216\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.3519210458824253\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.28144464732553215\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.26657031888076055\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.13937011449909226\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.13868568906334378\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.09219908597601678\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.08486125313101994\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.07443907179138509\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.05637682849919051\u001b[0m\u001b[1;39m,\n", + " \u001b[0;39m0.04879490568364603\u001b[0m\u001b[1;39m\n", " \u001b[1;39m]\u001b[0m\u001b[1;39m,\n", " \u001b[0m\u001b[34;1m\"names\"\u001b[0m\u001b[1;39m: \u001b[0m\u001b[1;39m[\n", " \u001b[0;32m\"od280/od315_of_diluted_wines\"\u001b[0m\u001b[1;39m,\n", @@ -1475,13 +1475,13 @@ " \u001b[0;32m\"flavanoids\"\u001b[0m\u001b[1;39m,\n", " \u001b[0;32m\"total_phenols\"\u001b[0m\u001b[1;39m,\n", " \u001b[0;32m\"proline\"\u001b[0m\u001b[1;39m,\n", - " \u001b[0;32m\"alcalinity_of_ash\"\u001b[0m\u001b[1;39m,\n", " \u001b[0;32m\"proanthocyanins\"\u001b[0m\u001b[1;39m,\n", - " \u001b[0;32m\"ash\"\u001b[0m\u001b[1;39m,\n", + " \u001b[0;32m\"alcalinity_of_ash\"\u001b[0m\u001b[1;39m,\n", " \u001b[0;32m\"magnesium\"\u001b[0m\u001b[1;39m,\n", + " \u001b[0;32m\"ash\"\u001b[0m\u001b[1;39m,\n", " \u001b[0;32m\"malic_acid\"\u001b[0m\u001b[1;39m,\n", - " \u001b[0;32m\"nonflavanoid_phenols\"\u001b[0m\u001b[1;39m,\n", - " \u001b[0;32m\"alcohol\"\u001b[0m\u001b[1;39m\n", + " \u001b[0;32m\"alcohol\"\u001b[0m\u001b[1;39m,\n", + " \u001b[0;32m\"nonflavanoid_phenols\"\u001b[0m\u001b[1;39m\n", " \u001b[1;39m]\u001b[0m\u001b[1;39m\n", " \u001b[1;39m}\u001b[0m\u001b[1;39m\n", " \u001b[1;39m}\u001b[0m\u001b[1;39m\n", @@ -1499,7 +1499,7 @@ }, { "cell_type": "code", - "execution_count": 64, + "execution_count": 54, "metadata": { "scrolled": true }, @@ -1508,7 +1508,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "seldondeployment.machinelearning.seldon.io \"wine\" deleted\r\n" + "seldondeployment.machinelearning.seldon.io \"wine\" deleted\n" ] } ], @@ -1529,14 +1529,14 @@ }, { "cell_type": "code", - "execution_count": 65, + "execution_count": 55, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Overwriting resources/mnist_rest_explainer.yaml\n" + "Writing resources/mnist_rest_explainer.yaml\n" 
] } ], @@ -1570,14 +1570,14 @@ }, { "cell_type": "code", - "execution_count": 66, + "execution_count": 56, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "seldondeployment.machinelearning.seldon.io/tfserving created\r\n" + "seldondeployment.machinelearning.seldon.io/tfserving created\n" ] } ], @@ -1587,7 +1587,7 @@ }, { "cell_type": "code", - "execution_count": 67, + "execution_count": 57, "metadata": {}, "outputs": [ { @@ -1605,7 +1605,7 @@ }, { "cell_type": "code", - "execution_count": 68, + "execution_count": 58, "metadata": {}, "outputs": [ { @@ -1635,7 +1635,7 @@ }, { "cell_type": "code", - "execution_count": 69, + "execution_count": 62, "metadata": {}, "outputs": [], "source": [ @@ -1645,7 +1645,7 @@ }, { "cell_type": "code", - "execution_count": 70, + "execution_count": 63, "metadata": {}, "outputs": [], "source": [ @@ -1657,7 +1657,7 @@ }, { "cell_type": "code", - "execution_count": 71, + "execution_count": 64, "metadata": {}, "outputs": [], "source": [ @@ -1669,7 +1669,7 @@ }, { "cell_type": "code", - "execution_count": 72, + "execution_count": 65, "metadata": {}, "outputs": [], "source": [ @@ -1679,7 +1679,7 @@ }, { "cell_type": "code", - "execution_count": 73, + "execution_count": 66, "metadata": {}, "outputs": [ { @@ -1688,7 +1688,7 @@ "array([7, 2, 1, 0, 4, 1, 4, 9, 6, 9])" ] }, - "execution_count": 73, + "execution_count": 66, "metadata": {}, "output_type": "execute_result" } @@ -1699,7 +1699,7 @@ }, { "cell_type": "code", - "execution_count": 74, + "execution_count": 67, "metadata": {}, "outputs": [], "source": [ @@ -1711,7 +1711,7 @@ }, { "cell_type": "code", - "execution_count": 75, + "execution_count": 68, "metadata": {}, "outputs": [], "source": [ @@ -1723,7 +1723,7 @@ }, { "cell_type": "code", - "execution_count": 76, + "execution_count": 69, "metadata": {}, "outputs": [], "source": [ @@ -1732,7 +1732,7 @@ }, { "cell_type": "code", - "execution_count": 77, + "execution_count": 70, "metadata": {}, "outputs": [ { @@ -1784,14 +1784,14 @@ }, { "cell_type": "code", - "execution_count": 78, + "execution_count": 71, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "seldondeployment.machinelearning.seldon.io \"tfserving\" deleted\r\n" + "seldondeployment.machinelearning.seldon.io \"tfserving\" deleted\n" ] } ], @@ -1812,7 +1812,7 @@ }, { "cell_type": "code", - "execution_count": 79, + "execution_count": 72, "metadata": {}, "outputs": [ { @@ -1830,6 +1830,8 @@ "metadata:\n", " name: income\n", "spec:\n", + " annotations:\n", + " seldon.io/rest-timeout: \"100000\"\n", " predictors:\n", " - graph:\n", " children: []\n", @@ -1845,14 +1847,14 @@ }, { "cell_type": "code", - "execution_count": 80, + "execution_count": 73, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "seldondeployment.machinelearning.seldon.io/income created\r\n" + "seldondeployment.machinelearning.seldon.io/income created\n" ] } ], @@ -1862,7 +1864,7 @@ }, { "cell_type": "code", - "execution_count": 81, + "execution_count": 74, "metadata": {}, "outputs": [ { @@ -1880,7 +1882,7 @@ }, { "cell_type": "code", - "execution_count": 82, + "execution_count": 75, "metadata": { "scrolled": true }, @@ -1900,7 +1902,7 @@ }, { "cell_type": "code", - "execution_count": 83, + "execution_count": 77, "metadata": { "scrolled": true }, @@ -1909,7 +1911,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "{'data': {'names': [], 'tensor': {'shape': [1], 'values': [-1.2381880283355713]}}, 'meta': {}}\n" + "{'data': 
{'names': [], 'tensor': {'shape': [1], 'values': [-1.2381880283355713]}}, 'meta': {'requestPath': {'income-model': 'seldonio/xgboostserver:1.7.0-dev'}}}\n" ] } ], @@ -1928,22 +1930,45 @@ }, { "cell_type": "code", - "execution_count": 84, + "execution_count": 78, + "metadata": {}, + "outputs": [], + "source": [ + "from alibi.datasets import fetch_adult\n", + "adult = fetch_adult()\n", + "data = adult.data" + ] + }, + { + "cell_type": "code", + "execution_count": 79, "metadata": { "scrolled": true }, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Elapsed time: 8.356285572052002\n" + ] + } + ], "source": [ "import json\n", - "data = np.array([[52, 4, 0, 2, 8, 4, 2, 0, 0, 0, 60, 9]])\n", - "res = sc.explain(deployment_name=\"income\", predictor=\"default\", data=data)\n", + "import time\n", + "#data = np.array([[52, 4, 0, 2, 8, 4, 2, 0, 0, 0, 60, 9]])\n", + "start = time.time()\n", + "res = sc.explain(deployment_name=\"income\", predictor=\"default\", data=data[0:2000])\n", + "end = time.time()\n", + "print(\"Elapsed time:\",end-start)\n", "explanation = res.response\n", "explanationStr = json.dumps(explanation)" ] }, { "cell_type": "code", - "execution_count": 85, + "execution_count": 80, "metadata": {}, "outputs": [], "source": [ @@ -1953,7 +1978,7 @@ }, { "cell_type": "code", - "execution_count": 86, + "execution_count": 81, "metadata": {}, "outputs": [], "source": [ @@ -1963,7 +1988,7 @@ }, { "cell_type": "code", - "execution_count": 87, + "execution_count": 82, "metadata": {}, "outputs": [], "source": [ @@ -1993,7 +2018,7 @@ }, { "cell_type": "code", - "execution_count": 88, + "execution_count": 83, "metadata": {}, "outputs": [], "source": [ @@ -2002,7 +2027,7 @@ }, { "cell_type": "code", - "execution_count": 89, + "execution_count": 84, "metadata": {}, "outputs": [ { @@ -2055,14 +2080,14 @@ }, { "cell_type": "code", - "execution_count": 90, + "execution_count": 85, "metadata": {}, "outputs": [ { "data": { "text/html": [ "\n", - "
\n", + "
\n", "
\n", " Visualization omitted, Javascript library not loaded!
\n", " Have you run `initjs()` in this notebook? If this notebook was from another\n", @@ -2072,8 +2097,8 @@ "
\n", " " ], @@ -2081,7 +2106,7 @@ "" ] }, - "execution_count": 90, + "execution_count": 85, "metadata": {}, "output_type": "execute_result" } @@ -2089,7 +2114,7 @@ "source": [ "shap.force_plot(\n", " explanation.expected_value[0], # 0 is a class index but we have single-output model\n", - " explanation.shap_values[0][0, :] , \n", + " explanation.shap_values[0], \n", " decoded_features, \n", " explanation.feature_names,\n", ")" @@ -2097,14 +2122,14 @@ }, { "cell_type": "code", - "execution_count": 91, + "execution_count": 86, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "seldondeployment.machinelearning.seldon.io \"income\" deleted\r\n" + "seldondeployment.machinelearning.seldon.io \"income\" deleted\n" ] } ], @@ -2112,12 +2137,324 @@ "!kubectl delete -f resources/income_explainer.yaml" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Experimental: XGBoost Model with GPU TreeShap Explainer\n", + "\n", + "The model and explainer used here can be trained yourself following the full example in the [Explaining Tree Models with Interventional Feature Perturbation Tree SHAP](https://docs.seldon.io/projects/alibi/en/latest/examples/interventional_tree_shap_adult_xgb.html) in the Alibi project documentation.\n", + "\n", + "Note we used a python 3.8.5 and Alibi master to fit the GPU based TreeShap model.\n", + "\n", + " * You will need a cluster with GPUs. This has been tested on GKE with NVIDIA Tesla P100 GPUs." + ] + }, + { + "cell_type": "code", + "execution_count": 104, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Overwriting resources/income_gpu_explainer.yaml\n" + ] + } + ], + "source": [ + "%%writefile resources/income_gpu_explainer.yaml\n", + "apiVersion: machinelearning.seldon.io/v1alpha2\n", + "kind: SeldonDeployment\n", + "metadata:\n", + " name: incomegpu\n", + "spec:\n", + " annotations:\n", + " seldon.io/rest-timeout: \"100000\"\n", + " predictors:\n", + " - graph:\n", + " children: []\n", + " implementation: XGBOOST_SERVER\n", + " modelUri: gs://seldon-models/xgboost/adult/model_1.0.2\n", + " name: income-model\n", + " explainer:\n", + " type: TreeShap\n", + " modelUri: gs://seldon-models/xgboost/adult/tree_shap_gpu\n", + " containerSpec:\n", + " name: explainer\n", + " image: seldonio/alibiexplainer-gpu:1.7.0-dev\n", + " resources:\n", + " limits:\n", + " nvidia.com/gpu: 1\n", + " name: default\n", + " replicas: 1" + ] + }, + { + "cell_type": "code", + "execution_count": 105, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "seldondeployment.machinelearning.seldon.io/incomegpu unchanged\n" + ] + } + ], + "source": [ + "!kubectl apply -f resources/income_gpu_explainer.yaml" + ] + }, + { + "cell_type": "code", + "execution_count": 106, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "deployment \"incomegpu-default-0-income-model\" successfully rolled out\n" + ] + } + ], + "source": [ + "!kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=incomegpu -o jsonpath='{.items[0].metadata.name}')" + ] + }, + { + "cell_type": "code", + "execution_count": 107, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "from seldon_core.seldon_client import SeldonClient\n", + "import numpy as np\n", + "sc = SeldonClient(deployment_name=\"incomegpu\",namespace=\"seldon\", gateway=\"ambassador\", gateway_endpoint=\"localhost:8003\")" + ] + }, + { 
+ "cell_type": "markdown", + "metadata": {}, + "source": [ + "Use python client library to get a prediction." + ] + }, + { + "cell_type": "code", + "execution_count": 110, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "None\n" + ] + } + ], + "source": [ + "data = np.array([[52, 4, 0, 2, 8, 4, 2, 0, 0, 0, 60, 9]])\n", + "r = sc.predict(data=data)\n", + "print(r.response)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Use python client library to get an explanation." + ] + }, + { + "cell_type": "code", + "execution_count": 102, + "metadata": {}, + "outputs": [], + "source": [ + "from alibi.datasets import fetch_adult\n", + "adult = fetch_adult()\n", + "data = adult.data" + ] + }, + { + "cell_type": "code", + "execution_count": 140, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Elapsed time: 0.004498720169067383\n" + ] + } + ], + "source": [ + "import time\n", + "start = time.time()\n", + "res = sc.explain(deployment_name=\"incomegpu\", predictor=\"default\", data=data[0:2000])\n", + "end = time.time()\n", + "print(\"Elapsed time:\",end-start)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For running this test on P100 GPUs on GKE we see at least a 15x speed up over the CPU example above." + ] + }, + { + "cell_type": "code", + "execution_count": 141, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Explanation not successfull: are you running on GPU enabled cluster?\n" + ] + } + ], + "source": [ + "from alibi.api.interfaces import Explanation\n", + "\n", + "if res.success:\n", + " explanation = res.response\n", + " explanationStr = json.dumps(explanation)\n", + " explanation = Explanation.from_json(explanationStr)\n", + " \n", + " explanation.shap_values = np.array(explanation.shap_values)\n", + " explanation.raw[\"instances\"] = np.array(explanation.raw[\"instances\"])\n", + "else:\n", + " explanation = None\n", + " print(\"Explanation not successfull: are you running on GPU enabled cluster?\")" + ] + }, + { + "cell_type": "code", + "execution_count": 142, + "metadata": {}, + "outputs": [], + "source": [ + "def decode_data(X, feature_names, category_map):\n", + " \"\"\"\n", + " Given an encoded data matrix `X` returns a matrix where the \n", + " categorical levels have been replaced by human readable categories.\n", + " \"\"\"\n", + " \n", + " # expect 2D array\n", + " if len(X.shape) == 1:\n", + " X = X.reshape(1, -1)\n", + " \n", + " X_new = np.zeros(X.shape, dtype=object)\n", + " # Check if a column is categorical and replace it with values from category map\n", + " for idx, name in enumerate(feature_names):\n", + " categories = category_map.get(str(idx), None)\n", + " if categories:\n", + " for j, category in enumerate(categories):\n", + " encoded_vals = X[:, idx] == j\n", + " X_new[encoded_vals, idx] = category\n", + " else:\n", + " X_new[:, idx] = X[:, idx]\n", + " \n", + " return X_new" + ] + }, + { + "cell_type": "code", + "execution_count": 145, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "import shap\n", + "shap.initjs()" + ] + }, { "cell_type": "code", - "execution_count": null, + "execution_count": 146, "metadata": {}, "outputs": [], - "source": [] + "source": [ + "if explanation is not None:\n", + " decoded_features = decode_data(data, explanation.feature_names, explanation.categorical_names)\n", + " shap.force_plot(\n", + " explanation.expected_value[0], # 0 is a class index but we have single-output model\n", + " explanation.shap_values[0] , \n", + " decoded_features, \n", + " explanation.feature_names,\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": 147, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Error from server (NotFound): error when deleting \"resources/income_gpu_explainer.yaml\": seldondeployments.machinelearning.seldon.io \"incomegpu\" not found\n" + ] + } + ], + "source": [ + "!kubectl delete -f resources/income_gpu_explainer.yaml" + ] } ], "metadata": { @@ -2137,7 +2474,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.8" + "version": "3.7.9" }, "varInspector": { "cols": { diff --git a/notebooks/resources/.gitignore b/notebooks/resources/.gitignore index bfb8212b5d..657ac45855 100644 --- a/notebooks/resources/.gitignore +++ b/notebooks/resources/.gitignore @@ -30,5 +30,6 @@ elasticnet_wine.yaml halfplustwo_rest.yaml mnist_rest.yaml iris-xgboost-v2.yaml +income_gpu_explainer.yaml triton_multi.yaml -triton_tf_cifar10.yaml \ No newline at end of file +triton_tf_cifar10.yaml diff --git a/operator/controllers/mlserver.go b/operator/controllers/mlserver.go index dc12af5105..ae86108eec 100644 --- a/operator/controllers/mlserver.go +++ b/operator/controllers/mlserver.go @@ -1,6 +1,7 @@ package controllers import ( + "errors" "fmt" "strconv" @@ -22,6 +23,13 @@ const ( ) func mergeMLServerContainer(existing *v1.Container, mlServer *v1.Container) *v1.Container { + if mlServer == nil { + // Nothing to merge. + return existing + } + if existing == nil { + existing = &v1.Container{} + } // Overwrite core items if not existing or required if existing.Image == "" { existing.Image = mlServer.Image @@ -58,6 +66,9 @@ func mergeMLServerContainer(existing *v1.Container, mlServer *v1.Container) *v1. 
} func getMLServerContainer(pu *machinelearningv1.PredictiveUnit) (*v1.Container, error) { + if pu == nil { + return nil, errors.New("received nil predictive unit") + } image, err := getMLServerImage(pu) if err != nil { return nil, err @@ -126,6 +137,9 @@ func getMLServerContainer(pu *machinelearningv1.PredictiveUnit) (*v1.Container, } func getMLServerImage(pu *machinelearningv1.PredictiveUnit) (string, error) { + if pu == nil { + return "", errors.New("received nil predictive unit") + } prepackConfig := machinelearningv1.GetPrepackServerConfig(string(*pu.Implementation)) if prepackConfig == nil { return "", fmt.Errorf("failed to get server config for %s", *pu.Implementation) @@ -146,6 +160,9 @@ func getMLServerImage(pu *machinelearningv1.PredictiveUnit) (string, error) { } func getMLServerEnvVars(pu *machinelearningv1.PredictiveUnit) ([]v1.EnvVar, error) { + if pu == nil { + return nil, errors.New("received nil predictive unit") + } httpPort := pu.Endpoint.HttpPort grpcPort := pu.Endpoint.GrpcPort @@ -184,6 +201,9 @@ func getMLServerEnvVars(pu *machinelearningv1.PredictiveUnit) ([]v1.EnvVar, erro } func getMLServerModelImplementation(pu *machinelearningv1.PredictiveUnit) (string, error) { + if pu == nil { + return "", errors.New("received nil predictive unit") + } switch *pu.Implementation { case machinelearningv1.PrepackSklearnName: return MLServerSKLearnImplementation, nil diff --git a/operator/controllers/mlserver_test.go b/operator/controllers/mlserver_test.go index 4fbfa89c11..d4f71b5e37 100644 --- a/operator/controllers/mlserver_test.go +++ b/operator/controllers/mlserver_test.go @@ -47,6 +47,7 @@ var _ = Describe("MLServer helpers", func() { It("should merge containers adding extra env", func() { merged := mergeMLServerContainer(existing, mlServer) + Expect(merged).ToNot(BeNil()) Expect(merged.Env).To(ContainElement(v1.EnvVar{Name: "FOO", Value: "BAR"})) Expect(merged.Env).To(ContainElements(mlServer.Env)) Expect(merged.Image).To(Equal(mlServer.Image)) diff --git a/testing/profiling/engine/README.md b/testing/profiling/engine/README.md index 2091a890db..548779b86b 100644 --- a/testing/profiling/engine/README.md +++ b/testing/profiling/engine/README.md @@ -5,7 +5,7 @@ 1. Create debug image using Makefile. Ensure you use current version of engine. 1. Launch Seldon Core with this image as the engine image using, e.g. ``` - !helm install ../helm-charts/seldon-core --name seldon-core --namespace seldon --set ambassador.enabled=true --set engine.image.name=seldonio/engine-debug:0.2.6-SNAPSHOT + !helm install seldon-core ../helm-charts/seldon-core --namespace seldon --set ambassador.enabled=true --set engine.image.name=seldonio/engine-debug:0.2.6-SNAPSHOT ``` 1. Check logs of instance to get port and port forward to it, e.g. ``` diff --git a/testing/scripts/kind_test_all.sh b/testing/scripts/kind_test_all.sh index 9ecbf94a75..5da7782ce1 100755 --- a/testing/scripts/kind_test_all.sh +++ b/testing/scripts/kind_test_all.sh @@ -42,8 +42,8 @@ if [[ ${KIND_EXIT_VALUE} -eq 0 ]]; then run_end_to_end_tests() { - echo "Files changed in python folder:" - git --no-pager diff --exit-code --name-only origin/master ../../python + echo "Files changed in python or wrapper folder:" + git --no-pager diff --exit-code --name-only origin/master ../../python ../../wrappers/s2i/python PYTHON_MODIFIED=$? 
if [[ $PYTHON_MODIFIED -gt 0 ]]; then make s2i_build_base_images @@ -111,8 +111,8 @@ if [[ ${KIND_EXIT_VALUE} -eq 0 ]]; then return 1 fi - echo "Files changed in prepackaged folder:" - git --no-pager diff --exit-code --name-only origin/master ../../servers ../../integrations + echo "Files changed in prepackaged, python, or wrapper folder:" + git --no-pager diff --exit-code --name-only origin/master ../../servers ../../integrations ../../python ../../wrappers/s2i/python PREPACKAGED_MODIFIED=$? if [[ $PREPACKAGED_MODIFIED -gt 0 ]]; then make kind_build_prepackaged diff --git a/wrappers/s2i/python/Dockerfile.local b/wrappers/s2i/python/Dockerfile.local index d2bc429ef7..7d70d611fb 100644 --- a/wrappers/s2i/python/Dockerfile.local +++ b/wrappers/s2i/python/Dockerfile.local @@ -9,6 +9,9 @@ ARG PYTHON_VERSION RUN conda install --yes python=$PYTHON_VERSION conda=$CONDA_VERSION RUN apt-get update --yes && apt-get install --yes gcc make build-essential +# Pin pip and setuptools +RUN pip install pip==20.2 setuptools==46.1 + RUN mkdir microservice WORKDIR /microservice diff --git a/wrappers/s2i/python/Dockerfile.redhat b/wrappers/s2i/python/Dockerfile.redhat index 4e24d33d2f..3fc62156e3 100644 --- a/wrappers/s2i/python/Dockerfile.redhat +++ b/wrappers/s2i/python/Dockerfile.redhat @@ -9,8 +9,8 @@ ARG PYTHON_VERSION RUN conda install --yes python=$PYTHON_VERSION conda=$CONDA_VERSION RUN dnf install -y make automake gcc gcc-c++ -# Upgrade pip version -RUN pip install pip==20.2 +# Pin pip and setuptools +RUN pip install pip==20.2 setuptools==46.1 RUN mkdir microservice WORKDIR /microservice