Skip to content

Commit

Permalink
Merge pull request #2574 from cliveseldon/2378_http_grpc_executor
Browse files Browse the repository at this point in the history
  • Loading branch information
Adrian Gonzalez-Martin authored Nov 16, 2020
2 parents 2a9fc3f + 6ef2187 commit b5e64ad
Show file tree
Hide file tree
Showing 255 changed files with 16,023 additions and 32,800 deletions.
35 changes: 22 additions & 13 deletions ci_build_and_push_images.sh
Original file line number Diff line number Diff line change
Expand Up @@ -68,10 +68,8 @@ function build_push_engine {
function build_push_mock {
make \
-C examples/models/mean_classifier \
build_rest \
build_grpc \
push_rest \
push_grpc
build \
push
MOCK_MODEL_EXIT_VALUE=$?
}

Expand All @@ -94,32 +92,32 @@ function build_push_request_logger {
function build_push_sklearnserver {
make \
-C servers/sklearnserver \
build_all \
push_all
build \
push
SKLEARN_EXIT_VALUE=$?
}

function build_push_mlflowserver {
make \
-C servers/mlflowserver \
build_all \
push_all
build \
push
MLFLOW_EXIT_VALUE=$?
}

function build_push_xgboostserver {
make \
-C servers/xgboostserver \
build_all \
push_all
build \
push
XGBOOST_EXIT_VALUE=$?
}

function build_push_tfproxy {
make \
-C integrations/tfserving \
build_all \
push_all
-C integrations/tfserving_proxy \
build \
push
TFPROXY_EXIT_VALUE=$?
}

Expand All @@ -139,6 +137,14 @@ function build_push_storage_initializer {
STORAGE_INITIALIZER_EXIT_VALUE=$?
}

function build_push_mab {
make \
-C components/routers/epsilon-greedy \
build \
push
MAB_EXIT_VALUE=$?
}


build_push_python
build_push_operator
Expand All @@ -153,6 +159,7 @@ build_push_xgboostserver
build_push_tfproxy
build_push_alibi_explainer
build_push_storage_initializer
build_push_mab

#######################################
# EXIT STOPS COMMANDS FROM HERE ONWARDS
Expand All @@ -171,6 +178,7 @@ echo "Mock model exit value: $MOCK_MODEL_EXIT_VALUE"
echo "Alibi Detect exit value: $ALIBI_DETECT_EXIT_VALUE"
echo "Request Logger exit value: $LOGGER_EXIT_VALUE"
echo "Tensorflow Proxy exit value: $TFPROXY_EXIT_VALUE"
echo "MAB exit value: $MAB_EXIT_VALUE"

exit $((${PYTHON_EXIT_VALUE} \
+ ${OPERATOR_EXIT_VALUE} \
Expand All @@ -184,6 +192,7 @@ exit $((${PYTHON_EXIT_VALUE} \
+ ${XGBOOST_EXIT_VALUE} \
+ ${TFPROXY_EXIT_VALUE} \
+ ${STORAGE_INITIALIZER_EXIT_VALUE} \
+ ${MAB_EXIT_VALUE} \
+ ${EXPLAIN_EXIT_VALUE}))


Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@
import json
from .utils import SKLearnServer
ADULT_EXPLAINER_URI = "gs://seldon-models/sklearn/income/alibi/0.4.0"
ADULT_MODEL_URI = "gs://seldon-models/sklearn/income/model-0.23.2"
ADULT_MODEL_URI = "gs://seldon-models/sklearn/income/model"
EXPLAINER_FILENAME = "explainer.dill"


Expand Down
18 changes: 8 additions & 10 deletions components/routers/epsilon-greedy/Makefile
Original file line number Diff line number Diff line change
@@ -1,14 +1,12 @@
IMAGE_VERSION=1.3
VERSION := $(shell cat ../../../version.txt)
IMAGE_NAME=seldonio/mab_epsilon_greedy
KIND_NAME ?= kind

build_rest:
s2i build . -E environment_rest seldonio/seldon-core-s2i-python3:0.15 $(IMAGE_NAME)_rest:$(IMAGE_VERSION)
build:
s2i build . -E environment seldonio/seldon-core-s2i-python3:${VERSION} $(IMAGE_NAME):$(VERSION)

push_to_dockerhub_rest:
docker push $(IMAGE_NAME)_rest:$(IMAGE_VERSION)
push:
docker push $(IMAGE_NAME):$(VERSION)

build_grpc:
s2i build . -E environment_grpc seldonio/seldon-core-s2i-python3:0.15 $(IMAGE_NAME)_grpc:$(IMAGE_VERSION)

push_to_dockerhub_grpc:
docker push $(IMAGE_NAME)_grpc:$(IMAGE_VERSION)
kind_load: build
kind load -v 3 docker-image ${IMAGE_NAME}:${VERSION} --name ${KIND_NAME}
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
MODEL_NAME=EpsilonGreedy
API_TYPE=GRPC
SERVICE_TYPE=ROUTER
PERSISTENCE=0
4 changes: 0 additions & 4 deletions components/routers/epsilon-greedy/environment_rest

This file was deleted.

3 changes: 3 additions & 0 deletions doc/source/examples/backwards_compatibility.nblink
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
{
"path": "../../../notebooks/backwards_compatability.ipynb"
}
3 changes: 0 additions & 3 deletions doc/source/examples/istio.md
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,3 @@ The addition of Istio is complementary to Seldon and is illustrated below where

![svc-graph-istio](./svc-graph-istio.png)

## Worked Examples

[An example step-by-step guide to canary deployments using Istio and Seldon is provided](./istio_canary.html)
3 changes: 0 additions & 3 deletions doc/source/examples/istio_canary.nblink

This file was deleted.

3 changes: 1 addition & 2 deletions doc/source/examples/notebooks.rst
Original file line number Diff line number Diff line change
Expand Up @@ -140,7 +140,6 @@ Ingress
Ambassador Shadow <ambassador_shadow>
Ambassador Headers <ambassador_headers>
Ambassador Custom Config <ambassador_custom>
Istio Canary <istio_canary>
Istio Examples <istio_examples>

Infrastructure
Expand All @@ -161,4 +160,4 @@ Benchmarking and Load Tests
Service Orchestrator <bench_svcOrch>
Tensorflow <bench_tensorflow>
Argo Workflows Benchmarking <vegeta_bench_argo_workflows>

Backwards Compatibility Tests <backwards_compatibility>
5 changes: 0 additions & 5 deletions doc/source/java-jni/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -132,7 +132,6 @@ Define the core parameters needed by our Java S2I images to wrap your model.
An example is:

```bash
API_TYPE=REST
SERVICE_TYPE=MODEL
JAVA_IMPORT_PATH=io.seldon.example.model.ExampleModelHandler
```
Expand Down Expand Up @@ -213,10 +212,6 @@ Import path for your Java model implementation.
For instance, in the example above, this would be
`io.seldon.example.model.ExampleModelHandler`.

### API_TYPE

API type to create.
Can be REST or GRPC.

### SERVICE_TYPE

Expand Down
15 changes: 7 additions & 8 deletions doc/source/python/index.rst
Original file line number Diff line number Diff line change
Expand Up @@ -12,12 +12,11 @@ You can use the following links to navigate the Python seldon-core module:
.. toctree::
:maxdepth: 1

Seldon Core Python Module <python_module.md>
Your python class <python_component.md>
Wrap using S2I <python_wrapping_s2i.md>
Wrap using Docker <python_wrapping_docker.md>
Seldon Python Client <seldon_client.md>
Seldon Python Server <python_server.md>
Python API reference <api/modules>

Install the Seldon Core Python module <python_module.md>
Creating your Python inference class <python_component.md>
Create image with S2I <python_wrapping_s2i.md>
Create image with a Dockerfile <python_wrapping_docker.md>
Seldon Python server configuration <python_server.md>
Calling the Seldon API with the Seldon Python client <seldon_client.md>
Python API reference <api/modules>

2 changes: 1 addition & 1 deletion doc/source/python/python_component.md
Original file line number Diff line number Diff line change
Expand Up @@ -264,7 +264,7 @@ spec:
## Low level Methods
If you want more control you can provide a low-level methods that will provide as input the raw proto buffer payloads. The signatures for these are shown below for release `sedon_core>=0.2.6.1`:
If you want more control you can provide a low-level methods that will provide as input the raw proto buffer payloads. The signatures for these are shown below for release `seldon_core>=0.2.6.1`:

```python
def predict_raw(self, msg: prediction_pb2.SeldonMessage) -> prediction_pb2.SeldonMessage:
Expand Down
46 changes: 6 additions & 40 deletions doc/source/python/python_module.md
Original file line number Diff line number Diff line change
Expand Up @@ -71,46 +71,6 @@ Keep in mind that this will include some dependencies which may not be used.
Therefore, unless necessary, we recommend most users to install the default
distribution of `seldon-core` as [documented above](#install).

## Seldon Core Microservices

Seldon allows you to easily take your runtime inference code and create a Docker container that can be managed by Seldon Core. Follow the [S2I instructions](../wrappers/python.md) to wrap your code.

You can also create your own image and utilise the `seldon-core-microservice` executable to run your model code.


## Seldon Core Python API Client

The python package contains a module that provides a reference python client for the internal Seldon Core microservice API and the external APIs. More specifically it provides:

* Internal microservice API
* Make REST or gRPC calls
* Test all methods: `predict`, `transform-input`, `transform-output`, `route`, `aggregate`
* Provide a numpy array, binary data or string data as payload or get random data generated as payload for given shape
* Send data as tensor, TFTensor or ndarray
* External API
* Make REST or gRPC calls
* Call the API via Ambassador, Istio or Seldon's OAUTH API gateway.
* Test `predict` or `feedback` endpoints
* Provide a numpy array, binary data or string data as payload or get random data generated as payload for given shape
* Send data as tensor, TFTensor or ndarray

Basic usage of the client is to create a `SeldonClient` object first. For example for a Seldon Deployment called "mymodel" running in the namespace `seldon` with Ambassador endpoint at "localhost:8003" (i.e., via port-forwarding):

```python
from seldon_core.seldon_client import SeldonClient
sc = SeldonClient(deployment_name="mymodel",namespace="seldon", gateway_endpoint="localhost:8003")
```

Then make calls of various types. For example, to make a random prediction via the Ambassador gateway using REST:

```python
r = sc.predict(gateway="ambassador",transport="rest")
print(r)
```

Examples of using the `seldon_client` module can be found in the [example notebook](../examples/helm_examples.html).

The API docs can be found [here](./api/seldon_core.html#module-seldon_core.seldon_client).

## Troubleshooting

Expand Down Expand Up @@ -146,3 +106,9 @@ also works:
```bash
$ pip install azure-storage-blob==2.1.0 seldon-core
```

## Next Steps

[Create your python inference class](python_component.md)


8 changes: 7 additions & 1 deletion doc/source/python/python_server.md
Original file line number Diff line number Diff line change
@@ -1,4 +1,10 @@
# Seldon Python Server
# Seldon Python Server Configuration

* [Workers](#workers)
* [Threads](#threads)
* [Flask Development Server](#development-server)
* [Server Configuration](#configuration)


To serve your component, Seldon's Python wrapper will use
[Gunicorn](https://gunicorn.org/) under the hood by default.
Expand Down
8 changes: 1 addition & 7 deletions doc/source/python/python_wrapping_docker.md
Original file line number Diff line number Diff line change
Expand Up @@ -65,11 +65,10 @@ EXPOSE 5000

# Define environment variable
ENV MODEL_NAME MyModel
ENV API_TYPE REST
ENV SERVICE_TYPE MODEL
ENV PERSISTENCE 0

CMD exec seldon-core-microservice $MODEL_NAME $API_TYPE --service-type $SERVICE_TYPE --persistence $PERSISTENCE
CMD exec seldon-core-microservice $MODEL_NAME --service-type $SERVICE_TYPE --persistence $PERSISTENCE
```


Expand All @@ -87,10 +86,6 @@ The required environment variables understood by the builder image are explained
### MODEL_NAME
The name of the class containing the model. Also the name of the python file which will be imported.

### API_TYPE

API type to create. Can be REST or GRPC

### SERVICE_TYPE

The service type being created. Available options are:
Expand Down Expand Up @@ -157,7 +152,6 @@ These arguments can be set when deploying in a Seldon Deployment. An example can
{
"graph": {
"name": "tfserving-proxy",
"endpoint": { "type": "REST" },
"type": "MODEL",
"children": [],
"parameters": [
Expand Down
42 changes: 4 additions & 38 deletions doc/source/python/python_wrapping_s2i.md
Original file line number Diff line number Diff line change
Expand Up @@ -35,39 +35,8 @@ To use our s2i builder image to package your python model you will need:
We will go into detail for each of these steps:

### Python file
Your source code should contain a python file which defines a class of the same name as the file. For example, looking at our skeleton python model file at `wrappers/s2i/python/test/model-template-app/MyModel.py`:

```python
class MyModel(object):
"""
Model template.
You can load your model parameters in __init__ from a location accessible at runtime.
"""

def __init__(self):
"""
Add any initialization parameters.
These will be passed at runtime from the graph definition parameters defined in your seldondeployment kubernetes resource manifest.
"""
print("Initializing")

def predict(self,X,features_names):
"""
Return a prediction.
Parameters
----------
X : array-like
feature_names : array of feature names (optional)
"""
print("Predict called - will run identity function")
return X
```

* The file is called MyModel.py and it defines a class MyModel
* The class contains a predict method that takes an array (numpy) X and feature_names and returns an array of predictions.
* You can add any required initialization inside the class init method.
* Your return array should be at least 2-dimensional.
Your source code should contain a python file which defines a class of the same name as the file. For further details see [details on creating your python class](python_component.md)

### Dependencies

Expand Down Expand Up @@ -119,13 +88,14 @@ Define the core parameters needed by our python builder image to wrap your model

```bash
MODEL_NAME=MyModel
API_TYPE=REST
SERVICE_TYPE=MODEL
PERSISTENCE=0
```

These values can also be provided or overridden on the command line when building the image.

See below for the possible keys and values for this file.

## Step 3 - Build your image
Use `s2i build` to create your Docker image from source code. You will need Docker installed on the machine and optionally git if your source code is in a public git repo. You can choose from three python builder images

Expand Down Expand Up @@ -181,10 +151,6 @@ The required environment variables understood by the builder image are explained

The name of the class containing the model. Also the name of the python file which will be imported.

### API_TYPE

API type to create. Can be REST or GRPC

### SERVICE_TYPE

The service type being created. Available options are:
Expand Down Expand Up @@ -304,7 +270,7 @@ The allowable `type` values for the parameters are defined in the [proto buffer


### Local Python Dependencies
`from version 0.5-SNAPSHOT`
`from version 0.5`

To use a private repository for installing Python dependencies use the following build command:

Expand Down
Loading

0 comments on commit b5e64ad

Please sign in to comment.