From f9163cd5fce500cc92842613386b0dda7bf5bc5c Mon Sep 17 00:00:00 2001 From: Daisuke Taniwaki Date: Fri, 16 Aug 2019 23:15:29 +0900 Subject: [PATCH] Add Chainer MNIST example --- .gitignore | 4 + doc/source/examples/notebooks.rst | 1 + examples/models/README.md | 3 +- .../models/chainer_mnist/.s2i/environment | 4 + .../models/chainer_mnist/MnistClassifier.py | 64 + .../models/chainer_mnist/chainer_mnist.ipynb | 1626 +++++++++++++++++ .../chainer_mnist_deployment.json | 53 + examples/models/chainer_mnist/contract.json | 20 + .../models/chainer_mnist/requirements.txt | 1 + examples/models/chainer_mnist/train_mnist.py | 134 ++ 10 files changed, 1909 insertions(+), 1 deletion(-) create mode 100644 examples/models/chainer_mnist/.s2i/environment create mode 100644 examples/models/chainer_mnist/MnistClassifier.py create mode 100644 examples/models/chainer_mnist/chainer_mnist.ipynb create mode 100644 examples/models/chainer_mnist/chainer_mnist_deployment.json create mode 100644 examples/models/chainer_mnist/contract.json create mode 100644 examples/models/chainer_mnist/requirements.txt create mode 100755 examples/models/chainer_mnist/train_mnist.py diff --git a/.gitignore b/.gitignore index 0200d96402..14ccbb972e 100644 --- a/.gitignore +++ b/.gitignore @@ -107,6 +107,7 @@ examples/istio/canary_update/tmp/ examples/models/keras_mnist/MnistClassifier.h5 examples/models/keras_mnist/data/ examples/models/keras_mnist/tensorboardlogs_test/ +examples/models/chainer_mnist/result/ examples/models/r_iris/model.Rds examples/models/r_mnist/model.Rds examples/models/r_mnist/t10k-images-idx3-ubyte @@ -204,3 +205,6 @@ wrappers/s2i/python/_python/ seldon-controller/go testing/scripts/go + +# pyenv +.python-version diff --git a/doc/source/examples/notebooks.rst b/doc/source/examples/notebooks.rst index 666e1cbd6d..3ecc6cabfa 100644 --- a/doc/source/examples/notebooks.rst +++ b/doc/source/examples/notebooks.rst @@ -25,6 +25,7 @@ Notebooks Istio Examples Jaeger Tracing Keras MNIST + Chainer MNIST Kubeflow Seldon E2E Pipeline Max gRPC Message Size Model with Custom Metrics diff --git a/examples/models/README.md b/examples/models/README.md index 62f23baeeb..f40be2de75 100644 --- a/examples/models/README.md +++ b/examples/models/README.md @@ -7,6 +7,7 @@ These examples provide illustrations of creating various wrapped ML models for d * [Keras MNIST Classifier](./keras_mnist/keras_mnist.ipynb) * [Scikit-learn MNIST Classifier](./sk_mnist/skmnist.ipynb) * [Scikit-learn Iris Classifier](./sklearn_iris/sklearn_iris.ipynb) + * [Chainer MNIST Classifier](./chainer_mnist/chainer_mnist.ipynb) * R * [R MNIST Classifier](./r_mnist/r_mnist.ipynb) * [R Iris Classifier](./r_iris/r_iris.ipynb) @@ -18,4 +19,4 @@ These examples provide illustrations of creating various wrapped ML models for d * [ResNet ONNX Classifier using Intel nGraph](./onnx_resnet50/onnx_resnet50.ipynb) * Misc * Custom endpoints (for Prometheus) - \ No newline at end of file + diff --git a/examples/models/chainer_mnist/.s2i/environment b/examples/models/chainer_mnist/.s2i/environment new file mode 100644 index 0000000000..066bc973b7 --- /dev/null +++ b/examples/models/chainer_mnist/.s2i/environment @@ -0,0 +1,4 @@ +MODEL_NAME=MnistClassifier +API_TYPE=REST +SERVICE_TYPE=MODEL +PERSISTENCE=0 diff --git a/examples/models/chainer_mnist/MnistClassifier.py b/examples/models/chainer_mnist/MnistClassifier.py new file mode 100644 index 0000000000..1f3409eacd --- /dev/null +++ b/examples/models/chainer_mnist/MnistClassifier.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python 
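+# Seldon Core's Python wrapper loads the class named by MODEL_NAME in
+# .s2i/environment (MnistClassifier) and calls its predict(X, features_names)
+# method for each prediction request.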
+import chainer +import numpy as np +from train_mnist import MLP + +class MnistClassifier(object): + def __init__(self, gpu=-1, model_path='result/snapshot_iter_12000', unit=1000): + self.gpu = gpu + + # Create a same model object as what you used for training + model = MLP(unit, 10) + if gpu >= 0: + model.to_gpu(gpu) + + # Load saved parameters from a NPZ file of the Trainer object + try: + chainer.serializers.load_npz( + model_path, model, path='updater/model:main/predictor/') + except Exception: + chainer.serializers.load_npz( + model_path, model, path='predictor/') + + self.model = model + + def predict(self, X, features_names, meta = None): + X = np.float32(X) + if self.gpu >= 0: + X = chainer.cuda.cupy.asarray(X) + with chainer.using_config('train', False): + return self.model(X[None, ...]).array + + +def main(): + import argparse + + parser = argparse.ArgumentParser(description='Chainer example: MNIST') + parser.add_argument('--gpu', '-g', type=int, default=-1, + help='GPU ID (negative value indicates CPU)') + parser.add_argument('--snapshot', '-s', + default='result/snapshot_iter_12000', + help='The path to a saved snapshot (NPZ)') + parser.add_argument('--unit', '-u', type=int, default=1000, + help='Number of units') + args = parser.parse_args() + + print('GPU: {}'.format(args.gpu)) + print('# unit: {}'.format(args.unit)) + print('') + + # Prepare data + train, test = chainer.datasets.get_mnist() + x, answer = test[0] + x = x.reshape(1, x.size) + + classifier = MnistClassifier(args.gpu, args.snapshot, args.unit) + res = classifier.predict(x, []) + prediction = res.argmax() + + print('Prediction:', prediction) + print('Answer:', answer) + + +if __name__ == '__main__': + main() diff --git a/examples/models/chainer_mnist/chainer_mnist.ipynb b/examples/models/chainer_mnist/chainer_mnist.ipynb new file mode 100644 index 0000000000..b8029f32be --- /dev/null +++ b/examples/models/chainer_mnist/chainer_mnist.ipynb @@ -0,0 +1,1626 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Chainer MNIST Model Deployment\n", + "\n", + " * Wrap a Chainer MNIST python model for use as a prediction microservice in seldon-core\n", + " * Run locally on Docker to test\n", + " * Deploy on seldon-core running on minikube\n", + " \n", + "## Dependencies\n", + "\n", + " * [Helm](https://github.com/kubernetes/helm)\n", + " * [Minikube](https://github.com/kubernetes/minikube)\n", + " * [S2I](https://github.com/openshift/source-to-image)\n", + "\n", + "```bash\n", + "pip install seldon-core\n", + "pip install chainer==6.2.0\n", + "```\n", + "\n", + "## Train locally\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/chainer/_environment_check.py:41: UserWarning: Accelerate has been detected as a NumPy backend library.\n", + "vecLib, which is a part of Accelerate, is known not to work correctly with Chainer.\n", + "We recommend using other BLAS libraries such as OpenBLAS.\n", + "For details of the issue, please see\n", + "https://docs.chainer.org/en/stable/tips.html#mnist-example-does-not-converge-in-cpu-mode-on-mac-os-x.\n", + "\n", + "Please be aware that Mac OS X is not an officially supported OS.\n", + "\n", + " ''') # NOQA\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Device: @numpy\n", + "# unit: 1000\n", + "# Minibatch-size: 100\n", + "# epoch: 20\n", + "\n" 
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/chainer/training/extensions/plot_report.py:32: UserWarning: matplotlib is not installed on your environment, so nothing will be plotted at this time. Please install matplotlib to plot figures.\n",
+      "\n",
+      "  $ pip install matplotlib\n",
+      "\n",
+      "  warnings.warn('matplotlib is not installed on your environment, '\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "epoch       main/loss   validation/main/loss  main/accuracy  validation/main/accuracy  elapsed_time\n",
+      "1           0.191134    0.104015              0.942833       0.9664                    14.3463       \n",
+      "2           0.0770754   0.0721484             0.9757         0.9764                    30.3365       \n",
+      "3           0.0491765   0.0686589             0.984117       0.9783                    47.5777       \n",
+      "4           0.0344382   0.0732416             0.988733       0.9793                    66.0447       \n",
+      "5           0.0280074   0.0716412             0.990383       0.9804                    83.6667       \n",
+      "6           0.0221732   0.0800123             0.992683       0.9783                    101.278       \n",
+      "7           0.0197829   0.0768317             0.9938         0.9784                    119.732       \n",
+      "8           0.016657    0.113716              0.994683       0.9749                    137.569       \n",
+      "9           0.0196976   0.0879474             0.993633       0.9807                    155.458       \n",
+      "10          0.0135036   0.094506              0.9956         0.9788                    174.195       \n",
+      "11          0.0174658   0.118295              0.9947         0.9766                    193.345       \n",
+      "12          0.0110422   0.092281              0.996467       0.9804                    213.149       \n",
+      "13          0.0131023   0.112165              0.996183       0.9795                    233.983       \n",
+      "14          0.0107402   0.106344              0.99645        0.9813                    255.572       \n",
+      "15          0.0125309   0.0853632             0.996467       0.9835                    277.066       \n",
+      "16          0.0076524   0.0845158             0.9977         0.9833                    299.077       \n",
+      "17          0.0125142   0.0967522             0.996583       0.9808                    321.412       \n",
+      "18          0.00738255  0.112494              0.997783       0.9795                    345.352       \n",
+      "19          0.0121511   0.0853964             0.9964         0.9833                    368.794       \n",
+      "     11900 iter, 19 epoch / 20 epochs\n",
+      "    29.581 iters/sec. 
Estimated time to finish: 0:00:03.380585.\n", + "\u001b[4A\u001b[J20 0.00638073 0.098195 0.997883 0.9814 392.74 \n", + "\u001b[J total [##################################################] 100.00%\n", + "this epoch [..................................................] 0.00%\n", + " 12000 iter, 20 epoch / 20 epochs\n", + " 29.484 iters/sec. Estimated time to finish: 0:00:00.\n", + "\u001b[4A\u001b[J" + ] + } + ], + "source": [ + "#!/usr/bin/env python\n", + "import argparse\n", + "\n", + "import chainer\n", + "import chainer.functions as F\n", + "import chainer.links as L\n", + "from chainer import training\n", + "from chainer.training import extensions\n", + "import chainerx\n", + "\n", + "\n", + "# Network definition\n", + "class MLP(chainer.Chain):\n", + "\n", + " def __init__(self, n_units, n_out):\n", + " super(MLP, self).__init__()\n", + " with self.init_scope():\n", + " # the size of the inputs to each layer will be inferred\n", + " self.l1 = L.Linear(None, n_units) # n_in -> n_units\n", + " self.l2 = L.Linear(None, n_units) # n_units -> n_units\n", + " self.l3 = L.Linear(None, n_out) # n_units -> n_out\n", + "\n", + " def forward(self, x):\n", + " h1 = F.relu(self.l1(x))\n", + " h2 = F.relu(self.l2(h1))\n", + " return self.l3(h2)\n", + "\n", + "\n", + "def main():\n", + " parser = argparse.ArgumentParser(description='Chainer example: MNIST')\n", + " parser.add_argument('--batchsize', '-b', type=int, default=100,\n", + " help='Number of images in each mini-batch')\n", + " parser.add_argument('--epoch', '-e', type=int, default=20,\n", + " help='Number of sweeps over the dataset to train')\n", + " parser.add_argument('--frequency', '-f', type=int, default=-1,\n", + " help='Frequency of taking a snapshot')\n", + " parser.add_argument('--device', '-d', type=str, default='-1',\n", + " help='Device specifier. Either ChainerX device '\n", + " 'specifier or an integer. If non-negative integer, '\n", + " 'CuPy arrays with specified device id are used. 
If '\n", + " 'negative integer, NumPy arrays are used')\n", + " parser.add_argument('--out', '-o', default='result',\n", + " help='Directory to output the result')\n", + " parser.add_argument('--resume', '-r', type=str,\n", + " help='Resume the training from snapshot')\n", + " parser.add_argument('--unit', '-u', type=int, default=1000,\n", + " help='Number of units')\n", + " parser.add_argument('--noplot', dest='plot', action='store_false',\n", + " help='Disable PlotReport extension')\n", + " group = parser.add_argument_group('deprecated arguments')\n", + " group.add_argument('--gpu', '-g', dest='device',\n", + " type=int, nargs='?', const=0,\n", + " help='GPU ID (negative value indicates CPU)')\n", + " args = parser.parse_args(args=[])\n", + "\n", + " device = chainer.get_device(args.device)\n", + "\n", + " print('Device: {}'.format(device))\n", + " print('# unit: {}'.format(args.unit))\n", + " print('# Minibatch-size: {}'.format(args.batchsize))\n", + " print('# epoch: {}'.format(args.epoch))\n", + " print('')\n", + "\n", + " # Set up a neural network to train\n", + " # Classifier reports softmax cross entropy loss and accuracy at every\n", + " # iteration, which will be used by the PrintReport extension below.\n", + " model = L.Classifier(MLP(args.unit, 10))\n", + " model.to_device(device)\n", + " device.use()\n", + "\n", + " # Setup an optimizer\n", + " optimizer = chainer.optimizers.Adam()\n", + " optimizer.setup(model)\n", + "\n", + " # Load the MNIST dataset\n", + " train, test = chainer.datasets.get_mnist()\n", + "\n", + " train_iter = chainer.iterators.SerialIterator(train, args.batchsize)\n", + " test_iter = chainer.iterators.SerialIterator(test, args.batchsize,\n", + " repeat=False, shuffle=False)\n", + "\n", + " # Set up a trainer\n", + " updater = training.updaters.StandardUpdater(\n", + " train_iter, optimizer, device=device)\n", + " trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)\n", + "\n", + " # Evaluate the model with the test dataset for each epoch\n", + " trainer.extend(extensions.Evaluator(test_iter, model, device=device))\n", + "\n", + " # Dump a computational graph from 'loss' variable at the first iteration\n", + " # The \"main\" refers to the target link of the \"main\" optimizer.\n", + " # TODO(niboshi): Temporarily disabled for chainerx. 
Fix it.\n", + " if device.xp is not chainerx:\n", + " trainer.extend(extensions.DumpGraph('main/loss'))\n", + "\n", + " # Take a snapshot for each specified epoch\n", + " frequency = args.epoch if args.frequency == -1 else max(1, args.frequency)\n", + " trainer.extend(extensions.snapshot(), trigger=(frequency, 'epoch'))\n", + "\n", + " # Write a log of evaluation statistics for each epoch\n", + " trainer.extend(extensions.LogReport())\n", + "\n", + " # Save two plot images to the result dir\n", + " if args.plot and extensions.PlotReport.available():\n", + " trainer.extend(\n", + " extensions.PlotReport(['main/loss', 'validation/main/loss'],\n", + " 'epoch', file_name='loss.png'))\n", + " trainer.extend(\n", + " extensions.PlotReport(\n", + " ['main/accuracy', 'validation/main/accuracy'],\n", + " 'epoch', file_name='accuracy.png'))\n", + "\n", + " # Print selected entries of the log to stdout\n", + " # Here \"main\" refers to the target link of the \"main\" optimizer again, and\n", + " # \"validation\" refers to the default name of the Evaluator extension.\n", + " # Entries other than 'epoch' are reported by the Classifier link, called by\n", + " # either the updater or the evaluator.\n", + " trainer.extend(extensions.PrintReport(\n", + " ['epoch', 'main/loss', 'validation/main/loss',\n", + " 'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))\n", + "\n", + " # Print a progress bar to stdout\n", + " trainer.extend(extensions.ProgressBar())\n", + "\n", + " if args.resume is not None:\n", + " # Resume from a snapshot\n", + " chainer.serializers.load_npz(args.resume, trainer)\n", + "\n", + " # Run the training\n", + " trainer.run()\n", + "\n", + "\n", + "if __name__ == '__main__':\n", + " main()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Wrap model using s2i" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---> Installing application source...\n", + "---> Installing dependencies ...\n", + "Looking in links: /whl\n", + "Collecting chainer==6.2.0 (from -r requirements.txt (line 1))\n", + " WARNING: Url '/whl' is ignored. It is either a non-existing path or lacks a specific scheme.\n", + "Downloading https://files.pythonhosted.org/packages/2c/5a/86c50a0119a560a39d782c4cdd9b72927c090cc2e3f70336e01b19a5f97a/chainer-6.2.0.tar.gz (873kB)\n", + "Requirement already satisfied: setuptools in /usr/local/lib/python3.6/site-packages (from chainer==6.2.0->-r requirements.txt (line 1)) (41.0.1)\n", + "Collecting typing<=3.6.6 (from chainer==6.2.0->-r requirements.txt (line 1))\n", + " WARNING: Url '/whl' is ignored. It is either a non-existing path or lacks a specific scheme.\n", + "Downloading https://files.pythonhosted.org/packages/4a/bd/eee1157fc2d8514970b345d69cb9975dcd1e42cd7e61146ed841f6e68309/typing-3.6.6-py3-none-any.whl\n", + "Collecting typing_extensions<=3.6.6 (from chainer==6.2.0->-r requirements.txt (line 1))\n", + " WARNING: Url '/whl' is ignored. It is either a non-existing path or lacks a specific scheme.\n", + "Downloading https://files.pythonhosted.org/packages/62/4f/392a1fa2873e646f5990eb6f956e662d8a235ab474450c72487745f67276/typing_extensions-3.6.6-py3-none-any.whl\n", + "Collecting filelock (from chainer==6.2.0->-r requirements.txt (line 1))\n", + " WARNING: Url '/whl' is ignored. 
It is either a non-existing path or lacks a specific scheme.\n", + "Downloading https://files.pythonhosted.org/packages/93/83/71a2ee6158bb9f39a90c0dea1637f81d5eef866e188e1971a1b1ab01a35a/filelock-3.0.12-py3-none-any.whl\n", + "Requirement already satisfied: numpy>=1.9.0 in /usr/local/lib/python3.6/site-packages (from chainer==6.2.0->-r requirements.txt (line 1)) (1.16.4)\n", + "Collecting protobuf<3.8.0rc1,>=3.0.0 (from chainer==6.2.0->-r requirements.txt (line 1))\n", + " WARNING: Url '/whl' is ignored. It is either a non-existing path or lacks a specific scheme.\n", + "Downloading https://files.pythonhosted.org/packages/5a/aa/a858df367b464f5e9452e1c538aa47754d467023850c00b000287750fa77/protobuf-3.7.1-cp36-cp36m-manylinux1_x86_64.whl (1.2MB)\n", + "Requirement already satisfied: six>=1.9.0 in /usr/local/lib/python3.6/site-packages (from chainer==6.2.0->-r requirements.txt (line 1)) (1.12.0)\n", + "Building wheels for collected packages: chainer\n", + "Building wheel for chainer (setup.py): started\n", + "Building wheel for chainer (setup.py): finished with status 'done'\n", + "Stored in directory: /root/.cache/pip/wheels/2e/be/c5/6ee506abcaa4a53106f7d7671bbee8b4e5243bc562a9d32ad1\n", + "Successfully built chainer\n", + "Installing collected packages: typing, typing-extensions, filelock, protobuf, chainer\n", + "Found existing installation: protobuf 3.8.0\n", + "Uninstalling protobuf-3.8.0:\n", + "Successfully uninstalled protobuf-3.8.0\n", + "Successfully installed chainer-6.2.0 filelock-3.0.12 protobuf-3.7.1 typing-3.6.6 typing-extensions-3.6.6\n", + "WARNING: Url '/whl' is ignored. It is either a non-existing path or lacks a specific scheme.\n", + "WARNING: You are using pip version 19.1, however version 19.2.2 is available.\n", + "You should consider upgrading via the 'pip install --upgrade pip' command.\n", + "Build completed successfully\n" + ] + } + ], + "source": [ + "!s2i build . 
seldonio/seldon-core-s2i-python3:0.10 chainer-mnist:0.1" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "b03f58f82ca07e25261be34b75be4a0ffbbfa1ad736d3866790682bf0d8202a3\r\n" + ] + } + ], + "source": [ + "!docker run --name \"mnist_predictor\" -d --rm -p 5000:5000 chainer-mnist:0.1" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Send some random features that conform to the contract" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:516: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n", + "/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:517: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n", + "/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:518: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n", + "/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:519: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n", + "/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:520: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n", + "/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n", + "/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:541: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n", + "/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:542: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n", + "/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:543: FutureWarning: Passing (type, 1) 
or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n", + "/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:544: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n", + "/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:545: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n", + "/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:550: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n", + "----------------------------------------\n", + "SENDING NEW REQUEST:\n", + "\n", + "[[0.997 0.039 0.778 0.59 0.526 0.591 0.659 0.423 0.404 0.302 0.322 0.453\n", + " 0.54 0.852 0.268 0.564 0.163 0.032 0.934 0.317 0.395 0.122 0.056 0.729\n", + " 0.106 0.443 0.334 0.784 0.646 0.296 0.524 0.855 0.503 0.727 0.326 0.491\n", + " 0.385 0.042 0.82 0.715 0.972 0.699 0.431 0.618 0.096 0.849 0.224 0.187\n", + " 0.145 0.357 0.187 0.779 0.009 0.775 0.775 0.584 0.897 0.674 0.01 0.775\n", + " 0.095 0.081 0.089 0.351 0.985 0.878 0.906 0.396 0.499 0.646 0.127 0.966\n", + " 0.087 0.668 0.314 0.853 0.55 0.345 0.95 0.792 0.797 0.037 0.18 0.592\n", + " 0.941 0.662 0.101 0.388 0.902 0.868 0.505 0.824 0.8 0.855 0.568 0.368\n", + " 0.605 0.224 0.214 0.582 0.365 0.44 0.389 0.922 0.028 0.142 0.525 0.843\n", + " 0.706 0.61 0.215 0.962 0.334 0.273 0.365 0.075 0.929 0.693 0.382 0.76\n", + " 0.75 0.403 0.344 0.218 0.831 0.431 0.469 0.527 0.755 0.048 0.407 0.953\n", + " 0.468 0.186 0.589 0.839 0.513 0.307 0.251 0.738 0.173 0.185 0.499 0.797\n", + " 0.264 0.149 0.547 0.699 0.935 0.071 0.145 0.853 0.884 0.195 0.944 0.775\n", + " 0.523 0.627 0.729 0.826 0.894 0.117 0.935 0.363 0.03 0.16 0.435 0.579\n", + " 0.954 0.487 0.133 0.348 0.12 0.741 0.203 0.103 0.334 0.009 0.898 0.597\n", + " 0.375 0.241 0.27 0.094 0.819 0.737 0.147 0.715 0.138 0.801 0.427 0.602\n", + " 0.336 0.796 0.691 0.415 0.329 0.155 0.17 0.152 0.237 0.957 0.298 0.837\n", + " 0.982 0.805 0.972 0.125 0.916 0.101 0.054 0.347 0.566 0.232 0.885 0.864\n", + " 0.049 0.205 0.361 0.767 0.099 0.634 0.359 0.975 0.56 0.289 0.49 0.359\n", + " 0.901 0.39 0.197 0.985 0.141 0.232 0.336 0.932 0.923 0.032 0.126 0.51\n", + " 0.571 0.743 0.831 0.999 0.972 0.649 0.527 0.909 0.071 0.539 0.676 0.851\n", + " 0.104 0.103 0.392 0.641 0.838 0.333 0.453 0.573 0.199 0.924 0.588 0.955\n", + " 0.866 0.085 0.985 0.803 0.386 0.713 0.056 0.972 0.489 0.623 0.108 0.904\n", + " 0.746 0.986 0.824 0.996 0.161 0.738 0.24 0.153 0.935 0.782 0.393 0.098\n", + " 0.449 0.24 0.621 0.293 0.569 0.196 0.893 0.605 0.608 0.114 0.383 0.038\n", + " 0.573 0.373 0.474 0.006 0.292 0.738 0.943 0.65 0.553 0.684 0.3 0.587\n", + " 0.183 0.521 0.211 0.074 0.696 0.672 0.206 0.694 0.129 0.81 0.415 0.56\n", + " 0.994 0.686 0.807 0.514 0.215 0.096 0.295 0.233 0.625 0.663 0.794 0.16\n", + " 0.837 0.194 0.07 0.939 
0.965 0.142 0.66 0.152 0.249 0.995 0.892 0.265\n", + " 0.865 0.742 0.19 0.03 0.42 0.807 0.15 0.163 0.529 0.23 0.59 0.676\n", + " 0.121 0.474 0.329 0.383 0.534 0.093 0.861 0.058 0.019 0.212 0.296 0.947\n", + " 0.879 0.445 0.357 0.021 0.551 0.362 0.653 0.258 0.146 0.453 0.373 0.448\n", + " 0.339 0.974 0.266 0.656 0.036 0.698 0.651 0.91 0.438 0.767 0.716 0.267\n", + " 0.871 0.781 0.13 0.912 0.13 0.332 0.647 0.31 0.171 0.323 0.703 0.197\n", + " 0.918 0.803 0.43 0.103 0.606 0.955 0.733 0.902 0.139 0.471 0.994 0.393\n", + " 0.95 0.485 0.782 0.213 0.994 0.206 0.938 0.019 0.429 0.135 0.811 0.209\n", + " 0.991 0.93 0.878 0.742 0.859 0.397 0.128 0.087 0.447 0.392 0.61 0.18\n", + " 0.087 0.641 0.31 0.033 0.211 0.431 0.051 0.639 0.461 0.466 0.171 0.736\n", + " 0.727 0.183 0.542 0.416 0.524 0.251 0.513 0.087 0.395 0.164 0.25 0.384\n", + " 0.705 0.683 0.827 0.188 0.163 0.325 0.256 0.904 0.161 0.334 0.639 0.728\n", + " 0.267 0.463 0.373 0.111 0.585 0.794 0.972 0.281 0.984 0.564 0.671 0.868\n", + " 0.741 0.638 0.702 0.778 0.667 0.372 0.818 0.49 0.102 0.403 0.187 0.283\n", + " 0.492 0.937 0.643 0.657 0.514 0.492 0.042 0.809 0.088 0.018 0.631 0.731\n", + " 0.516 0.625 0.597 0.629 0.798 0.907 0.861 0.439 0.777 0.014 0.771 0.152\n", + " 0.16 0.997 0.699 0.127 0.038 0.503 0.572 0.878 0.901 0.215 0.606 0.686\n", + " 0.847 0.007 0.976 0.895 0.357 0.374 0.989 0.544 0.317 0.043 0.718 0.788\n", + " 0.121 0.432 0.16 0.485 0.553 0.048 0.003 0.375 0.592 0.207 0.853 0.81\n", + " 0.043 0.554 0.084 0.584 0.73 0.766 0.738 0.038 0.56 0.475 0.763 0.002\n", + " 0.382 0.49 0.302 0.873 0.141 0.023 0.341 0.113 0.197 0.948 0.088 0.294\n", + " 0.778 0.807 0.935 0.712 0.466 0.885 0.815 0.843 0.745 0.217 0.664 0.142\n", + " 0.421 0.371 0.536 0.009 0.036 0.352 0.916 0.161 0.345 0.348 0.688 0.806\n", + " 0.434 0.413 0.567 0.043 0.934 0.072 0.54 0.347 0.817 0.321 0.85 0.478\n", + " 0.832 0.899 0.283 0.34 0.304 0.955 0.915 0.934 0.452 0.423 0.75 0.013\n", + " 0.5 0.691 0.854 0.453 0.959 0.843 0.698 0.756 0.918 0.992 0.663 0.608\n", + " 0.756 0.7 0.347 0.427 0.198 0.37 0.837 0.362 0.291 0.126 0.695 0.777\n", + " 0.318 0.88 0.859 0.958 0.075 0.332 0.321 0.179 0.834 0.027 0.332 0.799\n", + " 0.504 0.274 0.819 0.081 0.337 0.02 0.598 0.727 0.159 0.937 0.199 0.639\n", + " 0.063 0.75 0.637 0.686 0.677 0.102 0.135 0.264 0.091 0.837 0.562 0.453\n", + " 0.503 0.884 0.147 0.966 0.118 0.293 0.327 0.859 0.958 0.498 0.369 0.123\n", + " 0.354 0.812 0.163 0.96 0.64 0.596 0.029 0.84 0.159 0.717 0.025 0.394\n", + " 0.185 0.29 0.554 0.646 0.432 0.197 0.668 0.531 0.206 0.599 0.842 0.579\n", + " 0.836 0.889 0.797 0.891 0.1 0.087 0.825 0.952 0.781 0.295 0.819 0.038\n", + " 0.34 0.476 0.08 0.784 0.556 0.282 0.699 0.954 0.5 0.332 0.213 0.618\n", + " 0.92 0.776 0.147 0.749 0.597 0.191 0.957 0.47 0.324 0.352 0.837 0.263\n", + " 0.536 0.48 0.997 0.417 0.08 0.464 0.886 0.019 0.307 0.164 0.36 0.638\n", + " 0.46 0.803 0.139 0.575]]\n", + "Traceback (most recent call last):\n", + " File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/urllib3/connection.py\", line 160, in _new_conn\n", + " (self._dns_host, self.port), self.timeout, **extra_kw)\n", + " File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/urllib3/util/connection.py\", line 80, in create_connection\n", + " raise err\n", + " File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/urllib3/util/connection.py\", line 70, in create_connection\n", + " sock.connect(sa)\n", + "ConnectionRefusedError: [Errno 61] Connection refused\n", + "\n", + "During 
handling of the above exception, another exception occurred:\n", + "\n", + "Traceback (most recent call last):\n", + " File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/urllib3/connectionpool.py\", line 603, in urlopen\n", + " chunked=chunked)\n", + " File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/urllib3/connectionpool.py\", line 355, in _make_request\n", + " conn.request(method, url, **httplib_request_kw)\n", + " File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/http/client.py\", line 1244, in request\n", + " self._send_request(method, url, body, headers, encode_chunked)\n", + " File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/http/client.py\", line 1290, in _send_request\n", + " self.endheaders(body, encode_chunked=encode_chunked)\n", + " File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/http/client.py\", line 1239, in endheaders\n", + " self._send_output(message_body, encode_chunked=encode_chunked)\n", + " File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/http/client.py\", line 1026, in _send_output\n", + " self.send(msg)\n", + " File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/http/client.py\", line 966, in send\n", + " self.connect()\n", + " File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/urllib3/connection.py\", line 183, in connect\n", + " conn = self._new_conn()\n", + " File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/urllib3/connection.py\", line 169, in _new_conn\n", + " self, \"Failed to establish a new connection: %s\" % e)\n", + "urllib3.exceptions.NewConnectionError: : Failed to establish a new connection: [Errno 61] Connection refused\n", + "\n", + "During handling of the above exception, another exception occurred:\n", + "\n", + "Traceback (most recent call last):\n", + " File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/requests/adapters.py\", line 449, in send\n", + " timeout=timeout\n", + " File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/urllib3/connectionpool.py\", line 641, in urlopen\n", + " _stacktrace=sys.exc_info()[2])\n", + " File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/urllib3/util/retry.py\", line 399, in increment\n", + " raise MaxRetryError(_pool, url, error or ResponseError(cause))\n", + "urllib3.exceptions.MaxRetryError: HTTPConnectionPool(host='0.0.0.0', port=5000): Max retries exceeded with url: /predict (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 61] Connection refused'))\n", + "\n", + "During handling of the above exception, another exception occurred:\n", + "\n", + "Traceback (most recent call last):\n", + " File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/bin/seldon-core-tester\", line 10, in \n", + " sys.exit(main())\n", + " File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/seldon_core/microservice_tester.py\", line 258, in main\n", + " run_predict(args)\n", + " File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/seldon_core/microservice_tester.py\", line 225, in run_predict\n", + " response = sc.microservice(data=batch, transport=transport, method=\"predict\", payload_type=payload_type, names=feature_names)\n", + " File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/seldon_core/seldon_client.py\", line 395, in microservice\n", + " return microservice_api_rest_seldon_message(**k)\n", + " File 
\"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/seldon_core/seldon_client.py\", line 534, in microservice_api_rest_seldon_message\n", + " data={\"json\": json.dumps(payload)})\n", + " File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/requests/api.py\", line 116, in post\n", + " return request('post', url, data=data, json=json, **kwargs)\n", + " File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/requests/api.py\", line 60, in request\n", + " return session.request(method=method, url=url, **kwargs)\n", + " File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/requests/sessions.py\", line 533, in request\n", + " resp = self.send(prep, **send_kwargs)\n", + " File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/requests/sessions.py\", line 646, in send\n", + " r = adapter.send(request, **kwargs)\n", + " File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/requests/adapters.py\", line 516, in send\n", + " raise ConnectionError(e, request=request)\n", + "requests.exceptions.ConnectionError: HTTPConnectionPool(host='0.0.0.0', port=5000): Max retries exceeded with url: /predict (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 61] Connection refused'))\n" + ] + } + ], + "source": [ + "!seldon-core-tester contract.json 0.0.0.0 5000 -p" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Error: No such container: mnist_predictor\r\n" + ] + } + ], + "source": [ + "!docker rm mnist_predictor --force" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Test using Minikube\n", + "\n", + "**Due to a [minikube/s2i issue](https://github.com/SeldonIO/seldon-core/issues/253) you will need [s2i >= 1.1.13](https://github.com/openshift/source-to-image/releases/tag/v1.1.13)**" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "😄 minikube v1.2.0 on darwin (amd64)\n", + "🔥 Creating virtualbox VM (CPUs=2, Memory=4096MB, Disk=20000MB) ...\n", + "🐳 Configuring environment for Kubernetes v1.15.0 on Docker 18.09.6\n", + "🚜 Pulling images ...\n", + "🚀 Launching Kubernetes ... \n", + "⌛ Verifying: apiserver proxy etcd scheduler controller dns\n", + "🏄 Done! 
kubectl is now configured to use \"minikube\"\n" + ] + } + ], + "source": [ + "!minikube start --memory 4096 " + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "clusterrolebinding.rbac.authorization.k8s.io/kube-system-cluster-admin created\r\n" + ] + } + ], + "source": [ + "!kubectl create clusterrolebinding kube-system-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:default" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "$HELM_HOME has been configured at /Users/dtaniwaki/.helm.\n", + "\n", + "Tiller (the Helm server-side component) has been installed into your Kubernetes Cluster.\n", + "\n", + "Please note: by default, Tiller is deployed with an insecure 'allow unauthenticated users' policy.\n", + "To prevent this, run `helm init` with the --tiller-tls-verify flag.\n", + "For more information on securing your installation see: https://docs.helm.sh/using_helm/#securing-your-helm-installation\n" + ] + } + ], + "source": [ + "!helm init" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Waiting for deployment \"tiller-deploy\" rollout to finish: 0 of 1 updated replicas are available...\n", + "deployment \"tiller-deploy\" successfully rolled out\n" + ] + } + ], + "source": [ + "!kubectl rollout status deploy/tiller-deploy -n kube-system" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "NAME: seldon-core\n", + "LAST DEPLOYED: Fri Aug 16 22:53:14 2019\n", + "NAMESPACE: seldon-system\n", + "STATUS: DEPLOYED\n", + "\n", + "RESOURCES:\n", + "==> v1/ClusterRole\n", + "NAME AGE\n", + "seldon-operator-manager-role 1s\n", + "\n", + "==> v1/ClusterRoleBinding\n", + "NAME AGE\n", + "seldon-operator-manager-rolebinding 1s\n", + "\n", + "==> v1/ConfigMap\n", + "NAME DATA AGE\n", + "seldon-config 1 1s\n", + "seldon-spartakus-config 1 1s\n", + "\n", + "==> v1/Pod(related)\n", + "NAME READY STATUS RESTARTS AGE\n", + "seldon-operator-controller-manager-0 0/1 ContainerCreating 0 1s\n", + "seldon-spartakus-volunteer-7d6dd98f89-p6tcn 0/1 ContainerCreating 0 1s\n", + "\n", + "==> v1/Secret\n", + "NAME TYPE DATA AGE\n", + "seldon-operator-webhook-server-secret Opaque 0 1s\n", + "\n", + "==> v1/Service\n", + "NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE\n", + "seldon-operator-controller-manager-service ClusterIP 10.108.43.223 443/TCP 1s\n", + "webhook-server-service ClusterIP 10.104.128.63 443/TCP 1s\n", + "\n", + "==> v1/ServiceAccount\n", + "NAME SECRETS AGE\n", + "seldon-core-seldon-core-operator 1 1s\n", + "seldon-spartakus-volunteer 1 1s\n", + "\n", + "==> v1/StatefulSet\n", + "NAME READY AGE\n", + "seldon-operator-controller-manager 0/1 1s\n", + "\n", + "==> v1beta1/ClusterRole\n", + "NAME AGE\n", + "seldon-spartakus-volunteer 1s\n", + "\n", + "==> v1beta1/ClusterRoleBinding\n", + "NAME AGE\n", + "seldon-spartakus-volunteer 1s\n", + "\n", + "==> v1beta1/CustomResourceDefinition\n", + "NAME AGE\n", + "seldondeployments.machinelearning.seldon.io 1s\n", + "\n", + "==> v1beta1/Deployment\n", + "NAME READY UP-TO-DATE AVAILABLE AGE\n", + "seldon-spartakus-volunteer 0/1 1 0 1s\n", + "\n", + "\n", + "NOTES:\n", + "NOTES: TODO\n", + "\n", + "\n" + ] + } + ], + 
"source": [ + "!helm install ../../../helm-charts/seldon-core-operator --name seldon-core --set usageMetrics.enabled=true --namespace seldon-system" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Waiting for 1 pods to be ready...\n", + "partitioned roll out complete: 1 new pods have been updated...\n" + ] + } + ], + "source": [ + "!kubectl rollout status statefulset.apps/seldon-operator-controller-manager -n seldon-system" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setup Ingress\n", + "There are gRPC issues with the latest Ambassador, so we rewcommend 0.40.2 until these are fixed." + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "NAME: ambassador\n", + "LAST DEPLOYED: Fri Aug 16 22:57:51 2019\n", + "NAMESPACE: default\n", + "STATUS: DEPLOYED\n", + "\n", + "RESOURCES:\n", + "==> v1/Deployment\n", + "NAME READY UP-TO-DATE AVAILABLE AGE\n", + "ambassador 0/3 3 0 1s\n", + "\n", + "==> v1/Pod(related)\n", + "NAME READY STATUS RESTARTS AGE\n", + "ambassador-5f7fd859b-96ktm 0/1 ContainerCreating 0 1s\n", + "ambassador-5f7fd859b-qv5x9 0/1 ContainerCreating 0 1s\n", + "ambassador-5f7fd859b-zcrb2 0/1 ContainerCreating 0 1s\n", + "\n", + "==> v1/Service\n", + "NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE\n", + "ambassador LoadBalancer 10.97.72.34 80:30535/TCP,443:32073/TCP 1s\n", + "ambassador-admin ClusterIP 10.108.82.228 8877/TCP 1s\n", + "\n", + "==> v1/ServiceAccount\n", + "NAME SECRETS AGE\n", + "ambassador 1 1s\n", + "\n", + "==> v1beta1/ClusterRole\n", + "NAME AGE\n", + "ambassador 1s\n", + "ambassador-crds 1s\n", + "\n", + "==> v1beta1/ClusterRoleBinding\n", + "NAME AGE\n", + "ambassador 1s\n", + "ambassador-crds 1s\n", + "\n", + "==> v1beta1/CustomResourceDefinition\n", + "NAME AGE\n", + "authservices.getambassador.io 1s\n", + "consulresolvers.getambassador.io 1s\n", + "kubernetesendpointresolvers.getambassador.io 1s\n", + "kubernetesserviceresolvers.getambassador.io 1s\n", + "mappings.getambassador.io 1s\n", + "modules.getambassador.io 1s\n", + "ratelimitservices.getambassador.io 1s\n", + "tcpmappings.getambassador.io 1s\n", + "tlscontexts.getambassador.io 1s\n", + "tracingservices.getambassador.io 1s\n", + "\n", + "\n", + "NOTES:\n", + "Congratuations! 
You've successfully installed Ambassador.\n", + "\n", + "For help, visit our Slack at https://d6e.co/slack or view the documentation online at https://www.getambassador.io.\n", + "\n", + "To get the IP address of Ambassador, run the following commands:\n", + "NOTE: It may take a few minutes for the LoadBalancer IP to be available.\n", + " You can watch the status of by running 'kubectl get svc -w --namespace default ambassador'\n", + "\n", + " On GKE/Azure:\n", + " export SERVICE_IP=$(kubectl get svc --namespace default ambassador -o jsonpath='{.status.loadBalancer.ingress[0].ip}')\n", + "\n", + " On AWS:\n", + " export SERVICE_IP=$(kubectl get svc --namespace default ambassador -o jsonpath='{.status.loadBalancer.ingress[0].hostname}')\n", + "\n", + " echo http://$SERVICE_IP:\n", + "\n" + ] + } + ], + "source": [ + "!helm install stable/ambassador --name ambassador --set crds.keep=false" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Waiting for deployment \"ambassador\" rollout to finish: 0 of 3 updated replicas are available...\n", + "Waiting for deployment \"ambassador\" rollout to finish: 1 of 3 updated replicas are available...\n", + "Waiting for deployment \"ambassador\" rollout to finish: 2 of 3 updated replicas are available...\n", + "deployment \"ambassador\" successfully rolled out\n" + ] + } + ], + "source": [ + "!kubectl rollout status deployment.apps/ambassador" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---> Installing application source...\n", + "---> Installing dependencies ...\n", + "Looking in links: /whl\n", + "Collecting chainer==6.2.0 (from -r requirements.txt (line 1))\n", + " WARNING: Url '/whl' is ignored. It is either a non-existing path or lacks a specific scheme.\n", + "Downloading https://files.pythonhosted.org/packages/2c/5a/86c50a0119a560a39d782c4cdd9b72927c090cc2e3f70336e01b19a5f97a/chainer-6.2.0.tar.gz (873kB)\n", + "Requirement already satisfied: setuptools in /usr/local/lib/python3.6/site-packages (from chainer==6.2.0->-r requirements.txt (line 1)) (41.0.1)\n", + "Collecting typing<=3.6.6 (from chainer==6.2.0->-r requirements.txt (line 1))\n", + " WARNING: Url '/whl' is ignored. It is either a non-existing path or lacks a specific scheme.\n", + "Downloading https://files.pythonhosted.org/packages/4a/bd/eee1157fc2d8514970b345d69cb9975dcd1e42cd7e61146ed841f6e68309/typing-3.6.6-py3-none-any.whl\n", + "Collecting typing_extensions<=3.6.6 (from chainer==6.2.0->-r requirements.txt (line 1))\n", + " WARNING: Url '/whl' is ignored. It is either a non-existing path or lacks a specific scheme.\n", + "Downloading https://files.pythonhosted.org/packages/62/4f/392a1fa2873e646f5990eb6f956e662d8a235ab474450c72487745f67276/typing_extensions-3.6.6-py3-none-any.whl\n", + "Collecting filelock (from chainer==6.2.0->-r requirements.txt (line 1))\n", + " WARNING: Url '/whl' is ignored. 
It is either a non-existing path or lacks a specific scheme.\n", + "Downloading https://files.pythonhosted.org/packages/93/83/71a2ee6158bb9f39a90c0dea1637f81d5eef866e188e1971a1b1ab01a35a/filelock-3.0.12-py3-none-any.whl\n", + "Requirement already satisfied: numpy>=1.9.0 in /usr/local/lib/python3.6/site-packages (from chainer==6.2.0->-r requirements.txt (line 1)) (1.16.4)\n", + "Collecting protobuf<3.8.0rc1,>=3.0.0 (from chainer==6.2.0->-r requirements.txt (line 1))\n", + " WARNING: Url '/whl' is ignored. It is either a non-existing path or lacks a specific scheme.\n", + "Downloading https://files.pythonhosted.org/packages/5a/aa/a858df367b464f5e9452e1c538aa47754d467023850c00b000287750fa77/protobuf-3.7.1-cp36-cp36m-manylinux1_x86_64.whl (1.2MB)\n", + "Requirement already satisfied: six>=1.9.0 in /usr/local/lib/python3.6/site-packages (from chainer==6.2.0->-r requirements.txt (line 1)) (1.12.0)\n", + "Building wheels for collected packages: chainer\n", + "Building wheel for chainer (setup.py): started\n", + "Building wheel for chainer (setup.py): finished with status 'done'\n", + "Stored in directory: /root/.cache/pip/wheels/2e/be/c5/6ee506abcaa4a53106f7d7671bbee8b4e5243bc562a9d32ad1\n", + "Successfully built chainer\n", + "Installing collected packages: typing, typing-extensions, filelock, protobuf, chainer\n", + "Found existing installation: protobuf 3.8.0\n", + "Uninstalling protobuf-3.8.0:\n", + "Successfully uninstalled protobuf-3.8.0\n", + "Successfully installed chainer-6.2.0 filelock-3.0.12 protobuf-3.7.1 typing-3.6.6 typing-extensions-3.6.6\n", + "WARNING: Url '/whl' is ignored. It is either a non-existing path or lacks a specific scheme.\n", + "WARNING: You are using pip version 19.1, however version 19.2.2 is available.\n", + "You should consider upgrading via the 'pip install --upgrade pip' command.\n", + "Build completed successfully\n" + ] + } + ], + "source": [ + "!eval $(minikube docker-env) && s2i build . 
seldonio/seldon-core-s2i-python3:0.10 chainer-mnist:0.1" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "seldondeployment.machinelearning.seldon.io/seldon-deployment-example created\r\n" + ] + } + ], + "source": [ + "!kubectl create -f chainer_mnist_deployment.json" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Waiting for deployment \"chainer-mnist-deployment-chainer-mnist-predictor-76478b2\" rollout to finish: 0 of 1 updated replicas are available...\n", + "deployment \"chainer-mnist-deployment-chainer-mnist-predictor-76478b2\" successfully rolled out\n" + ] + } + ], + "source": [ + "!kubectl rollout status deploy/chainer-mnist-deployment-chainer-mnist-predictor-76478b2" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:516: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n", + "/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:517: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n", + "/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:518: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n", + "/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:519: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n", + "/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:520: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n", + "/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n", + "/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:541: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n", + 
"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:542: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n", + "/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:543: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n", + "/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:544: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n", + "/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:545: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n", + "/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:550: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n", + "----------------------------------------\n", + "SENDING NEW REQUEST:\n", + "\n", + "[[0.64 0.213 0.028 0.604 0.586 0.076 0.629 0.568 0.806 0.931 0.266 0.098\n", + " 0.526 0.336 0.569 0.965 0.157 0.401 0.15 0.405 0.594 0.21 0.699 0.085\n", + " 0.314 0.467 0.303 0.384 0.788 0.135 0.349 0.467 0.025 0.525 0.767 0.819\n", + " 0.275 0.212 0.784 0.448 0.808 0.582 0.939 0.165 0.761 0.272 0.332 0.321\n", + " 0.005 0.921 0.285 0.181 0.161 0.948 0.148 0.788 0.664 0.65 0.795 0.548\n", + " 0.754 0.407 0.057 0.429 0.569 0.538 0.295 0.4 0.581 0.569 0.299 0.066\n", + " 0.456 0.118 0.983 0.93 0.316 0.865 0.492 0.048 0.505 0.573 0.595 0.13\n", + " 0.595 0.595 0.474 0.334 0.708 0.25 0.183 0.391 0.268 0.252 0.366 0.029\n", + " 0.676 0.869 0.12 0.737 0.502 0.868 0.846 0.891 0.578 0.598 0.984 0.543\n", + " 0.515 0.081 0.998 0.976 0.611 0.492 0.494 0.985 0.443 0.246 0.252 0.871\n", + " 0.615 0.885 0.903 0.254 0.651 0.412 0.645 0.608 0.921 0.5 0.18 0.845\n", + " 0.91 0.601 0.782 0.27 0.643 0.671 0.273 0.37 0.454 0.08 0.854 0.439\n", + " 0.912 0.709 0.703 0.817 0.381 0.963 0.057 0.015 0.126 0.686 0.284 0.463\n", + " 0.231 0.332 0.932 0.804 0.538 0.039 0.12 0.992 0.436 0.791 0.261 0.842\n", + " 0.901 0.208 0.578 0.423 0.657 0.293 0.633 0.45 0.609 0.715 0.149 0.244\n", + " 0.026 0.332 0.525 0.157 0.749 0.88 0.713 0.405 0.473 0.01 0.038 0.807\n", + " 0.934 0.157 0.141 0.155 0.124 0.781 0.738 0.018 0.42 0.635 0.867 0.925\n", + " 0.398 0.505 0.695 0.429 0.174 0.327 0.123 0.967 0.378 0.224 0.393 0.053\n", + " 0.344 0.731 0.02 0.848 0.079 0.814 0.023 0.087 0.578 0.642 0.18 0.563\n", + " 0.276 0.491 0.021 0.719 0.85 0.156 0.031 0.506 0.271 0.095 0.186 0.002\n", + " 0.799 0.138 0.734 0.925 0.881 0.187 0.559 0.946 0.826 0.488 0.744 0.322\n", + " 0.333 0.322 0.665 0.032 0.663 0.754 0.495 0.569 0.917 0.167 0.168 0.409\n", + " 
0.369 0.363 0.23 0.961 0.201 0.463 0.565 0.834 0.431 0.848 0.742 0.436\n", + " 0.061 0.656 0.3 0.128 0.485 0.78 0.617 0.082 0.396 0.416 0.673 0.961\n", + " 0.727 0.986 0.222 0.909 0.898 0.144 0.639 0.046 0.101 0.546 0.782 0.069\n", + " 0.672 0.824 0.861 0.981 0.003 0.591 0.303 0.384 0.67 0.7 0.834 0.475\n", + " 0.932 0.949 0.938 0.945 0.368 0.522 0.833 0.045 0.452 0.068 0.165 0.569\n", + " 0.44 0.702 0.727 0.069 0.686 0.262 0.891 0.547 0.994 0.454 0.947 0.364\n", + " 0.154 0.322 0.571 0.19 0.476 0.925 0.871 0.605 0.442 0.585 0.544 0.316\n", + " 0.915 0.253 0.973 0.501 0.402 0.96 0.206 0.501 0.37 0.463 0.904 0.981\n", + " 0.969 0.877 0.724 0.5 0.447 0.499 0.443 0.349 0.79 0.051 0.384 0.27\n", + " 0.094 0.774 0.742 0.16 0.517 0.266 0.908 0.796 0.862 0.987 0.939 0.909\n", + " 0.962 0.587 0.964 0.159 0.029 0.952 0.416 0.72 0.346 0.257 0.152 0.233\n", + " 0.862 0.457 0.153 0.076 0.105 0.634 0.652 0.435 0.757 0.985 0.487 0.114\n", + " 0.95 0.217 0.877 0.483 0.302 0.929 0.856 0.768 0.223 0.006 0.841 0.565\n", + " 0.611 0.407 0.71 0.588 0.654 0.197 0.506 0.938 0.779 0.387 0.007 0.482\n", + " 0.523 0.993 0.671 0.044 0.497 0.71 0.418 0.06 0.114 0.082 0.811 0.083\n", + " 0.773 0.134 0.87 0.414 0.787 0.972 0.132 0.047 0.593 0.502 0.15 0.042\n", + " 0.363 0.311 0.17 0.895 0.569 0.774 0.006 0.408 0.92 0.753 0.543 0.279\n", + " 0.911 0.314 0.195 0.538 0.977 0.606 0.954 0.378 0.397 0.261 0.085 0.656\n", + " 0.978 0.598 0.216 0.832 0.105 0.958 0.185 0.81 0.444 0.308 0.013 0.176\n", + " 0.603 0.383 0.671 0.436 0.981 0.072 0.713 0.349 0.962 0.055 0.315 0.417\n", + " 0.052 0.076 0.198 0.786 0.397 0.757 0.145 0.539 0.671 0.583 0.42 0.575\n", + " 0.563 0.286 0.788 0.481 0.403 0.85 0.864 0.945 0.427 0.511 0.268 0.091\n", + " 0.049 0.611 0.137 0.58 0.281 0.057 0.453 0.461 0.895 0.701 0.662 0.599\n", + " 0.967 0.562 0.295 0.6 0.742 0.909 0.69 0.383 0.553 0.078 0.949 0.109\n", + " 0.771 0.083 0.712 0.514 0.549 0.403 0.575 0.494 0.31 0.307 0.091 0.874\n", + " 0.591 0.315 0.199 0.372 0.131 0.905 0.32 0.284 0.516 0.055 0.832 0.042\n", + " 0.927 0.667 0.273 0.426 0.054 0.799 0.356 0.564 0.223 0.772 0.79 0.628\n", + " 0.893 0.512 0.523 0.518 0.48 0.869 0.49 0.416 0.775 0.864 0.921 0.968\n", + " 0.109 0.812 0.943 0.042 0.179 0.943 0.324 0.079 0.017 0.226 0.848 0.803\n", + " 0.873 0.834 0.696 0.582 0.125 0.042 0.917 0.909 0.491 0.5 0.101 0.779\n", + " 0.65 0.424 0.94 0.582 0.706 0.935 0.286 0.057 0.544 0.198 0.893 0.537\n", + " 0.405 0.91 0.908 0.297 0.288 0.368 0.654 0.347 0.002 0.677 0.32 0.691\n", + " 0.17 0.133 0.586 0.857 0.001 0.639 0.223 0.164 0.689 0.97 0.913 0.947\n", + " 0.962 0.44 0.201 0.343 0.493 0.662 0.728 0.295 0.445 0.739 0.764 0.955\n", + " 0.206 0.298 0.996 0.835 0.983 0.033 0.801 0.284 0.621 0.941 0.293 0.865\n", + " 0.158 0.788 0.681 0.613 0.705 0.753 0.006 0.175 0.414 0.299 0.116 0.67\n", + " 0.66 0.845 0.905 0.369 0.11 0.841 0.717 0.348 0.537 0.116 0.024 0.575\n", + " 0.211 0.427 0.84 0.447 0.056 0.427 0.39 0.424 0.48 0.738 0.698 0.377\n", + " 0.143 0.242 0.877 0.238 0.188 0.786 0.965 0.112 0.952 0.679 0.916 0.13\n", + " 0.882 0.353 0.433 0.608 0.297 0.558 0.663 0.646 0.185 0.91 0.131 0.217\n", + " 0.549 0.759 0.087 0.96 0.11 0.613 0.643 0.218 0.126 0.535 0.751 0.097\n", + " 0.681 0.782 0.367 0.197 0.05 0.742 0.623 0.763 0.625 0.317 0.364 0.879\n", + " 0.445 0.751 0.87 0.727 0.879 0.035 0.412 0.907 0.895 0.923 0.373 0.22\n", + " 0.21 0.176 0.182 0.821]]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "RECEIVED RESPONSE:\r\n", + "meta {\r\n", + " puid: 
\"c88qkq0g5nsdtkero34a1i56dv\"\r\n", + " requestPath {\r\n", + " key: \"chainer-mnist-classifier\"\r\n", + " value: \"chainer-mnist:0.1\"\r\n", + " }\r\n", + "}\r\n", + "data {\r\n", + " names: \"t:0\"\r\n", + " names: \"t:1\"\r\n", + " names: \"t:2\"\r\n", + " names: \"t:3\"\r\n", + " names: \"t:4\"\r\n", + " names: \"t:5\"\r\n", + " names: \"t:6\"\r\n", + " names: \"t:7\"\r\n", + " names: \"t:8\"\r\n", + " names: \"t:9\"\r\n", + " ndarray {\r\n", + " values {\r\n", + " list_value {\r\n", + " values {\r\n", + " number_value: 2.4670965671539307\r\n", + " }\r\n", + " values {\r\n", + " number_value: -4.471328258514404\r\n", + " }\r\n", + " values {\r\n", + " number_value: -12.0973482131958\r\n", + " }\r\n", + " values {\r\n", + " number_value: -8.344386100769043\r\n", + " }\r\n", + " values {\r\n", + " number_value: -1.4862585067749023\r\n", + " }\r\n", + " values {\r\n", + " number_value: -2.065153121948242\r\n", + " }\r\n", + " values {\r\n", + " number_value: 1.480709195137024\r\n", + " }\r\n", + " values {\r\n", + " number_value: -6.679415702819824\r\n", + " }\r\n", + " values {\r\n", + " number_value: -7.224794864654541\r\n", + " }\r\n", + " values {\r\n", + " number_value: -7.008320331573486\r\n", + " }\r\n", + " }\r\n", + " }\r\n", + " }\r\n", + "}\r\n", + "\r\n", + "\r\n" + ] + } + ], + "source": [ + "!seldon-core-api-tester contract.json `minikube ip` `kubectl get svc ambassador -o jsonpath='{.spec.ports[0].nodePort}'` \\\n", + " seldon-deployment-example --namespace default -p" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!minikube delete" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.4" + }, + "varInspector": { + "cols": { + "lenName": 16, + "lenType": 16, + "lenVar": 40 + }, + "kernels_config": { + "python": { + "delete_cmd_postfix": "", + "delete_cmd_prefix": "del ", + "library": "var_list.py", + "varRefreshCmd": "print(var_dic_list())" + }, + "r": { + "delete_cmd_postfix": ") ", + "delete_cmd_prefix": "rm(", + "library": "var_list.r", + "varRefreshCmd": "cat(var_dic_list()) " + } + }, + "types_to_exclude": [ + "module", + "function", + "builtin_function_or_method", + "instance", + "_Feature" + ], + "window_display": false + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/models/chainer_mnist/chainer_mnist_deployment.json b/examples/models/chainer_mnist/chainer_mnist_deployment.json new file mode 100644 index 0000000000..a0c47d7f40 --- /dev/null +++ b/examples/models/chainer_mnist/chainer_mnist_deployment.json @@ -0,0 +1,53 @@ +{ + "apiVersion": "machinelearning.seldon.io/v1alpha2", + "kind": "SeldonDeployment", + "metadata": { + "labels": { + "app": "seldon" + }, + "name": "seldon-deployment-example" + }, + "spec": { + "annotations": { + "project_name": "Digits classification", + "deployment_version": "0.0" + }, + "name": "chainer-mnist-deployment", + "oauth_key": "oauth-key", + "oauth_secret": "oauth-secret", + "predictors": [ + { + "componentSpecs": [{ + "spec": { + "containers": [ + { + "image": "chainer-mnist:0.1", + "imagePullPolicy": "IfNotPresent", + "name": "chainer-mnist-classifier", + "resources": { + "requests": { + "memory": "1Mi" + } + } + } + ], + 
"terminationGracePeriodSeconds": 20 + } + }], + "graph": { + "children": [], + "name": "chainer-mnist-classifier", + "endpoint": { + "type" : "REST" + }, + "type": "MODEL" + }, + "name": "chainer-mnist-predictor", + "replicas": 1, + "annotations": { + "predictor_version" : "0.0" + } + } + ] + } +} diff --git a/examples/models/chainer_mnist/contract.json b/examples/models/chainer_mnist/contract.json new file mode 100644 index 0000000000..3947eba31f --- /dev/null +++ b/examples/models/chainer_mnist/contract.json @@ -0,0 +1,20 @@ +{ + "features":[ + { + "name":"x", + "dtype":"FLOAT", + "ftype":"continuous", + "range":[0,1], + "repeat":784 + } + ], + "targets":[ + { + "name":"class", + "dtype":"FLOAT", + "ftype":"continuous", + "range":[0,1], + "repeat":10 + } + ] +} diff --git a/examples/models/chainer_mnist/requirements.txt b/examples/models/chainer_mnist/requirements.txt new file mode 100644 index 0000000000..27c885912b --- /dev/null +++ b/examples/models/chainer_mnist/requirements.txt @@ -0,0 +1 @@ +chainer==6.2.0 diff --git a/examples/models/chainer_mnist/train_mnist.py b/examples/models/chainer_mnist/train_mnist.py new file mode 100755 index 0000000000..5aa5c90df2 --- /dev/null +++ b/examples/models/chainer_mnist/train_mnist.py @@ -0,0 +1,134 @@ +#!/usr/bin/env python +import argparse + +import chainer +import chainer.functions as F +import chainer.links as L +from chainer import training +from chainer.training import extensions +import chainerx + + +# Network definition +class MLP(chainer.Chain): + + def __init__(self, n_units, n_out): + super(MLP, self).__init__() + with self.init_scope(): + # the size of the inputs to each layer will be inferred + self.l1 = L.Linear(None, n_units) # n_in -> n_units + self.l2 = L.Linear(None, n_units) # n_units -> n_units + self.l3 = L.Linear(None, n_out) # n_units -> n_out + + def forward(self, x): + h1 = F.relu(self.l1(x)) + h2 = F.relu(self.l2(h1)) + return self.l3(h2) + + +def main(): + parser = argparse.ArgumentParser(description='Chainer example: MNIST') + parser.add_argument('--batchsize', '-b', type=int, default=100, + help='Number of images in each mini-batch') + parser.add_argument('--epoch', '-e', type=int, default=20, + help='Number of sweeps over the dataset to train') + parser.add_argument('--frequency', '-f', type=int, default=-1, + help='Frequency of taking a snapshot') + parser.add_argument('--device', '-d', type=str, default='-1', + help='Device specifier. Either ChainerX device ' + 'specifier or an integer. If non-negative integer, ' + 'CuPy arrays with specified device id are used. 
If ' + 'negative integer, NumPy arrays are used') + parser.add_argument('--out', '-o', default='result', + help='Directory to output the result') + parser.add_argument('--resume', '-r', type=str, + help='Resume the training from snapshot') + parser.add_argument('--unit', '-u', type=int, default=1000, + help='Number of units') + parser.add_argument('--noplot', dest='plot', action='store_false', + help='Disable PlotReport extension') + group = parser.add_argument_group('deprecated arguments') + group.add_argument('--gpu', '-g', dest='device', + type=int, nargs='?', const=0, + help='GPU ID (negative value indicates CPU)') + args = parser.parse_args() + + device = chainer.get_device(args.device) + + print('Device: {}'.format(device)) + print('# unit: {}'.format(args.unit)) + print('# Minibatch-size: {}'.format(args.batchsize)) + print('# epoch: {}'.format(args.epoch)) + print('') + + # Set up a neural network to train + # Classifier reports softmax cross entropy loss and accuracy at every + # iteration, which will be used by the PrintReport extension below. + model = L.Classifier(MLP(args.unit, 10)) + model.to_device(device) + device.use() + + # Setup an optimizer + optimizer = chainer.optimizers.Adam() + optimizer.setup(model) + + # Load the MNIST dataset + train, test = chainer.datasets.get_mnist() + + train_iter = chainer.iterators.SerialIterator(train, args.batchsize) + test_iter = chainer.iterators.SerialIterator(test, args.batchsize, + repeat=False, shuffle=False) + + # Set up a trainer + updater = training.updaters.StandardUpdater( + train_iter, optimizer, device=device) + trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out) + + # Evaluate the model with the test dataset for each epoch + trainer.extend(extensions.Evaluator(test_iter, model, device=device)) + + # Dump a computational graph from 'loss' variable at the first iteration + # The "main" refers to the target link of the "main" optimizer. + # TODO(niboshi): Temporarily disabled for chainerx. Fix it. + if device.xp is not chainerx: + trainer.extend(extensions.DumpGraph('main/loss')) + + # Take a snapshot for each specified epoch + frequency = args.epoch if args.frequency == -1 else max(1, args.frequency) + trainer.extend(extensions.snapshot(), trigger=(frequency, 'epoch')) + + # Write a log of evaluation statistics for each epoch + trainer.extend(extensions.LogReport()) + + # Save two plot images to the result dir + if args.plot and extensions.PlotReport.available(): + trainer.extend( + extensions.PlotReport(['main/loss', 'validation/main/loss'], + 'epoch', file_name='loss.png')) + trainer.extend( + extensions.PlotReport( + ['main/accuracy', 'validation/main/accuracy'], + 'epoch', file_name='accuracy.png')) + + # Print selected entries of the log to stdout + # Here "main" refers to the target link of the "main" optimizer again, and + # "validation" refers to the default name of the Evaluator extension. + # Entries other than 'epoch' are reported by the Classifier link, called by + # either the updater or the evaluator. + trainer.extend(extensions.PrintReport( + ['epoch', 'main/loss', 'validation/main/loss', + 'main/accuracy', 'validation/main/accuracy', 'elapsed_time'])) + + # Print a progress bar to stdout + trainer.extend(extensions.ProgressBar()) + + if args.resume is not None: + # Resume from a snapshot + chainer.serializers.load_npz(args.resume, trainer) + + # Run the training + trainer.run() + + +if __name__ == '__main__': + main()
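+
+# Usage (a minimal sketch, assuming the defaults above): with batchsize=100 and
+# epoch=20, the 60,000 MNIST training images give 600 iterations per epoch, so
+# the final snapshot is written to 'result/snapshot_iter_12000', the path the
+# wrapped MnistClassifier model loads at serving time.
+#
+#   python train_mnist.py --out result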