From 53e2abd04d24da1892b0f298777b58c8b301d63b Mon Sep 17 00:00:00 2001
From: nijkah
Date: Sun, 3 Apr 2022 19:38:01 +0900
Subject: [PATCH] add documentation for serving

---
 docs/en/useful_tools.md | 128 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 128 insertions(+)
 create mode 100644 docs/en/useful_tools.md

diff --git a/docs/en/useful_tools.md b/docs/en/useful_tools.md
new file mode 100644
index 000000000..af86aea9e
--- /dev/null
+++ b/docs/en/useful_tools.md
@@ -0,0 +1,128 @@
+Apart from training/testing scripts, we provide many useful tools under the
+`tools/` directory.
+
+## Model Serving
+
+In order to serve an `MMRotate` model with [`TorchServe`](https://pytorch.org/serve/), you can follow the steps below:
+
+### 1. Convert model from MMRotate to TorchServe
+
+```shell
+python tools/deployment/mmrotate2torchserve.py ${CONFIG_FILE} ${CHECKPOINT_FILE} \
+--output-folder ${MODEL_STORE} \
+--model-name ${MODEL_NAME}
+```
+
+Example:
+
+```shell
+wget -P checkpoint \
+https://download.openmmlab.com/mmrotate/v0.1.0/rotated_faster_rcnn/rotated_faster_rcnn_r50_fpn_1x_dota_le90/rotated_faster_rcnn_r50_fpn_1x_dota_le90-0393aa5c.pth
+
+python tools/deployment/mmrotate2torchserve.py configs/rotated_faster_rcnn/rotated_faster_rcnn_r50_fpn_1x_dota_le90.py checkpoint/rotated_faster_rcnn_r50_fpn_1x_dota_le90-0393aa5c.pth \
+--output-folder ${MODEL_STORE} \
+--model-name rotated_faster_rcnn
+```
+
+**Note**: `${MODEL_STORE}` needs to be an absolute path to a folder.
+
+### 2. Build `mmrotate-serve` docker image
+
+```shell
+docker build -t mmrotate-serve:latest docker/serve/
+```
+
+### 3. Run `mmrotate-serve`
+
+Check the official docs for [running TorchServe with Docker](https://github.com/pytorch/serve/blob/master/docker/README.md#running-torchserve-in-a-production-docker-environment).
+
+To run on a GPU, you need to install [nvidia-docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html). You can omit the `--gpus` argument to run on the CPU instead.
+
+Example:
+
+```shell
+docker run --rm \
+--cpus 8 \
+--gpus device=0 \
+-p8080:8080 -p8081:8081 -p8082:8082 \
+--mount type=bind,source=$MODEL_STORE,target=/home/model-server/model-store \
+mmrotate-serve:latest
+```
+
+[Read the docs](https://github.com/pytorch/serve/blob/072f5d088cce9bb64b2a18af065886c9b01b317b/docs/rest_api.md/) about the Inference (8080), Management (8081), and Metrics (8082) APIs.
+
+### 4. Test deployment
+
+```shell
+curl -O https://raw.githubusercontent.com/open-mmlab/mmrotate/main/demo/demo.jpg
+curl http://127.0.0.1:8080/predictions/${MODEL_NAME} -T demo.jpg
+```
+
+You should obtain a response similar to:
+
+```json
+[
+  {
+    "class_name": "small-vehicle",
+    "bbox": [
+      584.9473266601562,
+      327.2749938964844,
+      38.45665740966797,
+      16.898427963256836,
+      -0.7229751944541931
+    ],
+    "score": 0.9766026139259338
+  },
+  {
+    "class_name": "small-vehicle",
+    "bbox": [
+      152.0239715576172,
+      305.92572021484375,
+      43.144744873046875,
+      18.85024642944336,
+      0.014928221702575684
+    ],
+    "score": 0.972826361656189
+  },
+  # ...
+  {
+    "class_name": "large-vehicle",
+    "bbox": [
+      160.58056640625,
+      437.3690185546875,
+      55.6795654296875,
+      19.31710433959961,
+      0.007036328315734863
+    ],
+    "score": 0.888836681842804
+  },
+  {
+    "class_name": "large-vehicle",
+    "bbox": [
+      666.2868041992188,
+      1011.3961181640625,
+      60.396209716796875,
+      21.821645736694336,
+      0.8549195528030396
+    ],
+    "score": 0.8240180015563965
+  }
+]
+```
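+
+If the prediction request hangs or fails, it can help to first confirm that the server is up and that the model was registered. Here is a minimal check against TorchServe's standard ping and management endpoints, assuming the default port mapping from step 3:
+
+```shell
+# Liveness check against the Inference API (port 8080)
+curl http://127.0.0.1:8080/ping
+
+# List the models registered with the Management API (port 8081)
+curl http://127.0.0.1:8081/models
+```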
+
+You can also use `test_torchserver.py` to compare the results of TorchServe and PyTorch, and to visualize them.
+
+```shell
+python tools/deployment/test_torchserver.py ${IMAGE_FILE} ${CONFIG_FILE} ${CHECKPOINT_FILE} ${MODEL_NAME}
+[--inference-addr ${INFERENCE_ADDR}] [--device ${DEVICE}] [--score-thr ${SCORE_THR}]
+```
+
+Example:
+
+```shell
+python tools/deployment/test_torchserver.py \
+demo/demo.jpg \
+configs/rotated_faster_rcnn/rotated_faster_rcnn_r50_fpn_1x_dota_le90.py \
+checkpoint/rotated_faster_rcnn_r50_fpn_1x_dota_le90-0393aa5c.pth \
+rotated_faster_rcnn
+```
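+
+Once the deployment checks out, you can also use TorchServe's Management API to scale the number of workers serving the model without restarting the container. The calls below are a sketch, assuming the model name from step 1 and the default port mapping from step 3:
+
+```shell
+# Ask TorchServe to keep at least two workers for the model
+curl -X PUT "http://127.0.0.1:8081/models/rotated_faster_rcnn?min_worker=2"
+
+# Describe the model to confirm the worker count
+curl http://127.0.0.1:8081/models/rotated_faster_rcnn
+```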