From 51fa2ff5668be34c84b3b18fc865f369cc486d9e Mon Sep 17 00:00:00 2001
From: AllentDan <41138331+AllentDan@users.noreply.github.com>
Date: Mon, 7 Feb 2022 13:47:38 +0800
Subject: [PATCH 1/4] [Enhancement] Support mmocr v0.4+ (#115)

* support mmocr v0.4+
* 0.4.0 -> 0.4.1
---
 mmdeploy/codebase/mmcls/deploy/classification_model.py   | 2 +-
 mmdeploy/codebase/mmocr/deploy/mmocr.py                  | 8 +++++++-
 mmdeploy/codebase/mmocr/deploy/text_detection_model.py   | 2 +-
 mmdeploy/codebase/mmocr/deploy/text_recognition_model.py | 2 +-
 4 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/mmdeploy/codebase/mmcls/deploy/classification_model.py b/mmdeploy/codebase/mmcls/deploy/classification_model.py
index 458c66ef8a..f7f3bbfe73 100644
--- a/mmdeploy/codebase/mmcls/deploy/classification_model.py
+++ b/mmdeploy/codebase/mmcls/deploy/classification_model.py
@@ -98,7 +98,7 @@ def forward_test(self, imgs: torch.Tensor, *args, **kwargs) -> \
     def show_result(self,
                     img: np.ndarray,
                     result: list,
-                    win_name: str,
+                    win_name: str = '',
                     show: bool = True,
                     out_file: str = None):
         """Show predictions of classification.
diff --git a/mmdeploy/codebase/mmocr/deploy/mmocr.py b/mmdeploy/codebase/mmocr/deploy/mmocr.py
index 5bc95bf2c6..c97d2958a4 100644
--- a/mmdeploy/codebase/mmocr/deploy/mmocr.py
+++ b/mmdeploy/codebase/mmocr/deploy/mmocr.py
@@ -4,6 +4,7 @@
 import mmcv
 import torch
 from mmcv.utils import Registry
+from packaging import version
 from torch.utils.data import DataLoader, Dataset
 
 from mmdeploy.codebase.base import CODEBASE, BaseTask, MMCodebase
@@ -137,6 +138,11 @@ def single_gpu_test(model: torch.nn.Module,
         Returns:
             list: The prediction results.
         """
-        from mmdet.apis import single_gpu_test
+        import mmocr
+        # fixed the bug when using `--show-dir` after mocr v0.4.1
+        if version.parse(mmocr.__version__) < version.parse('0.4.1'):
+            from mmdet.apis import single_gpu_test
+        else:
+            from mmocr.apis import single_gpu_test
         outputs = single_gpu_test(model, data_loader, show, out_dir, **kwargs)
         return outputs
diff --git a/mmdeploy/codebase/mmocr/deploy/text_detection_model.py b/mmdeploy/codebase/mmocr/deploy/text_detection_model.py
index 4b9b4ec99b..31861b66e4 100644
--- a/mmdeploy/codebase/mmocr/deploy/text_detection_model.py
+++ b/mmdeploy/codebase/mmocr/deploy/text_detection_model.py
@@ -118,7 +118,7 @@ def forward_test(self, imgs: torch.Tensor, *args, **kwargs) -> \
     def show_result(self,
                     img: np.ndarray,
                     result: dict,
-                    win_name: str,
+                    win_name: str = '',
                     show: bool = True,
                     score_thr: float = 0.3,
                     out_file: str = None):
diff --git a/mmdeploy/codebase/mmocr/deploy/text_recognition_model.py b/mmdeploy/codebase/mmocr/deploy/text_recognition_model.py
index 42e88f2235..7f07dbba63 100644
--- a/mmdeploy/codebase/mmocr/deploy/text_recognition_model.py
+++ b/mmdeploy/codebase/mmocr/deploy/text_recognition_model.py
@@ -125,7 +125,7 @@ def forward_test(self, imgs: torch.Tensor,
     def show_result(self,
                     img: np.ndarray,
                     result: list,
-                    win_name: str,
+                    win_name: str = '',
                     show: bool = True,
                     score_thr: float = 0.3,
                     out_file: str = None):
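The heart of patch 1 is the version gate in `mmdeploy/codebase/mmocr/deploy/mmocr.py`: mmocr releases older than 0.4.1 borrow `single_gpu_test` from `mmdet.apis`, while newer releases use mmocr's own implementation, which the patch comment credits with fixing `--show-dir` visualization. Below is a minimal standalone sketch of that pattern, assuming mmocr and mmdet are installed; the final `print` is illustrative only and not part of the patch.

```python
# Sketch of the version-gated import added in patch 1 (outside the MMOCR task class).
from packaging import version

import mmocr

if version.parse(mmocr.__version__) < version.parse('0.4.1'):
    # Older mmocr releases reused mmdet's test loop.
    from mmdet.apis import single_gpu_test
else:
    # mmocr >= 0.4.1 ships its own test loop, which handles `--show-dir` properly.
    from mmocr.apis import single_gpu_test

# Illustrative check of which implementation was picked.
print('single_gpu_test resolved from', single_gpu_test.__module__)
```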
From 9f9670ebaef294edaee7e7df48c88eab2a0dd39e Mon Sep 17 00:00:00 2001
From: RunningLeon
Date: Tue, 8 Feb 2022 13:50:05 +0800
Subject: [PATCH 2/4] fix onnxruntime wrapper for gpu inference (#123)

* fix ncnn wrapper for ort-gpu
* resolve comment
* fix lint
---
 mmdeploy/backend/onnxruntime/wrapper.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/mmdeploy/backend/onnxruntime/wrapper.py b/mmdeploy/backend/onnxruntime/wrapper.py
index 51116716cd..4239853e2d 100644
--- a/mmdeploy/backend/onnxruntime/wrapper.py
+++ b/mmdeploy/backend/onnxruntime/wrapper.py
@@ -27,7 +27,7 @@ class ORTWrapper(BaseWrapper):
         >>> import torch
         >>>
         >>> onnx_file = 'model.onnx'
-        >>> model = ORTWrapper(onnx_file, -1)
+        >>> model = ORTWrapper(onnx_file, 'cpu')
         >>> inputs = dict(input=torch.randn(1, 3, 224, 224, device='cpu'))
         >>> outputs = model(inputs)
         >>> print(outputs)
@@ -79,7 +79,9 @@ def forward(self, inputs: Dict[str,
             input_tensor = input_tensor.contiguous()
             if not self.is_cuda_available:
                 input_tensor = input_tensor.cpu()
-            element_type = input_tensor.numpy().dtype
+            # Avoid unnecessary data transfer between host and device
+            element_type = input_tensor.new_zeros(
+                1, device='cpu').numpy().dtype
             self.io_binding.bind_input(
                 name=name,
                 device_type=self.device_type,

From 89372d8659e2a1b6fa8a32b3346b7e4e6a915361 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=9D=A8=E5=9F=B9=E6=96=87=20=28Yang=20Peiwen=29?= <915505626@qq.com>
Date: Tue, 8 Feb 2022 16:59:33 +0800
Subject: [PATCH 3/4] Fix typo (#132)

---
 docs/en/tutorials/how_to_install_mmdeploy_on_jetsons.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/en/tutorials/how_to_install_mmdeploy_on_jetsons.md b/docs/en/tutorials/how_to_install_mmdeploy_on_jetsons.md
index 484d1ce1c2..5d186c5111 100644
--- a/docs/en/tutorials/how_to_install_mmdeploy_on_jetsons.md
+++ b/docs/en/tutorials/how_to_install_mmdeploy_on_jetsons.md
@@ -104,7 +104,7 @@ cmake --version
 ### Install mmdeploy
 Just follow the instruction [here](../build.md). If it throws `failed building wheel for numpy...ERROR: Failed to build one or more wheels` when installing `h5py`, try install `h5py` manually.
 ```
-sudo apt-get install pkd-config libhdf5-100 libhdf5-dev
+sudo apt-get install pkg-config libhdf5-100 libhdf5-dev
 pip install versioned-hdf5 --no-cache-dir
 ```
 

From cce81d3ce01f67fb68247a13f3af323c574c5b9c Mon Sep 17 00:00:00 2001
From: RunningLeon
Date: Tue, 8 Feb 2022 19:33:48 +0800
Subject: [PATCH 4/4] lock mmcls version (#131)

---
 requirements/optional.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements/optional.txt b/requirements/optional.txt
index a93bb9c43b..f7dcdfda44 100644
--- a/requirements/optional.txt
+++ b/requirements/optional.txt
@@ -1,4 +1,4 @@
-mmcls>=0.15.0
+mmcls>=0.15.0,<=0.19.0
 mmdet>=2.19.0
 mmedit
 mmocr==0.3.0
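A closing note on patch 2: the ONNX Runtime wrapper needs a numpy dtype (`element_type`) for its IO binding, but calling `.numpy()` on the input tensor directly fails for CUDA tensors or forces the data through the host. Creating a one-element CPU tensor with `new_zeros` yields the same dtype without touching the real data. A rough illustration of that idea outside the wrapper, with a made-up tensor `x` standing in for `input_tensor`:

```python
# Illustrates the dtype probe from patch 2; `x` and its shape are hypothetical.
import torch

x = torch.randn(1, 3, 224, 224)
if torch.cuda.is_available():
    x = x.cuda()  # the real inference input can stay on the GPU

# x.numpy() raises on a CUDA tensor, and x.cpu().numpy() would copy every element.
# A one-element CPU tensor created via new_zeros carries only the dtype.
element_type = x.new_zeros(1, device='cpu').numpy().dtype
print(element_type)  # float32 for the tensor above
```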