Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

feat(codebase): support mmdet3d dev-1.x #1067

Closed
wants to merge 8 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions configs/mmdet3d/voxel-detection/voxel-detection_static.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,5 +2,5 @@
codebase_config = dict(
type='mmdet3d', task='VoxelDetection', model_type='end2end')
onnx_config = dict(
input_names=['voxels', 'num_points', 'coors'],
output_names=['scores', 'bbox_preds', 'dir_scores'])
input_names=['placeholder', 'num_points', 'voxels', 'coors'],
output_names=['cls_score', 'bbox_pred', 'dir_cls_pred'])
2 changes: 1 addition & 1 deletion docs/en/07-developer-guide/architecture.md
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ $ tree -L 1
├── service # Some small boards not support python, we use C/S mode for model conversion, here is server code
├── tests # unittest
├── third_party # 3rd party dependencies required by SDK and FFI
└── tools # Tools are also the entrance to all functions, such as onnx2xx.py, profile.py, test.py, etc.
└── tools # Tools are also the entrance to all functions, such as onnx2xx.py, profiler.py, test.py, etc.
```

It should be clear
Expand Down
2 changes: 1 addition & 1 deletion docs/zh_cn/07-developer-guide/architecture.md
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ $ tree -L 1
├── service # 有些小板子不能跑 python,模型转换用的 C/S 模式。这个目录放 Server
├── tests # 单元测试
├── third_party # SDK 和 ffi 要的第三方依赖
└── tools # 工具,也是一切功能的入口。除了 deploy.py 还有 onnx2xx.py、profile.py 和 test.py
└── tools # 工具,也是一切功能的入口。除了 deploy.py 还有 onnx2xx.py、profiler.py 和 test.py
```

这样大致应该清楚了
Expand Down
7 changes: 2 additions & 5 deletions mmdeploy/apis/pytorch2onnx.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,15 +60,12 @@ def torch2onnx(img: Any,
task_processor = build_task_processor(model_cfg, deploy_cfg, device)

torch_model = task_processor.build_pytorch_model(model_checkpoint)
data, model_inputs = task_processor.create_input(
img,
input_shape,
data_preprocessor=getattr(torch_model, 'data_preprocessor', None))
data, model_inputs = task_processor.create_input(img, input_shape)
if not isinstance(model_inputs, torch.Tensor) and len(model_inputs) == 1:
model_inputs = model_inputs[0]
data_samples = data['data_samples']
patch_metas = {'data_samples': data_samples}
input_metas = {'data_samples': data_samples, 'mode': 'predict'}
input_metas = {'data_samples': data_samples, 'mode': task_processor.mode()}

# export to onnx
context_info = dict()
Expand Down
6 changes: 6 additions & 0 deletions mmdeploy/apis/visualize.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,13 +68,19 @@ def visualize_model(model_cfg: Union[str, mmengine.Config],
model = task_processor.build_backend_model(model)

model_inputs, _ = task_processor.create_input(img, input_shape)

with torch.no_grad():
result = model.test_step(model_inputs)[0]

data_samples = None
if 'data_samples' in model_inputs:
data_samples = model_inputs['data_samples']

task_processor.visualize(
image=img,
model=model,
result=result,
data_samples=data_samples,
output_file=output_file,
window_name=backend.value,
show_result=show_result)
60 changes: 60 additions & 0 deletions mmdeploy/backend/tensorrt/utils.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,8 @@
# Copyright (c) OpenMMLab. All rights reserved.
import logging
import os
import re
import sys
from typing import Dict, Optional, Sequence, Union

import onnx
Expand Down Expand Up @@ -38,6 +41,54 @@ def load(path: str) -> trt.ICudaEngine:
return engine


def search_cuda_version() -> str:
    """Detect the installed CUDA version.

    Tries shell commands first (``nvcc``, then ``nvidia-smi``) and falls
    back to ``torch.version.cuda``.

    Returns:
        str: CUDA version such as ``10.2``, or ``None`` if it cannot be
            determined.
    """

    version = None

    # Matches a "major.minor" version string, e.g. "10.2".
    pattern = re.compile(r'[0-9]+\.[0-9]+')
    platform = sys.platform.lower()

    def cmd_result(txt: str):
        # Run a shell command and return its stdout stripped of
        # surrounding whitespace ('' if the command is unavailable).
        cmd = os.popen(txt)
        return cmd.read().strip()

    if platform in ('linux', 'darwin', 'freebsd'):
        # nvcc prints e.g. "Cuda compilation tools, release 10.2, V10.2.89";
        # the awk pipeline extracts "10.2".
        version = cmd_result(
            " nvcc --version | grep release | awk '{print $5}' | awk -F , '{print $1}' "  # noqa E501
        )
        if version is None or pattern.match(version) is None:
            # nvcc unavailable or output unparsable: fall back to the
            # driver-reported CUDA version from nvidia-smi.
            version = cmd_result(
                " nvidia-smi | grep CUDA | awk '{print $9}' ")

    elif platform in ('win32', 'cygwin'):
        # nvcc_release = "Cuda compilation tools, release 10.2, V10.2.89"
        nvcc_release = cmd_result(' nvcc --version | find "release" ')
        if nvcc_release is not None:
            result = pattern.findall(nvcc_release)
            if len(result) > 0:
                version = result[0]

        if version is None or pattern.match(version) is None:
            # nvidia_smi = "| NVIDIA-SMI 440.33.01 Driver Version: 440.33.01 CUDA Version: 10.2 |" # noqa E501
            nvidia_smi = cmd_result(' nvidia-smi | find "CUDA Version" ')
            result = pattern.findall(nvidia_smi)
            # The third "major.minor" match is the CUDA version; the first
            # two come from the driver-version fields.
            if len(result) > 2:
                version = result[2]

    if version is None or pattern.match(version) is None:
        # Last resort: the CUDA version PyTorch was built against.
        try:
            import torch
            version = torch.version.cuda
        except Exception:
            pass

    return version


def from_onnx(onnx_model: Union[str, onnx.ModelProto],
output_file_prefix: str,
input_shapes: Dict[str, Sequence[int]],
Expand Down Expand Up @@ -127,6 +178,15 @@ def from_onnx(onnx_model: Union[str, onnx.ModelProto],
profile.set_shape(input_name, min_shape, opt_shape, max_shape)
config.add_optimization_profile(profile)

cuda_version = search_cuda_version()
if cuda_version is not None:
version_major = int(cuda_version.split('.')[0])
if version_major < 11:
# cuBLASLt is only supported from CUDA 11, so on older CUDA the CUBLAS_LT tactic source (used by the cuDNN heuristic) must be disabled # noqa E501
tactic_source = config.get_tactic_sources() - (
1 << int(trt.TacticSource.CUBLAS_LT))
config.set_tactic_sources(tactic_source)

if fp16_mode:
if version.parse(trt.__version__) < version.parse('8'):
builder.fp16_mode = fp16_mode
Expand Down
10 changes: 10 additions & 0 deletions mmdeploy/codebase/base/task.py
Original file line number Diff line number Diff line change
Expand Up @@ -73,6 +73,7 @@ def build_data_preprocessor(self):

from mmengine.registry import MODELS
data_preprocessor = MODELS.build(preprocess_cfg)
data_preprocessor.to(self.device)

return data_preprocessor

Expand Down Expand Up @@ -295,6 +296,15 @@ def visualize(self,
show=show_result,
out_file=output_file)

@abstractmethod
def mode(self) -> str:
    """Return the PyTorch inference mode for this codebase.

    The value is used as the ``'mode'`` entry of ``input_metas`` when
    exporting the model (see ``torch2onnx``). Subclasses may override
    it; this base implementation returns ``'predict'``.

    Returns:
        str: Codebase inference mode, e.g. ``'predict'``.
    """
    return 'predict'

@staticmethod
@abstractmethod
def get_partition_cfg(partition_type: str, **kwargs) -> Dict:
Expand Down
3 changes: 1 addition & 2 deletions mmdeploy/codebase/mmdet3d/deploy/__init__.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
# Copyright (c) OpenMMLab. All rights reserved.
from .mmdetection3d import MMDetection3d
from .voxel_detection import VoxelDetection
from .voxel_detection import MMDetection3d, VoxelDetection
from .voxel_detection_model import VoxelDetectionModel

__all__ = ['MMDetection3d', 'VoxelDetection', 'VoxelDetectionModel']
116 changes: 0 additions & 116 deletions mmdeploy/codebase/mmdet3d/deploy/mmdetection3d.py

This file was deleted.

Loading