diff --git a/configs/mmseg/segmentation_openvino_static-1024x2048.py b/configs/mmseg/segmentation_openvino_static-1024x2048.py
new file mode 100644
index 0000000000..472e923e61
--- /dev/null
+++ b/configs/mmseg/segmentation_openvino_static-1024x2048.py
@@ -0,0 +1,4 @@
+_base_ = ['./segmentation_static.py', '../_base_/backends/openvino.py']
+onnx_config = dict(input_shape=[2048, 1024])
+backend_config = dict(
+    model_inputs=[dict(opt_shapes=dict(input=[1, 3, 1024, 2048]))])
diff --git a/configs/mmseg/segmentation_openvino_static-512x512.py b/configs/mmseg/segmentation_openvino_static-512x512.py
index 29d7582758..ef974335e4 100644
--- a/configs/mmseg/segmentation_openvino_static-512x512.py
+++ b/configs/mmseg/segmentation_openvino_static-512x512.py
@@ -1,4 +1,4 @@
 _base_ = ['./segmentation_static.py', '../_base_/backends/openvino.py']
-
+onnx_config = dict(input_shape=[512, 512])
 backend_config = dict(
     model_inputs=[dict(opt_shapes=dict(input=[1, 3, 512, 512]))])
diff --git a/mmdeploy/apis/pytorch2torchscript.py b/mmdeploy/apis/pytorch2torchscript.py
index c984892360..8b54ce4ce8 100644
--- a/mmdeploy/apis/pytorch2torchscript.py
+++ b/mmdeploy/apis/pytorch2torchscript.py
@@ -42,8 +42,8 @@ def torch2torchscript_impl(model: torch.nn.Module,
             ir=IR.TORCHSCRIPT), torch.no_grad(), torch.jit.optimized_execution(
                 True):
         # for exporting models with weight that depends on inputs
-        patched_model(
-            *inputs) if isinstance(inputs, Sequence) else patched_model(inputs)
+        patched_model(*inputs) if isinstance(inputs, Sequence) \
+            else patched_model(inputs)
         ts_model = torch.jit.trace(patched_model, inputs)
 
     # perform optimize, note that optimizing models may trigger errors when
diff --git a/mmdeploy/codebase/mmpose/deploy/pose_detection.py b/mmdeploy/codebase/mmpose/deploy/pose_detection.py
index 52bb1632bc..4dee279a8c 100644
--- a/mmdeploy/codebase/mmpose/deploy/pose_detection.py
+++ b/mmdeploy/codebase/mmpose/deploy/pose_detection.py
@@ -198,6 +198,7 @@ def evaluate_outputs(model_cfg: mmcv.Config,
                          out: Optional[str] = None,
                          metric_options: Optional[dict] = None,
                          format_only: bool = False,
+                         log_file: Optional[str] = None,
                          **kwargs):
         """Perform post-processing to predictions of model.
 
@@ -215,10 +216,15 @@ def evaluate_outputs(model_cfg: mmcv.Config,
                 evaluation. It is useful when you want to format the result
                 to a specific format and submit it to the test server.
                 Defaults to `False`.
+            log_file (str | None): The file to write the evaluation results.
+                Defaults to `None` and the results will only print on stdout.
         """
+        from mmcv.utils import get_logger
+        logger = get_logger('test', log_file=log_file, log_level=logging.INFO)
+
         res_folder = '.'
         if out:
-            logging.info(f'\nwriting results to {out}')
+            logger.info(f'\nwriting results to {out}')
             mmcv.dump(outputs, out)
             res_folder, _ = os.path.split(out)
             os.makedirs(res_folder, exist_ok=True)
@@ -229,7 +235,7 @@ def evaluate_outputs(model_cfg: mmcv.Config,
 
         results = dataset.evaluate(outputs, res_folder, **eval_config)
         for k, v in sorted(results.items()):
-            print(f'{k}: {v}')
+            logger.info(f'{k}: {v:.4f}')
 
     def get_model_name(self) -> str:
         """Get the model name.
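
A minimal sketch of the logging path introduced in evaluate_outputs above: metrics are routed through an mmcv logger so they can optionally be mirrored to a file. The logger name 'test', the get_logger signature, and the {v:.4f} formatting come from the diff; the 'eval.log' path and the metric values are placeholders for illustration, not real results.

# Sketch only, assuming mmcv is installed; mirrors the change in
# evaluate_outputs rather than calling the method itself.
import logging

from mmcv.utils import get_logger

# log_file=None keeps the previous behaviour (stdout only); passing a path
# such as 'eval.log' additionally writes the same lines to that file.
logger = get_logger('test', log_file='eval.log', log_level=logging.INFO)

results = {'AP': 0.71234, 'AR': 0.76543}  # placeholder metric values
for k, v in sorted(results.items()):
    logger.info(f'{k}: {v:.4f}')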