diff --git a/.gitignore b/.gitignore
index b2602607a9..b268868c02 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,12 @@
 fastdeploy/libs/lib*
+build
+cmake-build-debug
+cmake-build-release
+.vscode
+FastDeploy.cmake
+fastdeploy/core/config.h
+build-debug.sh
+*dist
+fastdeploy.egg-info
+.setuptools-cmake-build
+fastdeploy/version.py
\ No newline at end of file
diff --git a/fastdeploy/backends/tensorrt/trt_backend.cc b/fastdeploy/backends/tensorrt/trt_backend.cc
index dfc2840c2b..5dbb61ffe8 100644
--- a/fastdeploy/backends/tensorrt/trt_backend.cc
+++ b/fastdeploy/backends/tensorrt/trt_backend.cc
@@ -52,7 +52,8 @@ std::vector<int> toVec(const nvinfer1::Dims& dim) {
   return out;
 }
 
-bool TrtBackend::InitFromTrt(const std::string& trt_engine_file) {
+bool TrtBackend::InitFromTrt(const std::string& trt_engine_file,
+                             const TrtBackendOption& option) {
   if (initialized_) {
     FDERROR << "TrtBackend is already initlized, cannot initialize again."
             << std::endl;
diff --git a/fastdeploy/backends/tensorrt/trt_backend.h b/fastdeploy/backends/tensorrt/trt_backend.h
index 3b77c8bc24..e3f848a012 100644
--- a/fastdeploy/backends/tensorrt/trt_backend.h
+++ b/fastdeploy/backends/tensorrt/trt_backend.h
@@ -69,7 +69,8 @@ class TrtBackend : public BaseBackend {
   bool InitFromOnnx(const std::string& model_file,
                     const TrtBackendOption& option = TrtBackendOption(),
                     bool from_memory_buffer = false);
-  bool InitFromTrt(const std::string& trt_engine_file);
+  bool InitFromTrt(const std::string& trt_engine_file,
+                   const TrtBackendOption& option = TrtBackendOption());
   bool Infer(std::vector<FDTensor>& inputs, std::vector<FDTensor>* outputs);
diff --git a/fastdeploy/vision/common/processors/cast.cc b/fastdeploy/vision/common/processors/cast.cc
index 2f8a0993ec..77a1b249ae 100644
--- a/fastdeploy/vision/common/processors/cast.cc
+++ b/fastdeploy/vision/common/processors/cast.cc
@@ -18,30 +18,40 @@ namespace fastdeploy {
 namespace vision {
 
 bool Cast::CpuRun(Mat* mat) {
-  if (mat->layout != Layout::CHW) {
-    FDERROR << "Cast: The input data must be Layout::HWC format!" << std::endl;
-    return false;
-  }
   cv::Mat* im = mat->GetCpuMat();
+  int c = im->channels();
   if (dtype_ == "float") {
-    im->convertTo(*im, CV_32FC(im->channels()));
+    if (im->type() != CV_32FC(c)) {
+      im->convertTo(*im, CV_32FC(c));
+    }
   } else if (dtype_ == "double") {
-    im->convertTo(*im, CV_64FC(im->channels()));
+    if (im->type() != CV_64FC(c)) {
+      im->convertTo(*im, CV_64FC(c));
+    }
+  } else {
+    FDLogger() << "[WARN] Cast not support for " << dtype_
+               << " now! will skip this operation."
+               << std::endl;
   }
   return true;
 }
 
 #ifdef ENABLE_OPENCV_CUDA
 bool Cast::GpuRun(Mat* mat) {
-  if (mat->layout != Layout::CHW) {
-    FDERROR << "Cast: The input data must be Layout::HWC format!" << std::endl;
-    return false;
-  }
   cv::cuda::GpuMat* im = mat->GetGpuMat();
+  int c = im->channels();
   if (dtype_ == "float") {
-    im->convertTo(*im, CV_32FC(im->channels()));
+    if (im->type() != CV_32FC(c)) {
+      im->convertTo(*im, CV_32FC(c));
+    }
   } else if (dtype_ == "double") {
-    im->convertTo(*im, CV_64FC(im->channels()));
+    if (im->type() != CV_64FC(c)) {
+      im->convertTo(*im, CV_64FC(c));
+    }
+  } else {
+    FDLogger() << "[WARN] Cast not support for " << dtype_
+               << " now! will skip this operation."
+               << std::endl;
   }
   return true;
 }
diff --git a/fastdeploy/vision/ppcls/model.cc b/fastdeploy/vision/ppcls/model.cc
index ad894f3daf..915cb97512 100644
--- a/fastdeploy/vision/ppcls/model.cc
+++ b/fastdeploy/vision/ppcls/model.cc
@@ -44,7 +44,7 @@ bool Model::BuildPreprocessPipelineFromConfig() {
     return false;
   }
   auto preprocess_cfg = cfg["PreProcess"]["transform_ops"];
-  processors_.push_back(std::make_shared());
+  processors_.push_back(std::make_shared());
   for (const auto& op : preprocess_cfg) {
     FDASSERT(op.IsMap(),
              "Require the transform information in yaml be Map type.");
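
Usage notes (not part of the patch). The widened InitFromTrt stays backward compatible because the new option parameter defaults to TrtBackendOption(). A minimal caller sketch, assuming the FastDeploy namespace and the FDTensor-based Infer signature shown in trt_backend.h; the engine path "model.trt" is illustrative and TrtBackendOption fields are left at their defaults since the diff does not show them:

#include <iostream>
#include <string>
#include <vector>

#include "fastdeploy/backends/tensorrt/trt_backend.h"

int main() {
  // Defaults only; the option's fields are not defined by this diff.
  fastdeploy::TrtBackendOption option;

  fastdeploy::TrtBackend backend;
  // New in this patch: a prebuilt engine can be configured with the same
  // TrtBackendOption that InitFromOnnx already accepts. Omitting the
  // argument falls back to TrtBackendOption(), preserving old call sites.
  if (!backend.InitFromTrt("model.trt", option)) {
    std::cerr << "Failed to load TensorRT engine." << std::endl;
    return -1;
  }

  std::vector<fastdeploy::FDTensor> inputs;   // filled with real data in practice
  std::vector<fastdeploy::FDTensor> outputs;
  if (!backend.Infer(inputs, &outputs)) {
    std::cerr << "Inference failed." << std::endl;
    return -1;
  }
  return 0;
}

The Cast change swaps the hard Layout::CHW check for a cheaper guard: convertTo runs only when the Mat's element type actually differs from the target, and an unsupported dtype is warned about and skipped rather than failing the pipeline. The same guard pattern in plain OpenCV terms, as a self-contained sketch independent of FastDeploy:

#include <opencv2/core.hpp>

int main() {
  cv::Mat im = cv::Mat::ones(4, 4, CV_8UC3);  // 8-bit, 3-channel input
  int c = im.channels();
  // Convert only when the element type differs; an already-float Mat is
  // left untouched, which avoids a redundant allocation and copy.
  if (im.type() != CV_32FC(c)) {
    im.convertTo(im, CV_32FC(c));
  }
  return 0;
}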