-
Notifications
You must be signed in to change notification settings - Fork 465
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Modify PPMatting backend and docs #182
Changes from 77 commits
1684b05
71c00d9
21ab2f9
d63e862
7b3b0e2
d039e80
a34a815
eb010a8
39f64f2
d071b37
d5026ca
fb376ad
4b8737c
ce922a0
6e00b82
8c359fb
906c730
80c1223
6072757
2c6e6a4
48136f0
6feca92
ae70d4f
f591b85
f0def41
15b9160
4706e8c
dc83584
086debd
4f980b9
2e61c95
80beadf
8103772
f5f7a86
e6cec25
e25e4f2
e8a8439
a182893
3aa015f
d6b98aa
871cfc6
013921a
7a5a6d9
c996117
0aefe32
2330414
4660161
033c18e
6c94d65
85fb256
90ca4cb
f6a4ed2
3682091
ca1e110
93ba6a6
767842e
cc32733
2771a3b
a1e29ac
5ecc6fe
2780588
c00be81
9082178
4b14f56
4876b82
9cebb1f
d1e3b29
69cf0d2
2ff10e1
a673a2c
832d777
e513eac
ded2054
19db925
15be4a6
3a5b93a
f765853
c2332b0
950f948
64a13c9
09c073d
99969b6
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,78 @@ | ||
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. | ||
// | ||
// Licensed under the Apache License, Version 2.0 (the "License"); | ||
// you may not use this file except in compliance with the License. | ||
// You may obtain a copy of the License at | ||
// | ||
// http://www.apache.org/licenses/LICENSE-2.0 | ||
// | ||
// Unless required by applicable law or agreed to in writing, software | ||
// distributed under the License is distributed on an "AS IS" BASIS, | ||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
// See the License for the specific language governing permissions and | ||
// limitations under the License. | ||
|
||
#include "fastdeploy/vision/common/processors/resize_by_long.h" | ||
|
||
namespace fastdeploy { | ||
namespace vision { | ||
|
||
// Resize the CPU image so that its longer side reaches the configured
// target length (see GenerateScale); aspect ratio is preserved.
bool ResizeByLong::CpuRun(Mat* mat) {
  cv::Mat* image = mat->GetCpuMat();
  const double ratio = GenerateScale(image->cols, image->rows);
  if (use_scale_) {
    // Let OpenCV derive the destination size from the scale factors.
    cv::resize(*image, *image, cv::Size(), ratio, ratio, interp_);
  } else {
    // Compute the destination size explicitly, rounding to whole pixels.
    const int new_w = static_cast<int>(round(ratio * image->cols));
    const int new_h = static_cast<int>(round(ratio * image->rows));
    cv::resize(*image, *image, cv::Size(new_w, new_h), 0, 0, interp_);
  }
  // Keep the Mat wrapper's cached dimensions in sync with the resized image.
  mat->SetWidth(image->cols);
  mat->SetHeight(image->rows);
  return true;
}
|
||
#ifdef ENABLE_OPENCV_CUDA
// GPU counterpart of CpuRun: resize so the longer side matches the target.
// NOTE(review): unlike CpuRun, the image is first converted to 32-bit float
// before resizing — presumably required by the cv::cuda::resize path;
// confirm this asymmetry is intentional.
bool ResizeByLong::GpuRun(Mat* mat) {
  cv::cuda::GpuMat* image = mat->GetGpuMat();
  const double ratio = GenerateScale(image->cols, image->rows);
  image->convertTo(*image, CV_32FC(image->channels()));
  if (use_scale_) {
    // Destination size derived from the scale factors.
    cv::cuda::resize(*image, *image, cv::Size(), ratio, ratio, interp_);
  } else {
    // Explicit destination size, rounded to whole pixels.
    const int new_w = static_cast<int>(round(ratio * image->cols));
    const int new_h = static_cast<int>(round(ratio * image->rows));
    cv::cuda::resize(*image, *image, cv::Size(new_w, new_h), 0, 0, interp_);
  }
  // Keep the Mat wrapper's cached dimensions in sync with the resized image.
  mat->SetWidth(image->cols);
  mat->SetHeight(image->rows);
  return true;
}
#endif
|
||
double ResizeByLong::GenerateScale(const int origin_w, const int origin_h) { | ||
int im_size_max = std::max(origin_w, origin_h); | ||
int im_size_min = std::min(origin_w, origin_h); | ||
double scale = 1.0f; | ||
if (target_size_ == -1) { | ||
if (im_size_max > max_size_) { | ||
scale = static_cast<double>(max_size_) / static_cast<double>(im_size_max); | ||
} | ||
} else { | ||
scale = | ||
static_cast<double>(target_size_) / static_cast<double>(im_size_max); | ||
} | ||
return scale; | ||
} | ||
|
||
// Convenience entry point: build a one-shot ResizeByLong processor with the
// given settings and immediately apply it to `mat` using backend `lib`.
bool ResizeByLong::Run(Mat* mat, int target_size, int interp, bool use_scale,
                       int max_size, ProcLib lib) {
  ResizeByLong processor(target_size, interp, use_scale, max_size);
  return processor(mat, lib);
}
} // namespace vision | ||
} // namespace fastdeploy |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,49 @@ | ||
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. | ||
// | ||
// Licensed under the Apache License, Version 2.0 (the "License"); | ||
// you may not use this file except in compliance with the License. | ||
// You may obtain a copy of the License at | ||
// | ||
// http://www.apache.org/licenses/LICENSE-2.0 | ||
// | ||
// Unless required by applicable law or agreed to in writing, software | ||
// distributed under the License is distributed on an "AS IS" BASIS, | ||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
// See the License for the specific language governing permissions and | ||
// limitations under the License. | ||
|
||
#pragma once | ||
|
||
#include "fastdeploy/vision/common/processors/base.h" | ||
|
||
namespace fastdeploy { | ||
namespace vision { | ||
|
||
class ResizeByLong : public Processor { | ||
public: | ||
ResizeByLong(int target_size, int interp = 1, bool use_scale = true, | ||
int max_size = -1) { | ||
target_size_ = target_size; | ||
max_size_ = max_size; | ||
interp_ = interp; | ||
use_scale_ = use_scale; | ||
} | ||
bool CpuRun(Mat* mat); | ||
#ifdef ENABLE_OPENCV_CUDA | ||
bool GpuRun(Mat* mat); | ||
#endif | ||
std::string Name() { return "ResizeByLong"; } | ||
|
||
static bool Run(Mat* mat, int target_size, int interp = 1, | ||
bool use_scale = true, int max_size = -1, | ||
ProcLib lib = ProcLib::OPENCV_CPU); | ||
|
||
private: | ||
double GenerateScale(const int origin_w, const int origin_h); | ||
int target_size_; | ||
int max_size_; | ||
int interp_; | ||
bool use_scale_; | ||
}; | ||
} // namespace vision | ||
} // namespace fastdeploy |
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -27,7 +27,7 @@ PPMatting::PPMatting(const std::string& model_file, | |
const Frontend& model_format) { | ||
config_file_ = config_file; | ||
valid_cpu_backends = {Backend::PDINFER, Backend::ORT}; | ||
valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT}; | ||
valid_gpu_backends = {Backend::PDINFER, Backend::TRT}; | ||
runtime_option = custom_option; | ||
runtime_option.model_format = model_format; | ||
runtime_option.model_file = model_file; | ||
|
@@ -74,6 +74,10 @@ bool PPMatting::BuildPreprocessPipelineFromConfig() { | |
if (op["min_short"]) { | ||
min_short = op["min_short"].as<int>(); | ||
} | ||
std::cout << "If LimintShort in yaml file, you may transfer PPMatting " | ||
"model by yourself, please make sure your input image's " | ||
"width==hight and not smaller than " | ||
<< max_short << std::endl; | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 日志输出改为FDINFO LimitShort, height 单词拼错了 改为 |
||
processors_.push_back( | ||
std::make_shared<LimitShort>(max_short, min_short)); | ||
} else if (op["type"].as<std::string>() == "ResizeToIntMult") { | ||
|
@@ -92,6 +96,22 @@ bool PPMatting::BuildPreprocessPipelineFromConfig() { | |
std = op["std"].as<std::vector<float>>(); | ||
} | ||
processors_.push_back(std::make_shared<Normalize>(mean, std)); | ||
} else if (op["type"].as<std::string>() == "ResizeByLong") { | ||
int target_size = 512; | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 为啥要给默认值 |
||
if (op["target_size"]) { | ||
target_size = op["target_size"].as<int>(); | ||
} | ||
processors_.push_back(std::make_shared<ResizeByLong>(target_size)); | ||
} else if (op["type"].as<std::string>() == "Pad") { | ||
// size: (w, h) | ||
auto size = op["size"].as<std::vector<int>>(); | ||
std::vector<float> value = {114, 114, 114}; | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 为啥是114, 114的这个默认值是PaddleSeg里面默认的吗? |
||
if (op["fill_value"]) { | ||
auto value = op["fill_value"].as<std::vector<float>>(); | ||
} | ||
processors_.push_back(std::make_shared<Cast>("float")); | ||
processors_.push_back( | ||
std::make_shared<PadToSize>(size[1], size[0], value)); | ||
} | ||
} | ||
processors_.push_back(std::make_shared<HWC2CHW>()); | ||
|
@@ -107,6 +127,14 @@ bool PPMatting::Preprocess(Mat* mat, FDTensor* output, | |
<< "." << std::endl; | ||
return false; | ||
} | ||
if (processors_[i]->Name().compare("PadToSize") == 0) { | ||
(*im_info)["pad_to_size"] = {static_cast<int>(mat->Height()), | ||
static_cast<int>(mat->Width())}; | ||
} | ||
if (processors_[i]->Name().compare("ResizeByLong") == 0) { | ||
(*im_info)["resize_by_long"] = {static_cast<int>(mat->Height()), | ||
static_cast<int>(mat->Width())}; | ||
} | ||
} | ||
|
||
// Record output shape of preprocessed image | ||
|
@@ -135,6 +163,8 @@ bool PPMatting::Postprocess( | |
// 先获取alpha并resize (使用opencv) | ||
auto iter_ipt = im_info.find("input_shape"); | ||
auto iter_out = im_info.find("output_shape"); | ||
auto pad_to_size = im_info.find("output_shape"); | ||
auto resize_by_long = im_info.find("resize_by_long"); | ||
FDASSERT(iter_out != im_info.end() && iter_ipt != im_info.end(), | ||
"Cannot find input_shape or output_shape from im_info."); | ||
int out_h = iter_out->second[0]; | ||
|
@@ -145,7 +175,19 @@ bool PPMatting::Postprocess( | |
// TODO: 需要修改成FDTensor或Mat的运算 现在依赖cv::Mat | ||
float* alpha_ptr = static_cast<float*>(alpha_tensor.Data()); | ||
cv::Mat alpha_zero_copy_ref(out_h, out_w, CV_32FC1, alpha_ptr); | ||
Mat alpha_resized(alpha_zero_copy_ref); // ref-only, zero copy. | ||
cv::Mat cropped_alpha; | ||
if (pad_to_size != im_info.end() && resize_by_long != im_info.end()) { | ||
int resize_h = resize_by_long->second[0]; | ||
int resize_w = resize_by_long->second[1]; | ||
int pad_h = pad_to_size->second[0]; | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 这里pad_h和pad_w获取后并没有用上,所以这两个值的具体作用是什么 |
||
int pad_w = pad_to_size->second[1]; | ||
alpha_zero_copy_ref(cv::Rect(0, 0, resize_w, resize_h)) | ||
.copyTo(cropped_alpha); | ||
} else { | ||
cropped_alpha = alpha_zero_copy_ref; | ||
} | ||
Mat alpha_resized(cropped_alpha); // ref-only, zero copy. | ||
|
||
if ((out_h != ipt_h) || (out_w != ipt_w)) { | ||
// already allocated a new continuous memory after resize. | ||
// cv::resize(alpha_resized, alpha_resized, cv::Size(ipt_w, ipt_h)); | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -27,7 +27,7 @@ wget https://bj.bcebos.com/paddlehub/fastdeploy/matting_bgr.jpg | |
|
||
# CPU推理 | ||
./infer_demo PP-Matting-512 matting_input.jpg matting_bgr.jpg 0 | ||
# GPU推理 (TODO: ORT-GPU 推理会报错) | ||
# GPU推理 | ||
./infer_demo PP-Matting-512 matting_input.jpg matting_bgr.jpg 1 | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 这里保留GpuInfer,但是设置一下backend为paddle |
||
# GPU上TensorRT推理 | ||
./infer_demo PP-Matting-512 matting_input.jpg matting_bgr.jpg 2 | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -19,7 +19,7 @@ wget https://bj.bcebos.com/paddlehub/fastdeploy/matting_input.jpg | |
wget https://bj.bcebos.com/paddlehub/fastdeploy/matting_bgr.jpg | ||
# CPU推理 | ||
python infer.py --model PP-Matting-512 --image matting_input.jpg --bg matting_bgr.jpg --device cpu | ||
# GPU推理 (TODO: ORT-GPU 推理会报错) | ||
# GPU推理 | ||
python infer.py --model PP-Matting-512 --image matting_input.jpg --bg matting_bgr.jpg --device gpu | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 同上 |
||
# GPU上使用TensorRT推理 (注意:TensorRT推理第一次运行,有序列化模型的操作,有一定耗时,需要耐心等待) | ||
python infer.py --model PP-Matting-512 --image matting_input.jpg --bg matting_bgr.jpg --device gpu --use_trt True | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Review note: parameters that are passed by value (e.g. `const int origin_w`) do not need the top-level `const` qualifier.