Skip to content

Commit

Permalink
[PD FE] loading weight from ov::tensor (openvinotoolkit#20044)
Browse files Browse the repository at this point in the history
* fix paddle load model from memory

* fix coding style

* ignore the deprecated api

* fix an istream bug; add a test case

* simplify func variant_to_stream_ptr

* restore the previous impl to reduce memory impact

* fix memory leak
  • Loading branch information
xczhai authored Oct 31, 2023
1 parent 246410b commit 0076f7f
Show file tree
Hide file tree
Showing 2 changed files with 83 additions and 11 deletions.
40 changes: 29 additions & 11 deletions src/frontends/paddle/src/frontend.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -135,22 +135,32 @@ bool normalize_framework_node(const std::shared_ptr<FrameworkNode>& node,
return true;
}

std::istream* variant_to_stream_ptr(const ov::Any& variant, std::ifstream& ext_stream) {
OPENVINO_SUPPRESS_DEPRECATED_START
std::istream* variant_to_stream_ptr(const ov::Any& variant, std::fstream& fs, std::stringstream& ss) {
if (variant.is<std::istream*>()) {
return variant.as<std::istream*>();
} else if (variant.is<std::shared_ptr<ngraph::runtime::AlignedBuffer>>()) {
auto& aligned_weights_buffer = variant.as<std::shared_ptr<ngraph::runtime::AlignedBuffer>>();
ss.write(aligned_weights_buffer->get_ptr<char>(), aligned_weights_buffer->size());
FRONT_END_INITIALIZATION_CHECK(ss && ss.good(), "Cannot open ov::tensor.");
return &ss;
} else if (variant.is<std::string>()) {
const auto& model_path = variant.as<std::string>();
ext_stream.open(model_path, std::ios::in | std::ifstream::binary);
fs.open(model_path, std::ios::in | std::ifstream::binary);
FRONT_END_INITIALIZATION_CHECK(fs && fs.is_open(), "Cannot open model file.");
return &fs;
}
#if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32)
else if (variant.is<std::wstring>()) {
const auto& model_path = variant.as<std::wstring>();
ext_stream.open(model_path.c_str(), std::ios::in | std::ifstream::binary);
fs.open(model_path.c_str(), std::ios::in | std::ifstream::binary);
FRONT_END_INITIALIZATION_CHECK(fs && fs.is_open(), "Cannot open model file.");
return &fs;
}
#endif
FRONT_END_INITIALIZATION_CHECK(ext_stream && ext_stream.is_open(), "Cannot open model file.");
return &ext_stream;
return nullptr;
}
OPENVINO_SUPPRESS_DEPRECATED_END
} // namespace

FrontEnd::FrontEnd() : m_op_translators(paddle::get_supported_ops()) {}
Expand Down Expand Up @@ -392,9 +402,17 @@ bool FrontEnd::supported_impl(const std::vector<ov::Any>& variants) const {
#endif
else if (variants[0].is<std::istream*>()) {
// Validating first stream, it must contain a model
auto p_model_stream = variants[0].as<std::istream*>();
// step 1:
// PDPD API ParseFromIstream always deconstructs the context in model stream.
// So, make a copy for variants[0] to avoid breaking the context in variants[0].
const auto p_model_stream = variants[0].as<std::istream*>();
std::istream copy_model_stream(p_model_stream->rdbuf());
::paddle::framework::proto::ProgramDesc fw;
return fw.ParseFromIstream(p_model_stream);
auto ret = fw.ParseFromIstream(&copy_model_stream);
// step 2:
// reset the stream position to the beginning.
p_model_stream->seekg(0, p_model_stream->beg);
return ret;
}
return false;
}
Expand Down Expand Up @@ -422,10 +440,10 @@ InputModel::Ptr FrontEnd::load_impl(const std::vector<ov::Any>& variants) const
}
} else if (variants.size() == 2 + extra_variants_num) {
// The case when .pdmodel and .pdparams files are provided
std::ifstream model_stream;
std::ifstream weights_stream;
std::istream* p_model_stream = paddle::variant_to_stream_ptr(variants[0], model_stream);
std::istream* p_weights_stream = paddle::variant_to_stream_ptr(variants[1], weights_stream);
std::fstream model_fstream, weights_fstream;
std::stringstream model_sstream, weights_sstream;
std::istream* p_model_stream = paddle::variant_to_stream_ptr(variants[0], model_fstream, model_sstream);
std::istream* p_weights_stream = paddle::variant_to_stream_ptr(variants[1], weights_fstream, weights_sstream);
if (p_model_stream && p_weights_stream) {
return std::make_shared<InputModel>(std::vector<std::istream*>{p_model_stream, p_weights_stream},
m_telemetry);
Expand Down
54 changes: 54 additions & 0 deletions src/frontends/paddle/tests/read_paddle_model_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -16,8 +16,62 @@
#include <cstdint>
#include <fstream>
#include <iterator>
#include <string>
#include <vector>

#include "frontend/shared/include/utils.hpp"
#include "openvino/openvino.hpp"
#include "openvino/opsets/opset1.hpp"
#include "openvino/opsets/opset8.hpp"
#include "openvino/pass/serialize.hpp"

// Checks that ov::Core can read a Paddle model entirely from memory: the
// .pdmodel program is passed as a string and the .pdiparams weights as an
// ov::Tensor wrapping a host buffer; the resulting model is compared against
// a hand-built reference graph.
TEST(Paddle_Reader_Tests, LoadModelMemoryToCore) {
    auto model = std::string(TEST_PADDLE_MODELS_DIRNAME) + "conv2d_relu/conv2d_relu.pdmodel";
    auto param = std::string(TEST_PADDLE_MODELS_DIRNAME) + "conv2d_relu/conv2d_relu.pdiparams";

    ov::Core core;

    // Read a whole file into an owned buffer.
    // - std::ios::binary: the previous fopen(..., "r") used text mode, which
    //   corrupts binary weight blobs on Windows via newline translation.
    // - std::vector gives RAII ownership: the previous malloc'ed buffers
    //   leaked whenever an ASSERT_* returned early, and a failed fopen left a
    //   null FILE* that was used unchecked.
    auto read_file = [](const std::string& file_name) {
        std::ifstream file(file_name, std::ios::in | std::ios::binary);
        return std::vector<uint8_t>(std::istreambuf_iterator<char>(file), std::istreambuf_iterator<char>());
    };

    auto xml_buf = read_file(model);
    auto bin_buf = read_file(param);
    ASSERT_TRUE(!xml_buf.empty()) << "cannot read model file: " << model;
    ASSERT_TRUE(!bin_buf.empty()) << "cannot read params file: " << param;

    // The tensor wraps bin_buf's memory without copying, so bin_buf must stay
    // alive until read_model() returns.
    ov::Tensor weight_tensor(ov::element::u8, {1, bin_buf.size()}, bin_buf.data());
    std::string model_str(reinterpret_cast<const char*>(xml_buf.data()), xml_buf.size());
    auto function = core.read_model(model_str, weight_tensor);

    // Reference graph: Parameter -> Convolution -> Relu -> Result, mirroring
    // conv2d_relu.pdmodel.
    const auto inputType = ov::element::f32;
    const auto inputShape = ov::Shape{1, 3, 4, 4};
    const auto data = std::make_shared<ov::opset1::Parameter>(inputType, inputShape);
    data->set_friendly_name("xxx");
    data->output(0).get_tensor().add_names({"xxx"});
    const auto weight = std::make_shared<ov::opset1::Constant>(ov::element::f32, ov::Shape{5, 3, 1, 1}, 1.0);
    const auto conv2d = std::make_shared<ov::opset1::Convolution>(data->output(0),
                                                                  weight->output(0),
                                                                  ov::Strides({1, 1}),
                                                                  ov::CoordinateDiff({1, 1}),
                                                                  ov::CoordinateDiff({1, 1}),
                                                                  ov::Strides({1, 1}));
    conv2d->set_friendly_name("conv2d_0.tmp_0");
    conv2d->output(0).get_tensor().add_names({"conv2d_0.tmp_0"});
    const auto relu = std::make_shared<ov::opset1::Relu>(conv2d->output(0));
    relu->set_friendly_name("relu_0.tmp_0");
    relu->output(0).get_tensor().add_names({"relu_0.tmp_0"});
    const auto result = std::make_shared<ov::opset1::Result>(relu->output(0));
    result->set_friendly_name("relu_0.tmp_0/Result");
    const auto reference = std::make_shared<ov::Model>(ov::NodeVector{result}, ov::ParameterVector{data}, "Model0");

    const FunctionsComparator func_comparator = FunctionsComparator::with_default().enable(FunctionsComparator::NONE);
    const FunctionsComparator::Result res = func_comparator(function, reference);
    ASSERT_TRUE(res.valid) << res.message;
}

TEST(Paddle_Reader_Tests, ImportBasicModelToCore) {
auto model = std::string(TEST_PADDLE_MODELS_DIRNAME) + "relu/relu.pdmodel";

Expand Down

0 comments on commit 0076f7f

Please sign in to comment.