
Commit

clang formatting

mryzhov committed Feb 21, 2023
1 parent 308b239 commit 86a3641
Showing 10 changed files with 139 additions and 151 deletions.
32 changes: 18 additions & 14 deletions src/plugins/intel_gna/src/gna_plugin.cpp
@@ -39,7 +39,6 @@
#include "gna_fused_iterator.hpp"
#include "gna_graph_patterns.hpp"
#include "gna_itt.hpp"
-#include "serial/gna_model_serial.hpp"
#include "gna_plugin_config.hpp"
#include "gna_tensor_tools.hpp"
#include "gna_transformations_pipeline.hpp"
@@ -341,7 +340,9 @@ void GNAPlugin::ImportFrames(void* ptr_dst,
}
}

-void GNAPlugin::pre_post_process(InferenceEngine::Blob::Ptr input_blob, InferenceEngine::Blob::Ptr output_blob, std::shared_ptr<ov::Model> model) {
+void GNAPlugin::pre_post_process(InferenceEngine::Blob::Ptr input_blob,
+                                 InferenceEngine::Blob::Ptr output_blob,
+                                 std::shared_ptr<ov::Model> model) {
const ov::element::Type input_prc = details::convertPrecision(input_blob->getTensorDesc().getPrecision());
const ov::element::Type output_prc = details::convertPrecision(output_blob->getTensorDesc().getPrecision());
const ov::Shape& input_shape = input_blob->getTensorDesc().getDims();
@@ -985,12 +986,12 @@ void GNAPlugin::LoadNetwork(const CNNNetwork& _network) {
}
}

-//TODO: Need to remove this conversation when ngraph NCHW<->NHWC transformation is enabled
+// TODO: Need to remove this conversation when ngraph NCHW<->NHWC transformation is enabled
if (!transpose_inputs_info.empty()) {
-convert_transpose_map_to_model(transpose_inputs_info, inputs_ptr_->Get());
+ConvertTransposeMapToModel(transpose_inputs_info, inputs_ptr_->Get());
}
if (!transpose_outputs_info.empty()) {
-convert_transpose_map_to_model(transpose_outputs_info, outputs_.Get());
+ConvertTransposeMapToModel(transpose_outputs_info, outputs_.Get());
}

DumpXNNToFile();
@@ -1115,14 +1116,15 @@ uint32_t GNAPlugin::QueueInference(const InferenceEngine::BlobMap& inputs, Infer
int inputNum = 0;
for (auto& input : inputs) {
std::string input_name = input.first;
-Blob::Ptr gna_input_blob = input.second; // this copy is needed to split user input and plugin's
+Blob::Ptr gna_input_blob = input.second;  // this copy is needed to split user input and plugin's
InferenceEngine::Layout input_layout = gna_input_blob->getTensorDesc().getLayout();

if (input_layout != InferenceEngine::Layout::C && input_layout != InferenceEngine::Layout::NC &&
input_layout != InferenceEngine::Layout::CN && input_layout != InferenceEngine::Layout::CHW &&
input_layout != InferenceEngine::Layout::NCHW) {
THROW_GNA_EXCEPTION << "Expected input blob to have Layout::C, Layout::NC, Layout::CN, Layout::NCHW or "
-"Layout::CHW. But was: " << input_layout;
+"Layout::CHW. But was: "
+<< input_layout;
}

if (input_layout == InferenceEngine::Layout::NCHW || input_layout == InferenceEngine::Layout::CHW) {
@@ -1274,15 +1276,17 @@ RequestStatus GNAPlugin::WaitFor(uint32_t request_idx, int64_t millisTimeout) {
output_layout != InferenceEngine::Layout::CN && output_layout != InferenceEngine::Layout::NCHW &&
output_layout != InferenceEngine::Layout::CHW && output_layout != InferenceEngine::Layout::SCALAR) {
THROW_GNA_EXCEPTION << "Expected output blob to have Layout::C, Layout::NC, Layout::CN, Layout::NCHW or "
-"Layout::CHW. But was " << output_layout;
+"Layout::CHW. But was "
+<< output_layout;
}

auto dims = output_blob->getTensorDesc().getDims();
auto is1D = output_layout == InferenceEngine::Layout::C;
auto isScalar = output_layout == InferenceEngine::Layout::SCALAR;
auto is3D = output_layout == InferenceEngine::Layout::CHW;
auto batchSize = (is1D || isScalar || is3D) ? 1 : dims[0];
-auto elementsPerBatch = isScalar ? 1 : (is1D ? dims.front() : details::product(++std::begin(dims), std::end(dims)));
+auto elementsPerBatch =
+    isScalar ? 1 : (is1D ? dims.front() : details::product(++std::begin(dims), std::end(dims)));

OutputDesc& gna_output_desc = outputs_.at(output_name);
TensorDesc tensor_desc(gna_output_desc.tensor_precision, gna_output_desc.dims, gna_output_desc.model_layout);
@@ -1534,12 +1538,12 @@ InferenceEngine::IExecutableNetworkInternal::Ptr GNAPlugin::ImportNetwork(std::i
}
}

-//TODO: Need to remove this conversation when ngraph NCHW<->NHWC transformation is enabled
-if(!transpose_inputs_info.empty()) {
-convert_transpose_map_to_model(transpose_inputs_info, inputs_ptr_->Get());
+// TODO: Need to remove this conversation when ngraph NCHW<->NHWC transformation is enabled
+if (!transpose_inputs_info.empty()) {
+ConvertTransposeMapToModel(transpose_inputs_info, inputs_ptr_->Get());
}
-if(!transpose_outputs_info.empty()) {
-convert_transpose_map_to_model(transpose_outputs_info, outputs_.Get());
+if (!transpose_outputs_info.empty()) {
+ConvertTransposeMapToModel(transpose_outputs_info, outputs_.Get());
}

for (auto&& memory : mt) {
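For readers following the `WaitFor` hunk above: the `elementsPerBatch` expression that clang-format re-wrapped splits an output blob's dims into a batch count and a per-batch element count. A minimal standalone sketch with hypothetical dims; `std::accumulate` stands in for `details::product`, which is assumed to multiply the iterator range:

```cpp
#include <cstddef>
#include <functional>
#include <iostream>
#include <numeric>
#include <vector>

int main() {
    std::vector<size_t> dims{2, 3, 4, 5};  // hypothetical NCHW dims
    // 1D, scalar, and 3D layouts force batchSize = 1 in the plugin code above.
    size_t batchSize = dims[0];
    // Mirrors details::product(++std::begin(dims), std::end(dims)):
    // the product of every dim after the batch dim.
    size_t elementsPerBatch = std::accumulate(++std::begin(dims),
                                              std::end(dims),
                                              size_t{1},
                                              std::multiplies<size_t>());
    std::cout << batchSize << " x " << elementsPerBatch << '\n';  // 2 x 60
}
```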
14 changes: 7 additions & 7 deletions src/plugins/intel_gna/src/gna_plugin.hpp
@@ -25,9 +25,9 @@
#include "gna_data_types.hpp"
#include "gna_graph_compiler.hpp"
#include "gna_plugin_config.hpp"
-#include "preprocessing.hpp"
#include "log/debug.hpp"
#include "log/log.hpp"
+#include "preprocessing.hpp"

namespace ov {
namespace intel_gna {
@@ -193,8 +193,8 @@ class GNAPlugin : public InferenceEngine::IInferencePlugin {
void DumpXNNToFile() const;

void pre_post_process(InferenceEngine::Blob::Ptr input_blob,
-InferenceEngine::Blob::Ptr output_blob,
-std::shared_ptr<ov::Model> model);
+                      InferenceEngine::Blob::Ptr output_blob,
+                      std::shared_ptr<ov::Model> model);

void ImportFrames(void* ptr_dst,
const void* ptr_src,
@@ -227,13 +227,13 @@ class GNAPlugin : public InferenceEngine::IInferencePlugin {
intel_dnn_orientation_t orientation,
float scaleFactor);

+// TODO: Need to remove this conversation when ngraph NCHW<->NHWC transformation is enabled
template <class T1, class T2>
-inline void convert_transpose_map_to_model(T1& transposes, T2& nodes) {
-for (auto && node : nodes)
-{
+inline void ConvertTransposeMapToModel(T1& transposes, T2& nodes) {
+for (auto&& node : nodes) {
auto t_it = transposes.find(node.name);
if (t_it != transposes.end() && !t_it->second.empty()) {
-node.pre_post_process_model = to_pre_post_process_model(t_it->second);
+node.pre_post_process_model = ToProcessModel(t_it->second);
}
}
};
18 changes: 8 additions & 10 deletions src/plugins/intel_gna/src/preprocessing.cpp
@@ -4,12 +4,10 @@

#include "preprocessing.hpp"


+#include "gna_data_types.hpp"
+#include "ngraph/opsets/opset9.hpp"
#include "openvino/core/model.hpp"
#include "openvino/core/shape.hpp"
-#include "ngraph/opsets/opset9.hpp"
-#include "gna_data_types.hpp"

using namespace ngraph::opset9;

Expand Down Expand Up @@ -54,7 +52,7 @@ void ConvertToInt16(int16_t* ptr_dst,
/*
Convert legacy transposition info to preprocessing model
*/
-std::shared_ptr<ov::Model> to_pre_post_process_model(const TranspositionInfo& t_info) {
+std::shared_ptr<ov::Model> ToProcessModel(const TranspositionInfo& t_info) {
size_t c_size = t_info.num_transpose_rows;
size_t hw_size = t_info.num_transpose_columns;

@@ -81,20 +79,20 @@ std::shared_ptr<ov::Model> to_pre_post_process_model(const TranspositionInfo& t_
/*
Convert legacy transposition info to preprocessing model
*/
-std::shared_ptr<ov::Model> to_pre_post_process_model(const std::vector<TranspositionInfo>& transposes) {
+std::shared_ptr<ov::Model> ToProcessModel(const std::vector<TranspositionInfo>& transposes) {
// case wheb the input should be transposed entirely
if (transposes.size() == 1) {
-return to_pre_post_process_model(transposes.front());
+return ToProcessModel(transposes.front());
}

std::vector<int32_t> indexes = {};
-for (auto & transpose : transposes) {
+for (auto& transpose : transposes) {
size_t c_size = transpose.num_transpose_rows;
size_t hw_size = transpose.num_transpose_columns;
size_t chw_size = c_size * hw_size;
size_t id = indexes.size();
-for(size_t i{0}; i < chw_size; ++i) {
-size_t idx = (transpose.transpose) ? hw_size * (i % c_size) + i / c_size : i;
+for (size_t i{0}; i < chw_size; ++i) {
+size_t idx = (transpose.transpose) ? hw_size * (i % c_size) + i / c_size : i;
indexes.push_back(id + idx);
}
}
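A note on the loop reformatted above: `hw_size * (i % c_size) + i / c_size` builds the gather indexes that realize a C×HW transposition for each fragment. A tiny standalone sketch of just that index computation, with toy sizes rather than the plugin's real types:

```cpp
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
    const size_t c_size = 2;   // num_transpose_rows
    const size_t hw_size = 3;  // num_transpose_columns
    std::vector<int32_t> indexes;
    for (size_t i = 0; i < c_size * hw_size; ++i) {
        // Element i of the transposed (HW-major) output is fetched from this
        // position of the C-major input; a fragment with transpose == false
        // would use idx = i instead.
        indexes.push_back(static_cast<int32_t>(hw_size * (i % c_size) + i / c_size));
    }
    for (int32_t idx : indexes) {
        std::cout << idx << ' ';  // prints: 0 3 1 4 2 5
    }
    std::cout << '\n';
}
```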
6 changes: 3 additions & 3 deletions src/plugins/intel_gna/src/preprocessing.hpp
@@ -6,14 +6,14 @@

#include <cstdint>

-#include "openvino/core/model.hpp"
#include "gna_data_types.hpp"
+#include "openvino/core/model.hpp"

namespace ov {
namespace intel_gna {

-std::shared_ptr<ov::Model> to_pre_post_process_model(const TranspositionInfo& t_info);
-std::shared_ptr<ov::Model> to_pre_post_process_model(const std::vector<TranspositionInfo>& transposes);
+std::shared_ptr<ov::Model> ToProcessModel(const TranspositionInfo& t_info);
+std::shared_ptr<ov::Model> ToProcessModel(const std::vector<TranspositionInfo>& transposes);

void ConvertToInt16(int16_t* ptr_dst,
const float* ptr_src,
25 changes: 12 additions & 13 deletions src/plugins/intel_gna/src/serial/gna_model_serial.cpp
@@ -18,13 +18,12 @@
# include <mm_malloc.h>
#endif

-#include "openvino/pass/serialize.hpp"
-#include "openvino/runtime/core.hpp"
-
#include "common/versioning.hpp"
#include "gna2_model_helper.hpp"
#include "gna_model_serial.hpp"
#include "gna_plugin.hpp"
+#include "openvino/pass/serialize.hpp"
+#include "openvino/runtime/core.hpp"
#include "serial/headers/2dot2/gna_model_header.hpp"
#include "serial/headers/2dot5/gna_model_header.hpp"
#include "serial/headers/2dot7/gna_model_header.hpp"
@@ -478,7 +477,7 @@ void GNAModelSerial::Export(const GnaAllocations& allocations, std::ostream& os)
writeString(tname, os);
}
// write pre_processing model
-if(input.pre_post_process_model) {
+if (input.pre_post_process_model) {
// allocate buffer for ir.xml
std::ostringstream xml_buf;
// allocate buffer for ir.bin
@@ -515,7 +514,7 @@ void GNAModelSerial::Export(const GnaAllocations& allocations, std::ostream& os)
}

// write pre_processing model
-if(output.pre_post_process_model) {
+if (output.pre_post_process_model) {
// allocate buffer for ir.xml
std::ostringstream xml_buf;
// allocate buffer for ir.bin
@@ -611,15 +610,16 @@ void GNAModelSerial::Export(const GnaAllocations& allocations, std::ostream& os)
}

template <class T>
-void GNAModelSerial::ImportNodes(std::istream &is, void* base_ptr, T &nodes) {
-for (auto &node : nodes.Get()) {
+void GNAModelSerial::ImportNodes(std::istream& is, void* base_ptr, T& nodes) {
+for (auto& node : nodes.Get()) {
header_latest::RuntimeEndPoint ep = ReadEndPoint(is);

-node.ptrs.push_back(reinterpret_cast<float*>(reinterpret_cast<uint8_t *> (base_ptr) + ep.descriptor_offset));
+node.ptrs.push_back(reinterpret_cast<float*>(reinterpret_cast<uint8_t*>(base_ptr) + ep.descriptor_offset));
node.orientation = ep.orientation;
node.num_elements = ep.elements_count;
node.scale_factor = ep.scaleFactor;
-node.model_precision = InferenceEngine::Precision(static_cast<InferenceEngine::Precision::ePrecision>(ep.precision));
+node.model_precision =
+    InferenceEngine::Precision(static_cast<InferenceEngine::Precision::ePrecision>(ep.precision));
node.set_precision(ep.element_size);
node.model_layout = static_cast<InferenceEngine::Layout>(ep.layout);
node.allocated_size = node.get_required_size();
@@ -637,11 +637,10 @@ void GNAModelSerial::ImportNodes(std::istream &is, void* base_ptr, T &nodes) {
AppendTensorNameIfNeeded(node);

// read preprocessing model
-if (model_header_.version.major == 2 && model_header_.version.minor >= 9)
-{
+if (model_header_.version.major == 2 && model_header_.version.minor >= 9) {
std::string ir_xml_str = readString(is);
-if(!ir_xml_str.empty()) {
-//read IR bin
+if (!ir_xml_str.empty()) {
+// read IR bin
size_t ir_bin_size = 0;
readBits(ir_bin_size, is);

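Context for the Export/ImportNodes hunks above: since header version 2.9, an endpoint may carry a pre/post-processing `ov::Model`, embedded in the blob as an IR xml/bin pair. A hedged sketch of the serialize-to-buffers half, assuming OpenVINO's stream overload of `ov::pass::Serialize`; the exact blob layout written around these buffers is not reproduced here:

```cpp
#include <memory>
#include <sstream>
#include <string>

#include "openvino/core/model.hpp"
#include "openvino/pass/manager.hpp"
#include "openvino/pass/serialize.hpp"

// Serialize a pre/post-processing model into in-memory xml/bin buffers,
// roughly the pattern Export uses before writing both buffers (with their
// sizes) into the exported blob.
void serialize_to_buffers(const std::shared_ptr<ov::Model>& model,
                          std::string& xml_out,
                          std::string& bin_out) {
    std::ostringstream xml_buf;  // buffer for ir.xml
    std::ostringstream bin_buf;  // buffer for ir.bin
    ov::pass::Manager manager;
    manager.register_pass<ov::pass::Serialize>(xml_buf, bin_buf);
    manager.run_passes(model);
    xml_out = xml_buf.str();
    bin_out = bin_buf.str();
}
```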
2 changes: 1 addition & 1 deletion src/plugins/intel_gna/src/serial/gna_model_serial.hpp
@@ -41,7 +41,7 @@ class GNAModelSerial {
GNAVersionSerializer version_;

template <class T>
-void ImportNodes(std::istream &is, void* basePtr, T &inputs); //inputs or outputs
+void ImportNodes(std::istream& is, void* basePtr, T& inputs);  // inputs or outputs

void ImportTranspositionInfo(std::istream& is,
std::string& name,
src/plugins/intel_gna/src/serial/headers/2dot9/gna_model_header.hpp
@@ -6,9 +6,10 @@

#include <cstdint>
#include <map>

#include "backend/dnn_types.hpp"
-#include "serial/headers/2dot8/gna_model_header.hpp"
#include "gna_data_types.hpp"
+#include "serial/headers/2dot8/gna_model_header.hpp"

#pragma pack(push, 1)

@@ -217,6 +218,6 @@ struct RuntimeEndPoint {
orientation(orientation) {}
};

-}  // namespace header_2_dot_8
+}  // namespace header_2_dot_9
} // namespace intel_gna
} // namespace ov