Skip to content

Commit

Permalink
Fix code style
Browse files Browse the repository at this point in the history
  • Loading branch information
slyalin committed Nov 27, 2024
1 parent e684725 commit 1bc1e47
Show file tree
Hide file tree
Showing 5 changed files with 68 additions and 80 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -4,10 +4,11 @@

import openvino


# Creates a new file with a given name, populates it with data from a given Constant,
# returns a new Constant node with content memory-mapped to that file.
# Doesn't remove the file at the end of the returned Constant's lifetime.
def move_constant_to_file(constant, path):
openvino.save_tensor_data(constant.get_tensor_view(), path)
mmapped = openvino.read_tensor_data(path, constant.get_output_element_type(0), constant.get_output_partial_shape(0))
return openvino.runtime.op.Constant(mmapped, shared_memory=True)
return openvino.runtime.op.Constant(mmapped, shared_memory=True)
Original file line number Diff line number Diff line change
Expand Up @@ -6,14 +6,15 @@

"""Postponed Constant is a way to materialize a big constant only when it is going to be serialized to IR and then immediately dispose of it."""


# `maker` is a function that returns ov.Tensor that represents a target Constant
def make_postponed_constant(element_type, shape, maker):
class PostponedConstant(openvino.Op):
class_type_info = openvino.runtime.DiscreteTypeInfo("PostponedConstant", "extension")

def __init__(self):
super().__init__(self)
self.get_rt_info()['postponed_constant'] = True # value doesn't matter
self.get_rt_info()["postponed_constant"] = True # value doesn't matter
self.m_element_type = element_type
self.m_shape = shape
self.constructor_validate_and_infer_types()
Expand All @@ -31,4 +32,4 @@ def clone_with_new_inputs(self, _):
def validate_and_infer_types(self):
self.set_output_type(0, self.m_element_type, openvino.PartialShape(self.m_shape))

return PostponedConstant()
return PostponedConstant()
79 changes: 33 additions & 46 deletions src/core/include/openvino/runtime/tensor_util.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -4,12 +4,13 @@

#pragma once

#include "openvino/runtime/tensor.hpp"
#include "openvino/core/partial_shape.hpp"
#include "openvino/runtime/tensor.hpp"

namespace ov {

/// \brief Save given tensor data into a file. File will contain only raw bytes of a tensor.data as it is allocated in memory.
/// \brief Save given tensor data into a file. File will contain only raw bytes of a tensor.data as it is allocated in
/// memory.
/// No element type nor shape nor other metadata are serialized. Strides are preserved.
/// \param tensor Tensor which data will be serialized.
/// \param file_name Path to the output file
Expand All @@ -18,76 +19,62 @@ void save_tensor_data(const Tensor& tensor, const std::string& file_name);

#if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT)
OPENVINO_API
void save_tensor_data(const Tensor& tensor,
const std::wstring& output_model);
void save_tensor_data(const Tensor& tensor, const std::wstring& output_model);
#endif


/// \brief Read a tensor content from a file. Only raw data is loaded.
/// \param file_name Path to the output file
/// \param element_type Element type; when not specified, it is assumed to be element::u8.
/// \param shape Shape for resulting tensor. If provided shape is static, specified number of elements is read only. File should contain enough bytes, an exception is raised otherwise.
/// One of the dimensions can be dynamic. In this case it will be determined automatically based on the length of the file content and `offset`.
/// Default value is [?].
/// \param offset Read file starting from specified offset. Default is 0. The remaining size of the file should be compatible with shape.
/// \param mmap Use mmap that postpones real read from file until data is accessed.
/// \param shape Shape for resulting tensor. If provided shape is static, specified number of elements is read only.
/// File should contain enough bytes, an exception is raised otherwise.
/// One of the dimensions can be dynamic. In this case it will be determined automatically based on the
/// length of the file content and `offset`. Default value is [?].
/// \param offset Read file starting from specified offset. Default is 0. The remaining size of the file should be
/// compatible with shape. \param mmap Use mmap that postpones real read from file until data is accessed.
OPENVINO_API
Tensor read_tensor_data(
const std::string& file_name,
const element::Type& element_type = element::u8,
const PartialShape& shape = PartialShape{Dimension::dynamic()},
std::size_t offset = 0,
bool mmap = true);
Tensor read_tensor_data(const std::string& file_name,
const element::Type& element_type = element::u8,
const PartialShape& shape = PartialShape{Dimension::dynamic()},
std::size_t offset = 0,
bool mmap = true);

#if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT)
OPENVINO_API
Tensor read_tensor_data(
const std::wstring& file_name,
const element::Type& element_type = element::u8,
const PartialShape& shape = PartialShape{Dimension::dynamic()},
std::size_t offset = 0,
bool mmap = true);
Tensor read_tensor_data(const std::wstring& file_name,
const element::Type& element_type = element::u8,
const PartialShape& shape = PartialShape{Dimension::dynamic()},
std::size_t offset = 0,
bool mmap = true);
#endif


/// \brief Read raw data from a file into pre-allocated tensor.
/// \param file_name Path to the input file with raw tensor data.
/// \param tensor Tensor to read data to. Tensor should have correct element_type and shape set that is used to determine how many bytes will be read from the file.
/// \param offset Read file starting from specified offset. Default is 0. The remaining part of the file should contain enough bytes to satisfy tensor size.
/// \param tensor Tensor to read data to. Tensor should have correct element_type and shape set that is used to
/// determine how many bytes will be read from the file. \param offset Read file starting from specified offset. Default
/// is 0. The remaining part of the file should contain enough bytes to satisfy tensor size.
OPENVINO_API
void read_tensor_data(
const std::string& file_name,
Tensor& tensor,
std::size_t offset = 0);
void read_tensor_data(const std::string& file_name, Tensor& tensor, std::size_t offset = 0);

#if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT)
OPENVINO_API
void read_tensor_data(
const std::wstring& file_name,
Tensor& tensor,
std::size_t offset = 0);
void read_tensor_data(const std::wstring& file_name, Tensor& tensor, std::size_t offset = 0);
#endif

/// \brief Read raw data from a file into a tensor. Optionally re-allocate memory in tensor if required.
/// \param file_name Path to the input file with raw tensor data.
/// \param tensor Tensor to read data to. Memory is allocated using set_shape method.
/// \param shape Shape for resulting tensor. If provided shape is static, specified number of elements is read only. File should contain enough bytes, an exception is raised otherwise.
/// One of the dimensions can be dynamic. In this case it will be determined automatically based on the length of the file content and `offset`.
/// \param offset Read file starting from specified offset. Default is 0. The remaining size of the file should be compatible with shape.
/// \param shape Shape for resulting tensor. If provided shape is static, specified number of elements is read only.
/// File should contain enough bytes, an exception is raised otherwise.
/// One of the dimensions can be dynamic. In this case it will be determined automatically based on the
/// length of the file content and `offset`.
/// \param offset Read file starting from specified offset. Default is 0. The remaining size of the file should be
/// compatible with shape.
OPENVINO_API
void read_tensor_data(
const std::string& file_name,
Tensor& tensor,
const PartialShape& shape,
std::size_t offset = 0);
void read_tensor_data(const std::string& file_name, Tensor& tensor, const PartialShape& shape, std::size_t offset = 0);

#if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT)
OPENVINO_API
void read_tensor_data(
const std::wstring& file_name,
Tensor& tensor,
const PartialShape& shape,
std::size_t offset = 0);
void read_tensor_data(const std::wstring& file_name, Tensor& tensor, const PartialShape& shape, std::size_t offset = 0);
#endif

} // namespace ov
37 changes: 20 additions & 17 deletions src/core/src/pass/serialize.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -18,8 +18,8 @@
#include "openvino/core/model.hpp"
#include "openvino/core/parallel.hpp"
#include "openvino/core/type/float16.hpp"
#include "openvino/op/util/framework_node.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/util/framework_node.hpp"
#include "openvino/opsets/opset1.hpp"
#include "openvino/pass/constant_folding.hpp"
#include "openvino/reference/convert.hpp"
Expand Down Expand Up @@ -101,7 +101,8 @@ class ConstantWriter {
size_t& new_size,
bool compress_to_fp16 = false,
ov::element::Type src_type = ov::element::dynamic,
bool ptr_is_temporary = false) { // when true, do not rely on ptr after this function call, data is temporary allocated
bool ptr_is_temporary = false) { // when true, do not rely on ptr after this function call, data
// is temporary allocated
const FilePosition write_pos = m_binary_output.tellp();
const auto offset = write_pos - m_blob_offset;
new_size = size;
Expand Down Expand Up @@ -141,10 +142,10 @@ class ConstantWriter {
return it->second.first;
}
}
if(!ptr_is_temporary) {
// Since fp16_compressed data will be disposed at exit point and since we cannot reread it from the ostream,
// we store pointer to the original uncompressed blob.

if (!ptr_is_temporary) {
// Since fp16_compressed data will be disposed at exit point and since we cannot reread it from the
// ostream, we store pointer to the original uncompressed blob.
m_hash_to_file_positions.insert({hash, {offset, static_cast<void const*>(ptr)}});
}
if (m_write_hash_value) {
Expand Down Expand Up @@ -541,12 +542,13 @@ class XmlSerializer : public ov::AttributeVisitor {
a2->get_header(header_ptr, header_size);
}

int64_t offset = m_constant_write_handler.write(reinterpret_cast<const char*>(header_ptr.get()),
header_size,
inter_size,
m_compress_to_fp16,
m_output_element_type,
true); // header_ptr is allocated in AttributeAdapter that has limited life time
int64_t offset = m_constant_write_handler.write(
reinterpret_cast<const char*>(header_ptr.get()),
header_size,
inter_size,
m_compress_to_fp16,
m_output_element_type,
true); // header_ptr is allocated in AttributeAdapter that has limited life time
new_size += inter_size;

// write raw strings part
Expand Down Expand Up @@ -901,15 +903,14 @@ class PaddingsFixer {
}
};

// Substitute a Constant node for a node by calling node->constant_fold if the 'postponed_constant' rt_info attribute is present in the node
// Substitute a Constant node for a node by calling node->constant_fold if the 'postponed_constant' rt_info attribute
// is present in the node
class PostponedConstantReplacer {
private:

ov::Node* m_node;
std::shared_ptr<ov::Node> m_constant;

public:

ov::Node* get_node() {
return m_node;
}
Expand All @@ -919,10 +920,12 @@ class PostponedConstantReplacer {
}

PostponedConstantReplacer(ov::Node* node) : m_node(node) {
if(node->get_rt_info().count("postponed_constant")) {
if (node->get_rt_info().count("postponed_constant")) {
OPENVINO_ASSERT(node->get_output_size() == 1);
ov::OutputVector outputs(1);
OPENVINO_ASSERT(node->constant_fold(outputs, node->input_values()), "Node with set `postponed_constant` attribute cannot be fold to constant when saving model to IR file");
OPENVINO_ASSERT(
node->constant_fold(outputs, node->input_values()),
"Node with set `postponed_constant` attribute cannot be fold to constant when saving model to IR file");
m_constant = outputs[0].get_node_shared_ptr();
m_node = m_constant.get();
}
Expand Down
24 changes: 10 additions & 14 deletions src/core/src/runtime/tensor_util.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2,14 +2,14 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "openvino/runtime/tensor_util.hpp"

#include <fstream>

#include "openvino/runtime/tensor_util.hpp"
#include "openvino/runtime/aligned_buffer.hpp"
#include "openvino/runtime/shared_buffer.hpp"
#include "openvino/util/mmap_object.hpp"


namespace ov {

namespace {
Expand All @@ -18,7 +18,7 @@ struct StaticBufferAllocator {
std::shared_ptr<ov::AlignedBuffer> buffer;
bool allocated = false; // if buffer was returned as allocated region

StaticBufferAllocator (std::shared_ptr<ov::AlignedBuffer> _buffer) : buffer(_buffer) {}
StaticBufferAllocator(std::shared_ptr<ov::AlignedBuffer> _buffer) : buffer(_buffer) {}

void* allocate(const size_t bytes, const size_t alignment) {
// TODO: Add check for alignment
Expand All @@ -37,7 +37,7 @@ struct StaticBufferAllocator {
return true;
}
};
}
} // namespace

void save_tensor_data(const ov::Tensor& tensor, const std::string& file_name) {
OPENVINO_ASSERT(tensor.get_element_type() != ov::element::string);
Expand All @@ -46,23 +46,19 @@ void save_tensor_data(const ov::Tensor& tensor, const std::string& file_name) {
file.write(data, tensor.get_byte_size());
}

Tensor read_tensor_data(
const std::string& file_name,
const element::Type& element_type,
const PartialShape& shape,
std::size_t offset,
bool mmap) {

Tensor read_tensor_data(const std::string& file_name,
const element::Type& element_type,
const PartialShape& shape,
std::size_t offset,
bool mmap) {
OPENVINO_ASSERT(element_type != ov::element::string);
OPENVINO_ASSERT(shape.is_static(), "Dynamic shape as an argument of read_tensor_data is not implemented.");
OPENVINO_ASSERT(offset == 0);
OPENVINO_ASSERT(mmap);

auto mapped_memory = ov::load_mmap_object(file_name);
using Buffer = ov::SharedBuffer<std::shared_ptr<MappedMemory>>;
auto mmaped = std::make_shared<Buffer>(mapped_memory->data(),
mapped_memory->size(),
mapped_memory);
auto mmaped = std::make_shared<Buffer>(mapped_memory->data(), mapped_memory->size(), mapped_memory);

return Tensor(element_type, shape.get_shape(), StaticBufferAllocator(mmaped));
}
Expand Down

0 comments on commit 1bc1e47

Please sign in to comment.