Skip to content

Commit

Permalink
[Snippets] Applied Vladislav comments
Browse files Browse the repository at this point in the history
  • Loading branch information
a-sidorova committed Jul 15, 2024
1 parent 65635c4 commit bc8f441
Show file tree
Hide file tree
Showing 15 changed files with 170 additions and 243 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,9 @@ class PortDescriptor {
void set_reg_type(RegType type) { m_reg.type = type; }
void set_reg_idx(size_t idx) { m_reg.idx = idx; }

// Indexing starts from the end (rbegin() + idx)
void set_subtensor_value(size_t idx, VectorDims::value_type value);

std::string serialize() const;
bool empty() const { return m_layout.empty() && m_subtensor_shape.empty();}
PortDescriptorPtr clone() const;
Expand Down
11 changes: 5 additions & 6 deletions src/common/snippets/src/lowered/pass/propagate_subtensors.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -18,11 +18,11 @@ namespace {

// The algorithm uses the following special values in subtensors/shapes:
// 1. Dynamic value in subtensor/shape : SIZE_MAX
// 2. Full fimension in subtensor : SIZE_MAX - 1
// 2. Full dimension in subtensor : SIZE_MAX - 1
// 3. Default value of `new_dim_value` : SIZE_MAX - 2
// 4. `Forced` special dynamic value : SIZE_MAX - 3
//
// We have to introduce `SPECIAL_DYNAMIC_VALUE` to distinguish `new_dim_value = DYNAMIC`
// We have to introduce `FORCED_DYNAMIC_VALUE` to distinguish `new_dim_value = DYNAMIC`
// from the real dynamic values in subtensors and shapes and force this value in subtensors.
// For example, there is Brgemm with the following info in the tail Loop:
// Input 0: shape [?, ?], existing subtensor [32, FULL_DIM]
Expand All @@ -36,7 +36,7 @@ namespace {
// 3. Update subtensor on output using shape:
// new_subtensor[i] = std::min(planar_shape[i], subtensor[i]); // i = 0: std::min(SIZE_MAX(?), 32)
// new subtensor [32, FULL_DIM] - has not been changed! But should be [?, FULL_DIM]
// Conculsion: we have to distinguish forced dynamic value with existing dynamic values in shape and subtensor
// Conclusion: we have to distinguish forced dynamic value with existing dynamic values in shape and subtensor

constexpr size_t NEW_DEFAULT_VALUE = SIZE_MAX - 2;
constexpr size_t FORCED_DYNAMIC_VALUE = SIZE_MAX - 3;
Expand All @@ -61,9 +61,8 @@ void propagate_updated_subtensor_through_loop(const LinearIR& linear_ir,
const auto& expr = port.expr_port->get_expr();
const auto& desc = port.expr_port->get_descriptor_ptr();
auto subtensor = desc->get_subtensor();
if (port.dim_idx < subtensor.size()) {
*(subtensor.rbegin() + port.dim_idx) = new_dim_value;
desc->set_subtensor(subtensor);
if (port.dim_idx < desc->get_subtensor().size()) {
desc->set_subtensor_value(port.dim_idx, new_dim_value);
}

const auto parent_desc = expr->get_input_port_connector(port.expr_port->get_index())->get_source().get_descriptor_ptr();
Expand Down
5 changes: 5 additions & 0 deletions src/common/snippets/src/lowered/port_descriptor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,11 @@ void PortDescriptor::set_shape(const VectorDims& tensor) {
*m_tensor_shape = tensor;
}

void PortDescriptor::set_subtensor_value(size_t idx, VectorDims::value_type value) {
    // `idx` counts from the innermost (last) dimension of the subtensor,
    // so convert it into a forward offset before writing.
    OPENVINO_ASSERT(idx < m_subtensor_shape.size(), "Failed to set subtensor value: idx should be less than size");
    const auto forward_idx = m_subtensor_shape.size() - 1 - idx;
    m_subtensor_shape[forward_idx] = value;
}

PortDescriptorPtr PortDescriptor::clone() const {
auto desc = std::make_shared<PortDescriptor>(*m_tensor_shape, m_subtensor_shape, m_layout);
desc->set_reg(m_reg);
Expand Down
2 changes: 1 addition & 1 deletion src/common/snippets/src/runtime_configurator.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -186,7 +186,7 @@ void RuntimeConfigurator::update_loop_info(const std::shared_ptr<lowered::Linear
current_work_amount -= expanded_loop_info->get_work_amount();

if (expanded_loop_info->is_evaluate_once()) {
expanded_loop_info->update_ptr_increments(std::vector<int64_t>(ptr_increments.size(), 0));
// Update only `finalization offsets`. `Ptr increments` are always zeroed in this case
auto updated_finalization_offsets = current_work_amount > 0 ? std::vector<int64_t>(finalization_offsets.size(), 0) : finalization_offsets;
// work_amount is equal to increment in cases with `evaluate_once`
for (size_t i = 0; i < updated_finalization_offsets.size(); ++i)
Expand Down
34 changes: 0 additions & 34 deletions src/common/snippets/tests/include/lir_test_utils.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -44,40 +44,6 @@ void init_expr_descriptors(const ov::snippets::lowered::ExpressionPtr& expr,
const std::vector<ov::snippets::VectorDims>& subtensors = {},
const std::vector<ov::snippets::VectorDims>& layouts = {});

/**
* @brief Creates unified loop info based on provided entry and exit points, and adds it to the linear_ir's loops map
* @attention This helper wraps LoopManager::mark_loop method, but only for LoopInfo creation (whereas original
* mark_loop method also marks expressions with the corresponding loop info).
* @param linear_ir linear_ir in which loop info should be added
* @param entries entry points of loop
* @param exits exit points of loop
* @return ID of created loop
*/
size_t create_and_add_unified_loop_info(const std::shared_ptr<ov::snippets::lowered::LinearIR>& linear_ir,
size_t work_amount,
size_t increment,
const std::vector<ov::snippets::lowered::LoopPort>& entries,
const std::vector<ov::snippets::lowered::LoopPort>& exits,
bool add_default_handlers = true);
/**
* @brief Creates unified loop info based on provided entry and exit points, and adds it to the linear_ir's loops map.
* Meanwhile set loop id to expr range [loop_begin_pos, loop_end_pos).
* @attention This helper wraps LoopManager::mark_loop method, which also marks expressions with the corresponding loop info
* @param linear_ir linear_ir in which loop info should be added
 * @param loop_begin_pos begin expr position in this loop
 * @param loop_end_pos end expr position in this loop
* @param entries entry points of loop
* @param exits exit points of loop
* @return ID of created loop
*/
size_t create_and_add_unified_loop_info(const std::shared_ptr<ov::snippets::lowered::LinearIR>& linear_ir,
ov::snippets::lowered::LinearIR::constExprIt loop_begin_pos,
ov::snippets::lowered::LinearIR::constExprIt loop_end_pos,
size_t work_amount,
size_t increment,
const std::vector<ov::snippets::lowered::LoopPort>& entries,
const std::vector<ov::snippets::lowered::LoopPort>& exits,
bool add_default_handlers = true);
} // namespace snippets
} // namespace test
} // namespace ov
22 changes: 0 additions & 22 deletions src/common/snippets/tests/src/lir_test_utils.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -85,28 +85,6 @@ void init_expr_descriptors(const ov::snippets::lowered::ExpressionPtr& expr,
}
}

size_t create_and_add_unified_loop_info(const LinearIRPtr& linear_ir,
                                        size_t work_amount,
                                        size_t increment,
                                        const std::vector<LoopPort>& entries,
                                        const std::vector<LoopPort>& exits,
                                        bool set_default_handlers) {
    // Pass identical begin/end iterators so no expressions get tagged with the new loop id:
    // only the LoopInfo itself is created and registered.
    const auto empty_range_pos = linear_ir->begin();
    return create_and_add_unified_loop_info(linear_ir,
                                            empty_range_pos,
                                            empty_range_pos,
                                            work_amount,
                                            increment,
                                            entries,
                                            exits,
                                            set_default_handlers);
}

size_t create_and_add_unified_loop_info(const LinearIRPtr& linear_ir,
                                        ov::snippets::lowered::LinearIR::constExprIt loop_begin_pos,
                                        ov::snippets::lowered::LinearIR::constExprIt loop_end_pos,
                                        size_t work_amount,
                                        size_t increment,
                                        const std::vector<LoopPort>& entries,
                                        const std::vector<LoopPort>& exits,
                                        bool set_default_handlers) {
    // Delegate directly to the loop manager: it both creates the LoopInfo and
    // marks the expressions in [loop_begin_pos, loop_end_pos) with the new loop id.
    return linear_ir->get_loop_manager()->mark_loop(loop_begin_pos,
                                                    loop_end_pos,
                                                    work_amount,
                                                    increment,
                                                    entries,
                                                    exits,
                                                    set_default_handlers);
}

} // namespace snippets
} // namespace test
} // namespace ov
Loading

0 comments on commit bc8f441

Please sign in to comment.