Skip to content

Commit

Permalink
Move configurator logic to separate methods
Browse files Browse the repository at this point in the history
  • Loading branch information
v-Golubev committed Oct 11, 2024
1 parent f6425ca commit 5dbab73
Show file tree
Hide file tree
Showing 2 changed files with 46 additions and 36 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -39,45 +39,11 @@ CPURuntimeConfigurator::CPURuntimeConfigurator() : ov::snippets::RuntimeConfigur
}

void CPURuntimeConfigurator::update(const ov::snippets::lowered::LinearIRCPtr& linear_ir) {
    // Updates the CPU-specific runtime configuration for the given LinearIR.
    // NOTE(review): this span previously contained the inline bodies of the two
    // helpers below (diff residue of the "move logic to separate methods"
    // refactor); keeping both the inline code and the helper calls would run
    // the same logic twice, so the function now only delegates.
    //
    // 1. Collect blocked memory descriptors requested for the input tensors.
    update_requested_descs(linear_ir);
    // 2. Run the platform-independent part of the configuration update.
    RuntimeConfigurator::update(linear_ir);
    // 3. Loop runtime arguments are needed only in the dynamic case.
    if (linear_ir->is_dynamic())
        update_loop_args(linear_ir);
    // 4. Correct I/O data offsets to match the requested blocked layouts.
    adjust_offsets_from_descs();
}

void CPURuntimeConfigurator::update_tensor_rank(const ov::snippets::VectorDims& master_shape) {
Expand Down Expand Up @@ -111,5 +77,46 @@ void CPURuntimeConfigurator::update_loop_args(const ov::snippets::lowered::Linea
}
}

void CPURuntimeConfigurator::update_requested_descs(const ov::snippets::lowered::LinearIRCPtr& linear_ir) const {
    // Fills CPURuntimeConfig::m_in_requested_descs with blocked memory descriptors
    // for inputs consumed by a BrgemmCPU node (currently hard-wired to input #1,
    // see the TODOs below — this is work-in-progress logic).
    const auto& cpu_config = ov::as_type_ptr<CPURuntimeConfig>(m_config);
    auto& requested_descs = cpu_config->m_in_requested_descs;
    requested_descs.resize(m_in_num);
    const auto& parameters = linear_ir->get_parameters();
    OPENVINO_ASSERT(parameters.size() == m_in_num);
    for (size_t idx = 0; idx < m_in_num; ++idx) {
        // TODO: remove — only the second input is handled for now
        if (idx != 1)
            continue;
        const auto& parameter = parameters[idx];
        const auto consumers = parameter->get_output_port_connector(0)->get_consumers();
        OPENVINO_ASSERT(consumers.size() == 1);
        const auto& consumer_expr = consumers.begin()->get_expr();
        // TODO: this logic should be more flexible
        if (!ov::is_type<ov::intel_cpu::BrgemmCPU>(consumer_expr->get_node()))
            continue;
        // Collapse the parameter shape to rank 3: keep the two innermost dims,
        // fold all leading dims into the first one.
        const auto& shape = parameter->get_output_port_descriptor(0)->get_shape();
        const auto rank = shape.size();
        VectorDims collapsed_dims(3, 1);
        collapsed_dims[2] = shape[rank - 1];
        collapsed_dims[1] = shape[rank - 2];
        collapsed_dims[0] = std::accumulate(shape.begin(), shape.end() - 2, static_cast<Dim>(1), std::multiplies<Dim>());

        const auto data_type = DnnlExtensionUtils::ElementTypeToDataType(parameter->get_node()->get_output_element_type(0));
        // TODO: tag must be selected based on Brgemm params (inner block + vnni factor?)
        const auto tag = dnnl::memory::format_tag::aCB16b64c2b;
        requested_descs[idx] = std::make_shared<DnnlBlockedMemoryDesc>(Shape(collapsed_dims), data_type, tag);
    }
}
void CPURuntimeConfigurator::adjust_offsets_from_descs() const {
    // Corrects io_data_offsets for every input that received a requested
    // blocked descriptor in update_requested_descs().
    const auto& cpu_config = ov::as_type_ptr<CPURuntimeConfig>(m_config);
    const auto& requested_descs = cpu_config->m_in_requested_descs;
    for (size_t idx = 0; idx < m_in_num; ++idx) {
        if (!requested_descs[idx])
            continue;
        auto& io_offsets = m_config->io_data_offsets[idx];
        // TODO: how exactly should offsets be corrected using info from blocking descriptor?
        if (idx == 1)
            io_offsets[3] = 2048 * 2;
    }
}

} // namespace intel_cpu
} // namespace ov
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,9 @@ class CPURuntimeConfigurator : public ov::snippets::RuntimeConfigurator {
*/
void update_loop_args(const ov::snippets::lowered::LinearIRCPtr& linear_ir) const;

/**
 * @brief Fills CPURuntimeConfig::m_in_requested_descs with memory descriptors requested for input tensors
 * @param linear_ir LinearIR whose Parameters are inspected
 */
void update_requested_descs(const ov::snippets::lowered::LinearIRCPtr& linear_ir) const;
/**
 * @brief Adjusts io_data_offsets for the inputs that have requested memory descriptors
 */
void adjust_offsets_from_descs() const;

static const size_t rank6D;
};

Expand Down

0 comments on commit 5dbab73

Please sign in to comment.