[Snippets] Added Softmax support
[Snippets] Added support for Reshape around Softmax

Applied part of the review comments

Added config parameter to disable MHA ops tokenization
a-sidorova committed Nov 24, 2022
1 parent d00ba5b commit a664c06
Showing 62 changed files with 2,903 additions and 253 deletions.
39 changes: 39 additions & 0 deletions src/common/snippets/include/snippets/config.hpp
@@ -0,0 +1,39 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

namespace ngraph {
namespace snippets {


/**
* @interface SubgraphConfig
* @brief Config that records which transformations should be called.
* It helps to avoid the overhead of extra transformation calls
* @ingroup snippets
*/

struct SubgraphConfig {
// True if Subgraph contains FakeQuantize -> FQ decomposition should be called
bool m_is_quantized = false;
// True if we should align element types inside body
bool m_is_needed_to_align_precision = false;
// True if Subgraph contains TypeRelaxed nodes -> for several streams in tp mode we should copy the body using mutexes
// because TypeRelaxed::copy_with_new_inputs() isn't a thread-safe method
bool m_has_type_relaxed_ops = false;
// True if we should check runtime info for nodes to call specific needed transformations
bool m_need_fill_tail_register = false;
// True if we should go through the whole body to check where Loops should be explicitly inserted.
// Otherwise, we insert Loops only on Parameters and Results - for example, this cheaper path is enough for a subgraph with only Eltwise ops
bool m_explicit_loop_insertion = false;
// True if body has operations that don't support plugin-side domain optimizations
// (e.g. Transpose, Softmax and MatMul in general don't support dimension collapsing)
bool m_has_domain_sensitive_ops = false;
// True if one evaluation optimizations are enabled
bool m_one_evaluation_optimizations = true;
};

} // namespace snippets
} // namespace ngraph
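
For illustration, the flags above are typically filled by a single walk over the body (this commit adds an init_config() helper to Subgraph for that purpose). Below is a minimal sketch of such a scan, assuming standard opset1 types; it is not the actual implementation:

#include <memory>
#include <ngraph/opsets/opset1.hpp>
#include <openvino/core/model.hpp>
#include <openvino/core/type.hpp>
#include "snippets/config.hpp"

// Sketch: walk the body once and record which optional transformations will be needed.
// Illustrative only - the real logic lives in Subgraph::init_config().
ngraph::snippets::SubgraphConfig make_config(const std::shared_ptr<ov::Model>& body) {
    ngraph::snippets::SubgraphConfig config;
    for (const auto& op : body->get_ops()) {
        // FakeQuantize in the body means FQ decomposition should be called later
        if (ov::is_type<ngraph::opset1::FakeQuantize>(op))
            config.m_is_quantized = true;
        // Domain-sensitive ops block plugin-side dimension collapsing
        if (ov::is_type<ngraph::opset1::Softmax>(op) ||
            ov::is_type<ngraph::opset1::Transpose>(op) ||
            ov::is_type<ngraph::opset1::MatMul>(op))
            config.m_has_domain_sensitive_ops = true;
    }
    return config;
}
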
5 changes: 4 additions & 1 deletion src/common/snippets/include/snippets/generator.hpp
@@ -8,6 +8,7 @@
*/
#pragma once

#include "snippets/config.hpp"
#include "snippets_isa.hpp"
#include "emitter.hpp"

@@ -115,9 +116,11 @@ class Generator {
/**
* @brief virtual method any specific implementation should implement
* @param m model in canonical form for table-based code generation
* @param config config with transformation and optimization parameters
* @param compile_params parameters for generated code
* @return pointer to generated code
*/
code generate(std::shared_ptr<ov::Model>& m, const void* compile_params = nullptr) const;
code generate(std::shared_ptr<ov::Model>& m, const SubgraphConfig& config, const void* compile_params = nullptr) const;

/**
* @brief gets target machine
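
A minimal sketch of the updated call site: the config computed for the Subgraph is now handed to the generator together with the lowered body. The helper below and its names are illustrative; only the generate() signature comes from the header above:

#include <memory>
#include "snippets/generator.hpp"

// Sketch: pass the Subgraph's config into code generation.
// `generator` is a target-specific Generator and `body` is the lowered snippet body,
// both assumed to be prepared by the plugin.
void emit_kernel(const std::shared_ptr<ngraph::snippets::Generator>& generator,
                 std::shared_ptr<ov::Model>& body,
                 const ngraph::snippets::SubgraphConfig& config) {
    // compile_params stays nullptr here; a plugin may pass target-specific data instead.
    auto kernel = generator->generate(body, config, nullptr);
    (void)kernel;  // a real plugin would wrap this pointer into an executable kernel object
}
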
43 changes: 43 additions & 0 deletions src/common/snippets/include/snippets/op/buffer.hpp
@@ -0,0 +1,43 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <ngraph/op/op.hpp>

namespace ngraph {
namespace snippets {
namespace op {

/**
* @interface Buffer
* @brief The operation is for intermediate data storage
* Notes:
* - All buffers in a graph have the same memory pointer. So if we have a few buffers,
* each buffer should have its own offset into the common memory
* - If a Buffer consumes an operation output, this Buffer should be the only consumer of that port
* @ingroup snippets
*/
class Buffer : public ngraph::op::Op {
public:
OPENVINO_OP("Buffer", "SnippetsOpset");
BWDCMP_RTTI_DECLARATION;

Buffer(const Output<Node>& x);
Buffer() = default;

size_t get_offset() const { return m_offset; }
void set_offset(const size_t offset);

bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
void validate_and_infer_types() override;

private:
size_t m_offset = 0lu;
};

} // namespace op
} // namespace snippets
} // namespace ngraph
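
Because all Buffers address one shared scratchpad, a later pass has to give each of them a unique byte offset. A minimal sketch of that layout step, assuming the byte size required by each Buffer has already been computed elsewhere:

#include <cstddef>
#include <vector>

// Sketch: lay out several Buffers in one common scratchpad by assigning each a
// unique byte offset. byte_sizes[i] is the allocation required by the i-th Buffer;
// the returned offsets are what Buffer::set_offset() would receive, and the running
// total corresponds to the scratchpad size the plugin has to allocate.
std::vector<size_t> assign_buffer_offsets(const std::vector<size_t>& byte_sizes) {
    std::vector<size_t> offsets;
    offsets.reserve(byte_sizes.size());
    size_t scratchpad_size = 0;
    for (const size_t size : byte_sizes) {
        offsets.push_back(scratchpad_size);
        scratchpad_size += size;
    }
    return offsets;
}
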
47 changes: 47 additions & 0 deletions src/common/snippets/include/snippets/op/fill.hpp
@@ -0,0 +1,47 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <ngraph/op/op.hpp>

namespace ngraph {
namespace snippets {
namespace op {

/**
* @interface Fill
* @brief Generated in the tail-loop vector representation during the code generation step for cases when we should
* refill registers with special values.
* For example, for cases with ReduceMax or ReduceSum in Softmax
* Where:
* - offset - the element shift from which filling starts
* - fill_value - the filling value in hexadecimal
* @ingroup snippets
*/
class Fill : public ngraph::op::Op {
public:
OPENVINO_OP("Fill", "SnippetsOpset");

Fill(const Output<Node>& x, const size_t offset, const uint32_t fill_value = 0x0);
Fill() = default;

size_t get_offset() const { return m_offset; }
uint32_t get_fill_value() const { return m_fill_value; }

void set_offset(const size_t offset) { m_offset = offset; }
void set_fill_value(const uint32_t fill_value) { m_fill_value = fill_value; }

bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
void validate_and_infer_types() override;

protected:
size_t m_offset = 0lu;
uint32_t m_fill_value = 0x0;
};

} // namespace op
} // namespace snippets
} // namespace ngraph
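
For instance, when a tail loop finishes a max-reduction, the unused vector lanes must not influence the result, so they are refilled with the bit pattern of -infinity; for a sum-reduction, zeros are neutral. A hedged construction sketch using the constructor declared above (the input output and the tail size are assumed to come from the lowering pass):

#include <cstddef>
#include <memory>
#include "snippets/op/fill.hpp"

// Sketch: create Fill ops for a tail loop that keeps only `tail_size` valid lanes.
// 0xFF800000 is the IEEE-754 single-precision bit pattern of -inf (neutral for max),
// 0x00000000 is neutral for sum; both values are examples, not fixed by the op.
std::shared_ptr<ngraph::snippets::op::Fill> make_max_tail_fill(const ngraph::Output<ngraph::Node>& input,
                                                               size_t tail_size) {
    return std::make_shared<ngraph::snippets::op::Fill>(input, tail_size, 0xFF800000u);
}

std::shared_ptr<ngraph::snippets::op::Fill> make_sum_tail_fill(const ngraph::Output<ngraph::Node>& input,
                                                               size_t tail_size) {
    return std::make_shared<ngraph::snippets::op::Fill>(input, tail_size, 0x00000000u);
}
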
32 changes: 32 additions & 0 deletions src/common/snippets/include/snippets/op/horizon_max.hpp
@@ -0,0 +1,32 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include "ngraph/op/op.hpp"

namespace ngraph {
namespace snippets {
namespace op {

/**
* @interface HorizonMax
* @brief The operation calculates a horizon maximum of a vector register
* @ingroup snippets
*/
class HorizonMax : public ngraph::op::Op {
public:
OPENVINO_OP("HorizonMax", "SnippetsOpset");

HorizonMax(const Output<Node>& x);
HorizonMax() = default;

bool visit_attributes(AttributeVisitor& visitor) override { return true; }
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
void validate_and_infer_types() override;
};

} // namespace op
} // namespace snippets
} // namespace ngraph
32 changes: 32 additions & 0 deletions src/common/snippets/include/snippets/op/horizon_sum.hpp
@@ -0,0 +1,32 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include "ngraph/op/op.hpp"

namespace ngraph {
namespace snippets {
namespace op {

/**
* @interface HorizonSum
* @brief The operation calculates a horizon sum of a vector register
* @ingroup snippets
*/
class HorizonSum : public ngraph::op::Op {
public:
OPENVINO_OP("HorizonSum", "SnippetsOpset");

HorizonSum(const Output<Node>& x);
HorizonSum() = default;

bool visit_attributes(AttributeVisitor& visitor) override { return true; }
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
void validate_and_infer_types() override;
};

} // namespace op
} // namespace snippets
} // namespace ngraph
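
In scalar terms, HorizonMax and HorizonSum collapse all lanes of a vector register into a single value; the emitters do this with SIMD shuffles, but the semantics are just the reductions below (the lane count of 8 is only an example - the real width depends on the target ISA):

#include <algorithm>
#include <array>
#include <cstddef>
#include <numeric>

constexpr size_t kLanes = 8;  // example width; the real value depends on the target ISA

// What HorizonMax produces for one register
float horizon_max(const std::array<float, kLanes>& reg) {
    return *std::max_element(reg.begin(), reg.end());
}

// What HorizonSum produces for one register
float horizon_sum(const std::array<float, kLanes>& reg) {
    return std::accumulate(reg.begin(), reg.end(), 0.0f);
}
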
15 changes: 8 additions & 7 deletions src/common/snippets/include/snippets/op/load.hpp
@@ -12,26 +12,26 @@ namespace op {

/**
* @interface Load
* @brief Generated by Canonicalization step where explicit instructions should be emitted for data loading
* where number of elements to load is determined by "count"
* Default value is "1" - to load one element
* @brief Generated during the Lowering stage (convert_to_snippets_dialect), where explicit instructions should be emitted for data loading.
* The number of elements to load is determined by "count" (default value is "1" - load one element),
* and the memory offset for loading is determined by "offset" (default value is "0" - load starting from the first element)
* @ingroup snippets
*/
class Load : public ngraph::op::Op {
public:
OPENVINO_OP("Load", "SnippetsOpset");

Load(const Output<Node>& x, const size_t count = 1lu);
Load(const Output<Node>& x, const size_t count = 1lu, const size_t offset = 0lu);
Load() = default;

size_t get_count() const { return m_count; }
size_t get_offset() const { return m_offset; }

void set_count(const size_t count) { m_count = count; }
void set_offset(const size_t offset) { m_offset = offset; }

bool visit_attributes(AttributeVisitor& visitor) override;

std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;

void validate_and_infer_types() override;

OPENVINO_SUPPRESS_DEPRECATED_START
@@ -40,14 +40,15 @@ class Load : public ngraph::op::Op {

protected:
size_t m_count = 0lu;
size_t m_offset = 0lu;
};

// todo: LoadReshape is just a Load (and is mapped onto LoadEmitter); it only allows keeping correct shape propagation
// when we decompose Transpose into Load and Store.
class LoadReshape : public Load {
public:
OPENVINO_OP("LoadReshape", "SnippetsOpset");
LoadReshape(const Output<Node>& x, size_t count = 1lu, std::vector<size_t> order = {});
LoadReshape(const Output<Node>& x, size_t count = 1lu, const size_t offset = 0lu, std::vector<size_t> order = {});
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
void validate_and_infer_types() override;
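
In scalar terms, the new "offset" simply shifts where reading starts, while "count" keeps its old meaning of how many elements a single Load covers. A minimal model of these semantics (the emitter lowers the op to one vector load; this only illustrates how the two attributes are interpreted):

#include <cstddef>
#include <vector>

// Scalar model of Load: read `count` elements starting at `base + offset`.
std::vector<float> model_load(const float* base, size_t count, size_t offset) {
    return std::vector<float>(base + offset, base + offset + count);
}
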
13 changes: 7 additions & 6 deletions src/common/snippets/include/snippets/op/store.hpp
@@ -12,26 +12,26 @@ namespace op {

/**
* @interface Store
* @brief Generated by Canonicalization step where explicit instructions should be emitted for data storing
* where number of elements to store is determined by "count"
* Default value is "1" - to store one element
* @brief Generated during the Lowering stage (convert_to_snippets_dialect), where explicit instructions should be emitted for data storing.
* The number of elements to store is determined by "count" (default value is "1" - store one element),
* and the memory offset for storing is determined by "offset" (default value is "0" - store starting at the start memory ptr)
* @ingroup snippets
*/
class Store : public ngraph::op::Op {
public:
OPENVINO_OP("Store", "SnippetsOpset");

Store(const Output<Node>& x, const size_t count = 1lu);
Store(const Output<Node>& x, const size_t count = 1lu, const size_t offset = 0lu);
Store() = default;

size_t get_count() const { return m_count; }
size_t get_offset() const { return m_offset; }

void set_count(const size_t count) { m_count = count; }
void set_offset(const size_t offset) { m_offset = offset; }

bool visit_attributes(AttributeVisitor& visitor) override;

std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;

void validate_and_infer_types() override;

OPENVINO_SUPPRESS_DEPRECATED_START
@@ -40,6 +40,7 @@ class Store : public ngraph::op::Op {

protected:
size_t m_count = 0lu;
size_t m_offset = 0lu;
};

} // namespace op
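
Putting Load and Store together, a lowered element-wise chunk reads a vector, computes, and writes it back at a matching offset. A hedged construction sketch using the constructors declared in this commit (the shape, count and offset values are illustrative, and the Parameter/Relu/Result wiring is plain opset1):

#include <memory>
#include <ngraph/opsets/opset1.hpp>
#include <openvino/core/model.hpp>
#include "snippets/op/load.hpp"
#include "snippets/op/store.hpp"

// Sketch: a tiny Load -> Relu -> Store chain with an explicit vector length and offset.
std::shared_ptr<ov::Model> make_load_store_chain() {
    auto param  = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, ngraph::Shape{1, 64});
    auto load   = std::make_shared<ngraph::snippets::op::Load>(param, /*count*/ 8, /*offset*/ 0);
    auto relu   = std::make_shared<ngraph::opset1::Relu>(load);
    auto store  = std::make_shared<ngraph::snippets::op::Store>(relu, /*count*/ 8, /*offset*/ 0);
    auto result = std::make_shared<ngraph::opset1::Result>(store);
    return std::make_shared<ov::Model>(ngraph::ResultVector{result}, ngraph::ParameterVector{param});
}
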
53 changes: 19 additions & 34 deletions src/common/snippets/include/snippets/op/subgraph.hpp
@@ -12,6 +12,7 @@
#include <ngraph/pass/manager.hpp>

#include "snippets/generator.hpp"
#include "snippets/config.hpp"

namespace ngraph {
namespace snippets {
@@ -89,21 +90,13 @@ class Subgraph : public ngraph::op::Op {
return m_generator;
}

size_t get_non_scalar_constants_count() const {
return m_non_scalar_constants_count;
}

bool is_quantized() const {
return config.m_is_quantized;
}

bool has_type_relaxed_ops() const {
return config.m_has_type_relaxed_ops;
}

bool has_domain_sensitive_ops() const {
return config.m_has_domain_sensitive_ops;
}
// Returns the common memory size for all Buffers in the body
size_t get_buffer_scratchpad_size() const;
size_t get_virtual_port_count() const { return m_virtual_port_count; }
bool is_buffer_needed() const { return m_buffer_needed; }
bool is_quantized() const { return config.m_is_quantized; }
bool has_type_relaxed_ops() const { return config.m_has_type_relaxed_ops; }
bool has_domain_sensitive_ops() const { return config.m_has_domain_sensitive_ops; }

size_t tileRank = 0; // set by plugin to facilitate scheduling

@@ -120,7 +113,8 @@
// plugin sets generator for a snippet to some specific generator.
// it's going to be replaced with Jitters table later
void set_generator(std::shared_ptr<ngraph::snippets::Generator> generator);
void set_non_scalar_constants_count(const size_t count);
void set_virtual_port_count(const size_t count);
void buffer_needed(const bool need);

void print() const;
void print_statistics(bool verbose);
@@ -134,32 +128,23 @@
private:
void align_element_types(const BlockedShapeVector& outputShapes, const BlockedShapeVector& inputShapes);
void convert_to_snippet_dialect();
// Count of potential non-scalar Constants that will be created after some transformations
// At the moment it's relevant only for FakeQuantize decomposition
// NOTE: To avoid overheads in each calculation of this count (for example, in validate_and_type_infer()),
void init_config();
// Count of Subgraph virtual ports:
// - Potential non-scalar Constants that will be created after some transformations (At the moment it's relevant only for FakeQuantize decomposition)
// - Buffers. All Buffers are considered as one common additional virtual port. So we cannot summarize them as potential non-scalar Constants
// NOTE: To avoid overheads in each calculation of this count (for example, in validate_and_type_infer()),
// we should MANUALLY calculate it where it is needed.
size_t m_non_scalar_constants_count = 0;
size_t m_virtual_port_count = 0;
// Need Buffer op or not
bool m_buffer_needed = false;
Shape exec_domain = {};
std::shared_ptr<ov::Model> m_body = nullptr;
std::shared_ptr<ngraph::snippets::Generator> m_generator = nullptr;

// TODO: Change the logic of inserting Converts. This exec element type can be different for plugins
const ov::element::Type execution_element_type = ov::element::f32;

// Config to know which transformations should be called.
// It helps to avoid overheads of extra transformation calls
struct {
// True if Subgraph contains FakeQuantize -> FQ decomposition should be called
bool m_is_quantized = false;
// True if we should align element types inside body
bool m_is_needed_to_align_precision = false;
// True if Subgraph contains TypeRelaxed nodes -> for several streams in tp mode we should copy the body using mutexes
// because TypeRelaxed::copy_with_new_inputs() isn't a thread-safe method
bool m_has_type_relaxed_ops = false;
// True if body has operations that don't support plugin-side domain optimizations
// (e.g. Transpose in general doesn't support dimensions collapsing)
bool m_has_domain_sensitive_ops = false;
} config;
SubgraphConfig config;

ov::PartialShape master_shape;
};
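
On the plugin side, the new accessors make it possible to size one shared scratchpad for all Buffers before the kernel runs. A minimal sketch, assuming an already-tokenized Subgraph node (allocation policy and kernel invocation are deliberately left out):

#include <cstdint>
#include <memory>
#include <vector>
#include "snippets/op/subgraph.hpp"

// Sketch: query the Subgraph before execution and allocate its common Buffer memory.
std::vector<uint8_t> prepare_scratchpad(const std::shared_ptr<ngraph::snippets::op::Subgraph>& subgraph) {
    std::vector<uint8_t> scratchpad;
    if (subgraph->is_buffer_needed()) {
        // One allocation for all Buffers; each Buffer addresses it via its own offset.
        scratchpad.resize(subgraph->get_buffer_scratchpad_size());
    }
    return scratchpad;
}
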