Skip to content

Commit

Permalink
[Op][Internal] Rename SwiGLU to GLU (openvinotoolkit#27683)
Browse files Browse the repository at this point in the history
### Details:
- Rename internal op SwiGLU to GLU (no naming changes for GPU swiglu
kernel in this PR)
 
The current SwiGLU can also act as a GeGLU; which one it is depends on the glu_type member.
Several people have proposed renaming this op to something more generic, such as GLU.

Related comment:

openvinotoolkit#27579 (comment)

### Tickets:
 - 157623
  • Loading branch information
mitruska authored Nov 22, 2024
1 parent 2e39485 commit f6e0ba0
Show file tree
Hide file tree
Showing 13 changed files with 104 additions and 104 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -11,29 +11,29 @@ namespace ov {
namespace op {
namespace internal {

/// \brief Operator performing Swish Gated Linear Unit Activation
/// \brief Operator performing Gated Linear Unit Activation
/// This operation performs gated linear unit activation that combines a swish or gelu activation function
class TRANSFORMATIONS_API SwiGLU : public ov::op::Op {
class TRANSFORMATIONS_API GLU : public ov::op::Op {
public:
OPENVINO_OP("SwiGLU", "ie_internal_opset");
OPENVINO_OP("GLU", "ie_internal_opset");

enum GluType { Swish = 0, Gelu, Gelu_Tanh };

SwiGLU() = default;
/// \brief Constructs an SwiGLU operation.
GLU() = default;
/// \brief Constructs a GLU operation.
///
/// \param data Input tensor with data
/// \param axis The index of an axis in "data" along which to perform the split
/// \param split_lengths A list containing the sizes of each output tensor along the split "axis"
/// \param glu_type GLU type, one of Swish, Gelu and Gelu_Tanh
/// \param split_to_glu_idx Output index of variadic split, which is connected to GLU
/// \param output_type Output element type
SwiGLU(const Output<Node>& data,
int64_t axis,
int64_t split_lengths,
const GluType glu_type,
const size_t split_to_glu_idx,
const ov::element::Type output_type = ov::element::undefined);
GLU(const Output<Node>& data,
int64_t axis,
int64_t split_lengths,
const GluType glu_type,
const size_t split_to_glu_idx,
const ov::element::Type output_type = ov::element::undefined);

bool visit_attributes(ov::AttributeVisitor& visitor) override;

Expand Down Expand Up @@ -76,7 +76,7 @@ class TRANSFORMATIONS_API SwiGLU : public ov::op::Op {
};

// TODO 157615: Move to shape_inference
TRANSFORMATIONS_API std::vector<ov::PartialShape> shape_infer(const SwiGLU* op,
TRANSFORMATIONS_API std::vector<ov::PartialShape> shape_infer(const GLU* op,
std::vector<ov::PartialShape> input_shapes);

} // namespace internal
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -11,10 +11,10 @@
namespace ov {
namespace pass {

class TRANSFORMATIONS_API SwiGLUFusion : public ov::pass::MatcherPass {
class TRANSFORMATIONS_API GLUFusion : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("SwiGLUFusion", "0");
SwiGLUFusion();
OPENVINO_RTTI("GLUFusion", "0");
GLUFusion();
};

} // namespace pass
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "ov_ops/swiglu.hpp"
#include "ov_ops/glu.hpp"

#include "openvino/core/partial_shape.hpp"
#include "openvino/core/validation_util.hpp"
Expand All @@ -13,12 +13,12 @@ namespace ov {
namespace op {
namespace internal {

SwiGLU::SwiGLU(const Output<Node>& data,
int64_t axis,
int64_t split_lengths,
const GluType glu_type,
const size_t split_to_glu_idx,
const ov::element::Type output_type)
GLU::GLU(const Output<Node>& data,
int64_t axis,
int64_t split_lengths,
const GluType glu_type,
const size_t split_to_glu_idx,
const ov::element::Type output_type)
: Op({data}),
m_axis(axis),
m_split_lengths(split_lengths),
Expand All @@ -28,14 +28,14 @@ SwiGLU::SwiGLU(const Output<Node>& data,
validate_and_infer_types();
}

bool SwiGLU::visit_attributes(ov::AttributeVisitor& visitor) {
bool GLU::visit_attributes(ov::AttributeVisitor& visitor) {
visitor.on_attribute("axis", m_axis);
visitor.on_attribute("split_lengths", m_split_lengths);
visitor.on_attribute("output_type", m_output_type);
return true;
}

void SwiGLU::validate_and_infer_types() {
void GLU::validate_and_infer_types() {
auto output_type = m_output_type == ov::element::undefined ? get_input_element_type(0) : m_output_type;

std::vector<ov::PartialShape> input_shapes = {get_input_partial_shape(0),
Expand All @@ -45,17 +45,17 @@ void SwiGLU::validate_and_infer_types() {
set_output_type(0, output_type, shape_infer(this, input_shapes)[0]);
}

std::shared_ptr<Node> SwiGLU::clone_with_new_inputs(const ov::OutputVector& new_args) const {
std::shared_ptr<Node> GLU::clone_with_new_inputs(const ov::OutputVector& new_args) const {
check_new_args_count(this, new_args);
return std::make_shared<SwiGLU>(new_args.at(0),
m_axis,
m_split_lengths,
m_glu_type,
m_split_to_glu_idx,
m_output_type);
return std::make_shared<GLU>(new_args.at(0),
m_axis,
m_split_lengths,
m_glu_type,
m_split_to_glu_idx,
m_output_type);
}

std::vector<ov::PartialShape> shape_infer(const SwiGLU* op, std::vector<ov::PartialShape> input_shapes) {
std::vector<ov::PartialShape> shape_infer(const GLU* op, std::vector<ov::PartialShape> input_shapes) {
ov::op::v1::VariadicSplit variadic_split;
std::vector<int64_t> axis = {op->get_axis()};
std::vector<int64_t> split_lengths = {op->get_split_lengths(), -1};
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "transformations/common_optimizations/swiglu_fusion.hpp"
#include "transformations/common_optimizations/glu_fusion.hpp"

#include "openvino/core/rt_info.hpp"
#include "openvino/op/constant.hpp"
Expand All @@ -13,13 +13,13 @@
#include "openvino/pass/manager.hpp"
#include "openvino/pass/pattern/op/or.hpp"
#include "openvino/pass/pattern/op/wrap_type.hpp"
#include "ov_ops/swiglu.hpp"
#include "ov_ops/glu.hpp"
#include "transformations/utils/utils.hpp"

namespace ov {
namespace pass {

SwiGLUFusion::SwiGLUFusion() {
GLUFusion::GLUFusion() {
using namespace ov::pass::pattern;
using ov::pass::pattern::op::Or;

Expand All @@ -28,8 +28,8 @@ SwiGLUFusion::SwiGLUFusion() {
return out_ps.rank().is_static() && out_ps[out_ps.rank().get_length() - 1].is_static() && out_ps.size() <= 5;
};

// Detect SwiGLU decomposition pattern
// SwiGLU(Xw, Xv, beta) = (Xw * (1.0 + exp(-beta * Xw))) * Xv
// Detect GLU decomposition pattern
// GLU(Xw, Xv, beta) = (Xw * (1.0 + exp(-beta * Xw))) * Xv
auto data_m = any_input(last_dim_static);

// VariadicSplit(X, axis, split_lengths) = Xw, Xv
Expand Down Expand Up @@ -60,11 +60,11 @@ SwiGLUFusion::SwiGLUFusion() {
auto isSwiGLU = pattern_map.count(swish_m);
auto isGeGLU = pattern_map.count(gelu_m);
size_t split_to_glu_idx = 0;
ov::op::internal::SwiGLU::GluType glu_type = ov::op::internal::SwiGLU::GluType::Swish;
ov::op::internal::GLU::GluType glu_type = ov::op::internal::GLU::GluType::Swish;

if (isSwiGLU) {
auto swish = std::dynamic_pointer_cast<ov::op::v4::Swish>(pattern_map.at(swish_m).get_node_shared_ptr());
glu_type = ov::op::internal::SwiGLU::GluType::Swish;
glu_type = ov::op::internal::GLU::GluType::Swish;
split_to_glu_idx = swish->input_value(0).get_index();

size_t split_in_idx = ov::is_type<ov::op::v4::Swish>(mul->get_input_node_shared_ptr(0)) ? 1 : 0;
Expand All @@ -73,8 +73,8 @@ SwiGLUFusion::SwiGLUFusion() {
} else if (isGeGLU) {
auto gelu = std::dynamic_pointer_cast<ov::op::v7::Gelu>(pattern_map.at(gelu_m).get_node_shared_ptr());
glu_type = (gelu->get_approximation_mode() == ov::op::GeluApproximationMode::ERF)
? ov::op::internal::SwiGLU::GluType::Gelu
: ov::op::internal::SwiGLU::GluType::Gelu_Tanh;
? ov::op::internal::GLU::GluType::Gelu
: ov::op::internal::GLU::GluType::Gelu_Tanh;
split_to_glu_idx = gelu->input_value(0).get_index();

size_t split_in_idx = ov::is_type<ov::op::v7::Gelu>(mul->get_input_node_shared_ptr(0)) ? 1 : 0;
Expand Down Expand Up @@ -107,20 +107,20 @@ SwiGLUFusion::SwiGLUFusion() {
auto data = pattern_map.at(data_m);
auto output_type = m.get_match_root()->get_output_element_type(0);

auto swiglu = std::make_shared<ov::op::internal::SwiGLU>(data,
axis_value,
split_lengths_value,
glu_type,
split_to_glu_idx,
output_type);
auto swiglu = std::make_shared<ov::op::internal::GLU>(data,
axis_value,
split_lengths_value,
glu_type,
split_to_glu_idx,
output_type);
swiglu->set_friendly_name(m.get_match_root()->get_friendly_name());
ov::copy_runtime_info(m.get_matched_nodes(), swiglu);
ov::replace_node(m.get_match_root(), swiglu);

return true;
};

auto m = std::make_shared<ov::pass::pattern::Matcher>(mul_m, "SwiGLUFusion");
auto m = std::make_shared<ov::pass::pattern::Matcher>(mul_m, "GLUFusion");
this->register_matcher(m, callback);
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "transformations/common_optimizations/swiglu_fusion.hpp"
#include "transformations/common_optimizations/glu_fusion.hpp"

#include <gtest/gtest.h>

Expand All @@ -18,13 +18,13 @@
#include "openvino/op/swish.hpp"
#include "openvino/op/variadic_split.hpp"
#include "openvino/pass/manager.hpp"
#include "ov_ops/swiglu.hpp"
#include "ov_ops/glu.hpp"
#include "transformations/utils/utils.hpp"

using namespace testing;
using namespace ov::pass;

TEST_F(TransformationTestsF, SwiGLUFusionTest1) {
TEST_F(TransformationTestsF, GLUFusionTest1) {
{
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f16, ov::PartialShape{2, 1, 6});
auto axis_const = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {-1});
Expand All @@ -34,24 +34,24 @@ TEST_F(TransformationTestsF, SwiGLUFusionTest1) {
auto mul = std::make_shared<ov::op::v1::Multiply>(swish, variadic_split->output(1));

model = std::make_shared<ov::Model>(ov::NodeVector{mul}, ov::ParameterVector{input});
manager.register_pass<SwiGLUFusion>();
manager.register_pass<GLUFusion>();
}
{
int64_t axis = -1;
int64_t split_lenghts = 3;
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f16, ov::PartialShape{2, 1, 6});
auto swiglu = std::make_shared<ov::op::internal::SwiGLU>(input,
axis,
split_lenghts,
ov::op::internal::SwiGLU::GluType::Swish,
0,
ov::element::f16);
auto swiglu = std::make_shared<ov::op::internal::GLU>(input,
axis,
split_lenghts,
ov::op::internal::GLU::GluType::Swish,
0,
ov::element::f16);

model_ref = std::make_shared<ov::Model>(ov::NodeVector{swiglu}, ov::ParameterVector{input});
}
}

TEST_F(TransformationTestsF, SwiGLUFusionTest2) {
TEST_F(TransformationTestsF, GLUFusionTest2) {
{
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f16, ov::PartialShape{-1, -1, 6});
auto axis_const = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {0});
Expand All @@ -61,11 +61,11 @@ TEST_F(TransformationTestsF, SwiGLUFusionTest2) {
auto mul = std::make_shared<ov::op::v1::Multiply>(swish, variadic_split->output(1));

model = std::make_shared<ov::Model>(ov::NodeVector{mul}, ov::ParameterVector{input});
manager.register_pass<SwiGLUFusion>();
manager.register_pass<GLUFusion>();
}
}

TEST_F(TransformationTestsF, SwiGLUFusionTest3) {
TEST_F(TransformationTestsF, GLUFusionTest3) {
{
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f16, ov::PartialShape{-1, -1, 6});
auto axis_const = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {-1});
Expand All @@ -75,24 +75,24 @@ TEST_F(TransformationTestsF, SwiGLUFusionTest3) {
auto mul = std::make_shared<ov::op::v1::Multiply>(swish, variadic_split->output(1));

model = std::make_shared<ov::Model>(ov::NodeVector{mul}, ov::ParameterVector{input});
manager.register_pass<SwiGLUFusion>();
manager.register_pass<GLUFusion>();
}
{
int64_t axis = -1;
int64_t split_lenghts = 3;
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f16, ov::PartialShape{-1, -1, 6});
auto swiglu = std::make_shared<ov::op::internal::SwiGLU>(input,
axis,
split_lenghts,
ov::op::internal::SwiGLU::GluType::Swish,
0,
ov::element::f16);
auto swiglu = std::make_shared<ov::op::internal::GLU>(input,
axis,
split_lenghts,
ov::op::internal::GLU::GluType::Swish,
0,
ov::element::f16);

model_ref = std::make_shared<ov::Model>(ov::NodeVector{swiglu}, ov::ParameterVector{input});
}
}

TEST_F(TransformationTestsF, SwiGLUFusionTest3ReverseOrder) {
TEST_F(TransformationTestsF, GLUFusionTest3ReverseOrder) {
{
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f16, ov::PartialShape{-1, -1, 6});
auto axis_const = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {-1});
Expand All @@ -102,24 +102,24 @@ TEST_F(TransformationTestsF, SwiGLUFusionTest3ReverseOrder) {
auto mul = std::make_shared<ov::op::v1::Multiply>(variadic_split->output(1), swish);

model = std::make_shared<ov::Model>(ov::NodeVector{mul}, ov::ParameterVector{input});
manager.register_pass<SwiGLUFusion>();
manager.register_pass<GLUFusion>();
}
{
int64_t axis = -1;
int64_t split_lenghts = 3;
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f16, ov::PartialShape{-1, -1, 6});
auto swiglu = std::make_shared<ov::op::internal::SwiGLU>(input,
axis,
split_lenghts,
ov::op::internal::SwiGLU::GluType::Swish,
0,
ov::element::f16);
auto swiglu = std::make_shared<ov::op::internal::GLU>(input,
axis,
split_lenghts,
ov::op::internal::GLU::GluType::Swish,
0,
ov::element::f16);

model_ref = std::make_shared<ov::Model>(ov::NodeVector{swiglu}, ov::ParameterVector{input});
}
}

TEST_F(TransformationTestsF, SwiGLUFusionTest4) {
TEST_F(TransformationTestsF, GLUFusionTest4) {
{
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f16, ov::PartialShape{-1, -1, 6});
auto axis_const = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {-1});
Expand All @@ -129,7 +129,7 @@ TEST_F(TransformationTestsF, SwiGLUFusionTest4) {
auto mul = std::make_shared<ov::op::v1::Multiply>(swish, variadic_split->output(0));

model = std::make_shared<ov::Model>(ov::NodeVector{mul}, ov::ParameterVector{input});
manager.register_pass<SwiGLUFusion>();
manager.register_pass<GLUFusion>();
}
}

Expand All @@ -143,18 +143,18 @@ TEST_F(TransformationTestsF, GeGLUFusionTest1) {
auto mul = std::make_shared<ov::op::v1::Multiply>(variadic_split->output(0), gelu);

model = std::make_shared<ov::Model>(ov::NodeVector{mul}, ov::ParameterVector{input});
manager.register_pass<SwiGLUFusion>();
manager.register_pass<GLUFusion>();
}
{
int64_t axis = -1;
int64_t split_lenghts = 3;
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f16, ov::PartialShape{2, 1, 6});
auto swiglu = std::make_shared<ov::op::internal::SwiGLU>(input,
axis,
split_lenghts,
ov::op::internal::SwiGLU::GluType::Gelu,
1,
ov::element::f16);
auto swiglu = std::make_shared<ov::op::internal::GLU>(input,
axis,
split_lenghts,
ov::op::internal::GLU::GluType::Gelu,
1,
ov::element::f16);

model_ref = std::make_shared<ov::Model>(ov::NodeVector{swiglu}, ov::ParameterVector{input});
}
Expand Down
Loading

0 comments on commit f6e0ba0

Please sign in to comment.