Add floor, shape_of and squeeze patterns #175

Open
wants to merge 2 commits into base: mlir
@@ -74,6 +74,9 @@
#include "mlir_op.hpp"
#include "op/matmul.hpp"
#include "op/relu.hpp"
#include "op/floor.hpp"
#include "op/shape_of.hpp"
#include "op/squeeze.hpp"
#include "op/binary_eltwise.hpp"
#include "openvino/core/dimension.hpp"
#include "openvino/core/rt_info.hpp"
@@ -311,6 +314,9 @@ void injectMLIR(std::shared_ptr<ov::Model> model,
manager.register_pass<BinaryEltwisePattern<v1::Multiply, linalg::MulOp>>();
manager.register_pass<BinaryEltwisePattern<v1::Divide, linalg::DivOp>>();
manager.register_pass<ReluPattern>();
manager.register_pass<FloorPattern>();
manager.register_pass<ShapeOfPattern>();
manager.register_pass<SqueezePattern>();
manager.register_pass<MatMulPattern>();
manager.register_pass<Partitioner>(context, mode, loweringContext);
manager.run_passes(model);
@@ -322,6 +328,7 @@ void loadDialects(MLIRContext* context) {
context->loadDialect<mlir::func::FuncDialect>();
context->loadDialect<mlir::linalg::LinalgDialect>();
context->loadDialect<mlir::bufferization::BufferizationDialect>();
context->loadDialect<mlir::shape::ShapeDialect>();
}

MLIRContext* get_shared_mlir_context(MlirMode mode) {
@@ -78,6 +78,9 @@ Location createLayerLocation(MLIRContext* ctx, const std::string& layerName, con
}

SmallVector<int64_t> importShape(const ov::PartialShape& shape) {
if (shape.rank().is_dynamic()) {
OPENVINO_THROW("Dynamic ranks are not supported.");
}
SmallVector<int64_t> out(shape.rank().get_length());
// TODO: Add support for dynamically ranked shapes
for (size_t i = 0; i < out.size(); ++i) {
@@ -98,27 +101,21 @@ Type importPrecision(MLIRContext* ctx, const ov::element::Type& precision) {
 case ov::element::Type_t::bf16:
     return BFloat16Type::get(ctx);
 case ov::element::Type_t::i64:
-    return getSInt64Type(ctx);
 case ov::element::Type_t::u64:
-    return getUInt64Type(ctx);
+    return IntegerType::get(ctx, 64);
 case ov::element::Type_t::i32:
-    return getSInt32Type(ctx);
 case ov::element::Type_t::u32:
-    return getUInt32Type(ctx);
+    return IntegerType::get(ctx, 32);
 case ov::element::Type_t::i16:
-    return getSInt16Type(ctx);
 case ov::element::Type_t::u16:
-    return getUInt16Type(ctx);
+    return IntegerType::get(ctx, 16);
 case ov::element::Type_t::i8:
-    return getSInt8Type(ctx);
 case ov::element::Type_t::u8:
-    return getUInt8Type(ctx);
+case ov::element::Type_t::boolean:
+    return IntegerType::get(ctx, 8);
 case ov::element::Type_t::i4:
-    return getSInt4Type(ctx);
 case ov::element::Type_t::u4:
-    return getUInt4Type(ctx);
-case ov::element::Type_t::boolean:
-    return getBool8Type(ctx);
+    return IntegerType::get(ctx, 4);
 default:
     OPENVINO_THROW("Unsupported element_type: ", precision);
 }
@@ -101,6 +101,9 @@ void prepareMLIRKernelWithoutWrapper(mlir::OwningOpRef<mlir::ModuleOp>& module,
pm.addNestedPass<func::FuncOp>(createCanonicalizerPass());
pm.addNestedPass<func::FuncOp>(createCSEPass());

// Rewrite shape ops in tensor/arith/etc
pm.addPass(createConvertShapeToStandardPass());

Comment on lines +104 to +106

will we need the same in the GC pipeline?

Reply from the author (Collaborator):
Not if we only feed it with statically shaped batches. In any case, adding it should be a single-line change, so we can just add it.
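For reference, a minimal sketch of that single-line addition, assuming the GC pipeline builds its mlir::PassManager the same way this one does (the `pm` variable and the exact insertion point are assumptions, not part of this PR):

```cpp
// Hypothetical addition for the GC pipeline (assumption, not in this PR):
// only needed if that pipeline ever has to lower shape.* ops produced for
// dynamically shaped inputs.
pm.addPass(mlir::createConvertShapeToStandardPass());
```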

// Remove empty tensors to avoid converting them into temporary buffers.
pm.addPass(bufferization::createEmptyTensorEliminationPass());

46 changes: 46 additions & 0 deletions src/common/transformations/src/transformations/mlir/op/floor.cpp
@@ -0,0 +1,46 @@
// Copyright (C) 2018-2024 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"

#include <openvino/op/floor.hpp>
#include "openvino/pass/pattern/op/wrap_type.hpp"

#include "floor.hpp"
#include "../convert_common.hpp"


namespace {

using namespace ov::mlir;

struct ConvertFloor {
void operator()(ConversionContext& context, NodePtr node) {
auto loc = createLocation(context.context, node);
auto& builder = context.builder();
const auto input = context.getInputs(node)[0];
const auto ov_output_element_type = node->get_output_element_type(0);
const auto ov_output_shape = node->get_output_partial_shape(0);
auto outType = importTensor(context.context, ov_output_shape, ov_output_element_type);
auto dynamic_dimensions = context.get_dynamic_dimension_values(ov_output_shape);
auto empty = builder.create<tensor::EmptyOp>(loc, outType, dynamic_dimensions);
auto floor = builder.create<linalg::FloorOp>(loc, mlir::ValueRange{input}, mlir::ValueRange{empty});
context.addOutputs(node, floor);
}
};

} // namespace

namespace ov {
namespace mlir {

using namespace ov::pass::pattern;
using namespace ov::op;

FloorPattern::FloorPattern()
: MarkPattern(wrap_type<v0::Floor>({any_input()}), ConvertFloor()) {}

} // namespace mlir
} // namespace ov
23 changes: 23 additions & 0 deletions src/common/transformations/src/transformations/mlir/op/floor.hpp
@@ -0,0 +1,23 @@
// Copyright (C) 2018-2024 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include "mlir/IR/Builders.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Value.h"

#include "../conversion_context.hpp"

namespace ov {
namespace mlir {

class FloorPattern : public MarkPattern {
public:
OPENVINO_RTTI("FloorPattern", "0");
FloorPattern();
};

} // namespace mlir
} // namespace ov
46 changes: 46 additions & 0 deletions src/common/transformations/src/transformations/mlir/op/shape_of.cpp
@@ -0,0 +1,46 @@
// Copyright (C) 2018-2024 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "mlir/Dialect/Shape/IR/Shape.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Dialect/Arith/IR/Arith.h"

#include <openvino/op/shape_of.hpp>
#include "openvino/pass/pattern/op/wrap_type.hpp"

#include "shape_of.hpp"
#include "../convert_common.hpp"


namespace {

using namespace ov::mlir;

struct ConvertShapeOf {
void operator()(ConversionContext& context, NodePtr node) {
auto loc = createLocation(context.context, node);
auto& builder = context.builder();
const auto ov_output_element_type = node->get_output_element_type(0);
const auto ov_output_shape = node->get_output_partial_shape(0);
const auto input = context.getInputs(node)[0];
auto shapeOf = builder.create<shape::ShapeOfOp>(loc, mlir::ValueRange{input});
auto casted_type = RankedTensorType::get(ArrayRef(importShape(ov_output_shape)), importPrecision(context.context, ov_output_element_type));
auto cast = builder.create<arith::IndexCastOp>(loc, casted_type, mlir::ValueRange{shapeOf});
context.addOutputs(node, cast);
}
};

} // namespace

namespace ov {
namespace mlir {

using namespace ov::pass::pattern;
using namespace ov::op;

ShapeOfPattern::ShapeOfPattern() : MarkPattern(wrap_type<v3::ShapeOf>({any_input()}), ConvertShapeOf()) {}

} // namespace mlir
} // namespace ov
23 changes: 23 additions & 0 deletions src/common/transformations/src/transformations/mlir/op/shape_of.hpp
@@ -0,0 +1,23 @@
// Copyright (C) 2018-2024 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include "mlir/IR/Builders.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Value.h"

#include "../conversion_context.hpp"

namespace ov {
namespace mlir {

class ShapeOfPattern : public MarkPattern {
public:
OPENVINO_RTTI("ShapeOfPattern", "0");
ShapeOfPattern();
};

} // namespace mlir
} // namespace ov
56 changes: 56 additions & 0 deletions src/common/transformations/src/transformations/mlir/op/squeeze.cpp
@@ -0,0 +1,56 @@
// Copyright (C) 2018-2024 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"

#include <openvino/op/squeeze.hpp>
#include "openvino/pass/pattern/op/wrap_type.hpp"

#include "squeeze.hpp"
#include "../convert_common.hpp"


namespace {

using namespace ov::mlir;

struct ConvertSqueeze {
void operator()(ConversionContext& context, NodePtr node) {
auto loc = createLocation(context.context, node);
auto& builder = context.builder();
const auto input = context.getInputs(node)[0];

auto src_partial_shape = node->get_input_partial_shape(0);
auto src_rank = src_partial_shape.rank().get_length();
SmallVector<ReassociationIndices> collapse_groups;
ReassociationIndices group = ReassociationIndices();
for (size_t src_i = 0; src_i < src_rank; src_i++) {
auto src_d = src_partial_shape[src_i];
group.push_back(src_i);
if (src_d.is_static() && src_d.get_length() == 1) {
// continue collecting
} else {
collapse_groups.emplace_back(group);
group = ReassociationIndices();
}
}

auto reshape = builder.create<tensor::CollapseShapeOp>(loc, input, collapse_groups);
context.addOutputs(node, reshape);
}
};

} // namespace

namespace ov {
namespace mlir {

using namespace ov::pass::pattern;
using namespace ov::op;

SqueezePattern::SqueezePattern() : MarkPattern(wrap_type<v0::Squeeze>({any_input()}), ConvertSqueeze()) {}

} // namespace mlir
} // namespace ov
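One edge case worth noting in ConvertSqueeze above: when the trailing dimensions are static 1s, the loop never flushes the final `group`, so the reassociation passed to tensor::CollapseShapeOp does not cover every source dimension. Below is a hedged sketch of a possible post-loop fix-up, reusing the names from the snippet above (an illustration of one way to handle it, not part of this PR; squeezing an all-unit shape down to a scalar would still need separate handling with an empty reassociation):

```cpp
// Hypothetical post-loop fix-up (assumption, not in this PR): fold a trailing
// run of static size-1 dims into the last emitted reassociation group so that
// tensor::CollapseShapeOp sees every source dimension exactly once.
if (!group.empty() && !collapse_groups.empty()) {
    collapse_groups.back().append(group.begin(), group.end());
}
```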
23 changes: 23 additions & 0 deletions src/common/transformations/src/transformations/mlir/op/squeeze.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
// Copyright (C) 2018-2024 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include "mlir/IR/Builders.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Value.h"

#include "../conversion_context.hpp"

namespace ov {
namespace mlir {

class SqueezePattern : public MarkPattern {
public:
OPENVINO_RTTI("SqueezePattern", "0");
SqueezePattern();
};

} // namespace mlir
} // namespace ov