Reduce warnings (openvinotoolkit#133)
Mostly marking file-local helper functions static, plus narrowing one unnecessarily wide lambda capture.
rengolin authored Jul 8, 2024
1 parent 59739aa commit b16f120
Showing 1 changed file with 30 additions and 30 deletions.
src/common/transformations/src/transformations/mlir/convert.cpp (60 changes: 30 additions & 30 deletions)
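A minimal sketch of the pattern behind most of the hunks below (made-up names, not code from this commit): a function defined in a .cpp file with external linkage and no prior declaration in any header can trip warnings such as GCC's -Wmissing-declarations or Clang's -Wmissing-prototypes; giving it internal linkage with static documents the file-local intent and silences the warning.

// helpers.cpp -- hypothetical translation unit, for illustration only.

// Before: external linkage, no declaration in any header, so
// -Wmissing-declarations (GCC) / -Wmissing-prototypes (Clang) flag it:
//
//   int scale_by_two(int x) { return 2 * x; }

// After: internal linkage says "deliberately file-local" and the warning
// goes away; an anonymous namespace would have the same effect.
static int scale_by_two(int x) {
    return 2 * x;
}

// A genuinely public function keeps external linkage and picks up its
// declaration from a header instead.
int scale_by_four(int x);  // normally provided by helpers.hpp
int scale_by_four(int x) {
    return scale_by_two(scale_by_two(x));
}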
@@ -121,7 +121,7 @@ static void prepareMLIRKernelWithoutWrapper(mlir::OwningOpRef<mlir::ModuleOp>& m
}
}

-std::unique_ptr<llvm::Module> lowerToLLVMIR(Operation* module, llvm::LLVMContext& llvmContext) {
+static std::unique_ptr<llvm::Module> lowerToLLVMIR(Operation* module, llvm::LLVMContext& llvmContext) {
// Default lowering for mlir-cpu-runner
auto llvmModule = translateModuleToLLVMIR(module, llvmContext);
assert(llvmModule);
@@ -329,7 +329,7 @@ class OPENVINO_API MLIROp : public ov::op::Op {
}
};

-mlir::Location createLayerLocation(mlir::MLIRContext* ctx, const std::string& layerName, const std::string& layerType) {
+static mlir::Location createLayerLocation(mlir::MLIRContext* ctx, const std::string& layerName, const std::string& layerType) {
const auto layerNameAttr = mlir::StringAttr::get(ctx, layerName);
const auto nameLoc = mlir::NameLoc::get(layerNameAttr);

@@ -341,7 +341,7 @@ mlir::Location createLayerLocation(mlir::MLIRContext* ctx, const std::string& la
return mlir::FusedLoc::get(ctx, {nameLoc}, metadata);
}

-SmallVector<int64_t> importShape(const ov::PartialShape& shape) {
+static SmallVector<int64_t> importShape(const ov::PartialShape& shape) {
SmallVector<int64_t> out(shape.rank().get_length());
// TODO: Add support for dynamically ranked shapes
for (size_t i = 0; i < out.size(); ++i) {
@@ -351,76 +351,76 @@ SmallVector<int64_t> importShape(const ov::PartialShape& shape) {
return out;
}

-mlir::IntegerType getInt1Type(mlir::MLIRContext* ctx) {
+static mlir::IntegerType getInt1Type(mlir::MLIRContext* ctx) {
return mlir::IntegerType::get(ctx, 1);
}

-mlir::IntegerType getInt4Type(mlir::MLIRContext* ctx) {
+static mlir::IntegerType getInt4Type(mlir::MLIRContext* ctx) {
return mlir::IntegerType::get(ctx, 4);
}

-mlir::IntegerType getInt8Type(mlir::MLIRContext* ctx) {
+static mlir::IntegerType getInt8Type(mlir::MLIRContext* ctx) {
return mlir::IntegerType::get(ctx, 8);
}

-mlir::IntegerType getInt16Type(mlir::MLIRContext* ctx) {
+static mlir::IntegerType getInt16Type(mlir::MLIRContext* ctx) {
return mlir::IntegerType::get(ctx, 16);
}

-mlir::IntegerType getInt32Type(mlir::MLIRContext* ctx) {
+static mlir::IntegerType getInt32Type(mlir::MLIRContext* ctx) {
return mlir::IntegerType::get(ctx, 32);
}

-mlir::IntegerType getInt64Type(mlir::MLIRContext* ctx) {
+static mlir::IntegerType getInt64Type(mlir::MLIRContext* ctx) {
return mlir::IntegerType::get(ctx, 64);
}

-mlir::IntegerType getSInt4Type(mlir::MLIRContext* ctx) {
+static mlir::IntegerType getSInt4Type(mlir::MLIRContext* ctx) {
return mlir::IntegerType::get(ctx, 4, mlir::IntegerType::Signed);
}

-mlir::IntegerType getSInt8Type(mlir::MLIRContext* ctx) {
+static mlir::IntegerType getSInt8Type(mlir::MLIRContext* ctx) {
return mlir::IntegerType::get(ctx, 8, mlir::IntegerType::Signed);
}

-mlir::IntegerType getSInt16Type(mlir::MLIRContext* ctx) {
+static mlir::IntegerType getSInt16Type(mlir::MLIRContext* ctx) {
return mlir::IntegerType::get(ctx, 16, mlir::IntegerType::Signed);
}

-mlir::IntegerType getSInt32Type(mlir::MLIRContext* ctx) {
+static mlir::IntegerType getSInt32Type(mlir::MLIRContext* ctx) {
return mlir::IntegerType::get(ctx, 32, mlir::IntegerType::Signed);
}

-mlir::IntegerType getSInt64Type(mlir::MLIRContext* ctx) {
+static mlir::IntegerType getSInt64Type(mlir::MLIRContext* ctx) {
return mlir::IntegerType::get(ctx, 64, mlir::IntegerType::Signed);
}

-mlir::IntegerType getUInt4Type(mlir::MLIRContext* ctx) {
+static mlir::IntegerType getUInt4Type(mlir::MLIRContext* ctx) {
return mlir::IntegerType::get(ctx, 4, mlir::IntegerType::Unsigned);
}

-mlir::IntegerType getUInt8Type(mlir::MLIRContext* ctx) {
+static mlir::IntegerType getUInt8Type(mlir::MLIRContext* ctx) {
return mlir::IntegerType::get(ctx, 8, mlir::IntegerType::Unsigned);
}

-mlir::IntegerType getUInt16Type(mlir::MLIRContext* ctx) {
+static mlir::IntegerType getUInt16Type(mlir::MLIRContext* ctx) {
return mlir::IntegerType::get(ctx, 16, mlir::IntegerType::Unsigned);
}

-mlir::IntegerType getUInt32Type(mlir::MLIRContext* ctx) {
+static mlir::IntegerType getUInt32Type(mlir::MLIRContext* ctx) {
return mlir::IntegerType::get(ctx, 32, mlir::IntegerType::Unsigned);
}

-mlir::IntegerType getUInt64Type(mlir::MLIRContext* ctx) {
+static mlir::IntegerType getUInt64Type(mlir::MLIRContext* ctx) {
return mlir::IntegerType::get(ctx, 64, mlir::IntegerType::Unsigned);
}

-mlir::IntegerType getBool8Type(mlir::MLIRContext* ctx) {
+static mlir::IntegerType getBool8Type(mlir::MLIRContext* ctx) {
// Signless 8-bit integer use for BOOL, to distinguish it from U8
return mlir::IntegerType::get(ctx, 8, mlir::IntegerType::Signless);
}

-mlir::Type importPrecision(mlir::MLIRContext* ctx, const ov::element::Type& precision) {
+static mlir::Type importPrecision(mlir::MLIRContext* ctx, const ov::element::Type& precision) {
switch (precision) {
case ov::element::Type_t::f64:
return mlir::Float64Type::get(ctx);
@@ -457,13 +457,13 @@ mlir::Type importPrecision(mlir::MLIRContext* ctx, const ov::element::Type& prec
}
}

-mlir::RankedTensorType importTensor(mlir::MLIRContext* ctx,
+static mlir::RankedTensorType importTensor(mlir::MLIRContext* ctx,
const ov::PartialShape& shape,
const ov::element::Type& elemType) {
return mlir::RankedTensorType::get(ArrayRef(importShape(shape)), importPrecision(ctx, elemType));
}

-mlir::Location createLocation(mlir::MLIRContext* ctx, std::shared_ptr<ov::Node> node) {
+static mlir::Location createLocation(mlir::MLIRContext* ctx, std::shared_ptr<ov::Node> node) {
return createLayerLocation(ctx, node->get_friendly_name(), node->get_type_name());
}

@@ -488,14 +488,14 @@ struct hash<ov::Output<ov::Node>> final {
} // namespace std


-MemRefType convertTensorToMemRef(TensorType tensorType) {
+static MemRefType convertTensorToMemRef(TensorType tensorType) {
ArrayRef<int64_t> shape = tensorType.getShape();
Type elementType = tensorType.getElementType();
return MemRefType::get(shape, elementType);
}


-SmallVector<mlir::Type> tensorsToMemRefs(SmallVector<mlir::Type> tensors) {
+static SmallVector<mlir::Type> tensorsToMemRefs(SmallVector<mlir::Type> tensors) {
SmallVector<mlir::Type> out;
out.reserve(tensors.size());
for (const auto& tensor : tensors) {
@@ -505,7 +505,7 @@ SmallVector<mlir::Type> tensorsToMemRefs(SmallVector<mlir::Type> tensors) {
}


-SmallVector<mlir::Type> get_types_for_values(mlir::MLIRContext* context, const ov::OutputVector& values) {
+static SmallVector<mlir::Type> get_types_for_values(mlir::MLIRContext* context, const ov::OutputVector& values) {
SmallVector<mlir::Type> types;
types.reserve(values.size());
for (const auto& output : values) {
@@ -588,7 +588,7 @@ struct ConvertBinary {
const std::map<ov::DiscreteTypeInfo, ConversionContext::Convertor> ConversionContext::convertors = {
{ov::op::v1::Add::get_type_info_static(), Convertor(ConvertBinary<linalg::AddOp>())}};

-mlir::OwningOpRef<mlir::ModuleOp> ngraph_to_mlir(MLIRContext* context,
+static mlir::OwningOpRef<mlir::ModuleOp> ngraph_to_mlir(MLIRContext* context,
const ov::OutputVector& inputs,
const ov::NodeVector& nodes,
const ov::OutputVector& outputs) {
@@ -653,7 +653,7 @@ class AddLowering : public ov::pass::MatcherPass {
auto pattern = ov::pass::pattern::wrap_type<ov::op::v1::Add>(
{ov::pass::pattern::any_input(), ov::pass::pattern::any_input()});

-auto callback = [=, context](ov::pass::pattern::Matcher& m) {
+auto callback = [context](ov::pass::pattern::Matcher& m) {
std::cout << "[ INFO ] Matched AddLowering\n";
auto add = m.get_match_root();

@@ -682,15 +682,15 @@ class AddLowering : public ov::pass::MatcherPass {
};


-void injectMLIR(std::shared_ptr<ov::Model> model, MLIRContext* context) {
+static void injectMLIR(std::shared_ptr<ov::Model> model, MLIRContext* context) {
ov::pass::Manager manager;
manager.set_per_pass_validation(true);
manager.register_pass<AddLowering>(context);
manager.run_passes(model);
}


-MLIRContext* get_shared_mlir_context() {
+static MLIRContext* get_shared_mlir_context() {
// Gives MLIRContext instance shared for entire OV process and initialized once upon the initial request
// FIXME: Bind with OpenVINO lifetime in the sutable class instead of dirty tricking with static lifetime

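The one non-static change above narrows the matcher callback's capture list in AddLowering from a default by-copy capture to just the context pointer it uses. A minimal sketch of the same idea with stand-in types (not the OpenVINO/MLIR classes themselves):

#include <functional>
#include <iostream>

// Hypothetical stand-in for the context pointer the real callback captures.
struct Context { int id = 0; };

static std::function<bool(int)> make_callback(Context* context) {
    // Wide capture: "[=]" (or "[=, context]") copies whatever the body happens
    // to name and obscures the closure's real dependencies; in a member
    // function it would also capture "this".
    // auto callback = [=](int value) { return value > context->id; };

    // Narrow capture: only the pointer the body actually uses, so anything
    // else the body touches by mistake fails to compile instead of being
    // captured silently.
    auto callback = [context](int value) { return value > context->id; };
    return callback;
}

int main() {
    Context ctx{10};
    auto is_large = make_callback(&ctx);
    std::cout << std::boolalpha << is_large(42) << "\n";  // prints "true"
    return 0;
}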