Skip to content

Commit

Permalink
End to end implementation of logical ops. (#900)
Browse files Browse the repository at this point in the history
The following ops are added:
* logical_and
* logical_not
* logical_or
  • Loading branch information
mmanzoorTT authored Oct 16, 2024
1 parent 21955b4 commit 998c81a
Show file tree
Hide file tree
Showing 12 changed files with 163 additions and 1 deletion.
21 changes: 21 additions & 0 deletions include/ttmlir/Dialect/TTIR/IR/TTIROps.td
Original file line number Diff line number Diff line change
Expand Up @@ -217,6 +217,13 @@ def TTIR_NegOp: TTIR_ElementwiseUnaryOp<"neg"> {
}];
}

def TTIR_LogicalNotOp: TTIR_ElementwiseUnaryOp<"logical_not"> {
    let summary = "Eltwise logical not op.";
    let description = [{
        Elementwise logical NOT: for each element of the input tensor, the
        result is true where the input element is false (zero) and false
        otherwise. Lowered to ttnn.logical_not by the TTIR-to-TTNN
        conversion.
    }];
}

def TTIR_ReciprocalOp : TTIR_ElementwiseUnaryOp<"reciprocal"> {
let summary = "Eltwise reciprocal.";
let description = [{
Expand Down Expand Up @@ -303,6 +310,20 @@ def TTIR_LessThanOp : TTIR_ElementwiseBinaryOp<"lt"> {
}];
}

def TTIR_LogicalAndOp : TTIR_ElementwiseBinaryOp<"logical_and"> {
    let summary = "Eltwise logical and.";
    let description = [{
        Elementwise logical AND: for each element pair of the two input
        tensors, the result is true iff both elements are true (nonzero).
        Lowered to ttnn.logical_and by the TTIR-to-TTNN conversion.
    }];
}

def TTIR_LogicalOrOp : TTIR_ElementwiseBinaryOp<"logical_or"> {
    let summary = "Eltwise logical or.";
    let description = [{
        Elementwise logical OR: for each element pair of the two input
        tensors, the result is true iff at least one element is true
        (nonzero). Lowered to ttnn.logical_or by the TTIR-to-TTNN
        conversion.
    }];
}

def TTIR_MaximumOp : TTIR_ElementwiseBinaryOp<"maximum"> {
let summary = "Eltwise maximum OP.";
let description = [{
Expand Down
21 changes: 21 additions & 0 deletions include/ttmlir/Dialect/TTNN/IR/TTNNOps.td
Original file line number Diff line number Diff line change
Expand Up @@ -146,6 +146,13 @@ def TTNN_NegOp : TTNN_ElementwiseUnaryOp<"neg"> {
}];
}

def TTNN_LogicalNotOp: TTNN_ElementwiseUnaryOp<"logical_not"> {
    let summary = "Eltwise logical not op.";
    let description = [{
        Elementwise logical NOT: for each element of the input tensor, the
        result is true where the input element is false (zero) and false
        otherwise. Executed by the runtime via ::ttnn::logical_not.
    }];
}

def TTNN_ReciprocalOp : TTNN_ElementwiseUnaryOp<"reciprocal"> {
let summary = "Eltwise reciprocal.";
let description = [{
Expand Down Expand Up @@ -251,6 +258,20 @@ def TTNN_LessThanOp : TTNN_ElementwiseBinaryOp<"lt"> {
}];
}

def TTNN_LogicalAndOp : TTNN_ElementwiseBinaryOp<"logical_and"> {
    let summary = "Eltwise logical and.";
    let description = [{
        Elementwise logical AND: for each element pair of the two input
        tensors, the result is true iff both elements are true (nonzero).
        Executed by the runtime via ::ttnn::logical_and.
    }];
}

def TTNN_LogicalOrOp : TTNN_ElementwiseBinaryOp<"logical_or"> {
    let summary = "Eltwise logical or.";
    let description = [{
        Elementwise logical OR: for each element pair of the two input
        tensors, the result is true iff at least one element is true
        (nonzero). Executed by the runtime via ::ttnn::logical_or.
    }];
}

def TTNN_MaximumOp : TTNN_ElementwiseBinaryOp<"maximum"> {
let summary = "Eltwise maximum OP.";
let description = [{
Expand Down
5 changes: 4 additions & 1 deletion include/ttmlir/Target/TTNN/program.fbs
Original file line number Diff line number Diff line change
Expand Up @@ -67,9 +67,12 @@ enum EltwiseOpType: uint32 {
Typecast = 14,
Equal = 15,
NotEqual = 16,
GreaterThan = 19,
LessEqual = 17,
LessThan = 18,
GreaterThan = 19,
LogicalAnd = 20,
LogicalOr = 21,
LogicalNot = 22,
}

table EltwiseOp {
Expand Down
3 changes: 3 additions & 0 deletions lib/Conversion/TTIRToTTNN/TTIRToTTNN.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -729,6 +729,9 @@ void populateTTIRToTTNNPatterns(MLIRContext *ctx, RewritePatternSet &patterns,
ToLayoutOpConversionPattern,
ElementwiseOpConversionPattern<ttir::AbsOp, ttnn::AbsOp>,
ElementwiseOpConversionPattern<ttir::AddOp, ttnn::AddOp>,
ElementwiseOpConversionPattern<ttir::LogicalAndOp, ttnn::LogicalAndOp>,
ElementwiseOpConversionPattern<ttir::LogicalOrOp, ttnn::LogicalOrOp>,
ElementwiseOpConversionPattern<ttir::LogicalNotOp, ttnn::LogicalNotOp>,
ElementwiseOpConversionPattern<ttir::MultiplyOp, ttnn::MultiplyOp>,
ElementwiseOpConversionPattern<ttir::EqualOp, ttnn::EqualOp>,
ElementwiseOpConversionPattern<ttir::NotEqualOp, ttnn::NotEqualOp>,
Expand Down
3 changes: 3 additions & 0 deletions lib/Conversion/TTNNToEmitC/TTNNToEmitC.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -527,6 +527,7 @@ void populateTTNNToEmitCPatterns(mlir::MLIRContext *ctx,
// Eltwise unary ops
//
patterns.add<DefaultOpConversionPattern<ttnn::AbsOp>,
DefaultOpConversionPattern<ttnn::LogicalNotOp>,
DefaultOpConversionPattern<ttnn::NegOp>,
DefaultOpConversionPattern<ttnn::ReluOp>,
DefaultOpConversionPattern<ttnn::SqrtOp>,
Expand All @@ -539,6 +540,8 @@ void populateTTNNToEmitCPatterns(mlir::MLIRContext *ctx,
// Eltwise binary ops
//
patterns.add<DefaultOpConversionPattern<ttnn::AddOp>,
DefaultOpConversionPattern<ttnn::LogicalAndOp>,
DefaultOpConversionPattern<ttnn::LogicalOrOp>,
DefaultOpConversionPattern<ttnn::SubtractOp>,
MultiplyOpConversionPattern,
DefaultOpConversionPattern<ttnn::EqualOp>,
Expand Down
15 changes: 15 additions & 0 deletions lib/Target/TTNN/TTNNToFlatbuffer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -258,6 +258,12 @@ createEltwiseOp(FlatbufferObjectCache &cache, EltwiseOp op) {
type = ::tt::target::ttnn::EltwiseOpType::Abs;
} else if constexpr (std::is_same_v<EltwiseOp, AddOp>) {
type = ::tt::target::ttnn::EltwiseOpType::Add;
} else if constexpr (std::is_same_v<EltwiseOp, LogicalAndOp>) {
type = ::tt::target::ttnn::EltwiseOpType::LogicalAnd;
} else if constexpr (std::is_same_v<EltwiseOp, LogicalNotOp>) {
type = ::tt::target::ttnn::EltwiseOpType::LogicalNot;
} else if constexpr (std::is_same_v<EltwiseOp, LogicalOrOp>) {
type = ::tt::target::ttnn::EltwiseOpType::LogicalOr;
} else if constexpr (std::is_same_v<EltwiseOp, MultiplyOp>) {
type = ::tt::target::ttnn::EltwiseOpType::Multiply;
} else if constexpr (std::is_same_v<EltwiseOp, NegOp>) {
Expand Down Expand Up @@ -475,6 +481,15 @@ emitTTNNOperation(FlatbufferObjectCache &cache, Operation *op,
if (auto addOp = dyn_cast<AddOp>(op); addOp) {
return createOperation(cache, createEltwiseOp(cache, addOp), debugString);
}
if (auto andOp = dyn_cast<LogicalAndOp>(op); andOp) {
return createOperation(cache, createEltwiseOp(cache, andOp), debugString);
}
if (auto notOp = dyn_cast<LogicalNotOp>(op); notOp) {
return createOperation(cache, createEltwiseOp(cache, notOp), debugString);
}
if (auto orOp = dyn_cast<LogicalOrOp>(op); orOp) {
return createOperation(cache, createEltwiseOp(cache, orOp), debugString);
}
if (auto multiplyOp = dyn_cast<MultiplyOp>(op); multiplyOp) {
return createOperation(cache, createEltwiseOp(cache, multiplyOp),
debugString);
Expand Down
8 changes: 8 additions & 0 deletions runtime/lib/ttnn/operations/eltwise/binary.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -75,6 +75,14 @@ void run(const ::tt::target::ttnn::EltwiseOp *op, ProgramContext &context) {
runEltwiseBinaryOP(op, tensorPool, ::ttnn::add);
break;
}
case ::tt::target::ttnn::EltwiseOpType::LogicalAnd: {
runEltwiseBinaryOP(op, tensorPool, ::ttnn::logical_and);
break;
}
case ::tt::target::ttnn::EltwiseOpType::LogicalOr: {
runEltwiseBinaryOP(op, tensorPool, ::ttnn::logical_or);
break;
}
case ::tt::target::ttnn::EltwiseOpType::Multiply: {
runEltwiseBinaryOP(op, tensorPool, ::ttnn::multiply);
break;
Expand Down
4 changes: 4 additions & 0 deletions runtime/lib/ttnn/operations/eltwise/unary.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,10 @@ void run(const ::tt::target::ttnn::EltwiseOp *op, ProgramContext &context) {
runEltwiseUnaryOP(op, tensorPool, ::ttnn::abs);
break;
}
case ::tt::target::ttnn::EltwiseOpType::LogicalNot: {
runEltwiseUnaryOP(op, tensorPool, ::ttnn::logical_not);
break;
}
case ::tt::target::ttnn::EltwiseOpType::Neg: {
runEltwiseUnaryOP(op, tensorPool, ::ttnn::neg);
break;
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
// Lowering test: checks that ttir.logical_and converts to ttnn.logical_and
// (with a ttnn.empty allocated for the output) under the
// ttir-to-ttnn backend pipeline, preserving the 64x128xf32 operand and
// result types.

#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
module attributes {} {
func.func @logical_and(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
%0 = tensor.empty() : tensor<64x128xf32>
// CHECK: {{.*}} = "ttnn.empty"{{.*}}
%1 = "ttir.logical_and"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
// CHECK: %[[C:.*]] = "ttnn.logical_and"
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: tensor<64x128xf32,
return %1 : tensor<64x128xf32>
}
}
15 changes: 15 additions & 0 deletions test/ttmlir/Dialect/TTNN/eltwise/binary/logical_or/simple_or.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
// Lowering test: checks that ttir.logical_or converts to ttnn.logical_or
// (with a ttnn.empty allocated for the output) under the
// ttir-to-ttnn backend pipeline, preserving the 64x128xf32 operand and
// result types.

#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
module attributes {} {
func.func @logical_or(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
%0 = tensor.empty() : tensor<64x128xf32>
// CHECK: {{.*}} = "ttnn.empty"{{.*}}
%1 = "ttir.logical_or"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
// CHECK: %[[C:.*]] = "ttnn.logical_or"
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: tensor<64x128xf32,
return %1 : tensor<64x128xf32>
}
}
14 changes: 14 additions & 0 deletions test/ttmlir/Dialect/TTNN/eltwise/unary/logical_not/simple_not.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
// Lowering test: checks that the unary ttir.logical_not converts to
// ttnn.logical_not (with a ttnn.empty allocated for the output) under the
// ttir-to-ttnn backend pipeline, preserving the 64x128xf32 operand and
// result types.

#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
module attributes {} {
func.func @logical_not(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
%0 = tensor.empty() : tensor<64x128xf32>
// CHECK: {{.*}} = "ttnn.empty"{{.*}}
%1 = "ttir.logical_not"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
// CHECK: %[[C:.*]] = "ttnn.logical_not"
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: tensor<64x128xf32,
return %1 : tensor<64x128xf32>
}
}
40 changes: 40 additions & 0 deletions test/ttmlir/Silicon/TTNN/simple_logical.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
// RUN: FileCheck %s --input-file=%t.mlir
// RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
// Silicon test: lowers all three logical ops (and/not/or) through the
// TTIR-to-TTNN backend pipeline with a real system descriptor, verifies
// the resulting TTNN ops with FileCheck, and serializes the module to a
// flatbuffer for device execution.

#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
#any_device_tile = #tt.operand_constraint<dram|l1|tile|any_device_tile>

module attributes {} {
// ttir.logical_and -> ttnn.logical_and (binary).
func.func @logical_and(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
%0 = tensor.empty() : tensor<64x128xf32>
// CHECK: {{.*}} = "ttnn.empty"{{.*}}
%1 = "ttir.logical_and"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
// CHECK: %[[C:.*]] = "ttnn.logical_and"
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: tensor<64x128xf32,
return %1 : tensor<64x128xf32>
}

// ttir.logical_not -> ttnn.logical_not (unary).
func.func @logical_not(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
%0 = tensor.empty() : tensor<64x128xf32>
// CHECK: {{.*}} = "ttnn.empty"{{.*}}
%1 = "ttir.logical_not"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
// CHECK: %[[C:.*]] = "ttnn.logical_not"
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: tensor<64x128xf32,
return %1 : tensor<64x128xf32>
}

// ttir.logical_or -> ttnn.logical_or (binary).
func.func @logical_or(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
%0 = tensor.empty() : tensor<64x128xf32>
// CHECK: {{.*}} = "ttnn.empty"{{.*}}
%1 = "ttir.logical_or"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
// CHECK: %[[C:.*]] = "ttnn.logical_or"
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: tensor<64x128xf32,
// CHECK-SAME: tensor<64x128xf32,
return %1 : tensor<64x128xf32>
}
}

0 comments on commit 998c81a

Please sign in to comment.