[NNPA] Not using either in tablegen (onnx#2607)
* Not using either

Signed-off-by: Tung D. Le <[email protected]>

* add lit tests

Signed-off-by: Tung D. Le <[email protected]>

* Undo changes in RunONNXModel.py

Signed-off-by: Tung D. Le <[email protected]>

---------

Signed-off-by: Tung D. Le <[email protected]>
tungld authored Nov 8, 2023
1 parent 214d2d6 commit 70358dd
Show file tree
Hide file tree
Showing 2 changed files with 40 additions and 4 deletions.
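
Editorial context, not part of the commit: in MLIR's declarative rewrite rules (DRR), the `either` directive lets a single pattern match an op's two operands in both orders. Both generated orderings share one result expression, which suits commutative ops; `onnx.Div` is not commutative, so the two operand orders need different rewrites, and this commit replaces the `either`-based pattern with two explicit ones. A minimal sketch of the two styles, where FooOp, BarOp, QuxOp, and IsScalar are placeholders, not ops or constraints from this repository:

// Sketch only: FooOp, BarOp, QuxOp, and IsScalar are hypothetical.

// With `either`, one rule matches FooOp's operands in both orders,
// but both expansions share the same result expression:
def rewriteFooEither : Pat<
  (FooOp (either $scalar, $tensor)),
  (BarOp $scalar, $tensor),
  [(IsScalar $scalar)]
>;

// Without `either` (the approach this commit takes), each operand
// order gets its own rule, so each order can build its own result:
def rewriteFooScalarOnRight : Pat<
  (FooOp $tensor, $scalar),
  (BarOp $tensor, $scalar),
  [(IsScalar $scalar)]
>;
def rewriteFooScalarOnLeft : Pat<
  (FooOp $scalar, $tensor),
  (QuxOp $scalar, $tensor),
  [(IsScalar $scalar)]
>;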
18 changes: 16 additions & 2 deletions src/Accelerators/NNPA/Conversion/ONNXToZHigh/ONNXToZHigh.td
@@ -217,8 +217,8 @@ def replaceONNXDivPattern : Pat<
  (addBenefit 0)
>;

-def replaceONNXDivBroadcastPattern : Pat<
-  (ONNXDivOp (either $x, $y)),
+def replaceONNXDivBroadcastPattern1 : Pat<
+  (ONNXDivOp $x, $y),
  (ZHighUnstickOp
    (ZHighDivOp
      (ZHighStickOp:$s_x $x, (NoneLayoutAttr)),
@@ -231,6 +231,20 @@ def replaceONNXDivBroadcastPattern : Pat<
  (addBenefit 1)
>;

+def replaceONNXDivBroadcastPattern2 : Pat<
+  (ONNXDivOp $x, $y),
+  (ZHighUnstickOp
+    (ZHighDivOp
+      (ZHighStickifiedConstantOfShapeOp
+         (GetDynShape $y),
+         (GetScalarF32AttrFromConstant $x),
+         (NoneLayoutAttr)),
+      (ZHighStickOp:$s_y $y, (NoneLayoutAttr)),
+      (returnType $s_y))),
+  [(IsF32ScalarConstantTensor $x)], [],
+  (addBenefit 1)
+>;

//===----------------------------------------------------------------------===//
// ONNXLogOp %X = ZHighUnstickOp (ZHighLogOp (ZHighStickOp %X))
//===----------------------------------------------------------------------===//
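An editorial summary, inferred from the diff above and the tests below: replaceONNXDivBroadcastPattern1 keeps the original rewrite for the case where the f32 scalar constant is the denominator, while the new replaceONNXDivBroadcastPattern2 covers the scalar-numerator case by materializing a stickified constant with the runtime shape of the denominator (via GetDynShape and ZHighStickifiedConstantOfShapeOp). At the ONNX level, the two patterns distinguish these operand orders; a sketch, not copied from the tests, assuming %t : tensor<?x10xf32> and a scalar constant %c : tensor<f32> are defined:

// Pattern 1: tensor / scalar-constant (pre-existing behavior).
%a = "onnx.Div"(%t, %c) : (tensor<?x10xf32>, tensor<f32>) -> tensor<?x10xf32>
// Pattern 2: scalar-constant / tensor (newly handled by this commit).
%b = "onnx.Div"(%c, %t) : (tensor<f32>, tensor<?x10xf32>) -> tensor<?x10xf32>
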
26 changes: 24 additions & 2 deletions test/mlir/accelerators/nnpa/conversion/onnx-to-zhigh/div.mlir
@@ -33,12 +33,12 @@ func.func @test_div_3ds(%arg0 : tensor<10x10x10xf32>, %arg1 : tensor<10x10x10xf3
// -----

// COM: Division by a scalar in case of dynamic dimensions.
-func.func @test_div_unknown_scalar(%arg0 : tensor<?x10xf32>) -> tensor<*xf32> {
+func.func @test_div_unknown_scalar1(%arg0 : tensor<?x10xf32>) -> tensor<*xf32> {
  %0 = onnx.Constant dense<8.000000e+00> : tensor<f32>
  %1 = "onnx.Div"(%arg0, %0) : (tensor<?x10xf32>, tensor<f32>) -> tensor<*xf32>
  "func.return"(%1) : (tensor<*xf32>) -> ()

-// CHECK-LABEL: func.func @test_div_unknown_scalar
+// CHECK-LABEL: func.func @test_div_unknown_scalar1
// CHECK-SAME: ([[PARAM_0_:%.+]]: tensor<?x10xf32>) -> tensor<?x10xf32> {
// CHECK-DAG: [[VAR_0_:%.+]] = onnx.Constant dense<8.000000e+00> : tensor<f32>
// CHECK-DAG: [[VAR_1_:%.+]] = "zhigh.Stick"([[PARAM_0_]]) {layout = "2D"} : (tensor<?x10xf32>) -> tensor<?x10xf32, #zhigh.layout<{dataLayout = "2D"}>>
@@ -54,6 +54,28 @@ func.func @test_div_unknown_scalar(%arg0 : tensor<?x10xf32>) -> tensor<*xf32> {

// -----

+// COM: Division of a scalar by a tensor in case of dynamic dimensions.
+func.func @test_div_unknown_scalar2(%arg0 : tensor<?x10xf32>) -> tensor<*xf32> {
+  %0 = onnx.Constant dense<8.000000e+00> : tensor<f32>
+  %1 = "onnx.Div"(%0, %arg0) : (tensor<f32>, tensor<?x10xf32>) -> tensor<*xf32>
+  "func.return"(%1) : (tensor<*xf32>) -> ()
+
+// CHECK-LABEL: func.func @test_div_unknown_scalar2
+// CHECK-SAME: ([[PARAM_0_:%.+]]: tensor<?x10xf32>) -> tensor<?x10xf32> {
+// CHECK-DAG: [[VAR_0_:%.+]] = onnx.Constant dense<8.000000e+00> : tensor<f32>
+// CHECK-DAG: [[VAR_1_:%.+]] = "onnx.Dim"([[PARAM_0_]]) {axis = 0 : si64} : (tensor<?x10xf32>) -> tensor<1xi64>
+// CHECK-DAG: [[VAR_2_:%.+]] = "onnx.Dim"([[PARAM_0_]]) {axis = 1 : si64} : (tensor<?x10xf32>) -> tensor<1xi64>
+// CHECK: [[VAR_3_:%.+]] = "onnx.Concat"([[VAR_1_]], [[VAR_2_]]) {axis = 0 : si64} : (tensor<1xi64>, tensor<1xi64>) -> tensor<2xi64>
+// CHECK-DAG: [[VAR_4_:%.+]] = "zhigh.StickifiedConstantOfShape"([[VAR_3_]]) {layout = "2D", value = 8.000000e+00 : f32} : (tensor<2xi64>) -> tensor<?x?xf32, #zhigh.layout<{dataLayout = "2D"}>>
+// CHECK-DAG: [[VAR_5_:%.+]] = "zhigh.Stick"([[PARAM_0_]]) {layout = "2D"} : (tensor<?x10xf32>) -> tensor<?x10xf32, #zhigh.layout<{dataLayout = "2D"}>>
+// CHECK: [[VAR_6_:%.+]] = "zhigh.Div"([[VAR_4_]], [[VAR_5_]]) : (tensor<?x?xf32, #zhigh.layout<{dataLayout = "2D"}>>, tensor<?x10xf32, #zhigh.layout<{dataLayout = "2D"}>>) -> tensor<?x10xf32, #zhigh.layout<{dataLayout = "2D"}>>
+// CHECK: [[VAR_7_:%.+]] = "zhigh.Unstick"([[VAR_6_]]) : (tensor<?x10xf32, #zhigh.layout<{dataLayout = "2D"}>>) -> tensor<?x10xf32>
+// CHECK: return [[VAR_7_]] : tensor<?x10xf32>
+// CHECK: }
+}

// -----

// COM: Do not lower broadcasting onnx.Div to zHigh.
func.func @test_div_not_lowered_diff_shape(%arg0 : tensor<10x10xf32>, %arg1 : tensor<10xf32>) -> tensor<*xf32> {
%0 = "onnx.Div"(%arg0, %arg1) : (tensor<10x10xf32>, tensor<10xf32>) -> tensor<*xf32>
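For readers wanting to reproduce the checks (an editorial note): lit tests such as div.mlir are driven by a RUN line at the top of the file and verified with FileCheck, and the `// -----` separators require split-input-file mode. The flags below are an assumption, not quoted from this commit:

// Assumed RUN line; the real one in div.mlir may use different flags.
// RUN: onnx-mlir-opt --maccel=NNPA --convert-onnx-to-zhigh --split-input-file %s | FileCheck %s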
