diff --git a/include/ttmlir/Dialect/TTIR/IR/TTIROps.td b/include/ttmlir/Dialect/TTIR/IR/TTIROps.td
index 69510f93a4..9d9b13a834 100644
--- a/include/ttmlir/Dialect/TTIR/IR/TTIROps.td
+++ b/include/ttmlir/Dialect/TTIR/IR/TTIROps.td
@@ -185,8 +185,7 @@ class TTIR_ElementwiseOp<string mnemonic, list<Trait> traits = []> :
   }];

   let arguments = (ins Variadic<AnyRankedTensor>:$inputs,
-                       Variadic<AnyRankedTensor>:$outputs,
-                       TT_OperandConstraintArrayAttr:$operand_constraints);
+                       Variadic<AnyRankedTensor>:$outputs);
   let results = (outs Variadic<AnyRankedTensor>:$results);
 }

@@ -199,9 +198,9 @@ class TTIR_ElementwiseTernaryOp<string mnemonic, list<Trait> traits = []> :

   let builders =
   [
-    OpBuilder<(ins "Value": $first, "Value": $second, "Value": $third, "Value": $out, "ArrayAttr": $operand_constraints),
+    OpBuilder<(ins "Value": $first, "Value": $second, "Value": $third, "Value": $out),
     [{
-      build($_builder, $_state, {out.getType()}, {first, second, third}, out, operand_constraints);
+      build($_builder, $_state, {out.getType()}, {first, second, third}, out);
     }]>
   ];
 }
@@ -222,9 +221,9 @@ class TTIR_ElementwiseUnaryOp<string mnemonic, list<Trait> traits = []> :

   let builders =
   [
-    OpBuilder<(ins "Value": $in, "Value": $out, "ArrayAttr": $operand_constraints),
+    OpBuilder<(ins "Value": $in, "Value": $out),
     [{
-      build($_builder, $_state, {out.getType()}, in, out, operand_constraints);
+      build($_builder, $_state, {out.getType()}, in, out);
     }]>
   ];
 }
@@ -406,14 +405,13 @@ class TTIR_ElementwiseUnaryWithFloatParameterOp<string mnemonic, list<Trait> tra
   let arguments = (ins Variadic<AnyRankedTensor>:$inputs,
                        Variadic<AnyRankedTensor>:$outputs,
-                       F32Attr:$parameter,
-                       TT_OperandConstraintArrayAttr:$operand_constraints);
+                       F32Attr:$parameter);

   let builders =
   [
-    OpBuilder<(ins "Value": $in, "Value": $out, "FloatAttr":$parameter, "ArrayAttr": $operand_constraints),
+    OpBuilder<(ins "Value": $in, "Value": $out, "FloatAttr":$parameter),
     [{
-      build($_builder, $_state, {out.getType()}, {in}, {out}, parameter, operand_constraints);
+      build($_builder, $_state, {out.getType()}, {in}, {out}, parameter);
     }]>
   ];
 }
@@ -450,9 +448,9 @@ class TTIR_ElementwiseBinaryOp<string mnemonic, list<Trait> traits = []> :

   let builders =
   [
-    OpBuilder<(ins "Value": $lhs, "Value": $rhs, "Value": $out, "ArrayAttr": $operand_constraints),
+    OpBuilder<(ins "Value": $lhs, "Value": $rhs, "Value": $out),
     [{
-      build($_builder, $_state, {out.getType()}, {lhs, rhs}, out, operand_constraints);
+      build($_builder, $_state, {out.getType()}, {lhs, rhs}, out);
     }]>
   ];
 }
@@ -566,8 +564,7 @@ class TTIR_ReductionOp<string mnemonic, list<Trait> traits = []> :
   let arguments = (ins AnyRankedTensor:$input,
                        AnyRankedTensor:$output,
                        BoolAttr:$keep_dim,
-                       OptionalAttr<I32ArrayAttr>:$dim_arg,
-                       TT_OperandConstraintArrayAttr:$operand_constraints);
+                       OptionalAttr<I32ArrayAttr>:$dim_arg);

   let results = (outs AnyRankedTensor:$result);

@@ -634,8 +631,7 @@ def TTIR_EmbeddingOp : TTIR_DPSOp<"embedding"> {
   let arguments = (ins AnyRankedTensor:$input,
                        AnyRankedTensor:$weight,
-                       AnyRankedTensor:$output,
-                       TT_OperandConstraintArrayAttr:$operand_constraints);
+                       AnyRankedTensor:$output);

   let results = (outs AnyRankedTensor:$result);

@@ -654,8 +650,7 @@ def TTIR_SoftmaxOp : TTIR_DPSOp<"softmax"> {
   let arguments = (ins AnyRankedTensor:$input,
                        AnyRankedTensor:$output,
-                       SI32Attr:$dimension,
-                       TT_OperandConstraintArrayAttr:$operand_constraints);
+                       SI32Attr:$dimension);

   let results = (outs AnyRankedTensor:$result);

@@ -675,8 +670,7 @@ def TTIR_TransposeOp : TTIR_DPSOp<"transpose"> {
   let arguments = (ins AnyRankedTensor:$input,
                        AnyRankedTensor:$output,
                        SI32Attr:$dim0,
-                       SI32Attr:$dim1,
-                       TT_OperandConstraintArrayAttr:$operand_constraints);
+                       SI32Attr:$dim1);

   let results = (outs AnyRankedTensor:$result);

@@ -695,8 +689,7 @@ def TTIR_ConcatOp : TTIR_DPSOp<"concat"> {
   let arguments = (ins Variadic<AnyRankedTensor>:$inputs,
                        AnyRankedTensor:$output,
-                       SI32Attr:$dim,
-                       TT_OperandConstraintArrayAttr:$operand_constraints);
+                       SI32Attr:$dim);

   let results = (outs AnyRankedTensor:$result);

@@ -756,8 +749,7 @@ def TTIR_BroadcastOp : TTIR_DPSOp<"broadcast"> {
   let arguments = (ins AnyRankedTensor:$input,
                        AnyRankedTensor:$output,
-                       I64ArrayAttr:$dimension,
-                       TT_OperandConstraintArrayAttr:$operand_constraints);
+                       I64ArrayAttr:$dimension);

   let results = (outs AnyRankedTensor:$result);

@@ -784,8 +776,7 @@ def TTIR_Conv2dOp : TTIR_DPSOp<"conv2d"> {
                        SI32Attr:$padding_left,
                        SI32Attr:$padding_right,
                        SI32Attr:$padding_top,
-                       SI32Attr:$padding_bottom,
-                       TT_OperandConstraintArrayAttr:$operand_constraints);
+                       SI32Attr:$padding_bottom);

   let results = (outs AnyRankedTensor:$result);

@@ -818,8 +809,7 @@ def TTIR_ConvolutionOp : TTIR_DPSOp<"convolution"> {
     DenseBoolArrayAttr:$window_reversal,
     TTIR_ConvolutionLayoutAttr:$convolution_layout,
     ConfinedAttr<I64Attr, [IntPositive]>:$feature_group_count,
-    ConfinedAttr<I64Attr, [IntPositive]>:$batch_group_count,
-    TT_OperandConstraintArrayAttr:$operand_constraints
+    ConfinedAttr<I64Attr, [IntPositive]>:$batch_group_count
   );

   let results = (outs AnyRankedTensor);

@@ -846,8 +836,7 @@ def TTIR_GatherOp: TTIR_DPSOp<"gather"> {
     DenseI64ArrayAttr:$start_index_map,
     SI64Attr:$index_vector_dim,
     DenseI64ArrayAttr:$slice_sizes,
-    BoolAttr:$indices_are_sorted,
-    TT_OperandConstraintArrayAttr:$operand_constraints);
+    BoolAttr:$indices_are_sorted);
   let results = (outs AnyRankedTensor:$result);
   let extraClassDeclaration = [{
     MutableOperandRange getDpsInitsMutable() { return getOutputMutable(); }
@@ -868,8 +857,7 @@ def TTIR_PoolingOp : TTIR_DPSOp<"pooling", [AttrSizedOperandSegments]> {
     DenseI64ArrayAttr:$window_strides,
     DenseI64ArrayAttr:$base_dilations,
     DenseI64ArrayAttr:$window_dilations,
-    DenseI64ArrayAttr:$padding,
-    TT_OperandConstraintArrayAttr:$operand_constraints
+    DenseI64ArrayAttr:$padding
   );

   let results = (outs Variadic<AnyRankedTensor>);

@@ -895,8 +883,7 @@ def TTIR_MaxPool2dOp : TTIR_DPSOp<"max_pool2d"> {
                        SI32Attr:$padding_left,
                        SI32Attr:$padding_right,
                        SI32Attr:$padding_top,
-                       SI32Attr:$padding_bottom,
-                       TT_OperandConstraintArrayAttr:$operand_constraints);
+                       SI32Attr:$padding_bottom);

   let results = (outs AnyRankedTensor:$result);

@@ -915,8 +902,7 @@ def TTIR_ReshapeOp: TTIR_DPSOp<"reshape"> {
   let arguments = (ins AnyRankedTensor:$input,
                        AnyRankedTensor:$output,
-                       I32ArrayAttr:$shape,
-                       TT_OperandConstraintArrayAttr:$operand_constraints);
+                       I32ArrayAttr:$shape);

   let results = (outs AnyRankedTensor:$result);

@@ -941,8 +927,7 @@ def TTIR_SliceOp: TTIR_DPSOp<"slice"> {
                        AnyRankedTensor:$output,
                        I32ArrayAttr:$begins,
                        I32ArrayAttr:$ends,
-                       I32ArrayAttr:$step,
-                       TT_OperandConstraintArrayAttr:$operand_constraints);
+                       I32ArrayAttr:$step);

   let results = (outs AnyRankedTensor:$result);

@@ -968,8 +953,7 @@ def TTIR_SelectOp: TTIR_DPSOp<"select"> {
                        SI32Attr:$dim,
                        SI32Attr:$begin,
                        SI32Attr:$length,
-                       DefaultValuedOptionalAttr<SI32Attr, "0">:$stride,
-                       TT_OperandConstraintArrayAttr:$operand_constraints);
+                       DefaultValuedOptionalAttr<SI32Attr, "0">:$stride);

   let results = (outs AnyRankedTensor:$result);

@@ -994,8 +978,7 @@ def TTIR_IndexOp: TTIR_DPSOp<"index"> {
                        I32Attr:$dim,
                        I32Attr:$begin,
                        I32Attr:$end,
-                       I32Attr:$step,
-                       TT_OperandConstraintArrayAttr:$operand_constraints);
+                       I32Attr:$step);

   let results = (outs AnyRankedTensor:$result);

@@ -1015,8 +998,7 @@ def TTIR_SqueezeOp : TTIR_DPSOp<"squeeze"> {
   let arguments = (ins AnyRankedTensor:$input,
                        AnyRankedTensor:$output,
-                       SI32Attr:$dim,
-                       TT_OperandConstraintArrayAttr:$operand_constraints);
+                       SI32Attr:$dim);

   let results = (outs AnyRankedTensor:$result);

@@ -1035,8 +1017,7 @@ def TTIR_UnsqueezeOp : TTIR_DPSOp<"unsqueeze"> {
   let arguments = (ins AnyRankedTensor:$input,
                        AnyRankedTensor:$output,
-                       SI32Attr:$dim,
-                       TT_OperandConstraintArrayAttr:$operand_constraints);
+                       SI32Attr:$dim);

   let results = (outs AnyRankedTensor:$result);

@@ -1064,8 +1045,7 @@ def TTIR_ClampOp : TTIR_DPSOp<"clamp"> {
   let arguments = (ins AnyRankedTensor:$input,
                        AnyRankedTensor:$output,
                        F32Attr:$min,
-                       F32Attr:$max,
-                       TT_OperandConstraintArrayAttr:$operand_constraints);
+                       F32Attr:$max);

   let extraClassDeclaration = [{
     MutableOperandRange getDpsInitsMutable() { return getOutputMutable(); }

@@ -1151,8 +1131,7 @@ def TTIR_FillOp : TTIR_DPSOp<"fill", [AllShapesMatch<["value", "result"]>]> {
   }];

   let arguments = (ins AnyRankedTensor:$output,
-                       ElementsAttr:$value,
-                       TT_OperandConstraintArrayAttr:$operand_constraints);
+                       ElementsAttr:$value);

   let results = (outs AnyRankedTensor:$result);

@@ -1177,8 +1156,7 @@ def TTIR_LinearOp : TTIR_DPSOp<"linear"> {
   let arguments = (ins AnyRankedTensor:$a,
                        AnyRankedTensor:$b,
                        Optional<AnyRankedTensor>:$bias,
-                       AnyRankedTensor:$output,
-                       TT_OperandConstraintArrayAttr:$operand_constraints);
+                       AnyRankedTensor:$output);

   let results = (outs AnyRankedTensor:$result);

@@ -1198,8 +1176,7 @@ def TTIR_MatmulOp : TTIR_DPSOp<"matmul"> {
   let arguments = (ins AnyRankedTensor:$a,
                        AnyRankedTensor:$b,
-                       AnyRankedTensor:$output,
-                       TT_OperandConstraintArrayAttr:$operand_constraints);
+                       AnyRankedTensor:$output);

   let results = (outs AnyRankedTensor:$result);

@@ -1322,8 +1299,7 @@ def TTIR_ScatterOp: TTIR_DPSOp<"scatter"> {
     I32Attr:$index_vector_dim,
     BoolAttr:$indices_are_sorted,
     BoolAttr:$unique_indices,
-    AnyRankedTensor:$output,
-    TT_OperandConstraintArrayAttr:$operand_constraints);
+    AnyRankedTensor:$output);

   let regions = (region SizedRegion<1>:$update_computation);

@@ -1351,8 +1327,7 @@ def TTIR_KernelOp : TTIR_DPSOp<"kernel", [AttrSizedOperandSegments]> {
   let arguments = (ins FlatSymbolRefAttr:$op,
                        FlatSymbolRefAttr:$kind,
                        Variadic<AnyRankedTensorOrMemRef>:$inputs,
-                       Variadic<AnyRankedTensorOrMemRef>:$outputs,
-                       TT_OperandConstraintArrayAttr:$operand_constraints);
+                       Variadic<AnyRankedTensorOrMemRef>:$outputs);
   let results = (outs Variadic<AnyRankedTensor>:$results);
 }

@@ -1377,8 +1352,7 @@ def TTIR_AllGatherOp : TTIR_DPSOp<"all_gather"> {
   let arguments = (ins AnyRankedTensor:$input,
                        AnyRankedTensor:$output,
-                       SI32Attr:$dim,
-                       TT_OperandConstraintArrayAttr:$operand_constraints);
+                       SI32Attr:$dim);

   let results = (outs AnyRankedTensor:$result);

diff --git a/include/ttmlir/Dialect/TTIR/IR/TTIROpsInterfaces.td b/include/ttmlir/Dialect/TTIR/IR/TTIROpsInterfaces.td
index a130332f0d..64c3142791 100644
--- a/include/ttmlir/Dialect/TTIR/IR/TTIROpsInterfaces.td
+++ b/include/ttmlir/Dialect/TTIR/IR/TTIROpsInterfaces.td
@@ -11,16 +11,6 @@ include "ttmlir/Dialect/TT/IR/TTOpsTypes.td"
 def TTIROpInterface : OpInterface<"TTIROp"> {
   let cppNamespace = "::mlir::tt::ttir";
   let methods = [
-      InterfaceMethod<
-        /*desc=*/[{
-          Return the constraints on the operands of this operation.
-        }],
-        /*retTy=*/"::mlir::ArrayAttr",
-        /*methodName=*/"getOperandConstraints",
-        /*args=*/(ins),
-        /*methodBody=*/"",
-        /*defaultImplementation=*/""
-      >,
       InterfaceMethod<
         /*desc=*/[{
          Get the device of the current scope.
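Note on the mechanics: the two .td changes above carry the whole interface change. `operand_constraints` disappears from every op's `arguments` list and from the `TTIROp` interface, so the TableGen-generated builders lose one trailing `ArrayAttr` parameter, and everything below is mechanical fallout at the C++ call sites and in the FileCheck expectations. A minimal before/after sketch of a typical conversion-pattern call site (illustrative only; `DestOp` stands in for any TTIR op and `resultTypes`/`outputTensor` for the usual locals, none of which are defined here):

    // Before: every lowering fabricated one constraint attribute per
    // operand, plus one more for the DPS output tensor.
    rewriter.replaceOpWithNewOp<DestOp>(
        srcOp, resultTypes, adaptor.getOperands(), ValueRange(outputTensor),
        rewriter.getArrayAttr(SmallVector<Attribute>(
            adaptor.getOperands().size() + 1,
            rewriter.getAttr<OperandConstraintAttr>(
                OperandConstraint::AnyDeviceTile))));

    // After: only the real operands remain; the layouting passes pick the
    // default memory space and layout themselves (see Layout.cpp and
    // TTNNLayout.cpp further down).
    rewriter.replaceOpWithNewOp<DestOp>(
        srcOp, resultTypes, adaptor.getOperands(), ValueRange(outputTensor));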
diff --git a/lib/Conversion/StableHLOToTTIR/StableHLOToTTIRPatterns.cpp b/lib/Conversion/StableHLOToTTIR/StableHLOToTTIRPatterns.cpp index 96ef7ca017..1a70345c07 100644 --- a/lib/Conversion/StableHLOToTTIR/StableHLOToTTIRPatterns.cpp +++ b/lib/Conversion/StableHLOToTTIR/StableHLOToTTIRPatterns.cpp @@ -53,11 +53,7 @@ class StableHLOToTTIROpDefaultConversionPattern srcOp, TypeRange( this->getTypeConverter()->convertType(outputTensor.getType())), - adaptor.getOperands(), ValueRange(outputTensor), - rewriter.getArrayAttr( - SmallVector(adaptor.getOperands().size() + 1, - rewriter.getAttr( - OperandConstraint::AnyDeviceTile)))); + adaptor.getOperands(), ValueRange(outputTensor)); return success(); } }; @@ -125,18 +121,9 @@ class StableHLOToTTIRReduceOpConversionPattern ? adaptor.getDimensionsAttr()[0] : 1))); - // If someone changes definition of TTIR_ReductionOp this constant will - // become outdated, but I currently see no way to get this info (without - // manually constructing the adaptor for dest OP). - const std::size_t ttirReduceOpOperandsCount = 2; - mlir::ArrayAttr operandConstraints = - rewriter.getArrayAttr(SmallVector( - ttirReduceOpOperandsCount, rewriter.getAttr( - OperandConstraint::AnyDeviceTile))); - rewriter.replaceOpWithNewOp( srcOp, outputType, adaptor.getInputs().front(), outputTensor, - false /* keep_dim */, dimArg, operandConstraints); + false /* keep_dim */, dimArg); return success(); } @@ -171,11 +158,7 @@ class StableHLOToTTIRTransposeOpConversionPattern input = rewriter.create( srcOp.getLoc(), outputType, input, outputTensor, - rewriter.getSI32IntegerAttr(dim0), rewriter.getSI32IntegerAttr(dim1), - rewriter.getArrayAttr( - SmallVector(adaptor.getOperands().size() + 1, - rewriter.getAttr( - OperandConstraint::AnyDeviceTile)))); + rewriter.getSI32IntegerAttr(dim0), rewriter.getSI32IntegerAttr(dim1)); } rewriter.replaceOp(srcOp, input); return success(); @@ -218,11 +201,7 @@ class StableHLOToTTIRReshapeOpConversionPattern ArrayAttr new_shape_attr = rewriter.getI32ArrayAttr(new_shape_i32); rewriter.replaceOpWithNewOp( srcOp, getTypeConverter()->convertType(outputTensor.getType()), - adaptor.getOperand(), outputTensor, new_shape_attr, - rewriter.getArrayAttr( - SmallVector(adaptor.getOperands().size() + 1, - rewriter.getAttr( - OperandConstraint::AnyDeviceTile)))); + adaptor.getOperand(), outputTensor, new_shape_attr); return success(); } @@ -265,11 +244,7 @@ class StableHLOToTTIRDotGeneralOpConversionPattern rewriter.replaceOpWithNewOp( srcOp, getTypeConverter()->convertType(outputTensor.getType()), - adaptor.getLhs(), adaptor.getRhs(), Value(outputTensor), - rewriter.getArrayAttr( - SmallVector(adaptor.getOperands().size() + 1, - rewriter.getAttr( - OperandConstraint::AnyDeviceTile)))); + adaptor.getLhs(), adaptor.getRhs(), Value(outputTensor)); return success(); } @@ -583,11 +558,7 @@ class StableHLOToTTIRConvolutionOpConversionPattern dimNums.getOutputBatchDimension(), dimNums.getOutputFeatureDimension(), dimNums.getOutputSpatialDimensions()), - adaptor.getFeatureGroupCountAttr(), adaptor.getBatchGroupCountAttr(), - rewriter.getArrayAttr( - SmallVector(adaptor.getOperands().size() + 1, - rewriter.getAttr( - OperandConstraint::AnyDeviceTile)))); + adaptor.getFeatureGroupCountAttr(), adaptor.getBatchGroupCountAttr()); return success(); } @@ -683,10 +654,6 @@ class StableHLOToTTIRReduceWindowOpConversionPattern : rewriter.getDenseI64ArrayAttr( SmallVector(windowDimensions.size() * 2, 0)); - auto operandConstraints = rewriter.getArrayAttr(SmallVector( - 
adaptor.getOperands().size(), rewriter.getAttr( - OperandConstraint::AnyDeviceTile))); - mlir::tt::ttir::PoolingMethod poolingMethod; if (isMaxPool(srcOp)) { poolingMethod = mlir::tt::ttir::PoolingMethod::Max; @@ -701,7 +668,7 @@ class StableHLOToTTIRReduceWindowOpConversionPattern rewriter.replaceOpWithNewOp( srcOp, outputType, adaptor.getInputs(), outputs, poolingMethod, windowDimensions, windowStrides, baseDilations, window_dilations, - padding, operandConstraints); + padding); return success(); } @@ -836,11 +803,7 @@ class StableHLOToTTIRBroadcastInDimOpConversionPattern rewriter.replaceOpWithNewOp( srcOp, getTypeConverter()->convertType(outputTensor.getType()), - Value(adaptor.getOperand()), Value(outputTensor), dimArg, - rewriter.getArrayAttr( - SmallVector(adaptor.getOperands().size() + 1, - rewriter.getAttr( - OperandConstraint::AnyDeviceTile)))); + Value(adaptor.getOperand()), Value(outputTensor), dimArg); return success(); } @@ -932,11 +895,7 @@ class StableHLOToTTIRCompareOpConversionPattern srcOp, TypeRange( this->getTypeConverter()->convertType(outputTensor.getType())), - adaptor.getOperands(), ValueRange(outputTensor), - rewriter.getArrayAttr( - SmallVector(adaptor.getOperands().size() + 1, - rewriter.getAttr( - OperandConstraint::AnyDeviceTile)))); + adaptor.getOperands(), ValueRange(outputTensor)); return success(); } @@ -975,11 +934,7 @@ class StableHLOToTTIRConcatOpConversionPattern adaptor.getInputs(), // input values Value(outputTensor), // output value rewriter.getSI32IntegerAttr( - static_cast(adaptor.getDimension())), // dimension - rewriter.getArrayAttr( // operand constraints - SmallVector(adaptor.getOperands().size() + 1, - rewriter.getAttr( - OperandConstraint::AnyDeviceTile)))); + static_cast(adaptor.getDimension()))); // dimension return success(); } @@ -1035,11 +990,7 @@ class StableHLOToTTIROpLogicalOpConversionPattern srcOp, TypeRange( this->getTypeConverter()->convertType(outputTensor.getType())), - adaptor.getOperands(), ValueRange(outputTensor), - rewriter.getArrayAttr( - SmallVector(adaptor.getOperands().size() + 1, - rewriter.getAttr( - OperandConstraint::AnyDeviceTile)))); + adaptor.getOperands(), ValueRange(outputTensor)); return success(); } @@ -1527,11 +1478,7 @@ class StableHLOToTTIRSliceOpConversionPattern adaptor.getOperand(), // input values outputTensor, // output value rewriter.getI32ArrayAttr(start_indices), - rewriter.getI32ArrayAttr(end_indices), rewriter.getI32ArrayAttr(step), - rewriter.getArrayAttr( // operand constraints - SmallVector(adaptor.getOperands().size() + 1, - rewriter.getAttr( - OperandConstraint::AnyDeviceTile)))); + rewriter.getI32ArrayAttr(end_indices), rewriter.getI32ArrayAttr(step)); return success(); } }; @@ -1575,31 +1522,19 @@ class StableHLOToTTIROpClampOpConversionPattern this->getTypeConverter()->convertType(outputTensor.getType()), Value(adaptor.getOperand()), Value(outputTensor), rewriter.getF32FloatAttr(minValue), - rewriter.getF32FloatAttr(maxValue), - rewriter.getArrayAttr( - SmallVector(adaptor.getOperands().size() + 1, - rewriter.getAttr( - OperandConstraint::AnyDeviceTile)))); + rewriter.getF32FloatAttr(maxValue)); return success(); } } ttir::MaximumOp maximumOp = rewriter.create( - srcOp->getLoc(), min, adaptor.getOperand(), outputTensor, - rewriter.getArrayAttr( - SmallVector(adaptor.getOperands().size() + 1, - rewriter.getAttr( - OperandConstraint::AnyDeviceTile)))); + srcOp->getLoc(), min, adaptor.getOperand(), outputTensor); tensor::EmptyOp finalOutputTensor = rewriter.create( srcOp.getLoc(), 
outputType.getShape(), outputType.getElementType()); rewriter.replaceOpWithNewOp( - srcOp, maximumOp->getResult(0), max, finalOutputTensor, - rewriter.getArrayAttr( - SmallVector(adaptor.getOperands().size() + 1, - rewriter.getAttr( - OperandConstraint::AnyDeviceTile)))); + srcOp, maximumOp->getResult(0), max, finalOutputTensor); return success(); } }; @@ -1629,11 +1564,7 @@ class StableHLOToTTIRGatherOpConversionPattern dimensionNumbers.getOperandBatchingDims(), dimensionNumbers.getStartIndicesBatchingDims(), dimensionNumbers.getStartIndexMap(), - dimensionNumbers.getIndexVectorDim(), srcOp.getSliceSizesAttr(), false, - rewriter.getArrayAttr( - SmallVector(adaptor.getOperands().size() + 1, - rewriter.getAttr( - OperandConstraint::AnyDeviceTile)))); + dimensionNumbers.getIndexVectorDim(), srcOp.getSliceSizesAttr(), false); return success(); } }; diff --git a/lib/Conversion/TTIRToTTIRDecomposition/TTIRToTTIRDecomposition.cpp b/lib/Conversion/TTIRToTTIRDecomposition/TTIRToTTIRDecomposition.cpp index 9ba4257428..3803b9f26c 100644 --- a/lib/Conversion/TTIRToTTIRDecomposition/TTIRToTTIRDecomposition.cpp +++ b/lib/Conversion/TTIRToTTIRDecomposition/TTIRToTTIRDecomposition.cpp @@ -64,7 +64,7 @@ struct IndexToSliceConversionPattern auto newOp = rewriter.create( op.getLoc(), op.getType(), adaptor.getInput(), adaptor.getOutput(), rewriter.getArrayAttr(begins), rewriter.getArrayAttr(ends), - rewriter.getArrayAttr(steps), adaptor.getOperandConstraints()); + rewriter.getArrayAttr(steps)); rewriter.replaceOp(op, newOp.getResult()); return success(); @@ -144,8 +144,7 @@ generateTransposeIndices(std::vector currentLayout, * result at the end of the sequence */ static Value generateTransposeOps(Value input, PatternRewriter &rewriter, - std::vector transposeIndices, - ::mlir::ArrayAttr operandConstraints) { + std::vector transposeIndices) { for (auto [dim0, dim1] : transposeIndices) { auto inputType = mlir::cast(input.getType()); @@ -162,8 +161,7 @@ static Value generateTransposeOps(Value input, PatternRewriter &rewriter, input.getLoc(), outputShape, outputType.getElementType()); input = rewriter .create(input.getLoc(), outputType, input, - dpsOutput, dim0Attr, dim1Attr, - operandConstraints) + dpsOutput, dim0Attr, dim1Attr) .getResult(); } @@ -316,12 +314,10 @@ struct Legalize1DConvolutionPattern : public ConvolutionDecompositionPattern { weightShape.end()); reshapeWeightShape.push_back(1); - ttir::ReshapeOp reshapeInput = - createReshapeOp(op.getLoc(), adaptor.getInput(), reshapeInputShape, - op.getOperandConstraints(), rewriter); - ttir::ReshapeOp reshapeWeight = - createReshapeOp(op.getLoc(), adaptor.getWeight(), reshapeWeightShape, - op.getOperandConstraints(), rewriter); + ttir::ReshapeOp reshapeInput = createReshapeOp( + op.getLoc(), adaptor.getInput(), reshapeInputShape, rewriter); + ttir::ReshapeOp reshapeWeight = createReshapeOp( + op.getLoc(), adaptor.getWeight(), reshapeWeightShape, rewriter); mlir::DenseI64ArrayAttr conv2dOpWindowsStridesAttr = addIntegerToDenseArrayAttr(rewriter, adaptor.getWindowStridesAttr(), 1); @@ -374,14 +370,9 @@ struct Legalize1DConvolutionPattern : public ConvolutionDecompositionPattern { convolutionLayout.getOutputFeatureDimension(), conv2dOutputSpatialDimensions), adaptor.getFeatureGroupCountAttr(), - adaptor.getBatchGroupCountAttr(), - rewriter.getArrayAttr( - SmallVector(adaptor.getOperands().size() + 1, - rewriter.getAttr( - OperandConstraint::AnyDeviceTile)))); + adaptor.getBatchGroupCountAttr()); ttir::ReshapeOp reshapeOutput = - createReshapeOp(op.getLoc(), 
new2dConvolutionOp, outputShape, - op.getOperandConstraints(), rewriter); + createReshapeOp(op.getLoc(), new2dConvolutionOp, outputShape, rewriter); rewriter.replaceOp(op, reshapeOutput); @@ -391,7 +382,6 @@ struct Legalize1DConvolutionPattern : public ConvolutionDecompositionPattern { private: ttir::ReshapeOp createReshapeOp(Location loc, Value tensor, llvm::ArrayRef target_input_shape, - ::mlir::ArrayAttr constraints, ConversionPatternRewriter &rewriter) const { auto inputType = mlir::cast(tensor.getType()); @@ -406,7 +396,7 @@ struct Legalize1DConvolutionPattern : public ConvolutionDecompositionPattern { loc, mlir::RankedTensorType::get(target_input_shape, inputType.getElementType()), - tensor, DPSReshapeOutput, shape_attr, constraints); + tensor, DPSReshapeOutput, shape_attr); } mlir::DenseI64ArrayAttr @@ -502,26 +492,23 @@ struct ConvolutionToConv2dPattern : public ConvolutionDecompositionPattern { auto transposeIndices = generateConvTransposeIndices(op, conv2dLayout); Value input = - generateTransposeOps(adaptor.getInput(), rewriter, transposeIndices, - adaptor.getOperandConstraints()); + generateTransposeOps(adaptor.getInput(), rewriter, transposeIndices); auto kernelTransposeIndices = generateConvKernelTransposeIndices(op, conv2dKernelLayout); Value weight = generateTransposeOps(adaptor.getWeight(), rewriter, - kernelTransposeIndices, - adaptor.getOperandConstraints()); + kernelTransposeIndices); ttir::Conv2dOp newConv = rewriter.create( op.getLoc(), outputType, input, weight, adaptor.getBias(), convDPSOutput, strideHeightAttr, strideWidthAttr, dilationHeightAttr, dilationWidthAttr, groupsAttr, paddingLeftAttr, paddingRightAttr, - paddingTopAttr, paddingBottomAttr, adaptor.getOperandConstraints()); + paddingTopAttr, paddingBottomAttr); // Applying the transposes in reverse order to the output will restore the // tensor to the original layout std::reverse(transposeIndices.begin(), transposeIndices.end()); Value output = - generateTransposeOps(newConv.getResult(), rewriter, transposeIndices, - adaptor.getOperandConstraints()); + generateTransposeOps(newConv.getResult(), rewriter, transposeIndices); rewriter.replaceOp(op, output); return success(); @@ -618,8 +605,7 @@ struct GatherToEmbeddingConversionPattern ttir::ReshapeOp createReshapeOp(PatternRewriter &rewriter, Location loc, Value input, - ::llvm::ArrayRef shapei64, - ::mlir::ArrayAttr operandConstraints) const { + ::llvm::ArrayRef shapei64) const { // reshape start indices (input) to remove the last dimension auto ty = mlir::cast(input.getType()); @@ -630,7 +616,7 @@ struct GatherToEmbeddingConversionPattern return rewriter.create( loc, mlir::RankedTensorType::get(shapei64, ty.getElementType()), input, - output, shape_attr, operandConstraints); + output, shape_attr); } /** @@ -680,8 +666,7 @@ struct GatherToEmbeddingConversionPattern startIndicesType.getShape().end() - 1); ttir::ReshapeOp reshapeOp = - createReshapeOp(rewriter, op.getLoc(), startIndices, newShapeI64, - op.getOperandConstraints()); + createReshapeOp(rewriter, op.getLoc(), startIndices, newShapeI64); assert(reshapeOp && "Failed to create reshape op"); reshapeOp->moveBefore(op); @@ -691,10 +676,7 @@ struct GatherToEmbeddingConversionPattern // convert gather to embedding, use reshaped input if needed ttir::EmbeddingOp embeddingOp = rewriter.create( op.getLoc(), op.getResult().getType(), input, op.getOperands()[0], - op.getOutput(), - rewriter.getArrayAttr(SmallVector( - op.getNumOperands() + 1, rewriter.getAttr( - OperandConstraint::AnyDeviceTile)))); + 
op.getOutput()); assert(embeddingOp != nullptr && "Failed to create embedding op"); rewriter.replaceOp(op, embeddingOp); @@ -840,12 +822,10 @@ struct PoolingToPool2dPattern : public OpConversionPattern { rewriter.getSI32IntegerAttr(op.getPadding()[2 * spatialDims[1]]); auto paddingRightAttr = rewriter.getSI32IntegerAttr(op.getPadding()[2 * spatialDims[1] + 1]); - auto operandConstraints = adaptor.getOperandConstraints(); std::vector outputs; for (Value input : adaptor.getInputs()) { - input = generateTransposeOps(input, rewriter, transposeIndices, - operandConstraints); + input = generateTransposeOps(input, rewriter, transposeIndices); auto outputType = mlir::cast(op.getResult(0).getType()); auto newOutputShape = outputType.getShape().vec(); @@ -863,14 +843,13 @@ struct PoolingToPool2dPattern : public OpConversionPattern { op.getLoc(), newOutputType, input, outputTensor, kernelHeightAttr, kernelWidthAttr, strideHeightAttr, strideWidthAttr, dilationHeightAttr, dilationWidthAttr, ceilModeAttr, paddingTopAttr, - paddingBottomAttr, paddingLeftAttr, paddingRightAttr, - operandConstraints); + paddingBottomAttr, paddingLeftAttr, paddingRightAttr); // Applying the transposes in reverse order to the output will restore the // tensor to the original layout std::reverse(transposeIndices.begin(), transposeIndices.end()); - Value output = generateTransposeOps(newPool.getResult(), rewriter, - transposeIndices, operandConstraints); + Value output = + generateTransposeOps(newPool.getResult(), rewriter, transposeIndices); // Reverse back so the proper input transposes are generated for the next // pool @@ -1042,7 +1021,7 @@ struct SelectToSliceConversionPattern auto newOp = rewriter.create( op.getLoc(), resultType, adaptor.getInput(), sliceDpsResult, rewriter.getI32ArrayAttr(begins), rewriter.getI32ArrayAttr(ends), - rewriter.getI32ArrayAttr(steps), adaptor.getOperandConstraints()); + rewriter.getI32ArrayAttr(steps)); slices.push_back(newOp->getResult(0)); } @@ -1052,7 +1031,7 @@ struct SelectToSliceConversionPattern op.getLoc(), outputType.getShape(), outputType.getElementType()); auto concatOp = rewriter.create( op.getLoc(), outputType, slices, concatDpsResult, - rewriter.getSI32IntegerAttr(dim), adaptor.getOperandConstraints()); + rewriter.getSI32IntegerAttr(dim)); rewriter.replaceOp(op, concatOp.getResult()); } else { @@ -1133,10 +1112,7 @@ struct ArangeForceLastDimensionPattern output = rewriter.create( op.getLoc(), transposeType, output, dpsOutput, arangeDimensionNegative + transposeShape.size(), - arangeOutputType.getRank() - 1, - rewriter.getArrayAttr(SmallVector( - 2, rewriter.getAttr( - OperandConstraint::AnyDeviceTile)))); + arangeOutputType.getRank() - 1); outputShape = transposeShape; } @@ -1160,10 +1136,7 @@ struct ArangeForceLastDimensionPattern reshapeType.getElementType()); output = rewriter.create( op.getLoc(), reshapeType, output, dpsOutput, - rewriter.getI32ArrayAttr(reshapeShape), - rewriter.getArrayAttr(SmallVector( - 2, rewriter.getAttr( - OperandConstraint::AnyDeviceTile)))); + rewriter.getI32ArrayAttr(reshapeShape)); outputShape = std::vector(reshapeShape.begin(), reshapeShape.end()); @@ -1186,10 +1159,7 @@ struct ArangeForceLastDimensionPattern output = rewriter.create( op.getLoc(), broadcastType, output, dpsOutput, - rewriter.getArrayAttr(broadcastDims), - rewriter.getArrayAttr(SmallVector( - 2, rewriter.getAttr( - OperandConstraint::AnyDeviceTile)))); + rewriter.getArrayAttr(broadcastDims)); assert(mlir::cast(output.getType()).getShape() == outputType.getShape() && diff --git 
a/lib/Conversion/TosaToTTIR/TosaToTTIRPatterns.cpp b/lib/Conversion/TosaToTTIR/TosaToTTIRPatterns.cpp index 607a083310..12b41b980c 100644 --- a/lib/Conversion/TosaToTTIR/TosaToTTIRPatterns.cpp +++ b/lib/Conversion/TosaToTTIR/TosaToTTIRPatterns.cpp @@ -46,11 +46,7 @@ class TosaToTTIRDefaultDPSOpConversionPattern srcOp.getLoc(), outputType.getShape(), outputType.getElementType()); rewriter.replaceOpWithNewOp( srcOp, TypeRange(outputTensor.getType()), adaptor.getOperands(), - ValueRange(outputTensor), - rewriter.getArrayAttr( - SmallVector(adaptor.getOperands().size() + 1, - rewriter.getAttr( - OperandConstraint::AnyDeviceTile)))); + ValueRange(outputTensor)); return success(); } @@ -103,11 +99,7 @@ class TosaToTTIRMatmulOpConversionPattern rewriter.replaceOpWithNewOp( srcOp, TypeRange(outputTensor.getType()), operands[0], operands[1], - outputTensor, - rewriter.getArrayAttr( - SmallVector(adaptor.getOperands().size() + 1, - rewriter.getAttr( - OperandConstraint::AnyDeviceTile)))); + outputTensor); return success(); } @@ -140,11 +132,8 @@ class TosaToTTIRReduceOpConversionPattern : public OpConversionPattern { rewriter.replaceOpWithNewOp( srcOp, outputTensor.getType(), adaptor.getInput(), outputTensor, true /*keepdim*/, - rewriter.getArrayAttr(SmallVector(1, adaptor.getAxisAttr())), rewriter.getArrayAttr( - SmallVector(adaptor.getOperands().size() + 1, - rewriter.getAttr( - OperandConstraint::AnyDeviceTile)))); + SmallVector(1, adaptor.getAxisAttr()))); return success(); } }; @@ -169,11 +158,7 @@ class TosaToTTIRMaxPool2DOpConversionPattern rewriter.replaceOpWithNewOp( srcOp, TypeRange(outputTensor.getType()), adaptor.getInput(), outputTensor, dims[0], dims[1], strides[0], strides[1], 1, 1, false, - pad[2], pad[3], pad[0], pad[1], - rewriter.getArrayAttr( - SmallVector(adaptor.getOperands().size() + 1, - rewriter.getAttr( - OperandConstraint::AnyDeviceTile)))); + pad[2], pad[3], pad[0], pad[1]); return success(); } }; diff --git a/lib/Dialect/TTIR/IR/TTIROps.cpp b/lib/Dialect/TTIR/IR/TTIROps.cpp index 44af2f2c4b..f845db47e4 100644 --- a/lib/Dialect/TTIR/IR/TTIROps.cpp +++ b/lib/Dialect/TTIR/IR/TTIROps.cpp @@ -1606,23 +1606,16 @@ void mlir::tt::ttir::MaximumOp::buildGenericRegion(::mlir::OpBuilder &opBuilder, static mlir::tt::ttir::KernelOp buildKernelOp(::mlir::OpBuilder &opBuilder, ::mlir::Location loc, ::mlir::StringRef kernelName, ::mlir::StringRef kernelKind, - ::mlir::ValueRange inputs, ::mlir::ValueRange outputs, - ::mlir::ArrayAttr operandConstraints) { + ::mlir::ValueRange inputs, ::mlir::ValueRange outputs) { return opBuilder.create( - loc, outputs.getTypes(), kernelName, kernelKind, inputs, outputs, - operandConstraints); + loc, outputs.getTypes(), kernelName, kernelKind, inputs, outputs); } // Reduce op kernel builder static void createReduceOp(::mlir::OpBuilder &opBuilder, ::mlir::Block *block, mlir::Location loc, ::mlir::StringRef kernelKind) { - auto kernelOp = - buildKernelOp(opBuilder, loc, "reduce", kernelKind, block->getArgument(0), - block->getArgument(1), - opBuilder.getArrayAttr(llvm::SmallVector( - block->getNumArguments(), - opBuilder.getAttr( - mlir::tt::OperandConstraint::AnyDeviceTile)))); + auto kernelOp = buildKernelOp(opBuilder, loc, "reduce", kernelKind, + block->getArgument(0), block->getArgument(1)); opBuilder.create(loc, kernelOp->getResults()); } diff --git a/lib/Dialect/TTIR/Transforms/Constant.cpp b/lib/Dialect/TTIR/Transforms/Constant.cpp index 775dda9283..2151d2a9b8 100644 --- a/lib/Dialect/TTIR/Transforms/Constant.cpp +++ 
b/lib/Dialect/TTIR/Transforms/Constant.cpp
@@ -26,11 +26,8 @@ class TTIRConstantAsFillRewriter : public OpRewritePattern<ConstantOp> {
     auto empty = rewriter.create<tensor::EmptyOp>(
         op.getLoc(), resultTy.getShape(), resultTy.getElementType(),
         resultTy.getEncoding());
-    auto operandConstraints = rewriter.getArrayAttr(SmallVector<Attribute>(
-        1,
-        rewriter.getAttr<OperandConstraintAttr>(OperandConstraint::AnyDevice)));
-    rewriter.replaceOpWithNewOp<FillOp>(
-        op, resultTy, empty, op.getValue(), operandConstraints);
+    rewriter.replaceOpWithNewOp<FillOp>(op, resultTy, empty,
+                                        op.getValue());
     return success();
   }
 };
diff --git a/lib/Dialect/TTIR/Transforms/Generic.cpp b/lib/Dialect/TTIR/Transforms/Generic.cpp
index 3bf96f3cd6..15064ed346 100644
--- a/lib/Dialect/TTIR/Transforms/Generic.cpp
+++ b/lib/Dialect/TTIR/Transforms/Generic.cpp
@@ -68,7 +68,7 @@ class TTIRNamedToKernelRewriter : public OpRewritePattern<TTIROpTy> {

     auto kernel = rewriter.create<ttir::KernelOp>(
         op.getLoc(), op.getResultTypes(), kernelName, kernelKind,
-        op.getInputs(), op.getOutputs(), op.getOperandConstraints());
+        op.getInputs(), op.getOutputs());

     rewriter.replaceOp(op, kernel);

diff --git a/lib/Dialect/TTIR/Transforms/Layout.cpp b/lib/Dialect/TTIR/Transforms/Layout.cpp
index c3ccbf1a44..ee1be155c2 100644
--- a/lib/Dialect/TTIR/Transforms/Layout.cpp
+++ b/lib/Dialect/TTIR/Transforms/Layout.cpp
@@ -176,17 +176,21 @@ createToLayoutOp(PatternRewriter &rewriter, Location loc, Value input,

 static std::optional<Value>
 createToLayoutOp(PatternRewriter &rewriter, Location loc, Value input,
-                 OperandConstraint operandConstraint,
                  MemorySpace defaultMemorySpace,
                  TensorMemoryLayout defaultDeviceMemoryLayout) {
-  auto desiredMemorySpace =
-      getLegalMemorySpace(operandConstraint, defaultMemorySpace);
+  // auto desiredMemorySpace =
+  //     getLegalMemorySpace(operandConstraint, defaultMemorySpace);

-  auto desiredMemoryLayout = getLegalTensorMemoryLayout(
-      operandConstraint, desiredMemorySpace, defaultDeviceMemoryLayout);
+  // auto desiredMemoryLayout = getLegalTensorMemoryLayout(
+  //     operandConstraint, desiredMemorySpace, defaultDeviceMemoryLayout);
+
+  // bool tiled =
+  //     !bitEnumContainsAny(operandConstraint, OperandConstraint::Scalar);
+
+  auto desiredMemorySpace = defaultMemorySpace;
+  auto desiredMemoryLayout = defaultDeviceMemoryLayout;
+  bool tiled = true;

-  bool tiled =
-      !bitEnumContainsAny(operandConstraint, OperandConstraint::Scalar);
   return createToLayoutOp(rewriter, loc, input, desiredMemorySpace,
                           desiredMemoryLayout, tiled);
 }
@@ -223,16 +227,16 @@ class TTIRLayoutDPSOperandsRewriter
       if (mlir::isa<ToLayoutOp>(op.getOperation()) && !isResult) {
         continue;
       }
-      auto operandConstraint =
-          mlir::cast<OperandConstraintAttr>(
-              mlir::cast<TTIROp>(op.getOperation())
-                  .getOperandConstraints()[operand.getOperandNumber()])
-              .getValue();
+      // auto operandConstraint =
+      //     mlir::cast<OperandConstraintAttr>(
+      //         mlir::cast<TTIROp>(op.getOperation())
+      //             .getOperandConstraints()[operand.getOperandNumber()])
+      //         .getValue();
       Location newLoc =
           appendInputSuffix(op.getLoc(), operand.getOperandNumber());
       auto desiredLayout =
-          createToLayoutOp(rewriter, newLoc, operand.get(), operandConstraint,
-                           defaultMemorySpace, defaultDeviceMemoryLayout);
+          createToLayoutOp(rewriter, newLoc, operand.get(), defaultMemorySpace,
+                           defaultDeviceMemoryLayout);

       if (desiredLayout) {
         rewriter.modifyOpInPlace(op, [&]() {
diff --git a/lib/Dialect/TTNN/Transforms/TTNNLayout.cpp b/lib/Dialect/TTNN/Transforms/TTNNLayout.cpp
index 712e12ad08..6bb5ff9497 100644
--- a/lib/Dialect/TTNN/Transforms/TTNNLayout.cpp
+++ b/lib/Dialect/TTNN/Transforms/TTNNLayout.cpp
@@ -248,32 +248,14 @@ createToLayoutOp(PatternRewriter &rewriter, Location loc, Value input,
       ->getResult(0);
 }

-static std::optional<Value>
-createToLayoutOp(PatternRewriter &rewriter, Location loc, Value input,
-                 OperandConstraint operandConstraint) {
-  // Find out which buffer type we want
-  tt::MemorySpace ttDefaultMemSpace =
-      utils::toTTMemorySpace(g_defaultMemorySpaceDevice);
-  tt::MemorySpace desiredMemorySpace =
-      getLegalMemorySpace(operandConstraint, ttDefaultMemSpace);
-  BufferType desiredBufferType = utils::toTTNNBufferType(desiredMemorySpace);
-
-  // Find out which memory layout we want
-  tt::TensorMemoryLayout ttMemoryLayout =
-      utils::toTTTensorMemoryLayout(g_defaultMemoryLayout);
-  tt::TensorMemoryLayout desiredMemoryLayout = getLegalTensorMemoryLayout(
-      operandConstraint, desiredMemorySpace, ttMemoryLayout);
-  TensorMemoryLayoutAttr ttnnMemoryLayoutAttr;
-  if (desiredMemoryLayout != tt::TensorMemoryLayout::None) {
-    TensorMemoryLayout ttnnMemoryLayout =
-        utils::toTTNNTensorMemoryLayout(desiredMemoryLayout);
-    ttnnMemoryLayoutAttr =
-        TensorMemoryLayoutAttr::get(rewriter.getContext(), ttnnMemoryLayout);
-  }
-
-  // Check if the tensor should be tiled
-  bool tiled =
-      !bitEnumContainsAny(operandConstraint, OperandConstraint::Scalar);
+static std::optional<Value> createToLayoutOp(PatternRewriter &rewriter,
+                                             Location loc, Value input) {
+  // Default layout is Tile/Dram/Interleaved
+  BufferType desiredBufferType = g_defaultMemorySpaceDevice;
+  TensorMemoryLayout ttnnMemoryLayout = g_defaultMemoryLayout;
+  TensorMemoryLayoutAttr ttnnMemoryLayoutAttr =
+      TensorMemoryLayoutAttr::get(rewriter.getContext(), ttnnMemoryLayout);
+  bool tiled = true;

   return createToLayoutOp(rewriter, loc, input, desiredBufferType,
                           ttnnMemoryLayoutAttr, tiled);
@@ -334,17 +316,11 @@ class TTNNLayoutDPSOperandsRewriter
         continue;
       }

-      // Read operand constrait for current operand
-      OperandConstraint operandConstraint =
-          mlir::cast<OperandConstraintAttr>(
-              mlir::cast<TTIROp>(op.getOperation())
-                  .getOperandConstraints()[operand.getOperandNumber()])
-              .getValue();
       Location newLoc =
           appendInputSuffix(op.getLoc(), operand.getOperandNumber());
-      // Given the operand constraint, create the desired layout for the operand
+      // Create the desired layout for the operand
       std::optional<Value> desiredLayout =
-          createToLayoutOp(rewriter, newLoc, operand.get(), operandConstraint);
+          createToLayoutOp(rewriter, newLoc, operand.get());

       // If layout changed update the operand
       if (desiredLayout) {
diff --git a/test/ttmlir/Conversion/StableHLOToTTIR/binary/concat_op.mlir b/test/ttmlir/Conversion/StableHLOToTTIR/binary/concat_op.mlir
index 51cfd214bb..055a5fa37a 100644
--- a/test/ttmlir/Conversion/StableHLOToTTIR/binary/concat_op.mlir
+++ b/test/ttmlir/Conversion/StableHLOToTTIR/binary/concat_op.mlir
@@ -6,7 +6,7 @@ module @jit_concat attributes {} {
       dimension = 1 : i64
     } : (tensor<32x32xf32>, tensor<32x64xf32>) -> tensor<32x96xf32>
     // CHECK: %[[C:.*]] = tensor.empty[[C:.*]]
-    // CHECK: %[[C:.*]] = "ttir.concat"(%arg0, %arg1, %0) <{dim = 1 : si32, operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<32x32xf32>, tensor<32x64xf32>, tensor<32x96xf32>) -> tensor<32x96xf32>
+    // CHECK: %[[C:.*]] = "ttir.concat"(%arg0, %arg1, %0) <{dim = 1 : si32}> : (tensor<32x32xf32>, tensor<32x64xf32>, tensor<32x96xf32>) -> tensor<32x96xf32>
     return %0 : tensor<32x96xf32>
   }

@@ -15,7 +15,7 @@ module @jit_concat attributes {} {
       dimension = 0 : i64
     } : (tensor<3x2xi64>, tensor<1x2xi64>) -> tensor<4x2xi64>
     // CHECK: %[[C:.*]] = tensor.empty[[C:.*]]
-    // CHECK: %[[C:.*]] = "ttir.concat"(%arg0, %arg1, %0) <{dim = 0 : si32, operand_constraints = [#any_device_tile, #any_device_tile,
#any_device_tile]}> : (tensor<3x2xi32>, tensor<1x2xi32>, tensor<4x2xi32>) -> tensor<4x2xi32> + // CHECK: %[[C:.*]] = "ttir.concat"(%arg0, %arg1, %0) <{dim = 0 : si32}> : (tensor<3x2xi32>, tensor<1x2xi32>, tensor<4x2xi32>) -> tensor<4x2xi32> return %0 : tensor<4x2xi64> } @@ -24,7 +24,7 @@ module @jit_concat attributes {} { dimension = 1 : i64 } : (tensor<4x3xf32>, tensor<4x5xf32>) -> tensor<4x8xf32> // CHECK: %[[C:.*]] = tensor.empty[[C:.*]] - // CHECK: %[[C:.*]] = "ttir.concat"(%arg0, %arg1, %0) <{dim = 1 : si32, operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<4x3xf32>, tensor<4x5xf32>, tensor<4x8xf32>) -> tensor<4x8xf32> + // CHECK: %[[C:.*]] = "ttir.concat"(%arg0, %arg1, %0) <{dim = 1 : si32}> : (tensor<4x3xf32>, tensor<4x5xf32>, tensor<4x8xf32>) -> tensor<4x8xf32> return %0 : tensor<4x8xf32> } @@ -33,7 +33,7 @@ module @jit_concat attributes {} { dimension = 1 : i64 } : (tensor<128x64xf32>, tensor<128x96xf32>) -> tensor<128x160xf32> // CHECK: %[[C:.*]] = tensor.empty[[C:.*]] - // CHECK: %[[C:.*]] = "ttir.concat"(%arg0, %arg1, %0) <{dim = 1 : si32, operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<128x64xf32>, tensor<128x96xf32>, tensor<128x160xf32>) -> tensor<128x160xf32> + // CHECK: %[[C:.*]] = "ttir.concat"(%arg0, %arg1, %0) <{dim = 1 : si32}> : (tensor<128x64xf32>, tensor<128x96xf32>, tensor<128x160xf32>) -> tensor<128x160xf32> return %0 : tensor<128x160xf32> } @@ -42,7 +42,7 @@ module @jit_concat attributes {} { dimension = 1 : i64 } : (tensor<256x512xi64>, tensor<256x256xi64>) -> tensor<256x768xi64> // CHECK: %[[C:.*]] = tensor.empty[[C:.*]] - // CHECK: %[[C:.*]] = "ttir.concat"(%arg0, %arg1, %0) <{dim = 1 : si32, operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<256x512xi32>, tensor<256x256xi32>, tensor<256x768xi32>) -> tensor<256x768xi32> + // CHECK: %[[C:.*]] = "ttir.concat"(%arg0, %arg1, %0) <{dim = 1 : si32}> : (tensor<256x512xi32>, tensor<256x256xi32>, tensor<256x768xi32>) -> tensor<256x768xi32> return %0 : tensor<256x768xi64> } @@ -51,7 +51,7 @@ module @jit_concat attributes {} { dimension = 1 : i64 } : (tensor<64x32xf64>, tensor<64x64xf64>) -> tensor<64x96xf64> // CHECK: %[[C:.*]] = tensor.empty[[C:.*]] - // CHECK: %[[C:.*]] = "ttir.concat"(%arg0, %arg1, %0) <{dim = 1 : si32, operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<64x32xf32>, tensor<64x64xf32>, tensor<64x96xf32>) -> tensor<64x96xf32> + // CHECK: %[[C:.*]] = "ttir.concat"(%arg0, %arg1, %0) <{dim = 1 : si32}> : (tensor<64x32xf32>, tensor<64x64xf32>, tensor<64x96xf32>) -> tensor<64x96xf32> return %0 : tensor<64x96xf64> } @@ -60,7 +60,7 @@ module @jit_concat attributes {} { dimension = 0 : i64 } : (tensor<1000x128xi32>, tensor<500x128xi32>) -> tensor<1500x128xi32> // CHECK: %[[C:.*]] = tensor.empty[[C:.*]] - // CHECK: %[[C:.*]] = "ttir.concat"(%arg0, %arg1, %0) <{dim = 0 : si32, operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<1000x128xi32>, tensor<500x128xi32>, tensor<1500x128xi32>) -> tensor<1500x128xi32> + // CHECK: %[[C:.*]] = "ttir.concat"(%arg0, %arg1, %0) <{dim = 0 : si32}> : (tensor<1000x128xi32>, tensor<500x128xi32>, tensor<1500x128xi32>) -> tensor<1500x128xi32> return %0 : tensor<1500x128xi32> } @@ -69,7 +69,7 @@ module @jit_concat attributes {} { dimension = 3 : i64 } : (tensor<3x2x4x5xf64>, tensor<3x2x4x3xf64>) -> tensor<3x2x4x8xf64> // CHECK: %[[C:.*]] = tensor.empty[[C:.*]] - // CHECK: %[[C:.*]] = 
"ttir.concat"(%arg0, %arg1, %0) <{dim = 3 : si32, operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<3x2x4x5xf32>, tensor<3x2x4x3xf32>, tensor<3x2x4x8xf32>) -> tensor<3x2x4x8xf32> + // CHECK: %[[C:.*]] = "ttir.concat"(%arg0, %arg1, %0) <{dim = 3 : si32}> : (tensor<3x2x4x5xf32>, tensor<3x2x4x3xf32>, tensor<3x2x4x8xf32>) -> tensor<3x2x4x8xf32> return %0 : tensor<3x2x4x8xf64> } @@ -78,7 +78,7 @@ module @jit_concat attributes {} { dimension = 2 : i64 } : (tensor<8x4x6xi32>, tensor<8x4x2xi32>) -> tensor<8x4x8xi32> // CHECK: %[[C:.*]] = tensor.empty[[C:.*]] - // CHECK: %[[C:.*]] = "ttir.concat"(%arg0, %arg1, %0) <{dim = 2 : si32, operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<8x4x6xi32>, tensor<8x4x2xi32>, tensor<8x4x8xi32>) -> tensor<8x4x8xi32> + // CHECK: %[[C:.*]] = "ttir.concat"(%arg0, %arg1, %0) <{dim = 2 : si32}> : (tensor<8x4x6xi32>, tensor<8x4x2xi32>, tensor<8x4x8xi32>) -> tensor<8x4x8xi32> return %0 : tensor<8x4x8xi32> } } diff --git a/test/ttmlir/Conversion/StableHLOToTTIR/clamp_op.mlir b/test/ttmlir/Conversion/StableHLOToTTIR/clamp_op.mlir index 6bd602e276..d46b00e6a6 100644 --- a/test/ttmlir/Conversion/StableHLOToTTIR/clamp_op.mlir +++ b/test/ttmlir/Conversion/StableHLOToTTIR/clamp_op.mlir @@ -6,7 +6,7 @@ module @jit_transpose attributes {} { %cst_0 = stablehlo.constant dense<3.000000e+00> : tensor<4xf32> // CHECK: %[[EMPTY:.*]] = tensor.empty() : [[TENSOR:tensor<4xf32>]] // CHECK: "ttir.clamp"(%arg0, %[[EMPTY]]) - // CHECK-SAME: max = 3.000000e+00 : f32, min = 2.000000e+00 : f32, + // CHECK-SAME: max = 3.000000e+00 : f32, min = 2.000000e+00 : f32 // CHECK-SAME: ([[TENSOR]], [[TENSOR]]) -> [[TENSOR]] %0 = stablehlo.clamp %cst, %arg0, %cst_0 : tensor<4xf32> return %0 : tensor<4xf32> diff --git a/test/ttmlir/Conversion/StableHLOToTTIR/exponential_minus_one_op.mlir b/test/ttmlir/Conversion/StableHLOToTTIR/exponential_minus_one_op.mlir index 179268b762..6a86fed843 100644 --- a/test/ttmlir/Conversion/StableHLOToTTIR/exponential_minus_one_op.mlir +++ b/test/ttmlir/Conversion/StableHLOToTTIR/exponential_minus_one_op.mlir @@ -1,11 +1,10 @@ // REQUIRES: stablehlo // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s -#any_device = #tt.operand_constraint module @jit_eltwise_expm1 attributes {} { func.func public @test_expm1(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { %0 = stablehlo.exponential_minus_one %arg0 : tensor<13x21x3xf32> // CHECK: [[VAL0:%[0-9]+]] = tensor.empty() : [[TENSOR_SIZE:tensor<[0-9]+x[0-9]+x[0-9]+xf[0-9]+>]] - // CHECK: [[VAL1:%[0-9]+]] = "ttir.expm1"(%arg0, [[VAL0]]) <{operandSegmentSizes = array, operand_constraints = [#any_device_tile, #any_device_tile]}> : ([[TENSOR_SIZE]], [[TENSOR_SIZE]]) -> [[TENSOR_SIZE]] + // CHECK: [[VAL1:%[0-9]+]] = "ttir.expm1"(%arg0, [[VAL0]]) <{operandSegmentSizes = array}> : ([[TENSOR_SIZE]], [[TENSOR_SIZE]]) -> [[TENSOR_SIZE]] return %0 : tensor<13x21x3xf32> // CHECK: return [[VAL1]] : [[TENSOR_SIZE]] } diff --git a/test/ttmlir/Conversion/StableHLOToTTIR/log_plus_one_op.mlir b/test/ttmlir/Conversion/StableHLOToTTIR/log_plus_one_op.mlir index d1d44f3af8..feba16b6f1 100644 --- a/test/ttmlir/Conversion/StableHLOToTTIR/log_plus_one_op.mlir +++ b/test/ttmlir/Conversion/StableHLOToTTIR/log_plus_one_op.mlir @@ -1,11 +1,10 @@ // REQUIRES: stablehlo // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s -#any_device = #tt.operand_constraint module @jit_eltwise_log_plus_one attributes {} { func.func public @test_log_plus_one(%arg0: 
tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { %0 = stablehlo.log_plus_one %arg0 : tensor<13x21x3xf32> // CHECK: [[VAL0:%[0-9]+]] = tensor.empty() : [[TENSOR_SIZE:tensor<[0-9]+x[0-9]+x[0-9]+xf[0-9]+>]] - // CHECK: [[VAL1:%[0-9]+]] = "ttir.log1p"(%arg0, [[VAL0]]) <{operandSegmentSizes = array, operand_constraints = [#any_device_tile, #any_device_tile]}> : ([[TENSOR_SIZE]], [[TENSOR_SIZE]]) -> [[TENSOR_SIZE]] + // CHECK: [[VAL1:%[0-9]+]] = "ttir.log1p"(%arg0, [[VAL0]]) <{operandSegmentSizes = array}> : ([[TENSOR_SIZE]], [[TENSOR_SIZE]]) -> [[TENSOR_SIZE]] return %0 : tensor<13x21x3xf32> // CHECK: return [[VAL1]] : [[TENSOR_SIZE]] } diff --git a/test/ttmlir/Conversion/StableHLOToTTIR/select_op.mlir b/test/ttmlir/Conversion/StableHLOToTTIR/select_op.mlir index 458879081a..24df823d4f 100644 --- a/test/ttmlir/Conversion/StableHLOToTTIR/select_op.mlir +++ b/test/ttmlir/Conversion/StableHLOToTTIR/select_op.mlir @@ -1,13 +1,12 @@ // REQUIRES: stablehlo // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s -#any_device = #tt.operand_constraint module @jit_eltwise_select attributes {} { func.func public @test_select(%arg0: tensor<13x37xf32>, %arg1: tensor<13x37xf32>) -> tensor<13x37xf32> { %0 = stablehlo.compare EQ, %arg0, %arg1 : (tensor<13x37xf32>, tensor<13x37xf32>) -> tensor<13x37xi1> %1 = stablehlo.select %0, %arg0, %arg1 : (tensor<13x37xi1>, tensor<13x37xf32>, tensor<13x37xf32>) -> tensor<13x37xf32> // CHECK: %[[EMPTY:[0-9]+]] = tensor.empty() // CHECK: %[[VAL1:[0-9]+]] = "ttir.eq" - // CHECK: %[[SELECT:[0-9]+]] = "ttir.where"(%[[VAL1:[0-9]+]], %arg0, %arg1, %[[EMPTY:[0-9]+]]) <{operandSegmentSizes = array, operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<13x37xbf16>, tensor<13x37xf32>, tensor<13x37xf32>, tensor<13x37xf32>) -> tensor<13x37xf32> + // CHECK: %[[SELECT:[0-9]+]] = "ttir.where"(%[[VAL1:[0-9]+]], %arg0, %arg1, %[[EMPTY:[0-9]+]]) <{operandSegmentSizes = array}> : (tensor<13x37xbf16>, tensor<13x37xf32>, tensor<13x37xf32>, tensor<13x37xf32>) -> tensor<13x37xf32> return %1 : tensor<13x37xf32> } } diff --git a/test/ttmlir/Conversion/StableHLOToTTIR/sign_op.mlir b/test/ttmlir/Conversion/StableHLOToTTIR/sign_op.mlir index 0bf4a1bcad..a0b9d7c056 100644 --- a/test/ttmlir/Conversion/StableHLOToTTIR/sign_op.mlir +++ b/test/ttmlir/Conversion/StableHLOToTTIR/sign_op.mlir @@ -1,11 +1,10 @@ // REQUIRES: stablehlo // RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s -#any_device = #tt.operand_constraint module @jit_eltwise_sign attributes {} { func.func public @test_sign(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { %0 = stablehlo.sign %arg0 : tensor<13x21x3xf32> // CHECK: [[VAL0:%[0-9]+]] = tensor.empty() : [[TENSOR_SIZE:tensor<[0-9]+x[0-9]+x[0-9]+xf[0-9]+>]] - // CHECK: [[VAL1:%[0-9]+]] = "ttir.sign"(%arg0, [[VAL0]]) <{operandSegmentSizes = array, operand_constraints = [#any_device_tile, #any_device_tile]}> : ([[TENSOR_SIZE]], [[TENSOR_SIZE]]) -> [[TENSOR_SIZE]] + // CHECK: [[VAL1:%[0-9]+]] = "ttir.sign"(%arg0, [[VAL0]]) <{operandSegmentSizes = array}> : ([[TENSOR_SIZE]], [[TENSOR_SIZE]]) -> [[TENSOR_SIZE]] return %0 : tensor<13x21x3xf32> // CHECK: return [[VAL1]] : [[TENSOR_SIZE]] } diff --git a/test/ttmlir/Conversion/StableHLOToTTIR/unary/ceil_op.mlir b/test/ttmlir/Conversion/StableHLOToTTIR/unary/ceil_op.mlir index f81c4b37bf..48d60ebd0c 100644 --- a/test/ttmlir/Conversion/StableHLOToTTIR/unary/ceil_op.mlir +++ b/test/ttmlir/Conversion/StableHLOToTTIR/unary/ceil_op.mlir @@ -4,7 +4,7 @@ module 
@jit_eltwise_ceil attributes {} { func.func public @test_ceil(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { %0 = stablehlo.ceil %arg0 : tensor<13x21x3xf32> // CHECK: [[VAL0:%[0-9]+]] = tensor.empty() : [[TENSOR_SIZE:tensor<[0-9]+x[0-9]+x[0-9]+xf[0-9]+>]] - // CHECK: [[VAL1:%[0-9]+]] = "ttir.ceil"(%arg0, [[VAL0]]) <{operandSegmentSizes = array, operand_constraints = [#any_device_tile, #any_device_tile]}> : ([[TENSOR_SIZE]], [[TENSOR_SIZE]]) -> [[TENSOR_SIZE]] + // CHECK: [[VAL1:%[0-9]+]] = "ttir.ceil"(%arg0, [[VAL0]]) <{operandSegmentSizes = array}> : ([[TENSOR_SIZE]], [[TENSOR_SIZE]]) -> [[TENSOR_SIZE]] return %0 : tensor<13x21x3xf32> } } diff --git a/test/ttmlir/Conversion/StableHLOToTTIR/unary/cosine_op.mlir b/test/ttmlir/Conversion/StableHLOToTTIR/unary/cosine_op.mlir index fb54f073e4..3c4853e6c6 100644 --- a/test/ttmlir/Conversion/StableHLOToTTIR/unary/cosine_op.mlir +++ b/test/ttmlir/Conversion/StableHLOToTTIR/unary/cosine_op.mlir @@ -4,7 +4,7 @@ module @jit_eltwise_cosine attributes {} { func.func public @test_cosine(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { %0 = stablehlo.cosine %arg0 : tensor<13x21x3xf32> // CHECK: [[VAL0:%[0-9]+]] = tensor.empty() : [[TENSOR_SIZE:tensor<[0-9]+x[0-9]+x[0-9]+xf[0-9]+>]] - // CHECK: [[VAL1:%[0-9]+]] = "ttir.cos"(%arg0, [[VAL0]]) <{operandSegmentSizes = array, operand_constraints = [#any_device_tile, #any_device_tile]}> : ([[TENSOR_SIZE]], [[TENSOR_SIZE]]) -> [[TENSOR_SIZE]] + // CHECK: [[VAL1:%[0-9]+]] = "ttir.cos"(%arg0, [[VAL0]]) <{operandSegmentSizes = array}> : ([[TENSOR_SIZE]], [[TENSOR_SIZE]]) -> [[TENSOR_SIZE]] return %0 : tensor<13x21x3xf32> } } diff --git a/test/ttmlir/Conversion/StableHLOToTTIR/unary/sine_op.mlir b/test/ttmlir/Conversion/StableHLOToTTIR/unary/sine_op.mlir index 24ea372382..be0883c3fc 100644 --- a/test/ttmlir/Conversion/StableHLOToTTIR/unary/sine_op.mlir +++ b/test/ttmlir/Conversion/StableHLOToTTIR/unary/sine_op.mlir @@ -4,7 +4,7 @@ module @jit_eltwise_sine attributes {} { func.func public @test_sine(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { %0 = stablehlo.sine %arg0 : tensor<13x21x3xf32> // CHECK: [[VAL0:%[0-9]+]] = tensor.empty() : [[TENSOR_SIZE:tensor<[0-9]+x[0-9]+x[0-9]+xf[0-9]+>]] - // CHECK: [[VAL1:%[0-9]+]] = "ttir.sin"(%arg0, [[VAL0]]) <{operandSegmentSizes = array, operand_constraints = [#any_device_tile, #any_device_tile]}> : ([[TENSOR_SIZE]], [[TENSOR_SIZE]]) -> [[TENSOR_SIZE]] + // CHECK: [[VAL1:%[0-9]+]] = "ttir.sin"(%arg0, [[VAL0]]) <{operandSegmentSizes = array}> : ([[TENSOR_SIZE]], [[TENSOR_SIZE]]) -> [[TENSOR_SIZE]] return %0 : tensor<13x21x3xf32> } } diff --git a/test/ttmlir/Dialect/TTIR/constant_as_fill.mlir b/test/ttmlir/Dialect/TTIR/constant_as_fill.mlir index dbe7f079b5..4cc8c0d8a9 100644 --- a/test/ttmlir/Dialect/TTIR/constant_as_fill.mlir +++ b/test/ttmlir/Dialect/TTIR/constant_as_fill.mlir @@ -1,13 +1,10 @@ // RUN: ttmlir-opt --ttir-constant-as-fill %s | FileCheck %s - -#any_device = #tt.operand_constraint - func.func public @add5(%arg0: tensor<32x32xf32>) -> tensor<32x32xf32> { // CHECK: %[[C:.*]] = tensor.empty[[C:.*]] // CHECK: %[[C:.*]] = "ttir.fill"[[C:.*]] %0 = "ttir.constant"() <{value = dense<5.000000e+00> : tensor<32x32xf32>}> : () -> tensor<32x32xf32> // CHECK: %[[C:.*]] = tensor.empty[[C:.*]] %1 = tensor.empty() : tensor<32x32xf32> - %2 = "ttir.add"(%arg0, %0, %1) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<32x32xf32>, tensor<32x32xf32>, tensor<32x32xf32>) -> tensor<32x32xf32> + %2 = "ttir.add"(%arg0, 
%0, %1) <{operandSegmentSizes = array}> : (tensor<32x32xf32>, tensor<32x32xf32>, tensor<32x32xf32>) -> tensor<32x32xf32> return %2 : tensor<32x32xf32> } diff --git a/test/ttmlir/Dialect/TTIR/convolution/convolution_tests_negative.mlir b/test/ttmlir/Dialect/TTIR/convolution/convolution_tests_negative.mlir index afdb92cc0d..278bb9f211 100644 --- a/test/ttmlir/Dialect/TTIR/convolution/convolution_tests_negative.mlir +++ b/test/ttmlir/Dialect/TTIR/convolution/convolution_tests_negative.mlir @@ -1,6 +1,4 @@ // RUN: not ttmlir-opt --split-input-file %s 2>&1 | FileCheck %s -#any_device_tile = #tt.operand_constraint - module @jit_convolution_bad_spatial_dimensions { func.func public @test_illegal_convolution(%arg0: tensor<1x3x100x100xbf16>, %arg1: tensor<7x3x3x3xbf16>) -> tensor<1x7x100x100xbf16> { %0 = tensor.empty() : tensor<1x7x100x100xbf16> @@ -20,7 +18,6 @@ module @jit_convolution_bad_spatial_dimensions { >, feature_group_count = 1 : i64, input_dilation = array, - operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile], padding = array, weight_dilation = array, window_reversal = array, @@ -51,7 +48,6 @@ module @jit_convolution_bad_stride_dimensions { >, feature_group_count = 1 : i64, input_dilation = array, - operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile], padding = array, weight_dilation = array, window_reversal = array, @@ -82,7 +78,6 @@ module @jit_convolution_bad_input_tensor { >, feature_group_count = 1 : i64, input_dilation = array, - operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile], padding = array, weight_dilation = array, window_reversal = array, @@ -113,7 +108,6 @@ module @jit_convolution_bad_weight_tensor { >, feature_group_count = 1 : i64, input_dilation = array, - operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile], padding = array, weight_dilation = array, window_reversal = array, @@ -144,7 +138,6 @@ module @jit_convolution_bad_bias_tensor { >, feature_group_count = 1 : i64, input_dilation = array, - operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile], padding = array, weight_dilation = array, window_reversal = array, diff --git a/test/ttmlir/Dialect/TTIR/index/index_tests_negative.mlir b/test/ttmlir/Dialect/TTIR/index/index_tests_negative.mlir index 03c2e6fafa..9f5d8b04ae 100644 --- a/test/ttmlir/Dialect/TTIR/index/index_tests_negative.mlir +++ b/test/ttmlir/Dialect/TTIR/index/index_tests_negative.mlir @@ -2,12 +2,11 @@ // Negative tests for index operation // Verify that the parsing fails if the begins attribute is not a 3D tensor -#any_device_tile = #tt.operand_constraint module attributes {} { func.func @index_negative_invalid_shape(%arg0: tensor) -> tensor<1xbf16> { %0 = tensor.empty() : tensor<1xbf16> // CHECK: error: 'ttir.index' op Input must be at least a 1D tensor - %1 = "ttir.index"(%arg0, %0) <{dim = 0: i32, begin = 0: i32, end = 0: i32, step = 1: i32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor, tensor<1xbf16>) -> tensor<1xbf16> + %1 = "ttir.index"(%arg0, %0) <{dim = 0: i32, begin = 0: i32, end = 0: i32, step = 1: i32}> : (tensor, tensor<1xbf16>) -> tensor<1xbf16> return %1 : tensor<1xbf16> } } @@ -19,7 +18,7 @@ module attributes {} { func.func @index_negative_invalid_begins(%arg0: tensor<3x128x64xbf16>) -> tensor<3x128x64xbf16> { %0 = tensor.empty() : tensor<3x128x64xbf16> // CHECK: error: 'ttir.index' op Invalid dimension index 3. 
Input tensor rank is 3 - %1 = "ttir.index"(%arg0, %0) <{dim = 3 : i32, begin = 0: i32, end = 0: i32, step = 1: i32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<3x128x64xbf16>, tensor<3x128x64xbf16>) -> tensor<3x128x64xbf16> + %1 = "ttir.index"(%arg0, %0) <{dim = 3 : i32, begin = 0: i32, end = 0: i32, step = 1: i32}> : (tensor<3x128x64xbf16>, tensor<3x128x64xbf16>) -> tensor<3x128x64xbf16> return %1 : tensor<3x128x64xbf16> } } @@ -31,7 +30,7 @@ module attributes {} { func.func @index_negative_invalid_output_datatype(%arg0: tensor<3x128x64xbf16>) -> tensor<3x128x32xf32> { %0 = tensor.empty() : tensor<3x128x32xf32> // CHECK: error: 'ttir.index' op Output tensor must have the same element type as the input tensor - %1 = "ttir.index"(%arg0, %0) <{dim = 2 : i32, begin = 0: i32, end = 32: i32, step = 1: i32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<3x128x64xbf16>, tensor<3x128x32xf32>) -> tensor<3x128x32xf32> + %1 = "ttir.index"(%arg0, %0) <{dim = 2 : i32, begin = 0: i32, end = 32: i32, step = 1: i32}> : (tensor<3x128x64xbf16>, tensor<3x128x32xf32>) -> tensor<3x128x32xf32> return %1 : tensor<3x128x32xf32> } } @@ -43,7 +42,7 @@ module attributes {} { func.func @index_negative_input_output_rank_missmatch(%arg0: tensor<3x128x64xbf16>) -> tensor<3x64x64x1xbf16> { %0 = tensor.empty() : tensor<3x64x64x1xbf16> // CHECK: error: 'ttir.index' op Output tensor must have the same rank as the input tensor - %1 = "ttir.index"(%arg0, %0) <{dim = 1: i32, begin = 0: i32, end = 64: i32, step = 1: i32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<3x128x64xbf16>, tensor<3x64x64x1xbf16>) -> tensor<3x64x64x1xbf16> + %1 = "ttir.index"(%arg0, %0) <{dim = 1: i32, begin = 0: i32, end = 64: i32, step = 1: i32}> : (tensor<3x128x64xbf16>, tensor<3x64x64x1xbf16>) -> tensor<3x64x64x1xbf16> return %1 : tensor<3x64x64x1xbf16> } } @@ -55,7 +54,7 @@ module attributes {} { func.func @index_negative_invalid_begin_positive(%arg0: tensor<10x3x128x64xbf16>) -> tensor<10x1x128x64xbf16> { %0 = tensor.empty() : tensor<10x1x128x64xbf16> // CHECK: error: 'ttir.index' op Invalid begin index for dimension 1. Expected value in range [-3, 3), got 3. Input shape: (10, 3, 128, 64) - %1 = "ttir.index"(%arg0, %0) <{dim = 1: i32, begin = 3: i32, end = 3: i32, step = 1: i32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<10x3x128x64xbf16>, tensor<10x1x128x64xbf16>) -> tensor<10x1x128x64xbf16> + %1 = "ttir.index"(%arg0, %0) <{dim = 1: i32, begin = 3: i32, end = 3: i32, step = 1: i32}> : (tensor<10x3x128x64xbf16>, tensor<10x1x128x64xbf16>) -> tensor<10x1x128x64xbf16> return %1 : tensor<10x1x128x64xbf16> } } @@ -67,7 +66,7 @@ module attributes {} { func.func @index_negative_invalid_begin_negative(%arg0: tensor<10x3x128x64xbf16>) -> tensor<10x3x64x64xbf16> { %0 = tensor.empty() : tensor<10x3x64x64xbf16> // CHECK: error: 'ttir.index' op Invalid begin index for dimension 2. Expected value in range [-128, 128), got -129. 
Input shape: (10, 3, 128, 64) - %1 = "ttir.index"(%arg0, %0) <{dim = 2: i32, begin = -129: i32, end = 64: i32, step = 1: i32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<10x3x128x64xbf16>, tensor<10x3x64x64xbf16>) -> tensor<10x3x64x64xbf16> + %1 = "ttir.index"(%arg0, %0) <{dim = 2: i32, begin = -129: i32, end = 64: i32, step = 1: i32}> : (tensor<10x3x128x64xbf16>, tensor<10x3x64x64xbf16>) -> tensor<10x3x64x64xbf16> return %1 : tensor<10x3x64x64xbf16> } } @@ -79,7 +78,7 @@ module attributes {} { func.func @index_negative_invalid_end_positive(%arg0: tensor<10x3x128x64xbf16>) -> tensor<10x3x128x64xbf16> { %0 = tensor.empty() : tensor<10x3x128x64xbf16> // CHECK: error: 'ttir.index' op Invalid end index for dimension 1. Expected value in range [-3, 3], got 4. Input shape: (10, 3, 128, 64) - %1 = "ttir.index"(%arg0, %0) <{dim = 1: i32, begin = 0: i32, end = 4: i32, step = 1: i32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<10x3x128x64xbf16>, tensor<10x3x128x64xbf16>) -> tensor<10x3x128x64xbf16> + %1 = "ttir.index"(%arg0, %0) <{dim = 1: i32, begin = 0: i32, end = 4: i32, step = 1: i32}> : (tensor<10x3x128x64xbf16>, tensor<10x3x128x64xbf16>) -> tensor<10x3x128x64xbf16> return %1 : tensor<10x3x128x64xbf16> } } @@ -91,7 +90,7 @@ module attributes {} { func.func @index_negative_invalid_end_negative(%arg0: tensor<10x3x128x64xbf16>) -> tensor<10x3x128x64xbf16> { %0 = tensor.empty() : tensor<10x3x128x64xbf16> // CHECK: error: 'ttir.index' op Invalid end index for dimension 1. Expected value in range [-3, 3], got -4. Input shape: (10, 3, 128, 64) - %1 = "ttir.index"(%arg0, %0) <{dim = 1: i32, begin = -1: i32, end = -4: i32, step = -1: i32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<10x3x128x64xbf16>, tensor<10x3x128x64xbf16>) -> tensor<10x3x128x64xbf16> + %1 = "ttir.index"(%arg0, %0) <{dim = 1: i32, begin = -1: i32, end = -4: i32, step = -1: i32}> : (tensor<10x3x128x64xbf16>, tensor<10x3x128x64xbf16>) -> tensor<10x3x128x64xbf16> return %1 : tensor<10x3x128x64xbf16> } } @@ -103,7 +102,7 @@ module attributes {} { func.func @index_negative_step_is_zero(%arg0: tensor<10x3x128x64xbf16>) -> tensor<10x3x128x64xbf16> { %0 = tensor.empty() : tensor<10x3x128x64xbf16> // CHECK: error: 'ttir.index' op Step value for dimension 1 cannot be zero - %1 = "ttir.index"(%arg0, %0) <{dim = 1: i32, begin = -1: i32, end = -3: i32, step = 0: i32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<10x3x128x64xbf16>, tensor<10x3x128x64xbf16>) -> tensor<10x3x128x64xbf16> + %1 = "ttir.index"(%arg0, %0) <{dim = 1: i32, begin = -1: i32, end = -3: i32, step = 0: i32}> : (tensor<10x3x128x64xbf16>, tensor<10x3x128x64xbf16>) -> tensor<10x3x128x64xbf16> return %1 : tensor<10x3x128x64xbf16> } } @@ -115,7 +114,7 @@ module attributes {} { func.func @index_negative_begin_greater_than_end_positive_step(%arg0: tensor<10x3x128x64xbf16>) -> tensor<10x3x128x64xbf16> { %0 = tensor.empty() : tensor<10x3x128x64xbf16> // CHECK: error: 'ttir.index' op For positive step, begin index must be less than or equal to end index for dimension 2. 
Got begin: 2, end: 0, step: 1, input shape: (10, 3, 128, 64) - %1 = "ttir.index"(%arg0, %0) <{dim = 2: i32, begin = 2: i32, end = 0: i32, step = 1: i32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<10x3x128x64xbf16>, tensor<10x3x128x64xbf16>) -> tensor<10x3x128x64xbf16> + %1 = "ttir.index"(%arg0, %0) <{dim = 2: i32, begin = 2: i32, end = 0: i32, step = 1: i32}> : (tensor<10x3x128x64xbf16>, tensor<10x3x128x64xbf16>) -> tensor<10x3x128x64xbf16> return %1 : tensor<10x3x128x64xbf16> } } @@ -127,7 +126,7 @@ module attributes {} { func.func @index_negative_begin_less_than_end_negative_step(%arg0: tensor<10x3x128x64xbf16>) -> tensor<10x3x128x64xbf16> { %0 = tensor.empty() : tensor<10x3x128x64xbf16> // CHECK: error: 'ttir.index' op For negative step, begin index must be greater than or equal to end index for dimension 3. Got begin: 0, end: 64, step: -1, input shape: (10, 3, 128, 64) - %1 = "ttir.index"(%arg0, %0) <{dim = 3: i32, begin = 0: i32, end = 64: i32, step = -1: i32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<10x3x128x64xbf16>, tensor<10x3x128x64xbf16>) -> tensor<10x3x128x64xbf16> + %1 = "ttir.index"(%arg0, %0) <{dim = 3: i32, begin = 0: i32, end = 64: i32, step = -1: i32}> : (tensor<10x3x128x64xbf16>, tensor<10x3x128x64xbf16>) -> tensor<10x3x128x64xbf16> return %1 : tensor<10x3x128x64xbf16> } } @@ -139,7 +138,7 @@ module attributes {} { func.func @index_negative_invalid_output_shape(%arg0: tensor<10x3x128x64xbf16>) -> tensor<10x3x128x32xbf16> { %0 = tensor.empty() : tensor<10x3x128x32xbf16> // CHECK: error: 'ttir.index' op Mismatch in dimension 3 of the output tensor: expected size 16, but got 32 - %1 = "ttir.index"(%arg0, %0) <{dim = 3: i32, begin = 0: i32, end = 64: i32, step = 4: i32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<10x3x128x64xbf16>, tensor<10x3x128x32xbf16>) -> tensor<10x3x128x32xbf16> + %1 = "ttir.index"(%arg0, %0) <{dim = 3: i32, begin = 0: i32, end = 64: i32, step = 4: i32}> : (tensor<10x3x128x64xbf16>, tensor<10x3x128x32xbf16>) -> tensor<10x3x128x32xbf16> return %1 : tensor<10x3x128x32xbf16> } } diff --git a/test/ttmlir/Dialect/TTIR/index/index_tests_positive.mlir b/test/ttmlir/Dialect/TTIR/index/index_tests_positive.mlir index f3ccbfeda7..4a6f39999b 100644 --- a/test/ttmlir/Dialect/TTIR/index/index_tests_positive.mlir +++ b/test/ttmlir/Dialect/TTIR/index/index_tests_positive.mlir @@ -1,59 +1,58 @@ // RUN: ttmlir-opt %s | FileCheck %s -#any_device_tile = #tt.operand_constraint module attributes {} { func.func @index_1d(%arg0: tensor<64xbf16>) -> tensor<32xbf16> { %0 = tensor.empty() : tensor<32xbf16> // CHECK: %[[C:.*]] = "ttir.index"[[C:.*]] - %1 = "ttir.index"(%arg0, %0) <{dim = 0: i32, begin = 0: i32, end = 32: i32, step = 1: i32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<64xbf16>, tensor<32xbf16>) -> tensor<32xbf16> + %1 = "ttir.index"(%arg0, %0) <{dim = 0: i32, begin = 0: i32, end = 32: i32, step = 1: i32}> : (tensor<64xbf16>, tensor<32xbf16>) -> tensor<32xbf16> return %1 : tensor<32xbf16> } func.func @index_1d_step(%arg0: tensor<64xbf16>) -> tensor<16xbf16> { %0 = tensor.empty() : tensor<16xbf16> // CHECK: %[[C:.*]] = "ttir.index"[[C:.*]] - %1 = "ttir.index"(%arg0, %0) <{dim = 0: i32, begin = 0: i32, end = 32: i32, step = 2: i32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<64xbf16>, tensor<16xbf16>) -> tensor<16xbf16> + %1 = "ttir.index"(%arg0, %0) <{dim = 0: i32, begin = 0: i32, end = 32: i32, step = 2: i32}> : 
(tensor<64xbf16>, tensor<16xbf16>) -> tensor<16xbf16> return %1 : tensor<16xbf16> } func.func @index_2d(%arg0: tensor<128x64xbf16>) -> tensor<128x32xbf16> { %0 = tensor.empty() : tensor<128x32xbf16> // CHECK: %[[C:.*]] = "ttir.index"[[C:.*]] - %1 = "ttir.index"(%arg0, %0) <{dim = 1: i32, begin = 0: i32, end = 32: i32, step = 1: i32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<128x64xbf16>, tensor<128x32xbf16>) -> tensor<128x32xbf16> + %1 = "ttir.index"(%arg0, %0) <{dim = 1: i32, begin = 0: i32, end = 32: i32, step = 1: i32}> : (tensor<128x64xbf16>, tensor<128x32xbf16>) -> tensor<128x32xbf16> return %1 : tensor<128x32xbf16> } func.func @index_2d_step(%arg0: tensor<128x64xbf16>) -> tensor<128x16xbf16> { %0 = tensor.empty() : tensor<128x16xbf16> // CHECK: %[[C:.*]] = "ttir.index"[[C:.*]] - %1 = "ttir.index"(%arg0, %0) <{dim = 1: i32, begin = 32: i32, end = 64: i32, step = 2: i32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<128x64xbf16>, tensor<128x16xbf16>) -> tensor<128x16xbf16> + %1 = "ttir.index"(%arg0, %0) <{dim = 1: i32, begin = 32: i32, end = 64: i32, step = 2: i32}> : (tensor<128x64xbf16>, tensor<128x16xbf16>) -> tensor<128x16xbf16> return %1 : tensor<128x16xbf16> } func.func @index_3d(%arg0: tensor<3x128x64xbf16>) -> tensor<3x128x32xbf16> { %0 = tensor.empty() : tensor<3x128x32xbf16> // CHECK: %[[C:.*]] = "ttir.index"[[C:.*]] - %1 = "ttir.index"(%arg0, %0) <{dim = 2: i32, begin = 0: i32, end = 32: i32, step = 1: i32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<3x128x64xbf16>, tensor<3x128x32xbf16>) -> tensor<3x128x32xbf16> + %1 = "ttir.index"(%arg0, %0) <{dim = 2: i32, begin = 0: i32, end = 32: i32, step = 1: i32}> : (tensor<3x128x64xbf16>, tensor<3x128x32xbf16>) -> tensor<3x128x32xbf16> return %1 : tensor<3x128x32xbf16> } func.func @index_3d_step(%arg0: tensor<3x128x64xbf16>) -> tensor<3x128x8xbf16> { %0 = tensor.empty() : tensor<3x128x8xbf16> // CHECK: %[[C:.*]] = "ttir.index"[[C:.*]] - %1 = "ttir.index"(%arg0, %0) <{dim = 2: i32, begin = -1: i32, end = 0: i32, step = -8: i32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<3x128x64xbf16>, tensor<3x128x8xbf16>) -> tensor<3x128x8xbf16> + %1 = "ttir.index"(%arg0, %0) <{dim = 2: i32, begin = -1: i32, end = 0: i32, step = -8: i32}> : (tensor<3x128x64xbf16>, tensor<3x128x8xbf16>) -> tensor<3x128x8xbf16> return %1 : tensor<3x128x8xbf16> } func.func @index_4d(%arg0: tensor<10x3x128x64xbf16>) -> tensor<10x3x128x32xbf16> { %0 = tensor.empty() : tensor<10x3x128x32xbf16> // CHECK: %[[C:.*]] = "ttir.index"[[C:.*]] - %1 = "ttir.index"(%arg0, %0) <{dim = 3: i32, begin = 0: i32, end = 32: i32, step = 1: i32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<10x3x128x64xbf16>, tensor<10x3x128x32xbf16>) -> tensor<10x3x128x32xbf16> + %1 = "ttir.index"(%arg0, %0) <{dim = 3: i32, begin = 0: i32, end = 32: i32, step = 1: i32}> : (tensor<10x3x128x64xbf16>, tensor<10x3x128x32xbf16>) -> tensor<10x3x128x32xbf16> return %1 : tensor<10x3x128x32xbf16> } func.func @index_4d_step(%arg0: tensor<10x3x128x64xbf16>) -> tensor<10x3x128x24xbf16> { %0 = tensor.empty() : tensor<10x3x128x24xbf16> // CHECK: %[[C:.*]] = "ttir.index"[[C:.*]] - %1 = "ttir.index"(%arg0, %0) <{dim = 3: i32, begin = 0: i32, end = -16: i32, step = 2: i32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<10x3x128x64xbf16>, tensor<10x3x128x24xbf16>) -> tensor<10x3x128x24xbf16> + %1 = "ttir.index"(%arg0, %0) <{dim = 3: i32, begin = 0: i32, end = -16: 
i32, step = 2: i32}> : (tensor<10x3x128x64xbf16>, tensor<10x3x128x24xbf16>) -> tensor<10x3x128x24xbf16> return %1 : tensor<10x3x128x24xbf16> } } diff --git a/test/ttmlir/Dialect/TTIR/linear/linear_tests_negative.mlir b/test/ttmlir/Dialect/TTIR/linear/linear_tests_negative.mlir index 522628160c..0154deff9a 100644 --- a/test/ttmlir/Dialect/TTIR/linear/linear_tests_negative.mlir +++ b/test/ttmlir/Dialect/TTIR/linear/linear_tests_negative.mlir @@ -2,193 +2,176 @@ // Negative tests for linear operation // Verify that the parsing fails if either of operands is a scalar -#any_device_tile = #tt.operand_constraint module { func.func @linear_negative_1d_1d_scalar_a(%arg0: tensor, %arg1: tensor<64xbf16>) -> tensor<1xbf16> { // CHECK: error: 'ttir.linear' op Input A must be at least a 1D tensor %0 = tensor.empty() : tensor<1xbf16> - %1 = "ttir.linear"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor, tensor<64xbf16>, tensor<1xbf16>) -> tensor<1xbf16> + %1 = "ttir.linear"(%arg0, %arg1, %0) : (tensor, tensor<64xbf16>, tensor<1xbf16>) -> tensor<1xbf16> return %1 : tensor<1xbf16> } } // ----- -#any_device_tile = #tt.operand_constraint module { func.func @linear_negative_1d_1d_scalar_b(%arg0: tensor<128xbf16>, %arg1: tensor) -> tensor<1xbf16> { // CHECK: error: 'ttir.linear' op Input B must be at least a 1D tensor %0 = tensor.empty() : tensor<1xbf16> - %1 = "ttir.linear"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<128xbf16>, tensor, tensor<1xbf16>) -> tensor<1xbf16> + %1 = "ttir.linear"(%arg0, %arg1, %0) : (tensor<128xbf16>, tensor, tensor<1xbf16>) -> tensor<1xbf16> return %1 : tensor<1xbf16> } } // ----- -#any_device_tile = #tt.operand_constraint module { func.func @linear_negative_1d_1d_scalar_bias(%arg0: tensor<128xbf16>, %arg1: tensor<128xbf16>, %bias: tensor) -> tensor<1xbf16> { // CHECK: error: 'ttir.linear' op Bias must be at least a 1D tensor %0 = tensor.empty() : tensor<1xbf16> - %1 = "ttir.linear"(%arg0, %arg1, %bias, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<128xbf16>, tensor<128xbf16>, tensor, tensor<1xbf16>) -> tensor<1xbf16> + %1 = "ttir.linear"(%arg0, %arg1, %bias, %0) : (tensor<128xbf16>, tensor<128xbf16>, tensor, tensor<1xbf16>) -> tensor<1xbf16> return %1 : tensor<1xbf16> } } // Verifty that the parsing fails if the output is a scalar // ----- -#any_device_tile = #tt.operand_constraint module { func.func @linear_negative_1d_1d_scalar_output(%arg0: tensor<128xbf16>, %arg1: tensor<128xbf16>) -> tensor { // CHECK: error: 'ttir.linear' op Scalar output is not supported, output must be at least a 1D tensor %0 = tensor.empty() : tensor - %1 = "ttir.linear"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<128xbf16>, tensor<128xbf16>, tensor) -> tensor + %1 = "ttir.linear"(%arg0, %arg1, %0) : (tensor<128xbf16>, tensor<128xbf16>, tensor) -> tensor return %1 : tensor } } // ----- -#any_device_tile = #tt.operand_constraint module { func.func @linear_negative_1d_1d_output_dimension_mismatch(%arg0: tensor<128xbf16>, %arg1: tensor<128xbf16>) -> tensor<2xbf16> { // CHECK: error: 'ttir.linear' op Scalar output must be a 1D tensor of size 1 %0 = tensor.empty() : tensor<2xbf16> - %1 = "ttir.linear"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<128xbf16>, tensor<128xbf16>, tensor<2xbf16>) -> 
tensor<2xbf16> + %1 = "ttir.linear"(%arg0, %arg1, %0) : (tensor<128xbf16>, tensor<128xbf16>, tensor<2xbf16>) -> tensor<2xbf16> return %1 : tensor<2xbf16> } } // Inner dimension mismatch tests // ----- -#any_device_tile = #tt.operand_constraint module { func.func @linear_negative_1d_1d_inner_dimension_mismatch(%arg0: tensor<128xbf16>, %arg1: tensor<64xbf16>) -> tensor<1xbf16> { // CHECK: error: 'ttir.linear' op Input A[-1](128) and B[-2](64) must have matching inner dimensions %0 = tensor.empty() : tensor<1xbf16> - %1 = "ttir.linear"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<128xbf16>, tensor<64xbf16>, tensor<1xbf16>) -> tensor<1xbf16> + %1 = "ttir.linear"(%arg0, %arg1, %0) : (tensor<128xbf16>, tensor<64xbf16>, tensor<1xbf16>) -> tensor<1xbf16> return %1 : tensor<1xbf16> } } // ----- -#any_device_tile = #tt.operand_constraint module { func.func @linear_negative_1d_2d_inner_dimension_mismatch(%arg0: tensor<64xbf16>, %arg1: tensor<128x64xbf16>) -> tensor<64xbf16> { // CHECK: error: 'ttir.linear' op Input A[-1](64) and B[-2](128) must have matching inner dimensions %0 = tensor.empty() : tensor<64xbf16> - %1 = "ttir.linear"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<64xbf16>, tensor<128x64xbf16>, tensor<64xbf16>) -> tensor<64xbf16> + %1 = "ttir.linear"(%arg0, %arg1, %0) : (tensor<64xbf16>, tensor<128x64xbf16>, tensor<64xbf16>) -> tensor<64xbf16> return %1 : tensor<64xbf16> } } // ----- -#any_device_tile = #tt.operand_constraint module { func.func @linear_negative_2d_1d_inner_dimension_mismatch(%arg0: tensor<64x128xbf16>, %arg1: tensor<64xbf16>) -> tensor<64xbf16> { // CHECK: error: 'ttir.linear' op Input A[-1](128) and B[-2](64) must have matching inner dimensions %0 = tensor.empty() : tensor<64xbf16> - %1 = "ttir.linear"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<64x128xbf16>, tensor<64xbf16>, tensor<64xbf16>) -> tensor<64xbf16> + %1 = "ttir.linear"(%arg0, %arg1, %0) : (tensor<64x128xbf16>, tensor<64xbf16>, tensor<64xbf16>) -> tensor<64xbf16> return %1 : tensor<64xbf16> } } // ----- -#any_device_tile = #tt.operand_constraint module { func.func @linear_negative_2d_2d_inner_dimension_mismatch(%arg0: tensor<64x128xbf16>, %arg1: tensor<64x128xbf16>) -> tensor<64x64xbf16> { // CHECK: error: 'ttir.linear' op Input A[-1](128) and B[-2](64) must have matching inner dimensions %0 = tensor.empty() : tensor<64x64xbf16> - %1 = "ttir.linear"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<64x128xbf16>, tensor<64x128xbf16>, tensor<64x64xbf16>) -> tensor<64x64xbf16> + %1 = "ttir.linear"(%arg0, %arg1, %0) : (tensor<64x128xbf16>, tensor<64x128xbf16>, tensor<64x64xbf16>) -> tensor<64x64xbf16> return %1 : tensor<64x64xbf16> } } // ----- -#any_device_tile = #tt.operand_constraint module { func.func @linear_negative_nd_nd_inner_dimension_mismatch(%arg0: tensor<7x64x128xbf16>, %arg1: tensor<1x64x128xbf16>) -> tensor<7x64x64xbf16> { // CHECK: error: 'ttir.linear' op Input A[-1](128) and B[-2](64) must have matching inner dimensions %0 = tensor.empty() : tensor<7x64x64xbf16> - %1 = "ttir.linear"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<7x64x128xbf16>, tensor<1x64x128xbf16>, tensor<7x64x64xbf16>) -> tensor<7x64x64xbf16> + %1 = "ttir.linear"(%arg0, %arg1, %0) : (tensor<7x64x128xbf16>, 
tensor<1x64x128xbf16>, tensor<7x64x64xbf16>) -> tensor<7x64x64xbf16> return %1 : tensor<7x64x64xbf16> } } // Batch dimension mismatch tests // ----- -#any_device_tile = #tt.operand_constraint module { func.func @linear_negative_nd_nd_same_rank_batch_broadcast_incompatible_1(%arg0: tensor<7x64x128xbf16>, %arg1: tensor<2x128x64xbf16>) -> tensor<7x64x64xbf16> { // CHECK: error: 'ttir.linear' op Batch dimensions of input A(7) and B(2) are not broadcast compatible %0 = tensor.empty() : tensor<7x64x64xbf16> - %1 = "ttir.linear"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<7x64x128xbf16>, tensor<2x128x64xbf16>, tensor<7x64x64xbf16>) -> tensor<7x64x64xbf16> + %1 = "ttir.linear"(%arg0, %arg1, %0) : (tensor<7x64x128xbf16>, tensor<2x128x64xbf16>, tensor<7x64x64xbf16>) -> tensor<7x64x64xbf16> return %1 : tensor<7x64x64xbf16> } } // ----- -#any_device_tile = #tt.operand_constraint module { func.func @linear_negative_nd_nd_same_rank_batch_broadcast_incompatible_2(%arg0: tensor<2x7x64x128xbf16>, %arg1: tensor<7x1x128x64xbf16>) -> tensor<7x7x64x64xbf16> { // CHECK: error: 'ttir.linear' op Batch dimensions of input A(2,7) and B(7,1) are not broadcast compatible %0 = tensor.empty() : tensor<7x64x64xbf16> - %1 = "ttir.linear"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<2x7x64x128xbf16>, tensor<7x1x128x64xbf16>, tensor<7x64x64xbf16>) -> tensor<7x7x64x64xbf16> + %1 = "ttir.linear"(%arg0, %arg1, %0) : (tensor<2x7x64x128xbf16>, tensor<7x1x128x64xbf16>, tensor<7x64x64xbf16>) -> tensor<7x7x64x64xbf16> return %1 : tensor<7x7x64x64xbf16> } } // ----- -#any_device_tile = #tt.operand_constraint module { func.func @linear_negative_nd_nd_different_rank_batch_broadcast_incompatible(%arg0: tensor<12x2x7x64x128xbf16>, %arg1: tensor<7x1x128x64xbf16>) -> tensor<12x7x7x64x64xbf16> { // CHECK: error: 'ttir.linear' op Batch dimensions of input A(12,2,7) and B(7,1) are not broadcast compatible %0 = tensor.empty() : tensor<12x7x7x64x64xbf16> - %1 = "ttir.linear"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<12x2x7x64x128xbf16>, tensor<7x1x128x64xbf16>, tensor<12x7x7x64x64xbf16>) -> tensor<12x7x7x64x64xbf16> + %1 = "ttir.linear"(%arg0, %arg1, %0) : (tensor<12x2x7x64x128xbf16>, tensor<7x1x128x64xbf16>, tensor<12x7x7x64x64xbf16>) -> tensor<12x7x7x64x64xbf16> return %1 : tensor<12x7x7x64x64xbf16> } } // Bias shape mismatch tests // ----- -#any_device_tile = #tt.operand_constraint module { func.func @linear_negative_matmul_bias_broadcast_incompatible(%arg0: tensor<64x128xbf16>, %arg1: tensor<128x64xbf16>, %bias: tensor<2x64xbf16>) -> tensor<64x64xbf16> { // CHECK: error: 'ttir.linear' op Bias shape(2,64) is not broadcast compatible with the matmul output shape(64,64) %0 = tensor.empty() : tensor<64x64xbf16> - %1 = "ttir.linear"(%arg0, %arg1, %bias, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<64x128xbf16>, tensor<128x64xbf16>, tensor<2x64xbf16>, tensor<64x64xbf16>) -> tensor<64x64xbf16> + %1 = "ttir.linear"(%arg0, %arg1, %bias, %0) : (tensor<64x128xbf16>, tensor<128x64xbf16>, tensor<2x64xbf16>, tensor<64x64xbf16>) -> tensor<64x64xbf16> return %1 : tensor<64x64xbf16> } } // ----- -#any_device_tile = #tt.operand_constraint module { func.func @linear_negative_nd_nd_matmul_bias_broadcast_incompatible(%arg0: tensor<3x64x128xbf16>, %arg1: tensor<128x64xbf16>, %bias: 
tensor<2x64x64xbf16>) -> tensor<3x64x64xbf16> { // CHECK: error: 'ttir.linear' op Bias shape(2,64,64) is not broadcast compatible with the matmul output shape(3,64,64) %0 = tensor.empty() : tensor<3x64x64xbf16> - %1 = "ttir.linear"(%arg0, %arg1, %bias, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<3x64x128xbf16>, tensor<128x64xbf16>, tensor<2x64x64xbf16>, tensor<3x64x64xbf16>) -> tensor<3x64x64xbf16> + %1 = "ttir.linear"(%arg0, %arg1, %bias, %0) : (tensor<3x64x128xbf16>, tensor<128x64xbf16>, tensor<2x64x64xbf16>, tensor<3x64x64xbf16>) -> tensor<3x64x64xbf16> return %1 : tensor<3x64x64xbf16> } } // Output shape mismatch tests // ----- -#any_device_tile = #tt.operand_constraint module { func.func @linear_negative_2d_2d_output_shape_mismatch(%arg0: tensor<64x128xbf16>, %arg1: tensor<128x64xbf16>) -> tensor<64xbf16> { // CHECK: error: 'ttir.linear' op Output shape rank(1) must match the expected output shape rank(2) %0 = tensor.empty() : tensor<64xbf16> - %1 = "ttir.linear"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<64x128xbf16>, tensor<128x64xbf16>, tensor<64xbf16>) -> tensor<64xbf16> + %1 = "ttir.linear"(%arg0, %arg1, %0) : (tensor<64x128xbf16>, tensor<128x64xbf16>, tensor<64xbf16>) -> tensor<64xbf16> return %1 : tensor<64xbf16> } } // ----- -#any_device_tile = #tt.operand_constraint module { func.func @linear_negative_2d_2d_output_shape_mismatch(%arg0: tensor<64x128xbf16>, %arg1: tensor<128x64xbf16>) -> tensor<64x128xbf16> { // CHECK: error: 'ttir.linear' op Output shape dimension[1](128) doesn't match the expected output shape dimension[1](64) %0 = tensor.empty() : tensor<64x128xbf16> - %1 = "ttir.linear"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<64x128xbf16>, tensor<128x64xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16> + %1 = "ttir.linear"(%arg0, %arg1, %0) : (tensor<64x128xbf16>, tensor<128x64xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16> return %1 : tensor<64x128xbf16> } } diff --git a/test/ttmlir/Dialect/TTIR/matmul/matmul_tests_negative.mlir b/test/ttmlir/Dialect/TTIR/matmul/matmul_tests_negative.mlir index 67cd1af5e5..f15379c8f7 100644 --- a/test/ttmlir/Dialect/TTIR/matmul/matmul_tests_negative.mlir +++ b/test/ttmlir/Dialect/TTIR/matmul/matmul_tests_negative.mlir @@ -2,12 +2,11 @@ // Negative tests for matmul operation // Verify that the parsing fails if either of operands is a scalar -#any_device_tile = #tt.operand_constraint module attributes {} { func.func @matmul_negative_1d_1d_inner_dimension_missmatch(%arg0: tensor, %arg1: tensor<64xbf16>) -> tensor<1xbf16> { // CHECK: error: 'ttir.matmul' op Input A must be at least a 1D tensor %0 = tensor.empty() : tensor<1xbf16> - %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor, tensor<64xbf16>, tensor<1xbf16>) -> tensor<1xbf16> + %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor, tensor<64xbf16>, tensor<1xbf16>) -> tensor<1xbf16> return %1 : tensor<1xbf16> } } @@ -18,19 +17,19 @@ module attributes {} { func.func @matmul_negative_1d_1d_inner_dimension_missmatch(%arg0: tensor<128xbf16>, %arg1: tensor) -> tensor<1xbf16> { // CHECK: error: 'ttir.matmul' op Input B must be at least a 1D tensor %0 = tensor.empty() : tensor<1xbf16> - %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : 
(tensor<128xbf16>, tensor, tensor<1xbf16>) -> tensor<1xbf16> + %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<128xbf16>, tensor, tensor<1xbf16>) -> tensor<1xbf16> return %1 : tensor<1xbf16> } } -// Verifty that the parsing fails if the output is a scalar +// Verify that the parsing fails if the output is a scalar // ----- #any_device_tile = #tt.operand_constraint module attributes {} { func.func @matmul_negative_1d_1d_inner_dimension_missmatch(%arg0: tensor<128xbf16>, %arg1: tensor<128xbf16>) -> tensor { // CHECK: error: 'ttir.matmul' op Scalar output is not supported, output must be at least a 1D tensor %0 = tensor.empty() : tensor - %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<128xbf16>, tensor<128xbf16>, tensor) -> tensor + %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<128xbf16>, tensor<128xbf16>, tensor) -> tensor return %1 : tensor } } @@ -41,7 +40,7 @@ module attributes {} { func.func @matmul_negative_1d_1d_inner_dimension_missmatch(%arg0: tensor<128xbf16>, %arg1: tensor<128xbf16>) -> tensor<2xbf16> { // CHECK: error: 'ttir.matmul' op Scalar output must be a 1D tensor of size 1 %0 = tensor.empty() : tensor<2xbf16> - %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<128xbf16>, tensor<128xbf16>, tensor<2xbf16>) -> tensor<2xbf16> + %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<128xbf16>, tensor<128xbf16>, tensor<2xbf16>) -> tensor<2xbf16> return %1 : tensor<2xbf16> } } @@ -53,7 +52,7 @@ module attributes {} { func.func @matmul_negative_1d_1d_inner_dimension_missmatch(%arg0: tensor<128xbf16>, %arg1: tensor<64xbf16>) -> tensor<1xbf16> { // CHECK: error: 'ttir.matmul' op Input A[-1](128) and B[-2](64) must have matching inner dimensions %0 = tensor.empty() : tensor<1xbf16> - %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<128xbf16>, tensor<64xbf16>, tensor<1xbf16>) -> tensor<1xbf16> + %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<128xbf16>, tensor<64xbf16>, tensor<1xbf16>) -> tensor<1xbf16> return %1 : tensor<1xbf16> } } @@ -64,7 +63,7 @@ module attributes {} { func.func @matmul_negative_1d_2d_inner_dimension_missmatch(%arg0: tensor<64xbf16>, %arg1: tensor<128x64xbf16>) -> tensor<64xbf16> { // CHECK: error: 'ttir.matmul' op Input A[-1](64) and B[-2](128) must have matching inner dimensions %0 = tensor.empty() : tensor<64xbf16> - %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<64xbf16>, tensor<128x64xbf16>, tensor<64xbf16>) -> tensor<64xbf16> + %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<64xbf16>, tensor<128x64xbf16>, tensor<64xbf16>) -> tensor<64xbf16> return %1 : tensor<64xbf16> } } @@ -75,7 +74,7 @@ module attributes {} { func.func @matmul_negative_2d_1d_inner_dimension_missmatch(%arg0: tensor<64x128xbf16>, %arg1: tensor<64xbf16>) -> tensor<64xbf16> { // CHECK: error: 'ttir.matmul' op Input A[-1](128) and B[-2](64) must have matching inner dimensions %0 = tensor.empty() : tensor<64xbf16> - %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<64x128xbf16>, tensor<64xbf16>, tensor<64xbf16>) -> tensor<64xbf16> + %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<64x128xbf16>, tensor<64xbf16>, tensor<64xbf16>) -> tensor<64xbf16> return %1 : tensor<64xbf16> } } @@ -86,7 +85,7 @@ module attributes {} { 
func.func @matmul_negative_2d_2d_inner_dimension_missmatch(%arg0: tensor<64x128xbf16>, %arg1: tensor<64x128xbf16>) -> tensor<64x64xbf16> {
// CHECK: error: 'ttir.matmul' op Input A[-1](128) and B[-2](64) must have matching inner dimensions
%0 = tensor.empty() : tensor<64x64xbf16>
- %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<64x128xbf16>, tensor<64x128xbf16>, tensor<64x64xbf16>) -> tensor<64x64xbf16>
+ %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<64x128xbf16>, tensor<64x128xbf16>, tensor<64x64xbf16>) -> tensor<64x64xbf16>
return %1 : tensor<64x64xbf16>
}
}
@@ -97,7 +96,7 @@ module attributes {} {
func.func @matmul_negative_nd_nd_inner_dimension_missmatch(%arg0: tensor<7x64x128xbf16>, %arg1: tensor<1x64x128xbf16>) -> tensor<7x64x64xbf16> {
// CHECK: error: 'ttir.matmul' op Input A[-1](128) and B[-2](64) must have matching inner dimensions
%0 = tensor.empty() : tensor<7x64x64xbf16>
- %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<7x64x128xbf16>, tensor<1x64x128xbf16>, tensor<7x64x64xbf16>) -> tensor<7x64x64xbf16>
+ %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<7x64x128xbf16>, tensor<1x64x128xbf16>, tensor<7x64x64xbf16>) -> tensor<7x64x64xbf16>
return %1 : tensor<7x64x64xbf16>
}
}
@@ -109,7 +108,7 @@ module attributes {} {
func.func @matmul_negative_nd_nd_same_rank_batch_broadcast_incompatible_1(%arg0: tensor<7x64x128xbf16>, %arg1: tensor<2x128x64xbf16>) -> tensor<7x64x64xbf16> {
// CHECK: error: 'ttir.matmul' op Batch dimensions of input A(7) and B(2) are not broadcast compatible
%0 = tensor.empty() : tensor<7x64x64xbf16>
- %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<7x64x128xbf16>, tensor<2x128x64xbf16>, tensor<7x64x64xbf16>) -> tensor<7x64x64xbf16>
+ %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<7x64x128xbf16>, tensor<2x128x64xbf16>, tensor<7x64x64xbf16>) -> tensor<7x64x64xbf16>
return %1 : tensor<7x64x64xbf16>
}
}
@@ -120,7 +119,7 @@ module attributes {} {
func.func @matmul_negative_nd_nd_same_rank_batch_broadcast_incompatible_2(%arg0: tensor<2x7x64x128xbf16>, %arg1: tensor<7x1x128x64xbf16>) -> tensor<7x7x64x64xbf16> {
// CHECK: error: 'ttir.matmul' op Batch dimensions of input A(2,7) and B(7,1) are not broadcast compatible
%0 = tensor.empty() : tensor<7x64x64xbf16>
- %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<2x7x64x128xbf16>, tensor<7x1x128x64xbf16>, tensor<7x64x64xbf16>) -> tensor<7x7x64x64xbf16>
+ %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<2x7x64x128xbf16>, tensor<7x1x128x64xbf16>, tensor<7x64x64xbf16>) -> tensor<7x7x64x64xbf16>
return %1 : tensor<7x7x64x64xbf16>
}
}
@@ -131,7 +130,7 @@ module attributes {} {
func.func @matmul_negative_nd_nd_different_rank_batch_broadcast_incompatible(%arg0: tensor<12x2x7x64x128xbf16>, %arg1: tensor<7x1x128x64xbf16>) -> tensor<12x7x7x64x64xbf16> {
// CHECK: error: 'ttir.matmul' op Batch dimensions of input A(12,2,7) and B(7,1) are not broadcast compatible
%0 = tensor.empty() : tensor<12x7x7x64x64xbf16>
- %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<12x2x7x64x128xbf16>, tensor<7x1x128x64xbf16>, tensor<12x7x7x64x64xbf16>) -> tensor<12x7x7x64x64xbf16>
+ %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<12x2x7x64x128xbf16>, tensor<7x1x128x64xbf16>, tensor<12x7x7x64x64xbf16>) -> tensor<12x7x7x64x64xbf16>
return %1 : tensor<12x7x7x64x64xbf16>
}
}
@@ -143,7 +142,7 @@ module attributes {} {
func.func @matmul_negative_2d_2d_inner_dimension_missmatch(%arg0: tensor<64x128xbf16>, %arg1: tensor<128x64xbf16>) -> tensor<64xbf16> {
// CHECK: error: 'ttir.matmul' op Output shape rank(1) must match the expected output shape rank(2)
%0 = tensor.empty() : tensor<64xbf16>
- %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<64x128xbf16>, tensor<128x64xbf16>, tensor<64xbf16>) -> tensor<64xbf16>
+ %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<64x128xbf16>, tensor<128x64xbf16>, tensor<64xbf16>) -> tensor<64xbf16>
return %1 : tensor<64xbf16>
}
}
@@ -154,7 +153,7 @@ module attributes {} {
func.func @matmul_negative_2d_2d_inner_dimension_missmatch(%arg0: tensor<64x128xbf16>, %arg1: tensor<128x64xbf16>) -> tensor<64x128xbf16> {
// CHECK: error: 'ttir.matmul' op Output shape dimension[1](128) doesn't match the expected output shape dimension[1](64)
%0 = tensor.empty() : tensor<64x128xbf16>
- %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<64x128xbf16>, tensor<128x64xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
+ %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<64x128xbf16>, tensor<128x64xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
return %1 : tensor<64x128xbf16>
}
}
diff --git a/test/ttmlir/Dialect/TTIR/matmul/matmul_tests_positive.mlir b/test/ttmlir/Dialect/TTIR/matmul/matmul_tests_positive.mlir
index 3823edbc3d..cfc77c0fbd 100644
--- a/test/ttmlir/Dialect/TTIR/matmul/matmul_tests_positive.mlir
+++ b/test/ttmlir/Dialect/TTIR/matmul/matmul_tests_positive.mlir
@@ -1,59 +1,58 @@
// RUN: ttmlir-opt %s | FileCheck %s
-#any_device_tile = #tt.operand_constraint<any_device_tile>
module attributes {} {
func.func @matmul_1d_1d(%arg0: tensor<128xbf16>, %arg1: tensor<128xbf16>) -> tensor<1xbf16> {
%0 = tensor.empty() : tensor<1xbf16>
// CHECK: %[[C:.*]] = "ttir.matmul"[[C:.*]]
- %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<128xbf16>, tensor<128xbf16>, tensor<1xbf16>) -> tensor<1xbf16>
+ %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<128xbf16>, tensor<128xbf16>, tensor<1xbf16>) -> tensor<1xbf16>
return %1 : tensor<1xbf16>
}
func.func @matmul_1d_2d(%arg0: tensor<128xbf16>, %arg1: tensor<128x64xbf16>) -> tensor<64xbf16> {
%0 = tensor.empty() : tensor<64xbf16>
// CHECK: %[[C:.*]] = "ttir.matmul"[[C:.*]]
- %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<128xbf16>, tensor<128x64xbf16>, tensor<64xbf16>) -> tensor<64xbf16>
+ %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<128xbf16>, tensor<128x64xbf16>, tensor<64xbf16>) -> tensor<64xbf16>
return %1 : tensor<64xbf16>
}
func.func @matmul_2d_1d(%arg0: tensor<64x128xbf16>, %arg1: tensor<128xbf16>) -> tensor<64xbf16> {
%0 = tensor.empty() : tensor<64xbf16>
// CHECK: %[[C:.*]] = "ttir.matmul"[[C:.*]]
- %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<64x128xbf16>, tensor<128xbf16>, tensor<64xbf16>) -> tensor<64xbf16>
+ %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<64x128xbf16>, tensor<128xbf16>, tensor<64xbf16>) -> tensor<64xbf16>
return %1 : tensor<64xbf16>
}
func.func @matmul_2d_2d(%arg0: tensor<64x128xbf16>, %arg1: tensor<128x64xbf16>) -> tensor<64x64xbf16> {
%0 = tensor.empty() : tensor<64x64xbf16>
// CHECK: %[[C:.*]] = "ttir.matmul"[[C:.*]]
- %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<64x128xbf16>, tensor<128x64xbf16>, tensor<64x64xbf16>) -> tensor<64x64xbf16>
+ %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<64x128xbf16>, tensor<128x64xbf16>, tensor<64x64xbf16>) -> tensor<64x64xbf16>
return %1 : tensor<64x64xbf16>
}
func.func @matmul_1d_nd(%arg0: tensor<128xbf16>, %arg1: tensor<12x7x128x64xbf16>) -> tensor<12x7x64xbf16> {
%0 = tensor.empty() : tensor<12x7x64xbf16>
// CHECK: %[[C:.*]] = "ttir.matmul"[[C:.*]]
- %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<128xbf16>, tensor<12x7x128x64xbf16>, tensor<12x7x64xbf16>) -> tensor<12x7x64xbf16>
+ %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<128xbf16>, tensor<12x7x128x64xbf16>, tensor<12x7x64xbf16>) -> tensor<12x7x64xbf16>
return %1 : tensor<12x7x64xbf16>
}
func.func @matmul_nd_1d(%arg0: tensor<12x7x128x64xbf16>, %arg1: tensor<64xbf16>) -> tensor<12x7x128xbf16> {
%0 = tensor.empty() : tensor<12x7x128xbf16>
// CHECK: %[[C:.*]] = "ttir.matmul"[[C:.*]]
- %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<12x7x128x64xbf16>, tensor<64xbf16>, tensor<12x7x128xbf16>) -> tensor<12x7x128xbf16>
+ %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<12x7x128x64xbf16>, tensor<64xbf16>, tensor<12x7x128xbf16>) -> tensor<12x7x128xbf16>
return %1 : tensor<12x7x128xbf16>
}
func.func @matmul_2d_nd(%arg0: tensor<64x128xbf16>, %arg1: tensor<12x7x128x64xbf16>) -> tensor<12x7x64x64xbf16> {
%0 = tensor.empty() : tensor<12x7x64x64xbf16>
// CHECK: %[[C:.*]] = "ttir.matmul"[[C:.*]]
- %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<64x128xbf16>, tensor<12x7x128x64xbf16>, tensor<12x7x64x64xbf16>) -> tensor<12x7x64x64xbf16>
+ %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<64x128xbf16>, tensor<12x7x128x64xbf16>, tensor<12x7x64x64xbf16>) -> tensor<12x7x64x64xbf16>
return %1 : tensor<12x7x64x64xbf16>
}
func.func @matmul_nd_2d(%arg0: tensor<12x7x128x64xbf16>, %arg1: tensor<64x128xbf16>) -> tensor<12x7x128x128xbf16> {
%0 = tensor.empty() : tensor<12x7x128x128xbf16>
// CHECK: %[[C:.*]] = "ttir.matmul"[[C:.*]]
- %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<12x7x128x64xbf16>, tensor<64x128xbf16>, tensor<12x7x128x128xbf16>) -> tensor<12x7x128x128xbf16>
+ %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<12x7x128x64xbf16>, tensor<64x128xbf16>, tensor<12x7x128x128xbf16>) -> tensor<12x7x128x128xbf16>
return %1 : tensor<12x7x128x128xbf16>
}
@@ -61,28 +60,28 @@ module attributes {} {
func.func @matmul_nd_nd_same_rank_same_dims(%arg0: tensor<7x64x128xbf16>, %arg1: tensor<7x128x64xbf16>) -> tensor<7x64x64xbf16> {
%0 = tensor.empty() : tensor<7x64x64xbf16>
// CHECK: %[[C:.*]] = "ttir.matmul"[[C:.*]]
- %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<7x64x128xbf16>, tensor<7x128x64xbf16>, tensor<7x64x64xbf16>) -> tensor<7x64x64xbf16>
+ %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<7x64x128xbf16>, tensor<7x128x64xbf16>, tensor<7x64x64xbf16>) -> tensor<7x64x64xbf16>
return %1 : tensor<7x64x64xbf16>
}
func.func @matmul_nd_nd_same_rank_broadcastable_dims_1(%arg0: tensor<7x64x128xbf16>, %arg1: tensor<1x128x64xbf16>) -> tensor<7x64x64xbf16> {
%0 = tensor.empty() : tensor<7x64x64xbf16>
// CHECK: %[[C:.*]] = "ttir.matmul"[[C:.*]]
- %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<7x64x128xbf16>, tensor<1x128x64xbf16>, tensor<7x64x64xbf16>) -> tensor<7x64x64xbf16>
+ %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<7x64x128xbf16>, tensor<1x128x64xbf16>, tensor<7x64x64xbf16>) -> tensor<7x64x64xbf16>
return %1 : tensor<7x64x64xbf16>
}
func.func @matmul_nd_nd_same_rank_broadcastable_dims_2(%arg0: tensor<1x7x64x128xbf16>, %arg1: tensor<7x1x128x64xbf16>) -> tensor<7x7x64x64xbf16> {
%0 = tensor.empty() : tensor<7x7x64x64xbf16>
// CHECK: %[[C:.*]] = "ttir.matmul"[[C:.*]]
- %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<1x7x64x128xbf16>, tensor<7x1x128x64xbf16>, tensor<7x7x64x64xbf16>) -> tensor<7x7x64x64xbf16>
+ %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<1x7x64x128xbf16>, tensor<7x1x128x64xbf16>, tensor<7x7x64x64xbf16>) -> tensor<7x7x64x64xbf16>
return %1 : tensor<7x7x64x64xbf16>
}
func.func @matmul_nd_nd_different_rank_broadcastable_dims_2(%arg0: tensor<12x1x7x64x128xbf16>, %arg1: tensor<7x1x128x64xbf16>) -> tensor<12x7x7x64x64xbf16> {
%0 = tensor.empty() : tensor<12x7x7x64x64xbf16>
// CHECK: %[[C:.*]] = "ttir.matmul"[[C:.*]]
- %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<12x1x7x64x128xbf16>, tensor<7x1x128x64xbf16>, tensor<12x7x7x64x64xbf16>) -> tensor<12x7x7x64x64xbf16>
+ %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<12x1x7x64x128xbf16>, tensor<7x1x128x64xbf16>, tensor<12x7x7x64x64xbf16>) -> tensor<12x7x7x64x64xbf16>
return %1 : tensor<12x7x7x64x64xbf16>
}
}
diff --git a/test/ttmlir/Dialect/TTIR/slice/slice_tests_negative.mlir b/test/ttmlir/Dialect/TTIR/slice/slice_tests_negative.mlir
index ba640992f1..db444258ec 100644
--- a/test/ttmlir/Dialect/TTIR/slice/slice_tests_negative.mlir
+++ b/test/ttmlir/Dialect/TTIR/slice/slice_tests_negative.mlir
@@ -2,12 +2,11 @@
// Negative tests for slice operation
// Verify that the parsing fails if the begins attribute is not a 3D tensor
-#any_device_tile = #tt.operand_constraint<any_device_tile>
module attributes {} {
func.func @slice_negative_invalid_shape(%arg0: tensor<bf16>) -> tensor<1xbf16> {
%0 = tensor.empty() : tensor<1xbf16>
// CHECK: error: 'ttir.slice' op Input must be at least a 1D tensor
- %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32], ends = [0: i32], step = [1: i32], operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<bf16>, tensor<1xbf16>) -> tensor<1xbf16>
+ %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32], ends = [0: i32], step = [1: i32]}> : (tensor<bf16>, tensor<1xbf16>) -> tensor<1xbf16>
return %1 : tensor<1xbf16>
}
}
@@ -19,7 +18,7 @@ module attributes {} {
func.func @slice_negative_invalid_begins(%arg0: tensor<3x128x64xbf16>) -> tensor<1x64x64xbf16> {
%0 = tensor.empty() : tensor<1x64x64xbf16>
// CHECK: error: 'ttir.slice' op Begins, ends, and step attributes must have the same number of elements as the input tensor rank
- %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 0: i32], ends = [0: i32, 63: i32, 63: i32], step = [1: i32, 1: i32, 1: i32], operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<3x128x64xbf16>, tensor<1x64x64xbf16>) -> tensor<1x64x64xbf16>
+ %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 0: i32], ends = [0: i32, 63: i32, 63: i32], step = [1: i32, 1: i32, 1: i32]}> : (tensor<3x128x64xbf16>, tensor<1x64x64xbf16>) -> tensor<1x64x64xbf16>
return %1 : tensor<1x64x64xbf16>
}
}
@@ -31,7 +30,7 @@ module attributes {} {
func.func @slice_negative_invalid_ends(%arg0: tensor<3x128x64xbf16>) -> tensor<1x64x64xbf16> {
%0 = tensor.empty() : tensor<1x64x64xbf16>
// CHECK: error: 'ttir.slice' op Begins, ends, and step attributes must have the same number of elements as the input tensor rank
- %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 0: i32, 0: i32], ends = [0: i32, 63: i32], step = [1: i32, 1: i32, 1: i32], operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<3x128x64xbf16>, tensor<1x64x64xbf16>) -> tensor<1x64x64xbf16>
+ %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 0: i32, 0: i32], ends = [0: i32, 63: i32], step = [1: i32, 1: i32, 1: i32]}> : (tensor<3x128x64xbf16>, tensor<1x64x64xbf16>) -> tensor<1x64x64xbf16>
return %1 : tensor<1x64x64xbf16>
}
}
@@ -43,7 +42,7 @@ module attributes {} {
func.func @slice_negative_invalid_step(%arg0: tensor<3x128x64xbf16>) -> tensor<1x64x64xbf16> {
%0 = tensor.empty() : tensor<1x64x64xbf16>
// CHECK: error: 'ttir.slice' op Begins, ends, and step attributes must have the same number of elements as the input tensor rank
- %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 0: i32, 0: i32], ends = [0: i32, 63: i32, 63: i32], step = [1: i32, 1: i32], operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<3x128x64xbf16>, tensor<1x64x64xbf16>) -> tensor<1x64x64xbf16>
+ %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 0: i32, 0: i32], ends = [0: i32, 63: i32, 63: i32], step = [1: i32, 1: i32]}> : (tensor<3x128x64xbf16>, tensor<1x64x64xbf16>) -> tensor<1x64x64xbf16>
return %1 : tensor<1x64x64xbf16>
}
}
@@ -55,7 +54,7 @@ module attributes {} {
func.func @slice_negative_invalid_output_datatype(%arg0: tensor<3x128x64xbf16>) -> tensor<1x64x64xf32> {
%0 = tensor.empty() : tensor<1x64x64xf32>
// CHECK: error: 'ttir.slice' op Output tensor must have the same element type as the input tensor
- %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 0: i32, 0: i32], ends = [0: i32, 63: i32, 63: i32], step = [1: i32, 1: i32, 1: i32], operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<3x128x64xbf16>, tensor<1x64x64xf32>) -> tensor<1x64x64xf32>
+ %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 0: i32, 0: i32], ends = [0: i32, 63: i32, 63: i32], step = [1: i32, 1: i32, 1: i32]}> : (tensor<3x128x64xbf16>, tensor<1x64x64xf32>) -> tensor<1x64x64xf32>
return %1 : tensor<1x64x64xf32>
}
}
@@ -67,7 +66,7 @@ module attributes {} {
func.func @slice_negative_input_output_rank_missmatch(%arg0: tensor<3x128x64xbf16>) -> tensor<1x1x64x64xbf16> {
%0 = tensor.empty() : tensor<1x1x64x64xbf16>
// CHECK: error: 'ttir.slice' op Output tensor must have the same rank as the input tensor
- %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 0: i32, 0: i32], ends = [0: i32, 63: i32, 63: i32], step = [1: i32, 1: i32, 1: i32], operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<3x128x64xbf16>, tensor<1x1x64x64xbf16>) -> tensor<1x1x64x64xbf16>
+ %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 0: i32, 0: i32], ends = [0: i32, 63: i32, 63: i32], step = [1: i32, 1: i32, 1: i32]}> : (tensor<3x128x64xbf16>, tensor<1x1x64x64xbf16>) -> tensor<1x1x64x64xbf16>
return %1 : tensor<1x1x64x64xbf16>
}
}
@@ -79,7 +78,7 @@ module attributes {} {
func.func @slice_negative_invalid_begin_positive(%arg0: tensor<10x3x128x64xbf16>) -> tensor<4x1x16x8xbf16> {
%0 = tensor.empty() : tensor<4x1x16x8xbf16>
// CHECK: error: 'ttir.slice' op Invalid begin index for dimension 2. Expected value in range [-128, 128), got 128. Input shape: (10, 3, 128, 64)
- %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 0: i32, 128: i32, 32: i32], ends = [10: i32, 3: i32, 128: i32, 64: i32], step = [3: i32, 3: i32, 8: i32, 4: i32], operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<10x3x128x64xbf16>, tensor<4x1x16x8xbf16>) -> tensor<4x1x16x8xbf16>
+ %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 0: i32, 128: i32, 32: i32], ends = [10: i32, 3: i32, 128: i32, 64: i32], step = [3: i32, 3: i32, 8: i32, 4: i32]}> : (tensor<10x3x128x64xbf16>, tensor<4x1x16x8xbf16>) -> tensor<4x1x16x8xbf16>
return %1 : tensor<4x1x16x8xbf16>
}
}
@@ -91,7 +90,7 @@ module attributes {} {
func.func @slice_negative_invalid_begin_negative(%arg0: tensor<10x3x128x64xbf16>) -> tensor<4x1x16x8xbf16> {
%0 = tensor.empty() : tensor<4x1x16x8xbf16>
// CHECK: error: 'ttir.slice' op Invalid begin index for dimension 2. Expected value in range [-128, 128), got -129. Input shape: (10, 3, 128, 64)
- %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 0: i32, -129: i32, 32: i32], ends = [10: i32, 3: i32, 128: i32, 64: i32], step = [3: i32, 3: i32, 8: i32, 4: i32], operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<10x3x128x64xbf16>, tensor<4x1x16x8xbf16>) -> tensor<4x1x16x8xbf16>
+ %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 0: i32, -129: i32, 32: i32], ends = [10: i32, 3: i32, 128: i32, 64: i32], step = [3: i32, 3: i32, 8: i32, 4: i32]}> : (tensor<10x3x128x64xbf16>, tensor<4x1x16x8xbf16>) -> tensor<4x1x16x8xbf16>
return %1 : tensor<4x1x16x8xbf16>
}
}
@@ -103,7 +102,7 @@ module attributes {} {
func.func @slice_negative_invalid_end_positive(%arg0: tensor<10x3x128x64xbf16>) -> tensor<4x1x16x8xbf16> {
%0 = tensor.empty() : tensor<4x1x16x8xbf16>
// CHECK: error: 'ttir.slice' op Invalid end index for dimension 3. Expected value in range [-64, 64], got 65. Input shape: (10, 3, 128, 64)
- %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 0: i32, 0: i32, 32: i32], ends = [10: i32, 3: i32, 128: i32, 65: i32], step = [3: i32, 3: i32, 8: i32, 4: i32], operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<10x3x128x64xbf16>, tensor<4x1x16x8xbf16>) -> tensor<4x1x16x8xbf16>
+ %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 0: i32, 0: i32, 32: i32], ends = [10: i32, 3: i32, 128: i32, 65: i32], step = [3: i32, 3: i32, 8: i32, 4: i32]}> : (tensor<10x3x128x64xbf16>, tensor<4x1x16x8xbf16>) -> tensor<4x1x16x8xbf16>
return %1 : tensor<4x1x16x8xbf16>
}
}
@@ -115,7 +114,7 @@ module attributes {} {
func.func @slice_negative_invalid_end_negative(%arg0: tensor<10x3x128x64xbf16>) -> tensor<4x1x16x8xbf16> {
%0 = tensor.empty() : tensor<4x1x16x8xbf16>
// CHECK: error: 'ttir.slice' op Invalid end index for dimension 3. Expected value in range [-64, 64], got -65. Input shape: (10, 3, 128, 64)
- %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 0: i32, 0: i32, 32: i32], ends = [10: i32, 3: i32, 128: i32, -65: i32], step = [3: i32, 3: i32, 8: i32, 4: i32], operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<10x3x128x64xbf16>, tensor<4x1x16x8xbf16>) -> tensor<4x1x16x8xbf16>
+ %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 0: i32, 0: i32, 32: i32], ends = [10: i32, 3: i32, 128: i32, -65: i32], step = [3: i32, 3: i32, 8: i32, 4: i32]}> : (tensor<10x3x128x64xbf16>, tensor<4x1x16x8xbf16>) -> tensor<4x1x16x8xbf16>
return %1 : tensor<4x1x16x8xbf16>
}
}
@@ -127,7 +126,7 @@ module attributes {} {
func.func @slice_negative_step_is_zero(%arg0: tensor<10x3x128x64xbf16>) -> tensor<4x1x16x8xbf16> {
%0 = tensor.empty() : tensor<4x1x16x8xbf16>
// CHECK: error: 'ttir.slice' op Step value for dimension 3 cannot be zero
- %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 0: i32, 0: i32, 32: i32], ends = [10: i32, 3: i32, 128: i32, 64: i32], step = [3: i32, 3: i32, 8: i32, 0: i32], operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<10x3x128x64xbf16>, tensor<4x1x16x8xbf16>) -> tensor<4x1x16x8xbf16>
+ %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 0: i32, 0: i32, 32: i32], ends = [10: i32, 3: i32, 128: i32, 64: i32], step = [3: i32, 3: i32, 8: i32, 0: i32]}> : (tensor<10x3x128x64xbf16>, tensor<4x1x16x8xbf16>) -> tensor<4x1x16x8xbf16>
return %1 : tensor<4x1x16x8xbf16>
}
}
@@ -139,7 +138,7 @@ module attributes {} {
func.func @slice_negative_begin_greater_than_end_positive_step(%arg0: tensor<10x3x128x64xbf16>) -> tensor<4x1x16x8xbf16> {
%0 = tensor.empty() : tensor<4x1x16x8xbf16>
// CHECK: error: 'ttir.slice' op For positive step, begin index must be less than or equal to end index for dimension 0. Got begin: 9, end: 0, step: 3, input shape: (10, 3, 128, 64)
- %1 = "ttir.slice"(%arg0, %0) <{begins = [9: i32, 0: i32, 0: i32, 32: i32], ends = [0: i32, 3: i32, 32: i32, 64: i32], step = [3: i32, 3: i32, 8: i32, 4: i32], operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<10x3x128x64xbf16>, tensor<4x1x16x8xbf16>) -> tensor<4x1x16x8xbf16>
+ %1 = "ttir.slice"(%arg0, %0) <{begins = [9: i32, 0: i32, 0: i32, 32: i32], ends = [0: i32, 3: i32, 32: i32, 64: i32], step = [3: i32, 3: i32, 8: i32, 4: i32]}> : (tensor<10x3x128x64xbf16>, tensor<4x1x16x8xbf16>) -> tensor<4x1x16x8xbf16>
return %1 : tensor<4x1x16x8xbf16>
}
}
@@ -150,7 +149,7 @@ module attributes {} {
func.func @slice_negative_begin_greater_than_end_positive_step(%arg0: tensor<10x3x128x64xbf16>) -> tensor<4x1x8x8xbf16> {
%0 = tensor.empty() : tensor<4x1x8x8xbf16>
// CHECK: error: 'ttir.slice' op For positive step, begin index must be less than or equal to end index for dimension 2. Got begin: 96 (-32), end: 32 (-96), step: 8, input shape: (10, 3, 128, 64)
- %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 0: i32, -32: i32, 32: i32], ends = [10: i32, 3: i32, -96: i32, 64: i32], step = [3: i32, 3: i32, 8: i32, 4: i32], operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<10x3x128x64xbf16>, tensor<4x1x8x8xbf16>) -> tensor<4x1x8x8xbf16>
+ %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 0: i32, -32: i32, 32: i32], ends = [10: i32, 3: i32, -96: i32, 64: i32], step = [3: i32, 3: i32, 8: i32, 4: i32]}> : (tensor<10x3x128x64xbf16>, tensor<4x1x8x8xbf16>) -> tensor<4x1x8x8xbf16>
return %1 : tensor<4x1x8x8xbf16>
}
}
@@ -162,7 +161,7 @@ module attributes {} {
func.func @slice_negative_begin_less_than_end_negative_step(%arg0: tensor<10x3x128x64xbf16>) -> tensor<4x1x16x8xbf16> {
%0 = tensor.empty() : tensor<4x1x16x8xbf16>
// CHECK: error: 'ttir.slice' op For negative step, begin index must be greater than or equal to end index for dimension 1. Got begin: 0 (-3), end: 2 (-1), step: -3, input shape: (10, 3, 128, 64)
- %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, -3: i32, 0: i32, 32: i32], ends = [10: i32, -1: i32, 32: i32, 128: i32], step = [3: i32, -3: i32, 8: i32, 8: i32], operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<10x3x128x64xbf16>, tensor<4x1x16x8xbf16>) -> tensor<4x1x16x8xbf16>
+ %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, -3: i32, 0: i32, 32: i32], ends = [10: i32, -1: i32, 32: i32, 128: i32], step = [3: i32, -3: i32, 8: i32, 8: i32]}> : (tensor<10x3x128x64xbf16>, tensor<4x1x16x8xbf16>) -> tensor<4x1x16x8xbf16>
return %1 : tensor<4x1x16x8xbf16>
}
}
@@ -173,7 +172,7 @@ module attributes {} {
func.func @slice_negative_begin_less_than_end_negative_step(%arg0: tensor<10x3x128x64xbf16>) -> tensor<5x1x16x8xbf16> {
%0 = tensor.empty() : tensor<5x1x16x8xbf16>
// CHECK: error: 'ttir.slice' op For negative step, begin index must be greater than or equal to end index for dimension 0. Got begin: 0, end: 10, step: -2, input shape: (10, 3, 128, 64)
- %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 0: i32, 0: i32, 32: i32], ends = [10: i32, 3: i32, 128: i32, 64: i32], step = [-2: i32, 3: i32, 8: i32, 4: i32], operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<10x3x128x64xbf16>, tensor<5x1x16x8xbf16>) -> tensor<5x1x16x8xbf16>
+ %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 0: i32, 0: i32, 32: i32], ends = [10: i32, 3: i32, 128: i32, 64: i32], step = [-2: i32, 3: i32, 8: i32, 4: i32]}> : (tensor<10x3x128x64xbf16>, tensor<5x1x16x8xbf16>) -> tensor<5x1x16x8xbf16>
return %1 : tensor<5x1x16x8xbf16>
}
}
@@ -185,7 +184,7 @@ module attributes {} {
func.func @slice_negative_invalid_output_shape(%arg0: tensor<10x3x128x64xbf16>) -> tensor<4x1x16x16xbf16> {
%0 = tensor.empty() : tensor<4x1x16x16xbf16>
// CHECK: error: 'ttir.slice' op Mismatch in dimension 3 of the output tensor: expected size 8, but got 16
- %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 0: i32, 0: i32, 32: i32], ends = [10: i32, 3: i32, 128: i32, 64: i32], step = [3: i32, 3: i32, 8: i32, 4: i32], operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<10x3x128x64xbf16>, tensor<4x1x16x16xbf16>) -> tensor<4x1x16x16xbf16>
+ %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 0: i32, 0: i32, 32: i32], ends = [10: i32, 3: i32, 128: i32, 64: i32], step = [3: i32, 3: i32, 8: i32, 4: i32]}> : (tensor<10x3x128x64xbf16>, tensor<4x1x16x16xbf16>) -> tensor<4x1x16x16xbf16>
return %1 : tensor<4x1x16x16xbf16>
}
}
diff --git a/test/ttmlir/Dialect/TTIR/slice/slice_tests_positive.mlir b/test/ttmlir/Dialect/TTIR/slice/slice_tests_positive.mlir
index 1ff464b3e2..af5fa4cc9e 100644
--- a/test/ttmlir/Dialect/TTIR/slice/slice_tests_positive.mlir
+++ b/test/ttmlir/Dialect/TTIR/slice/slice_tests_positive.mlir
@@ -1,59 +1,58 @@
// RUN: ttmlir-opt %s | FileCheck %s
-#any_device_tile = #tt.operand_constraint<any_device_tile>
module attributes {} {
func.func @slice_1d(%arg0: tensor<64xbf16>) -> tensor<32xbf16> {
%0 = tensor.empty() : tensor<32xbf16>
// CHECK: %[[C:.*]] = "ttir.slice"[[C:.*]]
- %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32], ends = [32: i32], step = [1: i32], operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<64xbf16>, tensor<32xbf16>) -> tensor<32xbf16>
+ %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32], ends = [32: i32], step = [1: i32]}> : (tensor<64xbf16>, tensor<32xbf16>) -> tensor<32xbf16>
return %1 : tensor<32xbf16>
}
func.func @slice_1d_step(%arg0: tensor<64xbf16>) -> tensor<16xbf16> {
%0 = tensor.empty() : tensor<16xbf16>
// CHECK: %[[C:.*]] = "ttir.slice"[[C:.*]]
- %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32], ends = [64: i32], step = [4: i32], operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<64xbf16>, tensor<16xbf16>) -> tensor<16xbf16>
+ %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32], ends = [64: i32], step = [4: i32]}> : (tensor<64xbf16>, tensor<16xbf16>) -> tensor<16xbf16>
return %1 : tensor<16xbf16>
}
func.func @slice_2d(%arg0: tensor<128x64xbf16>) -> tensor<64x32xbf16> {
%0 = tensor.empty() : tensor<64x32xbf16>
// CHECK: %[[C:.*]] = "ttir.slice"[[C:.*]]
- %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 0: i32], ends = [64: i32, 32: i32], step = [1: i32, 1: i32], operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<128x64xbf16>, tensor<64x32xbf16>) -> tensor<64x32xbf16>
+ %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 0: i32], ends = [64: i32, 32: i32], step = [1: i32, 1: i32]}> : (tensor<128x64xbf16>, tensor<64x32xbf16>) -> tensor<64x32xbf16>
return %1 : tensor<64x32xbf16>
}
func.func @slice_2d_step(%arg0: tensor<128x64xbf16>) -> tensor<32x16xbf16> {
%0 = tensor.empty() : tensor<32x16xbf16>
// CHECK: %[[C:.*]] = "ttir.slice"[[C:.*]]
- %1 = "ttir.slice"(%arg0, %0) <{begins = [64: i32, 0: i32], ends = [128: i32, 64: i32], step = [2: i32, 4: i32], operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<128x64xbf16>, tensor<32x16xbf16>) -> tensor<32x16xbf16>
+ %1 = "ttir.slice"(%arg0, %0) <{begins = [64: i32, 0: i32], ends = [128: i32, 64: i32], step = [2: i32, 4: i32]}> : (tensor<128x64xbf16>, tensor<32x16xbf16>) -> tensor<32x16xbf16>
return %1 : tensor<32x16xbf16>
}
func.func @slice_3d(%arg0: tensor<3x128x64xbf16>) -> tensor<1x64x64xbf16> {
%0 = tensor.empty() : tensor<1x64x64xbf16>
// CHECK: %[[C:.*]] = "ttir.slice"[[C:.*]]
- %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 0: i32, 0: i32], ends = [1: i32, 64: i32, 64: i32], step = [1: i32, 1: i32, 1: i32], operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<3x128x64xbf16>, tensor<1x64x64xbf16>) -> tensor<1x64x64xbf16>
+ %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 0: i32, 0: i32], ends = [1: i32, 64: i32, 64: i32], step = [1: i32, 1: i32, 1: i32]}> : (tensor<3x128x64xbf16>, tensor<1x64x64xbf16>) -> tensor<1x64x64xbf16>
return %1 : tensor<1x64x64xbf16>
}
func.func @slice_3d_step(%arg0: tensor<3x128x64xbf16>) -> tensor<2x32x32xbf16> {
%0 = tensor.empty() : tensor<2x32x32xbf16>
// CHECK: %[[C:.*]] = "ttir.slice"[[C:.*]]
- %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 0: i32, 32: i32], ends = [3: i32, 128: i32, 64: i32], step = [2: i32, 4: i32, 1: i32], operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<3x128x64xbf16>, tensor<2x32x32xbf16>) -> tensor<2x32x32xbf16>
+ %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 0: i32, 32: i32], ends = [3: i32, 128: i32, 64: i32], step = [2: i32, 4: i32, 1: i32]}> : (tensor<3x128x64xbf16>, tensor<2x32x32xbf16>) -> tensor<2x32x32xbf16>
return %1 : tensor<2x32x32xbf16>
}
func.func @slice_4d(%arg0: tensor<10x3x128x64xbf16>) -> tensor<5x3x32x64xbf16> {
%0 = tensor.empty() : tensor<5x3x32x64xbf16>
// CHECK: %[[C:.*]] = "ttir.slice"[[C:.*]]
- %1 = "ttir.slice"(%arg0, %0) <{begins = [3: i32, 0: i32, 32: i32, 0: i32], ends = [8: i32, 3: i32, 64: i32, 64: i32], step = [1: i32, 1: i32, 1: i32, 1: i32], operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<10x3x128x64xbf16>, tensor<5x3x32x64xbf16>) -> tensor<5x3x32x64xbf16>
+ %1 = "ttir.slice"(%arg0, %0) <{begins = [3: i32, 0: i32, 32: i32, 0: i32], ends = [8: i32, 3: i32, 64: i32, 64: i32], step = [1: i32, 1: i32, 1: i32, 1: i32]}> : (tensor<10x3x128x64xbf16>, tensor<5x3x32x64xbf16>) -> tensor<5x3x32x64xbf16>
return %1 : tensor<5x3x32x64xbf16>
}
func.func @slice_4d_step(%arg0: tensor<10x3x128x64xbf16>) -> tensor<4x1x16x32xbf16> {
%0 = tensor.empty() : tensor<4x1x16x32xbf16>
// CHECK: %[[C:.*]] = "ttir.slice"[[C:.*]]
- %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 2: i32, 0: i32, -64: i32], ends = [10: i32, 0: i32, -1: i32, -1: i32], step = [3: i32, -2: i32, 8: i32, 2: i32], operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<10x3x128x64xbf16>, tensor<4x1x16x32xbf16>) -> tensor<4x1x16x32xbf16>
+ %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 2: i32, 0: i32, -64: i32], ends = [10: i32, 0: i32, -1: i32, -1: i32], step = [3: i32, -2: i32, 8: i32, 2: i32]}> : (tensor<10x3x128x64xbf16>, tensor<4x1x16x32xbf16>) -> tensor<4x1x16x32xbf16>
return %1 : tensor<4x1x16x32xbf16>
}
}
diff --git a/test/ttmlir/Dialect/TTIR/test_allocate.mlir b/test/ttmlir/Dialect/TTIR/test_allocate.mlir
index 5888cf3f62..4acbda2f95 100644
--- a/test/ttmlir/Dialect/TTIR/test_allocate.mlir
+++ b/test/ttmlir/Dialect/TTIR/test_allocate.mlir
@@ -1,5 +1,4 @@
// RUN: ttmlir-opt --ttir-load-system-desc --ttir-implicit-device --ttir-allocate %s | FileCheck %s
-#any_device = #tt.operand_constraint<any_device>
#l1_ = #tt.memory_space<l1>
#layout = #tt.metal_layout<(d0, d1) -> (d0, d1), undef, <1x1>, memref<64x128xf32, #l1_>, interleaved>
module attributes {} {
@@ -7,7 +6,7 @@ module attributes {} {
// CHECK: %[[C:.*]] = "ttir.alloc"[[C:.*]]
// CHECK-NOT: %[[C:.*]] = tensor.empty() : tensor<64x128xf32>
%0 = tensor.empty() : tensor<64x128xf32, #layout>
- %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32, #layout>, tensor<64x128xf32, #layout>, tensor<64x128xf32, #layout>) -> tensor<64x128xf32, #layout>
+ %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32, #layout>, tensor<64x128xf32, #layout>, tensor<64x128xf32, #layout>) -> tensor<64x128xf32, #layout>
return %1 : tensor<64x128xf32, #layout>
}
}
diff --git a/test/ttmlir/Dialect/TTIR/test_generic.mlir b/test/ttmlir/Dialect/TTIR/test_generic.mlir
index ff50eef4be..0899746e19 100644
--- a/test/ttmlir/Dialect/TTIR/test_generic.mlir
+++ b/test/ttmlir/Dialect/TTIR/test_generic.mlir
@@ -1,10 +1,9 @@
// RUN: ttmlir-opt --ttir-generic %s | FileCheck %s
-#any_device = #tt.operand_constraint<any_device>
module attributes {} {
func.func @forward(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
%0 = tensor.empty() : tensor<64x128xf32>
// CHECK: %[[C:.*]] = "ttir.generic"[[C:.*]]
- %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+ %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
return %1 : tensor<64x128xf32>
}
}
diff --git a/test/ttmlir/Dialect/TTIR/test_layout.mlir b/test/ttmlir/Dialect/TTIR/test_layout.mlir
index 3253f6d23c..232c69504f 100644
--- a/test/ttmlir/Dialect/TTIR/test_layout.mlir
+++ b/test/ttmlir/Dialect/TTIR/test_layout.mlir
@@ -1,10 +1,9 @@
// RUN: ttmlir-opt --ttir-load-system-desc --ttir-implicit-device --ttir-layout %s | FileCheck %s
-#any_device = #tt.operand_constraint<any_device>
module attributes {} {
func.func @forward(%arg0: tensor<8x64x128xf32>, %arg1: tensor<8x64x128xf32>) -> tensor<8x64x128xf32> {
// CHECK: %[[C:.*]] = tensor.empty() : tensor<8x64x128xf32, #layout>
%0 = tensor.empty() : tensor<8x64x128xf32>
- %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<8x64x128xf32>, tensor<8x64x128xf32>, tensor<8x64x128xf32>) -> tensor<8x64x128xf32>
+ %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<8x64x128xf32>, tensor<8x64x128xf32>, tensor<8x64x128xf32>) -> tensor<8x64x128xf32>
return %1 : tensor<8x64x128xf32>
}
diff --git a/test/ttmlir/Dialect/TTNN/ccl/all_gather.mlir b/test/ttmlir/Dialect/TTNN/ccl/all_gather.mlir
index cb2a7ad2b3..6f0cca7726 100644
--- a/test/ttmlir/Dialect/TTNN/ccl/all_gather.mlir
+++ b/test/ttmlir/Dialect/TTNN/ccl/all_gather.mlir
@@ -1,10 +1,9 @@
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s |
FileCheck %s -#any_device = #tt.operand_constraint module attributes {} { func.func @forward(%arg0: tensor<1x1x32x32xbf16>) -> tensor<1x1x32x128xbf16> { %0 = tensor.empty() : tensor<1x1x32x128xbf16> // CHECK: %[[C:.*]] = "ttnn.all_gather"[[C:.*]] - %1 = "ttir.all_gather"(%arg0, %0) <{dim = 3 : si32, operand_constraints = [#any_device, #any_device]}> : (tensor<1x1x32x32xbf16>, tensor<1x1x32x128xbf16>) -> tensor<1x1x32x128xbf16> + %1 = "ttir.all_gather"(%arg0, %0) <{dim = 3 : si32}> : (tensor<1x1x32x32xbf16>, tensor<1x1x32x128xbf16>) -> tensor<1x1x32x128xbf16> return %1 : tensor<1x1x32x128xbf16> } } diff --git a/test/ttmlir/Dialect/TTNN/ccl/all_gather_negative.mlir b/test/ttmlir/Dialect/TTNN/ccl/all_gather_negative.mlir index d3f6ac3dae..3e5dec812d 100644 --- a/test/ttmlir/Dialect/TTNN/ccl/all_gather_negative.mlir +++ b/test/ttmlir/Dialect/TTNN/ccl/all_gather_negative.mlir @@ -1,10 +1,9 @@ // RUN: not ttmlir-opt --ttir-to-ttnn-backend-pipeline %s 2>&1 | FileCheck %s // CHECK: error: 'ttir.all_gather' op Invalid dimension for all gather op -#any_device = #tt.operand_constraint module attributes {} { func.func @forward(%arg0: tensor<1x1x32x32xbf16>) -> tensor<1x1x32x128xbf16> { %0 = tensor.empty() : tensor<1x1x32x128xbf16> - %1 = "ttir.all_gather"(%arg0, %0) <{dim = 4 : si32, operand_constraints = [#any_device, #any_device]}> : (tensor<1x1x32x32xbf16>, tensor<1x1x32x128xbf16>) -> tensor<1x1x32x128xbf16> + %1 = "ttir.all_gather"(%arg0, %0) <{dim = 4 : si32}> : (tensor<1x1x32x32xbf16>, tensor<1x1x32x128xbf16>) -> tensor<1x1x32x128xbf16> return %1 : tensor<1x1x32x128xbf16> } } diff --git a/test/ttmlir/Dialect/TTNN/concat/concat_dim_oob.mlir b/test/ttmlir/Dialect/TTNN/concat/concat_dim_oob.mlir index 5b93d0c505..1e5c1fcb60 100644 --- a/test/ttmlir/Dialect/TTNN/concat/concat_dim_oob.mlir +++ b/test/ttmlir/Dialect/TTNN/concat/concat_dim_oob.mlir @@ -1,10 +1,9 @@ // RUN: not ttmlir-opt --ttir-to-ttnn-backend-pipeline %s 2>&1 | FileCheck %s // CHECK: error: 'ttir.concat' op Invalid dimension 2 for concatenation. 
-#any_device = #tt.operand_constraint
 module attributes {} {
   func.func @forward(%arg0: tensor<32x32xf32>, %arg1: tensor<32x64xf32>) -> tensor<32x96xf32> {
     %0 = tensor.empty() : tensor<32x96xf32>
-    %1 = "ttir.concat"(%arg0, %arg1, %0) <{dim = 2 : si32, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<32x32xf32>, tensor<32x64xf32>, tensor<32x96xf32>) -> tensor<32x96xf32>
+    %1 = "ttir.concat"(%arg0, %arg1, %0) <{dim = 2 : si32}> : (tensor<32x32xf32>, tensor<32x64xf32>, tensor<32x96xf32>) -> tensor<32x96xf32>
     return %1 : tensor<32x96xf32>
   }
 }
diff --git a/test/ttmlir/Dialect/TTNN/concat/concat_multiple_tensors.mlir b/test/ttmlir/Dialect/TTNN/concat/concat_multiple_tensors.mlir
index 30bf6926bd..1695bcd970 100644
--- a/test/ttmlir/Dialect/TTNN/concat/concat_multiple_tensors.mlir
+++ b/test/ttmlir/Dialect/TTNN/concat/concat_multiple_tensors.mlir
@@ -1,5 +1,4 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
-#any_device = #tt.operand_constraint
 module attributes {} {
   func.func @forward() -> tensor<32x224xf32> {
     // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
@@ -11,7 +10,7 @@ module attributes {} {
     // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
     %3 = tensor.empty() : tensor<32x224xf32>
     // CHECK: %[[C:.*]] = "ttnn.concat"[[C:.*]]
-    %4 = "ttir.concat"(%0, %1, %2, %3) <{dim = 1 : si32, operand_constraints = [#any_device, #any_device, #any_device, #any_device]}> : (tensor<32x32xf32>, tensor<32x64xf32>, tensor<32x128xf32>, tensor<32x224xf32>) -> tensor<32x224xf32>
+    %4 = "ttir.concat"(%0, %1, %2, %3) <{dim = 1 : si32}> : (tensor<32x32xf32>, tensor<32x64xf32>, tensor<32x128xf32>, tensor<32x224xf32>) -> tensor<32x224xf32>
     return %4 : tensor<32x224xf32>
   }
 }
diff --git a/test/ttmlir/Dialect/TTNN/concat/concat_negative_dim.mlir b/test/ttmlir/Dialect/TTNN/concat/concat_negative_dim.mlir
index f8a4f2db3d..b026f77324 100644
--- a/test/ttmlir/Dialect/TTNN/concat/concat_negative_dim.mlir
+++ b/test/ttmlir/Dialect/TTNN/concat/concat_negative_dim.mlir
@@ -1,11 +1,10 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
-#any_device = #tt.operand_constraint
 module attributes {} {
   func.func @forward(%arg0: tensor<32x32xf32>, %arg1: tensor<32x64xf32>) -> tensor<32x96xf32> {
     // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
     %0 = tensor.empty() : tensor<32x96xf32>
     // CHECK: %[[C:.*]] = "ttnn.concat"[[C:.*]]
-    %1 = "ttir.concat"(%arg0, %arg1, %0) <{dim = -1 : si32, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<32x32xf32>, tensor<32x64xf32>, tensor<32x96xf32>) -> tensor<32x96xf32>
+    %1 = "ttir.concat"(%arg0, %arg1, %0) <{dim = -1 : si32}> : (tensor<32x32xf32>, tensor<32x64xf32>, tensor<32x96xf32>) -> tensor<32x96xf32>
     return %1 : tensor<32x96xf32>
   }
 }
diff --git a/test/ttmlir/Dialect/TTNN/concat/concat_negative_dim_oob.mlir b/test/ttmlir/Dialect/TTNN/concat/concat_negative_dim_oob.mlir
index 5d3a6fbd6d..4aebe9fdec 100644
--- a/test/ttmlir/Dialect/TTNN/concat/concat_negative_dim_oob.mlir
+++ b/test/ttmlir/Dialect/TTNN/concat/concat_negative_dim_oob.mlir
@@ -1,10 +1,9 @@
 // RUN: not ttmlir-opt --ttir-to-ttnn-backend-pipeline %s 2>&1 | FileCheck %s
 // CHECK: error: 'ttir.concat' op Invalid dimension -3 for concatenation.
-#any_device = #tt.operand_constraint
 module attributes {} {
   func.func @forward(%arg0: tensor<32x32xf32>, %arg1: tensor<32x64xf32>) -> tensor<32x96xf32> {
     %0 = tensor.empty() : tensor<32x96xf32>
-    %1 = "ttir.concat"(%arg0, %arg1, %0) <{dim = -3 : si32, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<32x32xf32>, tensor<32x64xf32>, tensor<32x96xf32>) -> tensor<32x96xf32>
+    %1 = "ttir.concat"(%arg0, %arg1, %0) <{dim = -3 : si32}> : (tensor<32x32xf32>, tensor<32x64xf32>, tensor<32x96xf32>) -> tensor<32x96xf32>
     return %1 : tensor<32x96xf32>
   }
 }
diff --git a/test/ttmlir/Dialect/TTNN/concat/simple_concat.mlir b/test/ttmlir/Dialect/TTNN/concat/simple_concat.mlir
index 0199d95ff7..1acb8252b1 100644
--- a/test/ttmlir/Dialect/TTNN/concat/simple_concat.mlir
+++ b/test/ttmlir/Dialect/TTNN/concat/simple_concat.mlir
@@ -1,11 +1,10 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
-#any_device = #tt.operand_constraint
 module attributes {} {
   func.func @forward(%arg0: tensor<32x32xf32>, %arg1: tensor<32x64xf32>) -> tensor<32x96xf32> {
     // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
     %0 = tensor.empty() : tensor<32x96xf32>
     // CHECK: %[[C:.*]] = "ttnn.concat"[[C:.*]]
-    %1 = "ttir.concat"(%arg0, %arg1, %0) <{dim = 1 : si32, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<32x32xf32>, tensor<32x64xf32>, tensor<32x96xf32>) -> tensor<32x96xf32>
+    %1 = "ttir.concat"(%arg0, %arg1, %0) <{dim = 1 : si32}> : (tensor<32x32xf32>, tensor<32x64xf32>, tensor<32x96xf32>) -> tensor<32x96xf32>
     return %1 : tensor<32x96xf32>
   }
 }
diff --git a/test/ttmlir/Dialect/TTNN/convolution/complex_conv_channel_first.mlir b/test/ttmlir/Dialect/TTNN/convolution/complex_conv_channel_first.mlir
index 5428c0dec1..f4633b7a8b 100644
--- a/test/ttmlir/Dialect/TTNN/convolution/complex_conv_channel_first.mlir
+++ b/test/ttmlir/Dialect/TTNN/convolution/complex_conv_channel_first.mlir
@@ -1,5 +1,4 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
-#any_device_tile = #tt.operand_constraint
 module @jit_convolution {
   func.func public @test_NCHW_IOHW_to_NHWC_OIHW_conv2d(%arg0: tensor<1x3x100x100xbf16>, %arg1: tensor<7x3x3x3xbf16>) -> tensor<1x7x100x100xbf16> {
     %0 = tensor.empty() : tensor<1x7x100x100xbf16>
@@ -21,7 +20,6 @@ module @jit_convolution {
       >,
       feature_group_count = 1 : i64,
      input_dilation = array,
-      operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile],
       padding = array,
       weight_dilation = array,
       window_reversal = array,
diff --git a/test/ttmlir/Dialect/TTNN/eltwise/binary/logical_and/simple_and.mlir b/test/ttmlir/Dialect/TTNN/eltwise/binary/logical_and/simple_and.mlir
index eca8b639d1..e6400a7529 100644
--- a/test/ttmlir/Dialect/TTNN/eltwise/binary/logical_and/simple_and.mlir
+++ b/test/ttmlir/Dialect/TTNN/eltwise/binary/logical_and/simple_and.mlir
@@ -1,11 +1,9 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
-
-#any_device = #tt.operand_constraint
 module attributes {} {
   func.func @logical_and(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
     %0 = tensor.empty() : tensor<64x128xf32>
     // CHECK: {{.*}} = "ttnn.empty"{{.*}}
-    %1 = "ttir.logical_and"(%arg0, %arg1, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+    %1 = "ttir.logical_and"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
     // CHECK: %[[C:.*]] = "ttnn.logical_and"
     // CHECK-SAME: tensor<64x128xf32,
     // CHECK-SAME: tensor<64x128xf32,
diff --git a/test/ttmlir/Dialect/TTNN/eltwise/binary/logical_or/simple_or.mlir b/test/ttmlir/Dialect/TTNN/eltwise/binary/logical_or/simple_or.mlir
index 7a51b71599..bb35140eb2 100644
--- a/test/ttmlir/Dialect/TTNN/eltwise/binary/logical_or/simple_or.mlir
+++ b/test/ttmlir/Dialect/TTNN/eltwise/binary/logical_or/simple_or.mlir
@@ -1,11 +1,9 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
-
-#any_device = #tt.operand_constraint
 module attributes {} {
   func.func @logical_or(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
     %0 = tensor.empty() : tensor<64x128xf32>
     // CHECK: {{.*}} = "ttnn.empty"{{.*}}
-    %1 = "ttir.logical_or"(%arg0, %arg1, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+    %1 = "ttir.logical_or"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
     // CHECK: %[[C:.*]] = "ttnn.logical_or"
     // CHECK-SAME: tensor<64x128xf32,
     // CHECK-SAME: tensor<64x128xf32,
diff --git a/test/ttmlir/Dialect/TTNN/eltwise/binary/minimum/simple_minimum.mlir b/test/ttmlir/Dialect/TTNN/eltwise/binary/minimum/simple_minimum.mlir
index 8ebdfe0a47..7b3576cb7f 100644
--- a/test/ttmlir/Dialect/TTNN/eltwise/binary/minimum/simple_minimum.mlir
+++ b/test/ttmlir/Dialect/TTNN/eltwise/binary/minimum/simple_minimum.mlir
@@ -1,11 +1,10 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
-#any_device = #tt.operand_constraint
 module attributes {} {
   func.func @forward(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
     // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
     %0 = tensor.empty() : tensor<64x128xf32>
     // CHECK: %[[C:.*]] = "ttnn.minimum"[[C:.*]]
-    %1 = "ttir.minimum"(%arg0, %arg1, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+    %1 = "ttir.minimum"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
     return %1 : tensor<64x128xf32>
   }
 }
diff --git a/test/ttmlir/Dialect/TTNN/eltwise/binary/remainder/simple_remainder.mlir b/test/ttmlir/Dialect/TTNN/eltwise/binary/remainder/simple_remainder.mlir
index 281dccfdd2..67d283c078 100644
--- a/test/ttmlir/Dialect/TTNN/eltwise/binary/remainder/simple_remainder.mlir
+++ b/test/ttmlir/Dialect/TTNN/eltwise/binary/remainder/simple_remainder.mlir
@@ -1,10 +1,9 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
-#any_device = #tt.operand_constraint
 module attributes {} {
   func.func @remainder(%arg0: tensor<32x32xf32>, %arg1: tensor<32x32xf32>) -> tensor<32x32xf32> {
     %0 = tensor.empty() : tensor<32x32xf32>
     // CHECK: %[[EMPTY:.*]] = "ttnn.empty"{{.*}} -> tensor<32x32xf32, {{.*}}
-    %1 = "ttir.remainder"(%arg0, %arg1, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<32x32xf32>, tensor<32x32xf32>, tensor<32x32xf32>) -> tensor<32x32xf32>
+    %1 = "ttir.remainder"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<32x32xf32>, tensor<32x32xf32>, tensor<32x32xf32>) -> tensor<32x32xf32>
     // CHECK: %[[REM:[0-9]+]] = "ttnn.remainder"({{.*}}, {{.*}}, %[[EMPTY]]){{.*}} -> tensor<32x32xf32, {{.*}}
     return %1 : tensor<32x32xf32>
     // CHECK: return {{.*}} : tensor<32x32xf32, {{.*}}
diff --git a/test/ttmlir/Dialect/TTNN/eltwise/operand_broadcasts.mlir b/test/ttmlir/Dialect/TTNN/eltwise/operand_broadcasts.mlir
index d5c1173b9e..9b5df3852d 100644
--- a/test/ttmlir/Dialect/TTNN/eltwise/operand_broadcasts.mlir
+++ b/test/ttmlir/Dialect/TTNN/eltwise/operand_broadcasts.mlir
@@ -1,11 +1,10 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
-#any_device = #tt.operand_constraint
 module attributes {} {
   func.func @bcast_one_dim(%arg0: tensor<2x64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<2x64x128xf32> {
     // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
     %0 = tensor.empty() : tensor<2x64x128xf32>
     // CHECK: %[[C:.*]] = "ttnn.multiply"[[C:.*]]
-    %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<2x64x128xf32>, tensor<64x128xf32>, tensor<2x64x128xf32>) -> tensor<2x64x128xf32>
+    %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<2x64x128xf32>, tensor<64x128xf32>, tensor<2x64x128xf32>) -> tensor<2x64x128xf32>
     return %1 : tensor<2x64x128xf32>
   }
@@ -13,7 +12,7 @@ module attributes {} {
     // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
     %0 = tensor.empty() : tensor<17x16x15x14xf32>
     // CHECK: %[[C:.*]] = "ttnn.multiply"[[C:.*]]
-    %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<17x16x15x14xf32>, tensor<15x1xf32>, tensor<17x16x15x14xf32>) -> tensor<17x16x15x14xf32>
+    %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<17x16x15x14xf32>, tensor<15x1xf32>, tensor<17x16x15x14xf32>) -> tensor<17x16x15x14xf32>
     return %1 : tensor<17x16x15x14xf32>
   }
diff --git a/test/ttmlir/Dialect/TTNN/eltwise/operand_broadcasts_negative.mlir b/test/ttmlir/Dialect/TTNN/eltwise/operand_broadcasts_negative.mlir
index 1d26cb7f96..d0e89b66c2 100644
--- a/test/ttmlir/Dialect/TTNN/eltwise/operand_broadcasts_negative.mlir
+++ b/test/ttmlir/Dialect/TTNN/eltwise/operand_broadcasts_negative.mlir
@@ -1,10 +1,9 @@
 // RUN: not ttmlir-opt --ttir-load-system-desc --ttir-layout --convert-ttir-to-ttnn %s 2>&1 | FileCheck %s
 // CHECK: error: 'ttir.multiply' op Operands are not broadcast compatible
-#any_device = #tt.operand_constraint
 module attributes {} {
   func.func @bcast_one_dim(%arg0: tensor<2x64x128xf32>, %arg1: tensor<4x64x128xf32>) -> tensor<4x64x128xf32> {
     %0 = tensor.empty() : tensor<4x64x128xf32>
-    %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<2x64x128xf32>, tensor<4x64x128xf32>, tensor<4x64x128xf32>) -> tensor<4x64x128xf32>
+    %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<2x64x128xf32>, tensor<4x64x128xf32>, tensor<4x64x128xf32>) -> tensor<4x64x128xf32>
     return %1 : tensor<4x64x128xf32>
   }
 }
diff --git a/test/ttmlir/Dialect/TTNN/eltwise/unary/abs/simple_abs.mlir b/test/ttmlir/Dialect/TTNN/eltwise/unary/abs/simple_abs.mlir
index e1b2862f7f..eceb8d058b 100644
--- a/test/ttmlir/Dialect/TTNN/eltwise/unary/abs/simple_abs.mlir
+++ b/test/ttmlir/Dialect/TTNN/eltwise/unary/abs/simple_abs.mlir
@@ -1,11 +1,10 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
-#any_device = #tt.operand_constraint
 module attributes {} {
   func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
     // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
     %0 = tensor.empty() : tensor<64x128xf32>
     // CHECK: %[[C:.*]] = "ttnn.abs"[[C:.*]]
-    %1 = "ttir.abs"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+    %1 = "ttir.abs"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
     return %1 : tensor<64x128xf32>
   }
 }
diff --git a/test/ttmlir/Dialect/TTNN/eltwise/unary/cast/simple_cast.mlir b/test/ttmlir/Dialect/TTNN/eltwise/unary/cast/simple_cast.mlir
index 4f7e09543f..6e50381078 100644
--- a/test/ttmlir/Dialect/TTNN/eltwise/unary/cast/simple_cast.mlir
+++ b/test/ttmlir/Dialect/TTNN/eltwise/unary/cast/simple_cast.mlir
@@ -3,7 +3,7 @@ module attributes {} {
   func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xbf16> {
     %0 = tensor.empty() : tensor<64x128xbf16>
-    %1 = "ttir.typecast"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
+    %1 = "ttir.typecast"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
     // CHECK: %[[C:.*]] = "ttnn.typecast"
     // CHECK-SAME: tensor<64x128xf32,
     // CHECK-SAME: tensor<64x128xbf16,
diff --git a/test/ttmlir/Dialect/TTNN/eltwise/unary/cbrt/simple_cbrt.mlir b/test/ttmlir/Dialect/TTNN/eltwise/unary/cbrt/simple_cbrt.mlir
index bb7254f91b..bdb78fed83 100644
--- a/test/ttmlir/Dialect/TTNN/eltwise/unary/cbrt/simple_cbrt.mlir
+++ b/test/ttmlir/Dialect/TTNN/eltwise/unary/cbrt/simple_cbrt.mlir
@@ -1,11 +1,10 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
-#any_device = #tt.operand_constraint
 module attributes {} {
   func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
     // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
     %0 = tensor.empty() : tensor<64x128xf32>
     // CHECK: %[[C:.*]] = "ttnn.cbrt"[[C:.*]]
-    %1 = "ttir.cbrt"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+    %1 = "ttir.cbrt"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
     return %1 : tensor<64x128xf32>
   }
 }
diff --git a/test/ttmlir/Dialect/TTNN/eltwise/unary/ceil/simple_ceil.mlir b/test/ttmlir/Dialect/TTNN/eltwise/unary/ceil/simple_ceil.mlir
index fb90280e3a..d0250d5cd8 100644
--- a/test/ttmlir/Dialect/TTNN/eltwise/unary/ceil/simple_ceil.mlir
+++ b/test/ttmlir/Dialect/TTNN/eltwise/unary/ceil/simple_ceil.mlir
@@ -1,11 +1,10 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
-#any_device = #tt.operand_constraint
 module attributes {} {
   func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
     // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
     %0 = tensor.empty() : tensor<64x128xf32>
     // CHECK: %[[C:.*]] = "ttnn.ceil"[[C:.*]]
-    %1 = "ttir.ceil"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+    %1 = "ttir.ceil"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
     return %1 : tensor<64x128xf32>
   }
 }
diff --git a/test/ttmlir/Dialect/TTNN/eltwise/unary/cos/simple_cos.mlir b/test/ttmlir/Dialect/TTNN/eltwise/unary/cos/simple_cos.mlir
index 2e53a4f3fc..e990aa59c1 100644
--- a/test/ttmlir/Dialect/TTNN/eltwise/unary/cos/simple_cos.mlir
+++ b/test/ttmlir/Dialect/TTNN/eltwise/unary/cos/simple_cos.mlir
@@ -1,11 +1,10 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
-#any_device = #tt.operand_constraint
 module attributes {} {
   func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
     // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
     %0 = tensor.empty() : tensor<64x128xf32>
     // CHECK: %[[C:.*]] = "ttnn.cos"[[C:.*]]
-    %1 = "ttir.cos"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+    %1 = "ttir.cos"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
     return %1 : tensor<64x128xf32>
   }
 }
diff --git a/test/ttmlir/Dialect/TTNN/eltwise/unary/expm1/simple_expm1.mlir b/test/ttmlir/Dialect/TTNN/eltwise/unary/expm1/simple_expm1.mlir
index 59a7b2a18d..bbcbf5dd6f 100644
--- a/test/ttmlir/Dialect/TTNN/eltwise/unary/expm1/simple_expm1.mlir
+++ b/test/ttmlir/Dialect/TTNN/eltwise/unary/expm1/simple_expm1.mlir
@@ -1,10 +1,9 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
-#any_device = #tt.operand_constraint
 module attributes {} {
   func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
     %0 = tensor.empty() : tensor<64x128xf32>
     // CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}}) <{dtype = {{.*}}, layout = {{.*}}, memory_config = {{.*}}, <{{.*}}>>, shape = #ttnn.shape<[[TENSOR_SHAPE:[0-9]+x[0-9]+]]>}>
-    %1 = "ttir.expm1"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+    %1 = "ttir.expm1"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
     // CHECK: %{{[0-9]+}} = "ttnn.expm1"(%{{[0-9]+}}, [[VAL0]]) <{operandSegmentSizes = array}> : (tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>, tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}) -> tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>
     return %1 : tensor<64x128xf32>
     // CHECK: return %{{[0-9]+}} : tensor<[[TENSOR_SHAPE]]xf32, {{.*}}>
diff --git a/test/ttmlir/Dialect/TTNN/eltwise/unary/floor/simple_floor.mlir b/test/ttmlir/Dialect/TTNN/eltwise/unary/floor/simple_floor.mlir
index 820e429ecf..fd418fbda5 100644
--- a/test/ttmlir/Dialect/TTNN/eltwise/unary/floor/simple_floor.mlir
+++ b/test/ttmlir/Dialect/TTNN/eltwise/unary/floor/simple_floor.mlir
@@ -1,5 +1,4 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
-#any_device = #tt.operand_constraint
 module attributes {} {
   func.func @floor(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
     // CHECK: %{{[0-9]+}} = "ttnn.empty"
@@ -9,7 +8,7 @@ module attributes {} {
     // CHECK-SAME: [[TENSOR]]
     // CHECK-SAME: [[TENSOR]]
     // CHECK-SAME: -> [[TENSOR]]
-    %1 = "ttir.floor"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+    %1 = "ttir.floor"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
     return %1 : tensor<64x128xf32>
   }
 }
diff --git a/test/ttmlir/Dialect/TTNN/eltwise/unary/isfinite/simple_isfinite.mlir b/test/ttmlir/Dialect/TTNN/eltwise/unary/isfinite/simple_isfinite.mlir
index 3089da6692..7745adf067 100644
--- a/test/ttmlir/Dialect/TTNN/eltwise/unary/isfinite/simple_isfinite.mlir
+++ b/test/ttmlir/Dialect/TTNN/eltwise/unary/isfinite/simple_isfinite.mlir
@@ -1,5 +1,4 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
-#any_device = #tt.operand_constraint
 module attributes {} {
   func.func @is_finite(%arg0: tensor<64x128xbf16>) -> tensor<64x128xbf16> {
     // CHECK: %[[C:.*]] = "ttnn.empty"
@@ -9,7 +8,7 @@ module attributes {} {
     // CHECK-SAME: tensor<64x128xbf16,
     // CHECK-SAME: [[TENSOR]]
     // CHECK-SAME: -> [[TENSOR]]
-    %1 = "ttir.isfinite"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
+    %1 = "ttir.isfinite"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<64x128xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
     return %1 : tensor<64x128xbf16>
   }
 }
diff --git a/test/ttmlir/Dialect/TTNN/eltwise/unary/log1p/simple_log1p.mlir b/test/ttmlir/Dialect/TTNN/eltwise/unary/log1p/simple_log1p.mlir
index b65aa3c21f..4258e639cb 100644
--- a/test/ttmlir/Dialect/TTNN/eltwise/unary/log1p/simple_log1p.mlir
+++ b/test/ttmlir/Dialect/TTNN/eltwise/unary/log1p/simple_log1p.mlir
@@ -1,10 +1,9 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
-#any_device = #tt.operand_constraint
 module attributes {} {
   func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
     %0 = tensor.empty() : tensor<64x128xf32>
     // CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}}) <{dtype = {{.*}}, layout = {{.*}}, memory_config = {{.*}}, <{{.*}}>>, shape = #ttnn.shape<[[TENSOR_SHAPE:[0-9]+x[0-9]+]]>}>
-    %1 = "ttir.log1p"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+    %1 = "ttir.log1p"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
     // CHECK: %{{[0-9]+}} = "ttnn.log1p"(%{{[0-9]+}}, [[VAL0]]) <{operandSegmentSizes = array}> : (tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>, tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}) -> tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>
     return %1 : tensor<64x128xf32>
     // CHECK: return %{{[0-9]+}} : tensor<[[TENSOR_SHAPE]]xf32, {{.*}}>
diff --git a/test/ttmlir/Dialect/TTNN/eltwise/unary/logical_not/simple_not.mlir b/test/ttmlir/Dialect/TTNN/eltwise/unary/logical_not/simple_not.mlir
index 54375b4514..a80dffca88 100644
--- a/test/ttmlir/Dialect/TTNN/eltwise/unary/logical_not/simple_not.mlir
+++ b/test/ttmlir/Dialect/TTNN/eltwise/unary/logical_not/simple_not.mlir
@@ -1,11 +1,9 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
-
-#any_device = #tt.operand_constraint
 module attributes {} {
   func.func @logical_not(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
     %0 = tensor.empty() : tensor<64x128xf32>
     // CHECK: {{.*}} = "ttnn.empty"{{.*}}
-    %1 = "ttir.logical_not"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+    %1 = "ttir.logical_not"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
     // CHECK: %[[C:.*]] = "ttnn.logical_not"
     // CHECK-SAME: tensor<64x128xf32,
     // CHECK-SAME: tensor<64x128xf32,
diff --git a/test/ttmlir/Dialect/TTNN/eltwise/unary/negate/simple_neg.mlir b/test/ttmlir/Dialect/TTNN/eltwise/unary/negate/simple_neg.mlir
index e786434a4b..aa63ee6e52 100644
--- a/test/ttmlir/Dialect/TTNN/eltwise/unary/negate/simple_neg.mlir
+++ b/test/ttmlir/Dialect/TTNN/eltwise/unary/negate/simple_neg.mlir
@@ -1,11 +1,10 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
-#any_device = #tt.operand_constraint
 module attributes {} {
   func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
     // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
     %0 = tensor.empty() : tensor<64x128xf32>
     // CHECK: %[[C:.*]] = "ttnn.neg"[[C:.*]]
-    %1 = "ttir.neg"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+    %1 = "ttir.neg"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
     return %1 : tensor<64x128xf32>
   }
 }
diff --git a/test/ttmlir/Dialect/TTNN/eltwise/unary/reciprocal/simple_reciprocal.mlir b/test/ttmlir/Dialect/TTNN/eltwise/unary/reciprocal/simple_reciprocal.mlir
index 0f940c0fca..fd98ade3ef 100644
--- a/test/ttmlir/Dialect/TTNN/eltwise/unary/reciprocal/simple_reciprocal.mlir
+++ b/test/ttmlir/Dialect/TTNN/eltwise/unary/reciprocal/simple_reciprocal.mlir
@@ -1,11 +1,10 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
-#any_device = #tt.operand_constraint
 module attributes {} {
   func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
     // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
     %0 = tensor.empty() : tensor<64x128xf32>
     // CHECK: %[[C:.*]] = "ttnn.reciprocal"[[C:.*]]
-    %1 = "ttir.reciprocal"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+    %1 = "ttir.reciprocal"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
     return %1 : tensor<64x128xf32>
   }
 }
diff --git a/test/ttmlir/Dialect/TTNN/eltwise/unary/relu/simple_relu.mlir b/test/ttmlir/Dialect/TTNN/eltwise/unary/relu/simple_relu.mlir
index 1d75b8ee02..d6b46aae65 100644
--- a/test/ttmlir/Dialect/TTNN/eltwise/unary/relu/simple_relu.mlir
+++ b/test/ttmlir/Dialect/TTNN/eltwise/unary/relu/simple_relu.mlir
@@ -1,5 +1,4 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
-#any_device = #tt.operand_constraint
 #l1 = #ttnn.buffer_type
 #system = #ttnn.buffer_type
 #ttnn_layout = #ttnn.ttnn_layout<(d0, d1) -> (d0, d1), <1x1>, memref<64x128xf32, #system>>
@@ -10,7 +9,7 @@ module attributes {} {
     // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
     %0 = tensor.empty() : tensor<64x128xf32, #ttnn_layout1>
     // CHECK: %[[C:.*]] = "ttnn.relu"[[C:.*]]
-    %1 = "ttir.relu"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32, #ttnn_layout>, tensor<64x128xf32, #ttnn_layout1>) -> tensor<64x128xf32, #ttnn_layout1>
+    %1 = "ttir.relu"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32, #ttnn_layout>, tensor<64x128xf32, #ttnn_layout1>) -> tensor<64x128xf32, #ttnn_layout1>
     return %1 : tensor<64x128xf32, #ttnn_layout1>
   }
 }
diff --git a/test/ttmlir/Dialect/TTNN/eltwise/unary/rsqrt/simple_rsqrt.mlir b/test/ttmlir/Dialect/TTNN/eltwise/unary/rsqrt/simple_rsqrt.mlir
index f86d6f59e6..b7a339d229 100644
--- a/test/ttmlir/Dialect/TTNN/eltwise/unary/rsqrt/simple_rsqrt.mlir
+++ b/test/ttmlir/Dialect/TTNN/eltwise/unary/rsqrt/simple_rsqrt.mlir
@@ -1,11 +1,10 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
-#any_device = #tt.operand_constraint
 module attributes {} {
   func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
     // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
     %0 = tensor.empty() : tensor<64x128xf32>
     // CHECK: %[[C:.*]] = "ttnn.rsqrt"[[C:.*]]
-    %1 = "ttir.rsqrt"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+    %1 = "ttir.rsqrt"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
     return %1 : tensor<64x128xf32>
   }
 }
diff --git a/test/ttmlir/Dialect/TTNN/eltwise/unary/sigmoid/simple_sigmoid.mlir b/test/ttmlir/Dialect/TTNN/eltwise/unary/sigmoid/simple_sigmoid.mlir
index c884577176..d3762db91b 100644
--- a/test/ttmlir/Dialect/TTNN/eltwise/unary/sigmoid/simple_sigmoid.mlir
+++ b/test/ttmlir/Dialect/TTNN/eltwise/unary/sigmoid/simple_sigmoid.mlir
@@ -1,11 +1,10 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
-#any_device = #tt.operand_constraint
 module attributes {} {
   func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
     // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
     %0 = tensor.empty() : tensor<64x128xf32>
     // CHECK: %[[C:.*]] = "ttnn.sigmoid"[[C:.*]]
-    %1 = "ttir.sigmoid"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+    %1 = "ttir.sigmoid"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
     return %1 : tensor<64x128xf32>
   }
 }
diff --git a/test/ttmlir/Dialect/TTNN/eltwise/unary/sign/simple_sign.mlir b/test/ttmlir/Dialect/TTNN/eltwise/unary/sign/simple_sign.mlir
index c82547bff3..170eb1b53c 100644
--- a/test/ttmlir/Dialect/TTNN/eltwise/unary/sign/simple_sign.mlir
+++ b/test/ttmlir/Dialect/TTNN/eltwise/unary/sign/simple_sign.mlir
@@ -1,10 +1,9 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
-#any_device = #tt.operand_constraint
 module attributes {} {
   func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
     %0 = tensor.empty() : tensor<64x128xf32>
     // CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}}) <{dtype = {{.*}}, layout = {{.*}}, memory_config = {{.*}}, <{{.*}}>>, shape = #ttnn.shape<[[TENSOR_SHAPE:[0-9]+x[0-9]+]]>}>
-    %1 = "ttir.sign"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+    %1 = "ttir.sign"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
     // CHECK: %{{[0-9]+}} = "ttnn.sign"(%{{[0-9]+}}, [[VAL0]]) <{operandSegmentSizes = array}> : (tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>, tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}) -> tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>
     return %1 : tensor<64x128xf32>
     // CHECK: return %{{[0-9]+}} : tensor<[[TENSOR_SHAPE]]xf32, {{.*}}>
diff --git a/test/ttmlir/Dialect/TTNN/eltwise/unary/sin/simple_sin.mlir b/test/ttmlir/Dialect/TTNN/eltwise/unary/sin/simple_sin.mlir
index dfe4c7a18c..a1ebaa368b 100644
--- a/test/ttmlir/Dialect/TTNN/eltwise/unary/sin/simple_sin.mlir
+++ b/test/ttmlir/Dialect/TTNN/eltwise/unary/sin/simple_sin.mlir
@@ -1,11 +1,10 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
-#any_device = #tt.operand_constraint
 module attributes {} {
   func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
     // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
     %0 = tensor.empty() : tensor<64x128xf32>
     // CHECK: %[[C:.*]] = "ttnn.sin"[[C:.*]]
-    %1 = "ttir.sin"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+    %1 = "ttir.sin"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
     return %1 : tensor<64x128xf32>
   }
 }
diff --git a/test/ttmlir/Dialect/TTNN/eltwise/unary/sqrt/simple_sqrt.mlir b/test/ttmlir/Dialect/TTNN/eltwise/unary/sqrt/simple_sqrt.mlir
index 3802f00da2..bd468bd8ee 100644
--- a/test/ttmlir/Dialect/TTNN/eltwise/unary/sqrt/simple_sqrt.mlir
+++ b/test/ttmlir/Dialect/TTNN/eltwise/unary/sqrt/simple_sqrt.mlir
@@ -1,11 +1,10 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
-#any_device = #tt.operand_constraint
 module attributes {} {
   func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
     // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
     %0 = tensor.empty() : tensor<64x128xf32>
     // CHECK: %[[C:.*]] = "ttnn.sqrt"[[C:.*]]
-    %1 = "ttir.sqrt"(%arg0, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+    %1 = "ttir.sqrt"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
     return %1 : tensor<64x128xf32>
   }
 }
diff --git a/test/ttmlir/Dialect/TTNN/embedding/embedding_1d_tensor.mlir b/test/ttmlir/Dialect/TTNN/embedding/embedding_1d_tensor.mlir
index 45318423b3..192697ed79 100644
--- a/test/ttmlir/Dialect/TTNN/embedding/embedding_1d_tensor.mlir
+++ b/test/ttmlir/Dialect/TTNN/embedding/embedding_1d_tensor.mlir
@@ -1,10 +1,9 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
-#any_device = #tt.operand_constraint
 module attributes {} {
   func.func @forward(%arg0: tensor<32xbf16>, %arg1: tensor<512x128xbf16>) -> tensor<32x128xbf16> {
     %0 = tensor.empty() : tensor<32x128xbf16>
     // CHECK: %[[C:.*]] = "ttnn.embedding"[[C:.*]]
-    %1 = "ttir.embedding"(%arg0, %arg1, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<32xbf16>, tensor<512x128xbf16>, tensor<32x128xbf16>) -> tensor<32x128xbf16>
+    %1 = "ttir.embedding"(%arg0, %arg1, %0) : (tensor<32xbf16>, tensor<512x128xbf16>, tensor<32x128xbf16>) -> tensor<32x128xbf16>
     return %1 : tensor<32x128xbf16>
   }
 }
diff --git a/test/ttmlir/Dialect/TTNN/embedding/embedding_non_tile.mlir b/test/ttmlir/Dialect/TTNN/embedding/embedding_non_tile.mlir
index 1d28136685..cd039a0fb9 100644
--- a/test/ttmlir/Dialect/TTNN/embedding/embedding_non_tile.mlir
+++ b/test/ttmlir/Dialect/TTNN/embedding/embedding_non_tile.mlir
@@ -1,11 +1,10 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
-#any_device = #tt.operand_constraint
 module attributes {} {
   func.func @forward(%arg0: tensor<1x32xbf16>, %arg1: tensor<512x128xbf16>) -> tensor<1x32x128xbf16> {
     // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
     %0 = tensor.empty() : tensor<1x32x128xbf16>
     // CHECK: %[[C:.*]] = "ttnn.embedding"[[C:.*]]
-    %1 = "ttir.embedding"(%arg0, %arg1, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<1x32xbf16>, tensor<512x128xbf16>, tensor<1x32x128xbf16>) -> tensor<1x32x128xbf16>
+    %1 = "ttir.embedding"(%arg0, %arg1, %0) : (tensor<1x32xbf16>, tensor<512x128xbf16>, tensor<1x32x128xbf16>) -> tensor<1x32x128xbf16>
     return %1 : tensor<1x32x128xbf16>
   }
 }
diff --git a/test/ttmlir/Dialect/TTNN/embedding/simple_embedding.mlir b/test/ttmlir/Dialect/TTNN/embedding/simple_embedding.mlir
index e5fb1421c7..e55d9a879c 100644
--- a/test/ttmlir/Dialect/TTNN/embedding/simple_embedding.mlir
+++ b/test/ttmlir/Dialect/TTNN/embedding/simple_embedding.mlir
@@ -1,10 +1,9 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
-#any_device = #tt.operand_constraint
 module attributes {} {
   func.func @forward(%arg0: tensor<32x32xbf16>, %arg1: tensor<512x128xbf16>) -> tensor<32x32x128xbf16> {
     %0 = tensor.empty() : tensor<32x32x128xbf16>
     // CHECK: %[[C:.*]] = "ttnn.embedding"[[C:.*]]
-    %1 = "ttir.embedding"(%arg0, %arg1, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<32x32xbf16>, tensor<512x128xbf16>, tensor<32x32x128xbf16>) -> tensor<32x32x128xbf16>
+    %1 = "ttir.embedding"(%arg0, %arg1, %0) : (tensor<32x32xbf16>, tensor<512x128xbf16>, tensor<32x32x128xbf16>) -> tensor<32x32x128xbf16>
     return %1 : tensor<32x32x128xbf16>
   }
 }
diff --git a/test/ttmlir/Dialect/TTNN/linear/linear_tests_positive.mlir b/test/ttmlir/Dialect/TTNN/linear/linear_tests_positive.mlir
index 0e248623da..ef0a6729ef 100644
--- a/test/ttmlir/Dialect/TTNN/linear/linear_tests_positive.mlir
+++ b/test/ttmlir/Dialect/TTNN/linear/linear_tests_positive.mlir
@@ -1,5 +1,4 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
-#any_device_tile = #tt.operand_constraint
 module {
   func.func @linear_1d_1d(%arg0: tensor<128xbf16>, %arg1: tensor<128xbf16>) -> tensor<1xbf16> {
     // CHECK: "ttnn.empty"
@@ -10,7 +9,7 @@ module {
     // CHECK-SAME: tensor<128xbf16
     // CHECK-SAME: tensor<1xbf16
     // CHECK-SAME: tensor<1xbf16
-    %1 = "ttir.linear"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<128xbf16>, tensor<128xbf16>, tensor<1xbf16>) -> tensor<1xbf16>
+    %1 = "ttir.linear"(%arg0, %arg1, %0) : (tensor<128xbf16>, tensor<128xbf16>, tensor<1xbf16>) -> tensor<1xbf16>
     return %1 : tensor<1xbf16>
   }
@@ -24,7 +23,7 @@ module {
     // CHECK-SAME: tensor<1xbf16
     // CHECK-SAME: tensor<1xbf16
     // CHECK-SAME: tensor<1xbf16
-    %1 = "ttir.linear"(%arg0, %arg1, %bias, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<128xbf16>, tensor<128xbf16>, tensor<1xbf16>, tensor<1xbf16>) -> tensor<1xbf16>
+    %1 = "ttir.linear"(%arg0, %arg1, %bias, %0) : (tensor<128xbf16>, tensor<128xbf16>, tensor<1xbf16>, tensor<1xbf16>) -> tensor<1xbf16>
     return %1 : tensor<1xbf16>
   }
@@ -38,7 +37,7 @@ module {
     // CHECK-SAME: tensor<128xbf16
     // CHECK-SAME: tensor<128xbf16
     // CHECK-SAME: tensor<128xbf16
-    %1 = "ttir.linear"(%arg0, %arg1, %bias, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<128xbf16>, tensor<128xbf16>, tensor<128xbf16>, tensor<128xbf16>) -> tensor<128xbf16>
+    %1 = "ttir.linear"(%arg0, %arg1, %bias, %0) : (tensor<128xbf16>, tensor<128xbf16>, tensor<128xbf16>, tensor<128xbf16>) -> tensor<128xbf16>
     return %1 : tensor<128xbf16>
   }
@@ -51,7 +50,7 @@ module {
     // CHECK-SAME: tensor<128xbf16
     // CHECK-SAME: tensor<64xbf16
     // CHECK-SAME: tensor<64xbf16
-    %1 = "ttir.linear"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<64x128xbf16>, tensor<128xbf16>, tensor<64xbf16>) -> tensor<64xbf16>
+    %1 = "ttir.linear"(%arg0, %arg1, %0) : (tensor<64x128xbf16>, tensor<128xbf16>, tensor<64xbf16>) -> tensor<64xbf16>
     return %1 : tensor<64xbf16>
   }
@@ -64,7 +63,7 @@ module {
     // CHECK-SAME: tensor<128x64xbf16
     // CHECK-SAME: tensor<64x64xbf16
     // CHECK-SAME: tensor<64x64xbf16
-    %1 = "ttir.linear"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<64x128xbf16>, tensor<128x64xbf16>, tensor<64x64xbf16>) -> tensor<64x64xbf16>
+    %1 = "ttir.linear"(%arg0, %arg1, %0) : (tensor<64x128xbf16>, tensor<128x64xbf16>, tensor<64x64xbf16>) -> tensor<64x64xbf16>
     return %1 : tensor<64x64xbf16>
   }
@@ -78,7 +77,7 @@ module {
     // CHECK-SAME: tensor<64x64xbf16
     // CHECK-SAME: tensor<64x64xbf16
     // CHECK-SAME: tensor<64x64xbf16
-    %1 = "ttir.linear"(%arg0, %arg1, %bias, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<64x128xbf16>, tensor<128x64xbf16>, tensor<64x64xbf16>, tensor<64x64xbf16>) -> tensor<64x64xbf16>
+    %1 = "ttir.linear"(%arg0, %arg1, %bias, %0) : (tensor<64x128xbf16>, tensor<128x64xbf16>, tensor<64x64xbf16>, tensor<64x64xbf16>) -> tensor<64x64xbf16>
     return %1 : tensor<64x64xbf16>
   }
@@ -91,7 +90,7 @@ module {
     // CHECK-SAME: tensor<12x7x128x64xbf16
     // CHECK-SAME: tensor<12x7x64xbf16
     // CHECK-SAME: tensor<12x7x64xbf16
-    %1 = "ttir.linear"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<128xbf16>, tensor<12x7x128x64xbf16>, tensor<12x7x64xbf16>) -> tensor<12x7x64xbf16>
+    %1 = "ttir.linear"(%arg0, %arg1, %0) : (tensor<128xbf16>, tensor<12x7x128x64xbf16>, tensor<12x7x64xbf16>) -> tensor<12x7x64xbf16>
     return %1 : tensor<12x7x64xbf16>
   }
@@ -104,7 +103,7 @@ module {
     // CHECK-SAME: tensor<64xbf16
     // CHECK-SAME: tensor<12x7x128xbf16
     // CHECK-SAME: tensor<12x7x128xbf16
-    %1 = "ttir.linear"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<12x7x128x64xbf16>, tensor<64xbf16>, tensor<12x7x128xbf16>) -> tensor<12x7x128xbf16>
+    %1 = "ttir.linear"(%arg0, %arg1, %0) : (tensor<12x7x128x64xbf16>, tensor<64xbf16>, tensor<12x7x128xbf16>) -> tensor<12x7x128xbf16>
     return %1 : tensor<12x7x128xbf16>
   }
@@ -117,7 +116,7 @@ module {
     // CHECK-SAME: tensor<12x7x128x64xbf16
     // CHECK-SAME: tensor<12x7x64x64xbf16
     // CHECK-SAME: tensor<12x7x64x64xbf16
-    %1 = "ttir.linear"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<64x128xbf16>, tensor<12x7x128x64xbf16>, tensor<12x7x64x64xbf16>) -> tensor<12x7x64x64xbf16>
+    %1 = "ttir.linear"(%arg0, %arg1, %0) : (tensor<64x128xbf16>, tensor<12x7x128x64xbf16>, tensor<12x7x64x64xbf16>) -> tensor<12x7x64x64xbf16>
     return %1 : tensor<12x7x64x64xbf16>
   }
@@ -130,7 +129,7 @@ module {
     // CHECK-SAME: tensor<64x128xbf16
     // CHECK-SAME: tensor<12x7x128x128xbf16
     // CHECK-SAME: tensor<12x7x128x128xbf16
-    %1 = "ttir.linear"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<12x7x128x64xbf16>, tensor<64x128xbf16>, tensor<12x7x128x128xbf16>) -> tensor<12x7x128x128xbf16>
+    %1 = "ttir.linear"(%arg0, %arg1, %0) : (tensor<12x7x128x64xbf16>, tensor<64x128xbf16>, tensor<12x7x128x128xbf16>) -> tensor<12x7x128x128xbf16>
     return %1 : tensor<12x7x128x128xbf16>
   }
@@ -144,7 +143,7 @@ module {
     // CHECK-SAME: tensor<7x128x64xbf16
     // CHECK-SAME: tensor<7x64x64xbf16
     // CHECK-SAME: tensor<7x64x64xbf16
-    %1 = "ttir.linear"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<7x64x128xbf16>, tensor<7x128x64xbf16>, tensor<7x64x64xbf16>) -> tensor<7x64x64xbf16>
+    %1 = "ttir.linear"(%arg0, %arg1, %0) : (tensor<7x64x128xbf16>, tensor<7x128x64xbf16>, tensor<7x64x64xbf16>) -> tensor<7x64x64xbf16>
     return %1 : tensor<7x64x64xbf16>
   }
@@ -157,7 +156,7 @@ module {
     // CHECK-SAME: tensor<1x128x64xbf16
     // CHECK-SAME: tensor<7x64x64xbf16
     // CHECK-SAME: tensor<7x64x64xbf16
-    %1 = "ttir.linear"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<7x64x128xbf16>, tensor<1x128x64xbf16>, tensor<7x64x64xbf16>) -> tensor<7x64x64xbf16>
+    %1 = "ttir.linear"(%arg0, %arg1, %0) : (tensor<7x64x128xbf16>, tensor<1x128x64xbf16>, tensor<7x64x64xbf16>) -> tensor<7x64x64xbf16>
     return %1 : tensor<7x64x64xbf16>
   }
@@ -170,7 +169,7 @@ module {
     // CHECK-SAME: tensor<7x1x128x64xbf16
     // CHECK-SAME: tensor<7x7x64x64xbf16
     // CHECK-SAME: tensor<7x7x64x64xbf16
-    %1 = "ttir.linear"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<1x7x64x128xbf16>, tensor<7x1x128x64xbf16>, tensor<7x7x64x64xbf16>) -> tensor<7x7x64x64xbf16>
+    %1 = "ttir.linear"(%arg0, %arg1, %0) : (tensor<1x7x64x128xbf16>, tensor<7x1x128x64xbf16>, tensor<7x7x64x64xbf16>) -> tensor<7x7x64x64xbf16>
     return %1 : tensor<7x7x64x64xbf16>
   }
@@ -183,7 +182,7 @@ module {
     // CHECK-SAME: tensor<7x1x128x64xbf16
     // CHECK-SAME: tensor<12x7x7x64x64xbf16
     // CHECK-SAME: tensor<12x7x7x64x64xbf16
-    %1 = "ttir.linear"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<12x1x7x64x128xbf16>, tensor<7x1x128x64xbf16>, tensor<12x7x7x64x64xbf16>) -> tensor<12x7x7x64x64xbf16>
+    %1 = "ttir.linear"(%arg0, %arg1, %0) : (tensor<12x1x7x64x128xbf16>, tensor<7x1x128x64xbf16>, tensor<12x7x7x64x64xbf16>) -> tensor<12x7x7x64x64xbf16>
     return %1 : tensor<12x7x7x64x64xbf16>
   }
@@ -197,7 +196,7 @@ module {
     // CHECK-SAME: tensor<64xbf16
     // CHECK-SAME: tensor<14x7x32x64xbf16
     // CHECK-SAME: tensor<14x7x32x64xbf16
-    %1 = "ttir.linear"(%arg0, %arg1, %bias, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<14x7x32x32xbf16>, tensor<14x1x32x64xbf16>, tensor<64xbf16>, tensor<14x7x32x64xbf16>) -> tensor<14x7x32x64xbf16>
+    %1 = "ttir.linear"(%arg0, %arg1, %bias, %0) : (tensor<14x7x32x32xbf16>, tensor<14x1x32x64xbf16>, tensor<64xbf16>, tensor<14x7x32x64xbf16>) -> tensor<14x7x32x64xbf16>
     return %1 : tensor<14x7x32x64xbf16>
   }
@@ -210,7 +209,7 @@ module {
     // CHECK-SAME: tensor<4x3x128x32xbf16
     // CHECK-SAME: tensor<14x4x3x64x32xbf16
     // CHECK-SAME: tensor<14x4x3x64x32xbf16
-    %1 = "ttir.linear"(%arg0, %arg1, %bias, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<3x64x128xbf16>, tensor<4x3x128x32xbf16>, tensor<14x4x3x64x32xbf16>, tensor<14x4x3x64x32xbf16>) -> tensor<14x4x3x64x32xbf16>
+    %1 = "ttir.linear"(%arg0, %arg1, %bias, %0) : (tensor<3x64x128xbf16>, tensor<4x3x128x32xbf16>, tensor<14x4x3x64x32xbf16>, tensor<14x4x3x64x32xbf16>) -> tensor<14x4x3x64x32xbf16>
     return %1 : tensor<14x4x3x64x32xbf16>
   }
 }
diff --git a/test/ttmlir/Dialect/TTNN/linear/simple_linear.mlir b/test/ttmlir/Dialect/TTNN/linear/simple_linear.mlir
index 56728eb52b..44165e05d1 100644
--- a/test/ttmlir/Dialect/TTNN/linear/simple_linear.mlir
+++ b/test/ttmlir/Dialect/TTNN/linear/simple_linear.mlir
@@ -1,5 +1,4 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
-#any_device_tile = #tt.operand_constraint
 module {
   func.func @simple_linear_without_bias(%arg0: tensor<64x128xbf16>, %arg1: tensor<128x64xbf16>) -> tensor<64x64xbf16> {
@@ -11,7 +10,7 @@ module {
     // CHECK-SAME: tensor<128x64xbf16
     // CHECK-SAME: tensor<64x64xbf16
     // CHECK-SAME: tensor<64x64xbf16
-    %1 = "ttir.linear"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<64x128xbf16>, tensor<128x64xbf16>, tensor<64x64xbf16>) -> tensor<64x64xbf16>
+    %1 = "ttir.linear"(%arg0, %arg1, %0) : (tensor<64x128xbf16>, tensor<128x64xbf16>, tensor<64x64xbf16>) -> tensor<64x64xbf16>
     return %1 : tensor<64x64xbf16>
   }
@@ -25,7 +24,7 @@ module {
     // CHECK-SAME: tensor<64x64xbf16
     // CHECK-SAME: tensor<64x64xbf16
     // CHECK-SAME: tensor<64x64xbf16
-    %1 = "ttir.linear"(%arg0, %arg1, %bias, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<64x128xbf16>, tensor<128x64xbf16>, tensor<64x64xbf16>, tensor<64x64xbf16>) -> tensor<64x64xbf16>
+    %1 = "ttir.linear"(%arg0, %arg1, %bias, %0) : (tensor<64x128xbf16>, tensor<128x64xbf16>, tensor<64x64xbf16>, tensor<64x64xbf16>) -> tensor<64x64xbf16>
     return %1 : tensor<64x64xbf16>
   }
 }
diff --git a/test/ttmlir/Dialect/TTNN/matmul/matmul_tests_negative.mlir b/test/ttmlir/Dialect/TTNN/matmul/matmul_tests_negative.mlir
index 252aaea9fc..7ca7efeec3 100644
--- a/test/ttmlir/Dialect/TTNN/matmul/matmul_tests_negative.mlir
+++ b/test/ttmlir/Dialect/TTNN/matmul/matmul_tests_negative.mlir
@@ -2,7 +2,6 @@
 // Negative tests for matmul operation
 // Verify that the parsing fails if either of operands is a scalar
-#any_device_tile = #tt.operand_constraint
 module attributes {} {
   func.func @matmul_negative_1d_1d_inner_dimension_missmatch(%arg0: tensor, %arg1: tensor<64xbf16>) -> tensor<1xbf16> {
     // CHECK: error: 'ttnn.matmul' op Input A must be at least a 1D tensor
@@ -13,7 +12,6 @@ module attributes {} {
 }
 // -----
-#any_device_tile = #tt.operand_constraint
 module attributes {} {
   func.func @matmul_negative_1d_1d_inner_dimension_missmatch(%arg0: tensor<128xbf16>, %arg1: tensor) -> tensor<1xbf16> {
     // CHECK: error: 'ttnn.matmul' op Input B must be at least a 1D tensor
@@ -23,9 +21,8 @@ module attributes {} {
   }
 }
-// Verifty that the parsing fails if the output is a scalar
+// Verify that the parsing fails if the output is a scalar
 // -----
-#any_device_tile = #tt.operand_constraint
 module attributes {} {
   func.func @matmul_negative_1d_1d_inner_dimension_missmatch(%arg0: tensor<128xbf16>, %arg1: tensor<128xbf16>) -> tensor {
     // CHECK: error: 'ttnn.matmul' op Scalar output is not supported, output must be at least a 1D tensor
@@ -36,7 +33,6 @@ module attributes {} {
   func.func @matmul_negative_1d_1d_inner_dimension_missmatch(%arg0: tensor<128xbf16>, %arg1: tensor<128xbf16>) -> tensor<2xbf16> {
     // CHECK: error: 'ttnn.matmul' op Scalar output must be a 1D tensor of size 1
@@ -48,7 +44,6 @@
 // Inner dimension mismatch tests
 // -----
-#any_device_tile = #tt.operand_constraint
 module attributes {} {
   func.func @matmul_negative_1d_1d_inner_dimension_missmatch(%arg0: tensor<128xbf16>, %arg1: tensor<64xbf16>) -> tensor<1xbf16> {
     // CHECK: error: 'ttnn.matmul' op Input A[-1](128) and B[-2](64) must have matching inner dimensions
@@ -59,7 +54,6 @@ module attributes {} {
   func.func @matmul_negative_1d_2d_inner_dimension_missmatch(%arg0: tensor<64xbf16>, %arg1: tensor<128x64xbf16>) -> tensor<64xbf16> {
     // CHECK: error: 'ttnn.matmul' op Input A[-1](64) and B[-2](128) must have matching inner dimensions
@@ -70,7 +64,6 @@ func.func @matmul_negative_2d_1d_inner_dimension_missmatch(%arg0: tensor<64x128xbf16>, %arg1: tensor<64xbf16>) -> tensor<64xbf16> {
     // CHECK: error: 'ttnn.matmul' op Input A[-1](128) and B[-2](64) must have matching inner dimensions
@@ -81,7 +74,6 @@ module attributes {} {
 }
 // -----
-#any_device_tile = #tt.operand_constraint
 module attributes {} {
   func.func @matmul_negative_2d_2d_inner_dimension_missmatch(%arg0: tensor<64x128xbf16>, %arg1: tensor<64x128xbf16>) -> tensor<64x64xbf16> {
     // CHECK: error: 'ttnn.matmul' op Input A[-1](128) and B[-2](64) must have matching inner dimensions
@@ -92,7 +84,6 @@ module attributes {} {
   func.func @matmul_negative_nd_nd_inner_dimension_missmatch(%arg0: tensor<7x64x128xbf16>, %arg1: tensor<1x64x128xbf16>) -> tensor<7x64x64xbf16> {
     // CHECK: error: 'ttnn.matmul' op Input A[-1](128) and B[-2](64) must have matching inner dimensions
@@ -104,7 +95,6 @@
 // Batch dimension mismatch tests
 // -----
-#any_device_tile = #tt.operand_constraint
 module attributes {} {
   func.func @matmul_negative_nd_nd_same_rank_batch_broadcast_incompatible_1(%arg0: tensor<7x64x128xbf16>, %arg1: tensor<2x128x64xbf16>) -> tensor<7x64x64xbf16> {
     // CHECK: error: 'ttnn.matmul' op Batch dimensions of input A(7) and B(2) are not broadcast compatible
@@ -115,7 +105,6 @@ module attributes {} {
 }
 // -----
-#any_device_tile = #tt.operand_constraint
 module attributes {} {
   func.func @matmul_negative_nd_nd_same_rank_batch_broadcast_incompatible_2(%arg0: tensor<2x7x64x128xbf16>, %arg1: tensor<7x1x128x64xbf16>) -> tensor<7x7x64x64xbf16> {
     // CHECK: error: 'ttnn.matmul' op Batch dimensions of input A(2,7) and B(7,1) are not broadcast compatible
@@ -126,7 +115,6 @@ module attributes {} {
 }
 // -----
-#any_device_tile = #tt.operand_constraint
 module attributes {} {
   func.func @matmul_negative_nd_nd_different_rank_batch_broadcast_incompatible(%arg0: tensor<12x2x7x64x128xbf16>, %arg1: tensor<7x1x128x64xbf16>) -> tensor<12x7x7x64x64xbf16> {
     // CHECK: error: 'ttnn.matmul' op Batch dimensions of input A(12,2,7) and B(7,1) are not broadcast compatible
@@ -138,7 +126,6 @@
 // Output shape mismatch tests
 // -----
-#any_device_tile = #tt.operand_constraint
 module attributes {} {
   func.func @matmul_negative_2d_2d_inner_dimension_missmatch(%arg0: tensor<64x128xbf16>, %arg1: tensor<128x64xbf16>) -> tensor<64xbf16> {
     // CHECK: error: 'ttnn.matmul' op Output shape rank(1) must match the expected output shape rank(2)
@@ -149,7 +136,6 @@ module attributes {} {
 }
 // -----
-#any_device_tile = #tt.operand_constraint
 module attributes {} {
   func.func @matmul_negative_2d_2d_inner_dimension_missmatch(%arg0: tensor<64x128xbf16>, %arg1: tensor<128x64xbf16>) -> tensor<64x128xbf16> {
     // CHECK: error: 'ttnn.matmul' op Output shape dimension[1](128) doesn't match the expected output shape dimension[1](64)
diff --git a/test/ttmlir/Dialect/TTNN/matmul/matmul_tests_positive.mlir b/test/ttmlir/Dialect/TTNN/matmul/matmul_tests_positive.mlir
index c1921ce8ba..a62e532110 100644
--- a/test/ttmlir/Dialect/TTNN/matmul/matmul_tests_positive.mlir
+++ b/test/ttmlir/Dialect/TTNN/matmul/matmul_tests_positive.mlir
@@ -1,59 +1,58 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
-#any_device_tile = #tt.operand_constraint
 module attributes {} {
   func.func @matmul_1d_1d(%arg0: tensor<128xbf16>, %arg1: tensor<128xbf16>) -> tensor<1xbf16> {
     %0 = tensor.empty() : tensor<1xbf16>
     // CHECK: %[[C:.*]] = "ttnn.matmul"[[C:.*]]
-    %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<128xbf16>, tensor<128xbf16>, tensor<1xbf16>) -> tensor<1xbf16>
+    %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<128xbf16>, tensor<128xbf16>, tensor<1xbf16>) -> tensor<1xbf16>
     return %1 : tensor<1xbf16>
   }
   func.func @matmul_1d_2d(%arg0: tensor<128xbf16>, %arg1: tensor<128x64xbf16>) -> tensor<64xbf16> {
     %0 = tensor.empty() : tensor<64xbf16>
     // CHECK: %[[C:.*]] = "ttnn.matmul"[[C:.*]]
-    %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<128xbf16>, tensor<128x64xbf16>, tensor<64xbf16>) -> tensor<64xbf16>
+    %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<128xbf16>, tensor<128x64xbf16>, tensor<64xbf16>) -> tensor<64xbf16>
     return %1 : tensor<64xbf16>
   }
   func.func @matmul_2d_1d(%arg0: tensor<64x128xbf16>, %arg1: tensor<128xbf16>) -> tensor<64xbf16> {
     %0 = tensor.empty() : tensor<64xbf16>
     // CHECK: %[[C:.*]] = "ttnn.matmul"[[C:.*]]
-    %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<64x128xbf16>, tensor<128xbf16>, tensor<64xbf16>) -> tensor<64xbf16>
+    %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<64x128xbf16>, tensor<128xbf16>, tensor<64xbf16>) -> tensor<64xbf16>
     return %1 : tensor<64xbf16>
   }
   func.func @matmul_2d_2d(%arg0: tensor<64x128xbf16>, %arg1: tensor<128x64xbf16>) -> tensor<64x64xbf16> {
     %0 = tensor.empty() : tensor<64x64xbf16>
     // CHECK: %[[C:.*]] = "ttnn.matmul"[[C:.*]]
-    %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<64x128xbf16>, tensor<128x64xbf16>, tensor<64x64xbf16>) -> tensor<64x64xbf16>
+    %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<64x128xbf16>, tensor<128x64xbf16>, tensor<64x64xbf16>) -> tensor<64x64xbf16>
     return %1 : tensor<64x64xbf16>
   }
   func.func @matmul_1d_nd(%arg0: tensor<128xbf16>, %arg1: tensor<12x7x128x64xbf16>) -> tensor<12x7x64xbf16> {
     %0 = tensor.empty() : tensor<12x7x64xbf16>
     // CHECK: %[[C:.*]] = "ttnn.matmul"[[C:.*]]
-    %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<128xbf16>, tensor<12x7x128x64xbf16>, tensor<12x7x64xbf16>) -> tensor<12x7x64xbf16>
+    %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<128xbf16>, tensor<12x7x128x64xbf16>, tensor<12x7x64xbf16>) -> tensor<12x7x64xbf16>
     return %1 : tensor<12x7x64xbf16>
   }
   func.func @matmul_nd_1d(%arg0: tensor<12x7x128x64xbf16>, %arg1: tensor<64xbf16>) -> tensor<12x7x128xbf16> {
     %0 = tensor.empty() : tensor<12x7x128xbf16>
     // CHECK: %[[C:.*]] = "ttnn.matmul"[[C:.*]]
-    %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<12x7x128x64xbf16>, tensor<64xbf16>, tensor<12x7x128xbf16>) -> tensor<12x7x128xbf16>
+    %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<12x7x128x64xbf16>, tensor<64xbf16>, tensor<12x7x128xbf16>) -> tensor<12x7x128xbf16>
     return %1 : tensor<12x7x128xbf16>
   }
   func.func @matmul_2d_nd(%arg0: tensor<64x128xbf16>, %arg1: tensor<12x7x128x64xbf16>) -> tensor<12x7x64x64xbf16> {
     %0 = tensor.empty() : tensor<12x7x64x64xbf16>
     // CHECK: %[[C:.*]] = "ttnn.matmul"[[C:.*]]
-    %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<64x128xbf16>, tensor<12x7x128x64xbf16>, tensor<12x7x64x64xbf16>) -> tensor<12x7x64x64xbf16>
+    %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<64x128xbf16>, tensor<12x7x128x64xbf16>, tensor<12x7x64x64xbf16>) -> tensor<12x7x64x64xbf16>
     return %1 : tensor<12x7x64x64xbf16>
   }
   func.func
@matmul_nd_2d(%arg0: tensor<12x7x128x64xbf16>, %arg1: tensor<64x128xbf16>) -> tensor<12x7x128x128xbf16> { %0 = tensor.empty() : tensor<12x7x128x128xbf16> // CHECK: %[[C:.*]] = "ttnn.matmul"[[C:.*]] - %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<12x7x128x64xbf16>, tensor<64x128xbf16>, tensor<12x7x128x128xbf16>) -> tensor<12x7x128x128xbf16> + %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<12x7x128x64xbf16>, tensor<64x128xbf16>, tensor<12x7x128x128xbf16>) -> tensor<12x7x128x128xbf16> return %1 : tensor<12x7x128x128xbf16> } @@ -61,28 +60,28 @@ module attributes {} { func.func @matmul_nd_nd_same_rank_same_dims(%arg0: tensor<7x64x128xbf16>, %arg1: tensor<7x128x64xbf16>) -> tensor<7x64x64xbf16> { %0 = tensor.empty() : tensor<7x64x64xbf16> // CHECK: %[[C:.*]] = "ttnn.matmul"[[C:.*]] - %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<7x64x128xbf16>, tensor<7x128x64xbf16>, tensor<7x64x64xbf16>) -> tensor<7x64x64xbf16> + %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<7x64x128xbf16>, tensor<7x128x64xbf16>, tensor<7x64x64xbf16>) -> tensor<7x64x64xbf16> return %1 : tensor<7x64x64xbf16> } func.func @matmul_nd_nd_same_rank_broadcastable_dims_1(%arg0: tensor<7x64x128xbf16>, %arg1: tensor<1x128x64xbf16>) -> tensor<7x64x64xbf16> { %0 = tensor.empty() : tensor<7x64x64xbf16> // CHECK: %[[C:.*]] = "ttnn.matmul"[[C:.*]] - %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<7x64x128xbf16>, tensor<1x128x64xbf16>, tensor<7x64x64xbf16>) -> tensor<7x64x64xbf16> + %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<7x64x128xbf16>, tensor<1x128x64xbf16>, tensor<7x64x64xbf16>) -> tensor<7x64x64xbf16> return %1 : tensor<7x64x64xbf16> } func.func @matmul_nd_nd_same_rank_broadcastable_dims_2(%arg0: tensor<1x7x64x128xbf16>, %arg1: tensor<7x1x128x64xbf16>) -> tensor<7x7x64x64xbf16> { %0 = tensor.empty() : tensor<7x7x64x64xbf16> // CHECK: %[[C:.*]] = "ttnn.matmul"[[C:.*]] - %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<1x7x64x128xbf16>, tensor<7x1x128x64xbf16>, tensor<7x7x64x64xbf16>) -> tensor<7x7x64x64xbf16> + %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<1x7x64x128xbf16>, tensor<7x1x128x64xbf16>, tensor<7x7x64x64xbf16>) -> tensor<7x7x64x64xbf16> return %1 : tensor<7x7x64x64xbf16> } func.func @matmul_nd_nd_different_rank_broadcastable_dims_2(%arg0: tensor<12x1x7x64x128xbf16>, %arg1: tensor<7x1x128x64xbf16>) -> tensor<12x7x7x64x64xbf16> { %0 = tensor.empty() : tensor<12x7x7x64x64xbf16> // CHECK: %[[C:.*]] = "ttnn.matmul"[[C:.*]] - %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<12x1x7x64x128xbf16>, tensor<7x1x128x64xbf16>, tensor<12x7x7x64x64xbf16>) -> tensor<12x7x7x64x64xbf16> + %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<12x1x7x64x128xbf16>, tensor<7x1x128x64xbf16>, tensor<12x7x7x64x64xbf16>) -> tensor<12x7x7x64x64xbf16> return %1 : tensor<12x7x7x64x64xbf16> } } diff --git a/test/ttmlir/Dialect/TTNN/matmul/simple_matmul.mlir b/test/ttmlir/Dialect/TTNN/matmul/simple_matmul.mlir index f82ed85752..87db650783 100644 --- a/test/ttmlir/Dialect/TTNN/matmul/simple_matmul.mlir +++ b/test/ttmlir/Dialect/TTNN/matmul/simple_matmul.mlir @@ -1,11 +1,10 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s -#any_device_tile = 
#tt.operand_constraint<any_device_tile>
 // CHECK: #[[TILED_LAYOUT:.*]] = #ttnn.ttnn_layout<(d0, d1) -> (d0, d1), <1x1>, memref<2x4x!tt.tile<32x32, bf16>, #dram>, >
 module attributes {} {
   func.func @forward(%arg0: tensor<64x128xbf16>, %arg1: tensor<128x96xbf16>) -> tensor<64x96xbf16> {
     %0 = tensor.empty() : tensor<64x96xbf16>
     // CHECK: %[[C:.*]] = "ttnn.matmul"[[C:.*]]
-    %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<64x128xbf16>, tensor<128x96xbf16>, tensor<64x96xbf16>) -> tensor<64x96xbf16>
+    %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<64x128xbf16>, tensor<128x96xbf16>, tensor<64x96xbf16>) -> tensor<64x96xbf16>
     return %1 : tensor<64x96xbf16>
   }
 }
diff --git a/test/ttmlir/Dialect/TTNN/multiple_func.mlir b/test/ttmlir/Dialect/TTNN/multiple_func.mlir
index f23863e5b1..3961fac038 100644
--- a/test/ttmlir/Dialect/TTNN/multiple_func.mlir
+++ b/test/ttmlir/Dialect/TTNN/multiple_func.mlir
@@ -1,5 +1,4 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
-#any_device = #tt.operand_constraint<any_device>
 module attributes {} {
   func.func @main(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
     // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
@@ -10,7 +9,7 @@ module attributes {} {
   }
   func.func private @do_mult(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>, %arg2: tensor<64x128xf32>) -> tensor<64x128xf32> {
-    %0 = "ttir.multiply"(%arg0, %arg1, %arg2) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+    %0 = "ttir.multiply"(%arg0, %arg1, %arg2) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
     return %0 : tensor<64x128xf32>
   }
 }
diff --git a/test/ttmlir/Dialect/TTNN/optimizer/input_layout_loc_override.mlir b/test/ttmlir/Dialect/TTNN/optimizer/input_layout_loc_override.mlir
index 4a4575f8d1..97892500aa 100644
--- a/test/ttmlir/Dialect/TTNN/optimizer/input_layout_loc_override.mlir
+++ b/test/ttmlir/Dialect/TTNN/optimizer/input_layout_loc_override.mlir
@@ -1,5 +1,4 @@
 // RUN: ttmlir-opt --mlir-print-debuginfo --ttir-to-ttnn-backend-pipeline="enable-optimizer=true memory-layout-analysis-enabled=true override-output-layout=matmul_1_in_1_layout=1x1:l1:interleaved:tile:bf16" %s | FileCheck %s
-#any_device = #tt.operand_constraint<any_device>
 #loc = loc("Matmul":4294967295:0)
 // CHECK-DAG: #[[LOC_MATMUL_IN0:.*]] = loc("matmul_1_in_0_layout"(#loc3))
 // CHECK-DAG: #[[LOC_MATMUL_IN1:.*]] = loc("matmul_1_in_1_layout"(#loc3))
@@ -12,7 +11,7 @@ module attributes {} {
     // CHECK-DAG: %{{.*}} = "ttnn.to_device"{{.*}} loc(#[[LOC_MATMUL_IN0]])
     // CHECK-DAG: %{{.*}} = "ttnn.to_device"{{.*}} <{memory_config = #ttnn.memory_config<#l1_, <<4x3>>, >}> : {{.*}} -> tensor<128x96xbf16, #[[IN_1_LAYOUT]]> loc(#[[LOC_MATMUL_IN1]])
     // CHECK-DAG: %{{.*}} = "ttnn.matmul"{{.*}} loc(#[[LOC_MATMUL]])
-    %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xbf16>, tensor<128x96xbf16>, tensor<64x96xbf16>) -> tensor<64x96xbf16> loc(#loc2)
+    %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<64x128xbf16>, tensor<128x96xbf16>, tensor<64x96xbf16>) -> tensor<64x96xbf16> loc(#loc2)
     return %1 : tensor<64x96xbf16>
   } loc(#loc)
 } loc(#loc)
diff --git a/test/ttmlir/Dialect/TTNN/optimizer/l1_interleaved_policy/all_l1_interleaved_policy.mlir
b/test/ttmlir/Dialect/TTNN/optimizer/l1_interleaved_policy/all_l1_interleaved_policy.mlir index 70ebaddb8d..6d2e5b6231 100644 --- a/test/ttmlir/Dialect/TTNN/optimizer/l1_interleaved_policy/all_l1_interleaved_policy.mlir +++ b/test/ttmlir/Dialect/TTNN/optimizer/l1_interleaved_policy/all_l1_interleaved_policy.mlir @@ -1,5 +1,4 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="enable-optimizer=true memory-layout-analysis-enabled=true memory-layout-analysis-policy=L1Interleaved" %s | FileCheck %s -#any_device = #tt.operand_constraint module attributes {} { func.func @forward(%arg0: tensor<64x128xbf16>, %arg1: tensor<128x96xbf16>, %arg2: tensor<64x96xbf16>, %arg3: tensor<96x32xbf16>, %arg4: tensor<64x32xbf16>) -> tensor<64x32xbf16> { // CHECK: #[[L1_:.*]] = #ttnn.buffer_type @@ -7,22 +6,22 @@ module attributes {} { // CHECK: #[[LAYOUT_10:.*]] = #ttnn.ttnn_layout<(d0, d1) -> (d0, d1), <{{.*}}>, memref<{{.*}}, #l1_>, > %0 = tensor.empty() : tensor<64x96xbf16> // CHECK: %{{.*}} = "ttnn.matmul"{{.*}} -> tensor<64x96xbf16, #[[LAYOUT_7]]> - %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xbf16>, tensor<128x96xbf16>, tensor<64x96xbf16>) -> tensor<64x96xbf16> + %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<64x128xbf16>, tensor<128x96xbf16>, tensor<64x96xbf16>) -> tensor<64x96xbf16> %2 = tensor.empty() : tensor<64x96xbf16> // CHECK: %{{.*}} = "ttnn.add"{{.*}} -> tensor<64x96xbf16, #[[LAYOUT_7]]> - %3 = "ttir.add"(%1, %arg2, %2) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x96xbf16>, tensor<64x96xbf16>, tensor<64x96xbf16>) -> tensor<64x96xbf16> + %3 = "ttir.add"(%1, %arg2, %2) <{operandSegmentSizes = array}> : (tensor<64x96xbf16>, tensor<64x96xbf16>, tensor<64x96xbf16>) -> tensor<64x96xbf16> %4 = tensor.empty() : tensor<64x96xbf16> // CHECK: %{{.*}} = "ttnn.relu"{{.*}} -> tensor<64x96xbf16, #[[LAYOUT_7]]> - %5 = "ttir.relu"(%3, %4) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x96xbf16>, tensor<64x96xbf16>) -> tensor<64x96xbf16> + %5 = "ttir.relu"(%3, %4) <{operandSegmentSizes = array}> : (tensor<64x96xbf16>, tensor<64x96xbf16>) -> tensor<64x96xbf16> %6 = tensor.empty() : tensor<64x32xbf16> // CHECK: %{{.*}} = "ttnn.matmul"{{.*}} -> tensor<64x32xbf16, #[[LAYOUT_10]]> - %7 = "ttir.matmul"(%5, %arg3, %6) <{operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x96xbf16>, tensor<96x32xbf16>, tensor<64x32xbf16>) -> tensor<64x32xbf16> + %7 = "ttir.matmul"(%5, %arg3, %6) : (tensor<64x96xbf16>, tensor<96x32xbf16>, tensor<64x32xbf16>) -> tensor<64x32xbf16> %8 = tensor.empty() : tensor<64x32xbf16> // CHECK: %{{.*}} = "ttnn.add"{{.*}} -> tensor<64x32xbf16, #[[LAYOUT_10]]> - %9 = "ttir.add"(%7, %arg4, %8) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x32xbf16>, tensor<64x32xbf16>, tensor<64x32xbf16>) -> tensor<64x32xbf16> + %9 = "ttir.add"(%7, %arg4, %8) <{operandSegmentSizes = array}> : (tensor<64x32xbf16>, tensor<64x32xbf16>, tensor<64x32xbf16>) -> tensor<64x32xbf16> %10 = tensor.empty() : tensor<64x32xbf16> // CHECK: %{{.*}} = "ttnn.relu"{{.*}} -> tensor<64x32xbf16, #[[LAYOUT_10]]> - %11 = "ttir.relu"(%9, %10) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<64x32xbf16>, tensor<64x32xbf16>) -> tensor<64x32xbf16> + %11 = "ttir.relu"(%9, %10) <{operandSegmentSizes = array}> : (tensor<64x32xbf16>, 
tensor<64x32xbf16>) -> tensor<64x32xbf16>
     return %11 : tensor<64x32xbf16>
   }
 }
diff --git a/test/ttmlir/Dialect/TTNN/optimizer/l1_interleaved_policy/mnist_l1_interleaved.mlir b/test/ttmlir/Dialect/TTNN/optimizer/l1_interleaved_policy/mnist_l1_interleaved.mlir
index a4cee76569..b404bbdbb9 100644
--- a/test/ttmlir/Dialect/TTNN/optimizer/l1_interleaved_policy/mnist_l1_interleaved.mlir
+++ b/test/ttmlir/Dialect/TTNN/optimizer/l1_interleaved_policy/mnist_l1_interleaved.mlir
@@ -1,5 +1,4 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="enable-optimizer=true memory-layout-analysis-enabled=true memory-layout-analysis-policy=L1Interleaved" %s | FileCheck %s
-#any_device = #tt.operand_constraint<any_device>
 #loc = loc("MNISTLinear":4294967295:0)
 module @"tt-forge-graph" attributes {} {
   func.func @main(%arg0: tensor<1x784xf32> loc("MNISTLinear":4294967295:0), %arg1: tensor<1x10xf32> loc("MNISTLinear":4294967295:0), %arg2: tensor<256x10xf32> loc("MNISTLinear":4294967295:0), %arg3: tensor<1x256xf32> loc("MNISTLinear":4294967295:0), %arg4: tensor<784x256xf32> loc("MNISTLinear":4294967295:0)) -> tensor<1x10xf32> {
@@ -7,23 +6,23 @@ module @"tt-forge-graph" attributes {} {
     // CHECK: #[[LAYOUT_6:.*]] = #ttnn.ttnn_layout<(d0, d1) -> (d0, d1), <{{.*}}>, memref<{{.*}}, #l1_>, >
     // CHECK: #[[LAYOUT_7:.*]] = #ttnn.ttnn_layout<(d0, d1) -> (d0, d1), <{{.*}}>, memref<{{.*}}, #l1_>, >
     %0 = tensor.empty() : tensor<1x256xf32> loc(#loc8)
-    // CHECK: %[[C:.*]] = "ttnn.matmul"[[C:.*]] -> tensor<1x256xf32, #[[LAYOUT_6]]>
-    %1 = "ttir.matmul"(%arg0, %arg4, %0) <{operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<1x784xf32>, tensor<784x256xf32>, tensor<1x256xf32>) -> tensor<1x256xf32> loc(#loc8)
+    // CHECK: %{{.*}} = "ttnn.matmul"{{.*}} -> tensor<1x256xf32, #[[LAYOUT_6]]>
+    %1 = "ttir.matmul"(%arg0, %arg4, %0) : (tensor<1x784xf32>, tensor<784x256xf32>, tensor<1x256xf32>) -> tensor<1x256xf32> loc(#loc8)
     %2 = tensor.empty() : tensor<1x256xf32> loc(#loc9)
-    // CHECK: %[[C:.*]] = "ttnn.add"[[C:.*]] -> tensor<1x256xf32, #[[LAYOUT_6]]>
-    %3 = "ttir.add"(%1, %arg3, %2) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<1x256xf32>, tensor<1x256xf32>, tensor<1x256xf32>) -> tensor<1x256xf32> loc(#loc9)
+    // CHECK: %{{.*}} = "ttnn.add"{{.*}} -> tensor<1x256xf32, #[[LAYOUT_6]]>
+    %3 = "ttir.add"(%1, %arg3, %2) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<1x256xf32>, tensor<1x256xf32>, tensor<1x256xf32>) -> tensor<1x256xf32> loc(#loc9)
     %4 = tensor.empty() : tensor<1x256xf32> loc(#loc10)
-    // CHECK: %[[C:.*]] = "ttnn.relu"[[C:.*]] -> tensor<1x256xf32, #[[LAYOUT_6]]>
-    %5 = "ttir.relu"(%3, %4) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<1x256xf32>, tensor<1x256xf32>) -> tensor<1x256xf32> loc(#loc10)
+    // CHECK: %{{.*}} = "ttnn.relu"{{.*}} -> tensor<1x256xf32, #[[LAYOUT_6]]>
+    %5 = "ttir.relu"(%3, %4) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<1x256xf32>, tensor<1x256xf32>) -> tensor<1x256xf32> loc(#loc10)
     %6 = tensor.empty() : tensor<1x10xf32> loc(#loc11)
-    // CHECK: %[[C:.*]] = "ttnn.matmul"[[C:.*]] -> tensor<1x10xf32, #[[LAYOUT_7]]>
-    %7 = "ttir.matmul"(%5, %arg2, %6) <{operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<1x256xf32>, tensor<256x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32> loc(#loc11)
+    // CHECK: %{{.*}} = "ttnn.matmul"{{.*}} -> tensor<1x10xf32, #[[LAYOUT_7]]>
+    %7 = "ttir.matmul"(%5, %arg2, %6) : (tensor<1x256xf32>, tensor<256x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32> loc(#loc11)
     %8 = tensor.empty() : tensor<1x10xf32> loc(#loc12)
-    // CHECK: %[[C:.*]] = "ttnn.add"[[C:.*]] -> tensor<1x10xf32, #[[LAYOUT_7]]>
-    %9 = "ttir.add"(%7, %arg1, %8) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<1x10xf32>, tensor<1x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32> loc(#loc12)
+    // CHECK: %{{.*}} = "ttnn.add"{{.*}} -> tensor<1x10xf32, #[[LAYOUT_7]]>
+    %9 = "ttir.add"(%7, %arg1, %8) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<1x10xf32>, tensor<1x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32> loc(#loc12)
     %10 = tensor.empty() : tensor<1x10xf32> loc(#loc13)
     // CHECK: %{{.*}} = "ttnn.softmax"{{.*}} -> tensor<1x10xf32, #[[LAYOUT_7]]>
-    %11 = "ttir.softmax"(%9, %10) <{dimension = 1 : si32, operand_constraints = [#any_device, #any_device]}> : (tensor<1x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32> loc(#loc13)
+    %11 = "ttir.softmax"(%9, %10) <{dimension = 1 : si32}> : (tensor<1x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32> loc(#loc13)
     return %11 : tensor<1x10xf32> loc(#loc7)
   } loc(#loc)
 } loc(#loc)
diff --git a/test/ttmlir/Dialect/TTNN/optimizer/l1_interleaved_policy/simple_join_tests/dram_A_l1_BC.mlir b/test/ttmlir/Dialect/TTNN/optimizer/l1_interleaved_policy/simple_join_tests/dram_A_l1_BC.mlir
index b5715b5a13..6d1f9c4b87 100644
--- a/test/ttmlir/Dialect/TTNN/optimizer/l1_interleaved_policy/simple_join_tests/dram_A_l1_BC.mlir
+++ b/test/ttmlir/Dialect/TTNN/optimizer/l1_interleaved_policy/simple_join_tests/dram_A_l1_BC.mlir
@@ -10,7 +10,6 @@
 // =>
 // DRAM: A; L1: BC
 //
-#any_device = #tt.operand_constraint<any_device>
 module attributes {} {
   func.func @forward(%arg0: tensor<2048x2048xbf16>, %arg1: tensor<2048x2048xbf16>, %arg2: tensor<2048x8192xbf16>, %arg3: tensor<2048x8192xbf16>) -> tensor<2048x8192xbf16> {
     // CHECK: #[[L1_:.*]] = #ttnn.buffer_type
@@ -18,13 +17,13 @@ module attributes {} {
     // CHECK-DAG: #[[LAYOUT_5:.*]] = #ttnn.ttnn_layout<(d0, d1) -> (d0, d1), <{{.*}}>, memref<256x1024xbf16, #l1_>,
     %0 = tensor.empty() : tensor<2048x2048xbf16>
     // CHECK-DAG: %{{.*}} = "ttnn.add"{{.*}} -> tensor<2048x2048xbf16, #[[LAYOUT_3]]>
-    %1 = "ttir.add"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<2048x2048xbf16>, tensor<2048x2048xbf16>, tensor<2048x2048xbf16>) -> tensor<2048x2048xbf16>
+    %1 = "ttir.add"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<2048x2048xbf16>, tensor<2048x2048xbf16>, tensor<2048x2048xbf16>) -> tensor<2048x2048xbf16>
     %2 = tensor.empty() : tensor<2048x8192xbf16>
     // CHECK-DAG: %{{.*}} = "ttnn.add"{{.*}} -> tensor<2048x8192xbf16, #[[LAYOUT_5]]>
-    %3 = "ttir.add"(%arg2, %arg3, %2) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<2048x8192xbf16>, tensor<2048x8192xbf16>, tensor<2048x8192xbf16>) -> tensor<2048x8192xbf16>
+    %3 = "ttir.add"(%arg2, %arg3, %2) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<2048x8192xbf16>, tensor<2048x8192xbf16>, tensor<2048x8192xbf16>) -> tensor<2048x8192xbf16>
     %4 = tensor.empty() : tensor<2048x8192xbf16>
     // CHECK-DAG: %{{.*}} = "ttnn.matmul"{{.*}} -> tensor<2048x8192xbf16, #[[LAYOUT_5]]>
-    %5 = "ttir.matmul"(%1, %3, %4) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<2048x2048xbf16>, tensor<2048x8192xbf16>, tensor<2048x8192xbf16>) -> tensor<2048x8192xbf16>
+    %5 = "ttir.matmul"(%1, %3, %4) : (tensor<2048x2048xbf16>, tensor<2048x8192xbf16>, tensor<2048x8192xbf16>) -> tensor<2048x8192xbf16>
     return %5
: tensor<2048x8192xbf16> } } diff --git a/test/ttmlir/Dialect/TTNN/optimizer/mnist_sharding.mlir b/test/ttmlir/Dialect/TTNN/optimizer/mnist_sharding.mlir index 55f3a60548..4d1b813cad 100644 --- a/test/ttmlir/Dialect/TTNN/optimizer/mnist_sharding.mlir +++ b/test/ttmlir/Dialect/TTNN/optimizer/mnist_sharding.mlir @@ -1,27 +1,28 @@ -// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="enable-optimizer=true memory-layout-analysis-enabled=true" %s | FileCheck %s -#any_device = #tt.operand_constraint +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path% enable-optimizer=true memory-layout-analysis-enabled=true" %s > %t.mlir +// RUN: FileCheck %s --input-file=%t.mlir +// RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn #loc = loc("MNISTLinear":4294967295:0) module @"tt-forge-graph" attributes {} { func.func @main(%arg0: tensor<1x784xf32> loc("MNISTLinear":4294967295:0), %arg1: tensor<1x10xf32> loc("MNISTLinear":4294967295:0), %arg2: tensor<256x10xf32> loc("MNISTLinear":4294967295:0), %arg3: tensor<1x256xf32> loc("MNISTLinear":4294967295:0), %arg4: tensor<784x256xf32> loc("MNISTLinear":4294967295:0)) -> tensor<1x10xf32> { // CHECK: #[[LAYOUT_10:.*]] = #ttnn.ttnn_layout<(d0, d1) -> (d0, d1), <1x8>, memref<1x32xf32, #l1_>, > // CHECK: #[[LAYOUT_11:.*]] = #ttnn.ttnn_layout<(d0, d1) -> (d0, d1), <1x1>, memref<1x10xf32, #l1_>, > %0 = tensor.empty() : tensor<1x256xf32> loc(#loc8) - // CHECK: %[[C:.*]] = "ttnn.matmul"[[C:.*]] -> tensor<1x256xf32, #[[LAYOUT_10]]> - %1 = "ttir.matmul"(%arg0, %arg4, %0) <{operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<1x784xf32>, tensor<784x256xf32>, tensor<1x256xf32>) -> tensor<1x256xf32> loc(#loc8) + // CHECK: %{{.*}} = "ttnn.matmul"{{.*}} -> tensor<1x256xf32, #[[LAYOUT_10]]> + %1 = "ttir.matmul"(%arg0, %arg4, %0) : (tensor<1x784xf32>, tensor<784x256xf32>, tensor<1x256xf32>) -> tensor<1x256xf32> loc(#loc8) %2 = tensor.empty() : tensor<1x256xf32> loc(#loc9) - // CHECK: %[[C:.*]] = "ttnn.add"[[C:.*]] -> tensor<1x256xf32, #[[LAYOUT_10]]> - %3 = "ttir.add"(%1, %arg3, %2) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<1x256xf32>, tensor<1x256xf32>, tensor<1x256xf32>) -> tensor<1x256xf32> loc(#loc9) + // CHECK: %{{.*}} = "ttnn.add"{{.*}} -> tensor<1x256xf32, #[[LAYOUT_10]]> + %3 = "ttir.add"(%1, %arg3, %2) <{operandSegmentSizes = array}> : (tensor<1x256xf32>, tensor<1x256xf32>, tensor<1x256xf32>) -> tensor<1x256xf32> loc(#loc9) %4 = tensor.empty() : tensor<1x256xf32> loc(#loc10) - // CHECK: %[[C:.*]] = "ttnn.relu"[[C:.*]] -> tensor<1x256xf32, #[[LAYOUT_10]]> - %5 = "ttir.relu"(%3, %4) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device]}> : (tensor<1x256xf32>, tensor<1x256xf32>) -> tensor<1x256xf32> loc(#loc10) + // CHECK: %{{.*}} = "ttnn.relu"{{.*}} -> tensor<1x256xf32, #[[LAYOUT_10]]> + %5 = "ttir.relu"(%3, %4) <{operandSegmentSizes = array}> : (tensor<1x256xf32>, tensor<1x256xf32>) -> tensor<1x256xf32> loc(#loc10) %6 = tensor.empty() : tensor<1x10xf32> loc(#loc11) - // CHECK: %[[C:.*]] = "ttnn.matmul"[[C:.*]] -> tensor<1x10xf32, #[[LAYOUT_11]]> - %7 = "ttir.matmul"(%5, %arg2, %6) <{operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<1x256xf32>, tensor<256x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32> loc(#loc11) + // CHECK: %{{.*}} = "ttnn.matmul"{{.*}} -> tensor<1x10xf32, #[[LAYOUT_11]]> + %7 = "ttir.matmul"(%5, %arg2, %6) : (tensor<1x256xf32>, tensor<256x10xf32>, tensor<1x10xf32>) -> 
tensor<1x10xf32> loc(#loc11) %8 = tensor.empty() : tensor<1x10xf32> loc(#loc12) - // CHECK: %[[C:.*]] = "ttnn.add"[[C:.*]] -> tensor<1x10xf32, #[[LAYOUT_11]]> - %9 = "ttir.add"(%7, %arg1, %8) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<1x10xf32>, tensor<1x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32> loc(#loc12) + // CHECK: %{{.*}} = "ttnn.add"{{.*}} -> tensor<1x10xf32, #[[LAYOUT_11]]> + %9 = "ttir.add"(%7, %arg1, %8) <{operandSegmentSizes = array}> : (tensor<1x10xf32>, tensor<1x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32> loc(#loc12) %10 = tensor.empty() : tensor<1x10xf32> loc(#loc13) - %11 = "ttir.softmax"(%9, %10) <{dimension = 1 : si32, operand_constraints = [#any_device, #any_device]}> : (tensor<1x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32> loc(#loc13) + %11 = "ttir.softmax"(%9, %10) <{dimension = 1 : si32}> : (tensor<1x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32> loc(#loc13) return %11 : tensor<1x10xf32> loc(#loc7) } loc(#loc) } loc(#loc) diff --git a/test/ttmlir/Dialect/TTNN/optimizer/sharding_matmul_override_0.mlir b/test/ttmlir/Dialect/TTNN/optimizer/sharding_matmul_override_0.mlir index fbe476cbab..e893e5d2c7 100644 --- a/test/ttmlir/Dialect/TTNN/optimizer/sharding_matmul_override_0.mlir +++ b/test/ttmlir/Dialect/TTNN/optimizer/sharding_matmul_override_0.mlir @@ -1,14 +1,13 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="enable-optimizer=true memory-layout-analysis-enabled=true max-legal-layouts=0" %s | FileCheck %s -#any_device_tile = #tt.operand_constraint module attributes {} { func.func @forward(%arg0: tensor<64x128xbf16>, %arg1: tensor<128x96xbf16>, %arg2: tensor<96x64xbf16>) -> tensor<64x64xbf16> { // CHECK: #[[LAYOUT_7:ttnn_layout7]] = #ttnn.ttnn_layout<{{.*}}, memref<{{.*}}, #dram>, {{.*}}> %0 = tensor.empty() : tensor<64x96xbf16> // CHECK: {{.*}} = "ttnn.matmul"{{.*}} -> tensor<64x96xbf16, #[[LAYOUT_7]]> - %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<64x128xbf16>, tensor<128x96xbf16>, tensor<64x96xbf16>) -> tensor<64x96xbf16> + %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<64x128xbf16>, tensor<128x96xbf16>, tensor<64x96xbf16>) -> tensor<64x96xbf16> %2 = tensor.empty() : tensor<64x64xbf16> // CHECK: {{.*}} = "ttnn.matmul"{{.*}} -> tensor<64x64xbf16, #[[LAYOUT_7]]> - %3 = "ttir.matmul"(%1, %arg2, %2) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<64x96xbf16>, tensor<96x64xbf16>, tensor<64x64xbf16>) -> tensor<64x64xbf16> + %3 = "ttir.matmul"(%1, %arg2, %2) : (tensor<64x96xbf16>, tensor<96x64xbf16>, tensor<64x64xbf16>) -> tensor<64x64xbf16> return %3 : tensor<64x64xbf16> } } diff --git a/test/ttmlir/Dialect/TTNN/optimizer/sharding_matmul_override_32.mlir b/test/ttmlir/Dialect/TTNN/optimizer/sharding_matmul_override_32.mlir index 8c372be463..aa4616360b 100644 --- a/test/ttmlir/Dialect/TTNN/optimizer/sharding_matmul_override_32.mlir +++ b/test/ttmlir/Dialect/TTNN/optimizer/sharding_matmul_override_32.mlir @@ -1,14 +1,13 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="enable-optimizer=true memory-layout-analysis-enabled=true max-legal-layouts=32" %s | FileCheck %s -#any_device_tile = #tt.operand_constraint module attributes {} { func.func @forward(%arg0: tensor<64x128xbf16>, %arg1: tensor<128x96xbf16>, %arg2: tensor<96x64xbf16>) -> tensor<64x64xbf16> { // CHECK: #[[L1_:.*]] = #ttnn.buffer_type // CHECK: #[[LAYOUT_7:ttnn_layout7]] = #ttnn.ttnn_layout<{{.*}}, 
memref<{{.*}}, #l1_>, {{.*}}> %0 = tensor.empty() : tensor<64x96xbf16> // CHECK: {{.*}} = "ttnn.matmul"{{.*}} -> tensor<64x96xbf16, #[[LAYOUT_7]]> - %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<64x128xbf16>, tensor<128x96xbf16>, tensor<64x96xbf16>) -> tensor<64x96xbf16> + %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<64x128xbf16>, tensor<128x96xbf16>, tensor<64x96xbf16>) -> tensor<64x96xbf16> %2 = tensor.empty() : tensor<64x64xbf16> - %3 = "ttir.matmul"(%1, %arg2, %2) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<64x96xbf16>, tensor<96x64xbf16>, tensor<64x64xbf16>) -> tensor<64x64xbf16> + %3 = "ttir.matmul"(%1, %arg2, %2) : (tensor<64x96xbf16>, tensor<96x64xbf16>, tensor<64x64xbf16>) -> tensor<64x64xbf16> return %3 : tensor<64x64xbf16> } } diff --git a/test/ttmlir/Dialect/TTNN/remove_empty_op.mlir b/test/ttmlir/Dialect/TTNN/remove_empty_op.mlir index 9640d91e27..19c9bb9a75 100644 --- a/test/ttmlir/Dialect/TTNN/remove_empty_op.mlir +++ b/test/ttmlir/Dialect/TTNN/remove_empty_op.mlir @@ -1,11 +1,10 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s -#any_device_tile = #tt.operand_constraint module attributes {} { func.func @forward(%arg0: tensor<4x2x32x32xbf16>) -> tensor<2x4x32x32xbf16> { // CHECK-NOT: "ttnn.empty" %0 = tensor.empty() : tensor<2x4x32x32xbf16> // CHECK: %[[C:.*]] = "ttnn.reshape"[[C:.*]] - %1 = "ttir.reshape"(%arg0, %0) <{shape = [2: i32, 4: i32, 32: i32, 32: i32] , operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<4x2x32x32xbf16>, tensor<2x4x32x32xbf16>) -> tensor<2x4x32x32xbf16> + %1 = "ttir.reshape"(%arg0, %0) <{shape = [2: i32, 4: i32, 32: i32, 32: i32]}> : (tensor<4x2x32x32xbf16>, tensor<2x4x32x32xbf16>) -> tensor<2x4x32x32xbf16> return %1 : tensor<2x4x32x32xbf16> } } diff --git a/test/ttmlir/Dialect/TTNN/simple_broadcast.mlir b/test/ttmlir/Dialect/TTNN/simple_broadcast.mlir index e7aac7e2e9..251924caa1 100644 --- a/test/ttmlir/Dialect/TTNN/simple_broadcast.mlir +++ b/test/ttmlir/Dialect/TTNN/simple_broadcast.mlir @@ -1,13 +1,12 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s -#any_device_tile = #tt.operand_constraint module @jit_broadcast attributes {mhlo.num_partitions = 1 : i32, mhlo.num_replicas = 1 : i32} { func.func public @main(%arg0: tensor<1xf32> {mhlo.layout_mode = "default"}, %arg1: tensor<512x512xf32> {mhlo.layout_mode = "default"}) -> (tensor<512x512xf32> {jax.result_info = "", mhlo.layout_mode = "default"}) { // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] // CHECK-NOT: %[[C:.*]] = "ttnn.broadcast"[[C:.*]] %0 = tensor.empty() : tensor<512x512xf32> - %1 = "ttir.broadcast"(%arg0, %0) <{dimension = [1], operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<1xf32>, tensor<512x512xf32>) -> tensor<512x512xf32> + %1 = "ttir.broadcast"(%arg0, %0) <{dimension = [1]}> : (tensor<1xf32>, tensor<512x512xf32>) -> tensor<512x512xf32> %2 = tensor.empty() : tensor<512x512xf32> - %3 = "ttir.maximum"(%1, %arg1, %2) <{operandSegmentSizes = array, operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<512x512xf32>, tensor<512x512xf32>, tensor<512x512xf32>) -> tensor<512x512xf32> + %3 = "ttir.maximum"(%1, %arg1, %2) <{operandSegmentSizes = array}> : (tensor<512x512xf32>, tensor<512x512xf32>, tensor<512x512xf32>) -> tensor<512x512xf32> return %3 : tensor<512x512xf32> } } diff --git a/test/ttmlir/Dialect/TTNN/simple_compare.mlir 
b/test/ttmlir/Dialect/TTNN/simple_compare.mlir index 3a0ce12eca..873ae745ce 100644 --- a/test/ttmlir/Dialect/TTNN/simple_compare.mlir +++ b/test/ttmlir/Dialect/TTNN/simple_compare.mlir @@ -1,7 +1,4 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s - -#any_device = #tt.operand_constraint - module attributes {} { func.func @equal(%arg0: tensor<13x31xf32>, %arg1: tensor<13x31xf32>) -> tensor<13x31xf32> { // CHECK: %[[C:.*]] = "ttnn.empty" @@ -12,7 +9,7 @@ module attributes {} { // CHECK-SAME: [[TENSOR]] // CHECK-SAME: [[TENSOR]] // CHECK-SAME: -> [[TENSOR]] - %1 = "ttir.eq"(%arg0, %arg1, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32> + %1 = "ttir.eq"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32> return %1 : tensor<13x31xf32> } } @@ -27,7 +24,7 @@ module attributes {} { // CHECK-SAME: [[TENSOR]] // CHECK-SAME: [[TENSOR]] // CHECK-SAME: -> [[TENSOR]] - %1 = "ttir.ne"(%arg0, %arg1, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32> + %1 = "ttir.ne"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32> return %1 : tensor<13x31xf32> } } @@ -42,7 +39,7 @@ module attributes {} { // CHECK-SAME: [[TENSOR]] // CHECK-SAME: [[TENSOR]] // CHECK-SAME: -> [[TENSOR]] - %1 = "ttir.ge"(%arg0, %arg1, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32> + %1 = "ttir.ge"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32> return %1 : tensor<13x31xf32> } } @@ -57,7 +54,7 @@ module attributes {} { // CHECK-SAME: [[TENSOR]] // CHECK-SAME: [[TENSOR]] // CHECK-SAME: -> [[TENSOR]] - %1 = "ttir.gt"(%arg0, %arg1, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32> + %1 = "ttir.gt"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32> return %1 : tensor<13x31xf32> } } @@ -72,7 +69,7 @@ module attributes {} { // CHECK-SAME: [[TENSOR]] // CHECK-SAME: [[TENSOR]] // CHECK-SAME: -> [[TENSOR]] - %1 = "ttir.le"(%arg0, %arg1, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32> + %1 = "ttir.le"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32> return %1 : tensor<13x31xf32> } } @@ -87,7 +84,7 @@ module attributes {} { // CHECK-SAME: [[TENSOR]] // CHECK-SAME: [[TENSOR]] // CHECK-SAME: -> [[TENSOR]] - %1 = "ttir.lt"(%arg0, %arg1, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32> + %1 = "ttir.lt"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32> return %1 : tensor<13x31xf32> } } 
diff --git a/test/ttmlir/Dialect/TTNN/simple_constant.mlir b/test/ttmlir/Dialect/TTNN/simple_constant.mlir index 017a1baf0c..53de9a5ee1 100644 --- a/test/ttmlir/Dialect/TTNN/simple_constant.mlir +++ b/test/ttmlir/Dialect/TTNN/simple_constant.mlir @@ -1,5 +1,4 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s -#any_device = #tt.operand_constraint module attributes {} { func.func @test_empty_int8() -> tensor<64x128xi8> { %0 = "ttir.constant"() <{value = dense<0> : tensor<64x128xi8>}> : () -> tensor<64x128xi8> diff --git a/test/ttmlir/Dialect/TTNN/simple_div.mlir b/test/ttmlir/Dialect/TTNN/simple_div.mlir index 2dad760031..15d2b4820c 100644 --- a/test/ttmlir/Dialect/TTNN/simple_div.mlir +++ b/test/ttmlir/Dialect/TTNN/simple_div.mlir @@ -1,11 +1,10 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s -#any_device = #tt.operand_constraint module attributes {} { func.func @forward(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = tensor.empty() : tensor<64x128xf32> // CHECK: %[[C:.*]] = "ttnn.div"[[C:.*]] - %1 = "ttir.div"(%arg0, %arg1, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + %1 = "ttir.div"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> return %1 : tensor<64x128xf32> } } diff --git a/test/ttmlir/Dialect/TTNN/simple_get_dimension_size.mlir b/test/ttmlir/Dialect/TTNN/simple_get_dimension_size.mlir index 6b37e89d73..f3bd6dab00 100644 --- a/test/ttmlir/Dialect/TTNN/simple_get_dimension_size.mlir +++ b/test/ttmlir/Dialect/TTNN/simple_get_dimension_size.mlir @@ -1,5 +1,4 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s -#any_device = #tt.operand_constraint module attributes {} { func.func @forward(%arg0: tensor<13x21x3xf32>) -> tensor<1xi32> { %0 = "ttir.get_dimension_size"(%arg0) <{dimension = 1 : i32}> : (tensor<13x21x3xf32>) -> tensor<1xi32> diff --git a/test/ttmlir/Dialect/TTNN/simple_max.mlir b/test/ttmlir/Dialect/TTNN/simple_max.mlir index ce791beb4c..34a0120b25 100644 --- a/test/ttmlir/Dialect/TTNN/simple_max.mlir +++ b/test/ttmlir/Dialect/TTNN/simple_max.mlir @@ -1,10 +1,9 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s -#any_device = #tt.operand_constraint module attributes {} { func.func @forward(%arg0: tensor<512x32xbf16>) -> tensor<512xbf16> { %0 = tensor.empty() : tensor<512xbf16> // CHECK: %[[C:.*]] = "ttnn.max"[[C:.*]] - %1 = "ttir.max"(%arg0, %0) <{dim_arg = [1: i32], keep_dim = false, operand_constraints = [#any_device, #any_device]}> : (tensor<512x32xbf16>, tensor<512xbf16>) -> tensor<512xbf16> + %1 = "ttir.max"(%arg0, %0) <{dim_arg = [1: i32], keep_dim = false}> : (tensor<512x32xbf16>, tensor<512xbf16>) -> tensor<512xbf16> return %1 : tensor<512xbf16> } } diff --git a/test/ttmlir/Dialect/TTNN/simple_maximum.mlir b/test/ttmlir/Dialect/TTNN/simple_maximum.mlir index 2cf8b525a1..cd87754fa0 100644 --- a/test/ttmlir/Dialect/TTNN/simple_maximum.mlir +++ b/test/ttmlir/Dialect/TTNN/simple_maximum.mlir @@ -1,11 +1,10 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s -#any_device = #tt.operand_constraint module attributes {} { func.func @forward(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = 
tensor.empty() : tensor<64x128xf32> // CHECK: %[[C:.*]] = "ttnn.maximum"[[C:.*]] - %1 = "ttir.maximum"(%arg0, %arg1, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + %1 = "ttir.maximum"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> return %1 : tensor<64x128xf32> } } diff --git a/test/ttmlir/Dialect/TTNN/simple_mean.mlir b/test/ttmlir/Dialect/TTNN/simple_mean.mlir index a0fe0523a7..efcba0a136 100644 --- a/test/ttmlir/Dialect/TTNN/simple_mean.mlir +++ b/test/ttmlir/Dialect/TTNN/simple_mean.mlir @@ -1,10 +1,9 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s -#any_device = #tt.operand_constraint module { func.func @forward(%arg0: tensor<512x1024xbf16>) -> tensor<512x32xbf16> { %0 = tensor.empty() : tensor<512x32xbf16> // CHECK: %[[C:.*]] = "ttnn.mean"[[C:.*]] - %1 = "ttir.mean"(%arg0, %0) <{dim_arg = [-1: i32], keep_dim = true, operand_constraints = [#any_device, #any_device]}> : (tensor<512x1024xbf16>, tensor<512x32xbf16>) -> tensor<512x32xbf16> + %1 = "ttir.mean"(%arg0, %0) <{dim_arg = [-1: i32], keep_dim = true}> : (tensor<512x1024xbf16>, tensor<512x32xbf16>) -> tensor<512x32xbf16> return %1 : tensor<512x32xbf16> } } diff --git a/test/ttmlir/Dialect/TTNN/simple_multiply.mlir b/test/ttmlir/Dialect/TTNN/simple_multiply.mlir index 8421d96893..795f65efe0 100644 --- a/test/ttmlir/Dialect/TTNN/simple_multiply.mlir +++ b/test/ttmlir/Dialect/TTNN/simple_multiply.mlir @@ -1,11 +1,10 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s -#any_device = #tt.operand_constraint module attributes {} { func.func @forward(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = tensor.empty() : tensor<64x128xf32> // CHECK: %[[C:.*]] = "ttnn.multiply"[[C:.*]] - %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> return %1 : tensor<64x128xf32> } } diff --git a/test/ttmlir/Dialect/TTNN/simple_reshape.mlir b/test/ttmlir/Dialect/TTNN/simple_reshape.mlir index 6b7c0edfe8..29e651239d 100644 --- a/test/ttmlir/Dialect/TTNN/simple_reshape.mlir +++ b/test/ttmlir/Dialect/TTNN/simple_reshape.mlir @@ -1,10 +1,9 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s| FileCheck %s -#any_device_tile = #tt.operand_constraint module attributes {} { func.func @forward(%arg0: tensor<4x2x32x32xbf16>) -> tensor<2x4x32x32xbf16> { %0 = tensor.empty() : tensor<2x4x32x32xbf16> // CHECK: %[[C:.*]] = "ttnn.reshape"[[C:.*]] - %1 = "ttir.reshape"(%arg0, %0) <{shape = [2: i32, 4: i32, 32: i32, 32: i32] , operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<4x2x32x32xbf16>, tensor<2x4x32x32xbf16>) -> tensor<2x4x32x32xbf16> + %1 = "ttir.reshape"(%arg0, %0) <{shape = [2: i32, 4: i32, 32: i32, 32: i32]}> : (tensor<4x2x32x32xbf16>, tensor<2x4x32x32xbf16>) -> tensor<2x4x32x32xbf16> return %1 : tensor<2x4x32x32xbf16> } } diff --git a/test/ttmlir/Dialect/TTNN/simple_slice.mlir b/test/ttmlir/Dialect/TTNN/simple_slice.mlir index cc4e2063c1..d8ff26bc3e 100644 --- 
a/test/ttmlir/Dialect/TTNN/simple_slice.mlir +++ b/test/ttmlir/Dialect/TTNN/simple_slice.mlir @@ -1,10 +1,9 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s| FileCheck %s -#any_device_tile = #tt.operand_constraint module attributes {} { func.func @forward(%arg0: tensor<4x32x32xbf16>) -> tensor<2x16x16xbf16> { %0 = tensor.empty() : tensor<2x16x16xbf16> // CHECK: %[[C:.*]] = "ttnn.slice"[[C:.*]] - %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 0: i32, 0: i32], ends = [2: i32, 16: i32, 16: i32], step = [1: i32, 1: i32, 1: i32], operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<4x32x32xbf16>, tensor<2x16x16xbf16>) -> tensor<2x16x16xbf16> + %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 0: i32, 0: i32], ends = [2: i32, 16: i32, 16: i32], step = [1: i32, 1: i32, 1: i32]}> : (tensor<4x32x32xbf16>, tensor<2x16x16xbf16>) -> tensor<2x16x16xbf16> return %1 : tensor<2x16x16xbf16> } } diff --git a/test/ttmlir/Dialect/TTNN/simple_squeeze.mlir b/test/ttmlir/Dialect/TTNN/simple_squeeze.mlir index 34367c4736..e8bac061e6 100644 --- a/test/ttmlir/Dialect/TTNN/simple_squeeze.mlir +++ b/test/ttmlir/Dialect/TTNN/simple_squeeze.mlir @@ -1,10 +1,9 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s| FileCheck %s -#any_device_tile = #tt.operand_constraint module attributes {} { func.func @forward(%arg0: tensor<1x2x1x32x32xbf16>) -> tensor<1x2x32x32xbf16> { %0 = tensor.empty() : tensor<1x2x32x32xbf16> // CHECK: %[[C:.*]] = "ttnn.reshape"[[C:.*]] - %1 = "ttir.squeeze"(%arg0, %0) <{dim = -3 : si32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<1x2x1x32x32xbf16>, tensor<1x2x32x32xbf16>) -> tensor<1x2x32x32xbf16> + %1 = "ttir.squeeze"(%arg0, %0) <{dim = -3 : si32}> : (tensor<1x2x1x32x32xbf16>, tensor<1x2x32x32xbf16>) -> tensor<1x2x32x32xbf16> return %1 : tensor<1x2x32x32xbf16> } } diff --git a/test/ttmlir/Dialect/TTNN/simple_subtract.mlir b/test/ttmlir/Dialect/TTNN/simple_subtract.mlir index 9716ac2919..f4c69ea401 100644 --- a/test/ttmlir/Dialect/TTNN/simple_subtract.mlir +++ b/test/ttmlir/Dialect/TTNN/simple_subtract.mlir @@ -1,11 +1,10 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s -#any_device = #tt.operand_constraint module attributes {} { func.func @forward(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = tensor.empty() : tensor<64x128xf32> // CHECK: %[[C:.*]] = "ttnn.subtract"[[C:.*]] - %1 = "ttir.subtract"(%arg0, %arg1, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + %1 = "ttir.subtract"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> return %1 : tensor<64x128xf32> } } diff --git a/test/ttmlir/Dialect/TTNN/simple_subtract_to_add.mlir b/test/ttmlir/Dialect/TTNN/simple_subtract_to_add.mlir index 59c4eb9011..4703a1fdd4 100644 --- a/test/ttmlir/Dialect/TTNN/simple_subtract_to_add.mlir +++ b/test/ttmlir/Dialect/TTNN/simple_subtract_to_add.mlir @@ -1,5 +1,4 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s -#any_device = #tt.operand_constraint module attributes {} { func.func @forward(%arg0: tensor<64x128xf32>, %arg1: tensor<1x128xf32>) -> tensor<64x128xf32> { // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] @@ -7,7 +6,7 @@ module attributes {} { // CHECK: %[[C:.*]] = "ttnn.neg"[[C:.*]] // CHECK: %[[C:.*]] = 
"ttnn.add"[[C:.*]] // CHECK-NOT: %[[C:.*]] = "ttnn.subtract"[[C:.*]] - %1 = "ttir.subtract"(%arg0, %arg1, %0) <{operandSegmentSizes = array, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<1x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + %1 = "ttir.subtract"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<1x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> return %1 : tensor<64x128xf32> } } diff --git a/test/ttmlir/Dialect/TTNN/simple_sum.mlir b/test/ttmlir/Dialect/TTNN/simple_sum.mlir index 1b183dee61..2b107b068a 100644 --- a/test/ttmlir/Dialect/TTNN/simple_sum.mlir +++ b/test/ttmlir/Dialect/TTNN/simple_sum.mlir @@ -1,10 +1,9 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s -#any_device = #tt.operand_constraint module attributes {} { func.func @forward(%arg0: tensor<512x1024xbf16>) -> tensor<512x32xbf16> { %0 = tensor.empty() : tensor<512x32xbf16> // CHECK: %[[C:.*]] = "ttnn.sum"[[C:.*]] - %1 = "ttir.sum"(%arg0, %0) <{dim_arg = [-1: i32], keep_dim = true, operand_constraints = [#any_device, #any_device]}> : (tensor<512x1024xbf16>, tensor<512x32xbf16>) -> tensor<512x32xbf16> + %1 = "ttir.sum"(%arg0, %0) <{dim_arg = [-1: i32], keep_dim = true}> : (tensor<512x1024xbf16>, tensor<512x32xbf16>) -> tensor<512x32xbf16> return %1 : tensor<512x32xbf16> } } diff --git a/test/ttmlir/Dialect/TTNN/simple_unsqueeze.mlir b/test/ttmlir/Dialect/TTNN/simple_unsqueeze.mlir index 2400b6b5ed..95daab27e0 100644 --- a/test/ttmlir/Dialect/TTNN/simple_unsqueeze.mlir +++ b/test/ttmlir/Dialect/TTNN/simple_unsqueeze.mlir @@ -1,10 +1,9 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s| FileCheck %s -#any_device_tile = #tt.operand_constraint module attributes {} { func.func @forward(%arg0: tensor<4x2x32x32xbf16>) -> tensor<4x1x2x32x32xbf16> { %0 = tensor.empty() : tensor<4x1x2x32x32xbf16> // CHECK: %[[C:.*]] = "ttnn.reshape"[[C:.*]] - %1 = "ttir.unsqueeze"(%arg0, %0) <{dim = -4 : si32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<4x2x32x32xbf16>, tensor<4x1x2x32x32xbf16>) -> tensor<4x1x2x32x32xbf16> + %1 = "ttir.unsqueeze"(%arg0, %0) <{dim = -4 : si32}> : (tensor<4x2x32x32xbf16>, tensor<4x1x2x32x32xbf16>) -> tensor<4x1x2x32x32xbf16> return %1 : tensor<4x1x2x32x32xbf16> } } diff --git a/test/ttmlir/Dialect/TTNN/softmax/simple_softmax.mlir b/test/ttmlir/Dialect/TTNN/softmax/simple_softmax.mlir index ec05a3006e..0d7bfc90a8 100644 --- a/test/ttmlir/Dialect/TTNN/softmax/simple_softmax.mlir +++ b/test/ttmlir/Dialect/TTNN/softmax/simple_softmax.mlir @@ -1,15 +1,14 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s -#any_device = #tt.operand_constraint module attributes {} { func.func @forward(%arg0: tensor<512x1024xbf16>) -> tensor<512x1024xbf16> { %0 = tensor.empty() : tensor<512x1024xbf16> // CHECK: %[[C:.*]] = "ttnn.softmax"[[C:.*]] // Check for positive dimension attribute - %1 = "ttir.softmax"(%arg0, %0) <{dimension = 1 : si32, operand_constraints = [#any_device, #any_device]}> : (tensor<512x1024xbf16>, tensor<512x1024xbf16>) -> tensor<512x1024xbf16> + %1 = "ttir.softmax"(%arg0, %0) <{dimension = 1 : si32}> : (tensor<512x1024xbf16>, tensor<512x1024xbf16>) -> tensor<512x1024xbf16> %2 = tensor.empty() : tensor<512x1024xbf16> // CHECK: %[[C:.*]] = "ttnn.softmax"[[C:.*]] // Check for negative dimension attribute - %3 = "ttir.softmax"(%1, %2) <{dimension = -1 : si32, operand_constraints = [#any_device, #any_device]}> : (tensor<512x1024xbf16>, tensor<512x1024xbf16>) 
-> tensor<512x1024xbf16> + %3 = "ttir.softmax"(%1, %2) <{dimension = -1 : si32}> : (tensor<512x1024xbf16>, tensor<512x1024xbf16>) -> tensor<512x1024xbf16> return %3 : tensor<512x1024xbf16> } } diff --git a/test/ttmlir/Dialect/TTNN/softmax/softmax_negative_1.mlir b/test/ttmlir/Dialect/TTNN/softmax/softmax_negative_1.mlir index 8e9b2f0834..ce302b1393 100644 --- a/test/ttmlir/Dialect/TTNN/softmax/softmax_negative_1.mlir +++ b/test/ttmlir/Dialect/TTNN/softmax/softmax_negative_1.mlir @@ -1,10 +1,9 @@ // RUN: not ttmlir-opt --ttir-load-system-desc --ttir-layout --convert-ttir-to-ttnn %s 2>&1 | FileCheck %s // CHECK: error: 'ttir.softmax' op Dimension attribute must be within the bounds of the input tensor -#any_device = #tt.operand_constraint module attributes {} { func.func @forward(%arg0: tensor<512x1024xbf16>) -> tensor<512x1024xbf16> { %0 = tensor.empty() : tensor<512x1024xbf16> - %1 = "ttir.softmax"(%arg0, %0) <{dimension = 2 : si32, operand_constraints = [#any_device, #any_device]}> : (tensor<512x1024xbf16>, tensor<512x1024xbf16>) -> tensor<512x1024xbf16> + %1 = "ttir.softmax"(%arg0, %0) <{dimension = 2 : si32}> : (tensor<512x1024xbf16>, tensor<512x1024xbf16>) -> tensor<512x1024xbf16> return %1 : tensor<512x1024xbf16> } } diff --git a/test/ttmlir/Dialect/TTNN/softmax/softmax_negative_2.mlir b/test/ttmlir/Dialect/TTNN/softmax/softmax_negative_2.mlir index 43a0a97f31..e42e0335c2 100644 --- a/test/ttmlir/Dialect/TTNN/softmax/softmax_negative_2.mlir +++ b/test/ttmlir/Dialect/TTNN/softmax/softmax_negative_2.mlir @@ -1,10 +1,9 @@ // RUN: not ttmlir-opt --ttir-load-system-desc --ttir-layout --convert-ttir-to-ttnn %s 2>&1 | FileCheck %s // CHECK: error: 'ttir.softmax' op Dimension attribute must be within the bounds of the input tensor -#any_device = #tt.operand_constraint module attributes {} { func.func @forward(%arg0: tensor<512x1024xbf16>) -> tensor<512x1024xbf16> { %0 = tensor.empty() : tensor<512x1024xbf16> - %1 = "ttir.softmax"(%arg0, %0) <{dimension = -3 : si32, operand_constraints = [#any_device, #any_device]}> : (tensor<512x1024xbf16>, tensor<512x1024xbf16>) -> tensor<512x1024xbf16> + %1 = "ttir.softmax"(%arg0, %0) <{dimension = -3 : si32}> : (tensor<512x1024xbf16>, tensor<512x1024xbf16>) -> tensor<512x1024xbf16> return %1 : tensor<512x1024xbf16> } } diff --git a/test/ttmlir/Dialect/TTNN/transpose/simple_transpose.mlir b/test/ttmlir/Dialect/TTNN/transpose/simple_transpose.mlir index fbf377df1f..0e495edd79 100644 --- a/test/ttmlir/Dialect/TTNN/transpose/simple_transpose.mlir +++ b/test/ttmlir/Dialect/TTNN/transpose/simple_transpose.mlir @@ -1,10 +1,9 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s -#any_device_tile = #tt.operand_constraint module attributes {} { func.func @forward(%arg0: tensor<64x128xbf16>) -> tensor<128x64xbf16> { %0 = tensor.empty() : tensor<128x64xbf16> // CHECK: %[[C:.*]] = "ttnn.transpose"[[C:.*]] - %1 = "ttir.transpose"(%arg0, %0) <{dim0 = 0 : si32, dim1 = 1 : si32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<64x128xbf16>, tensor<128x64xbf16>) -> tensor<128x64xbf16> + %1 = "ttir.transpose"(%arg0, %0) <{dim0 = 0 : si32, dim1 = 1 : si32}> : (tensor<64x128xbf16>, tensor<128x64xbf16>) -> tensor<128x64xbf16> return %1 : tensor<128x64xbf16> } } diff --git a/test/ttmlir/Dialect/TTNN/transpose/simple_transpose_8x16_reverse_dims.mlir b/test/ttmlir/Dialect/TTNN/transpose/simple_transpose_8x16_reverse_dims.mlir index 70640d0418..9425518055 100644 --- a/test/ttmlir/Dialect/TTNN/transpose/simple_transpose_8x16_reverse_dims.mlir 
+++ b/test/ttmlir/Dialect/TTNN/transpose/simple_transpose_8x16_reverse_dims.mlir
@@ -1,10 +1,9 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
-#any_device_tile = #tt.operand_constraint<any_device_tile>
 module attributes {} {
   func.func @forward(%arg0: tensor<64x16xbf16>) -> tensor<16x64xbf16> {
     %0 = tensor.empty() : tensor<16x64xbf16>
     // CHECK: %[[C:.*]] = "ttnn.transpose"[[C:.*]]
-    %1 = "ttir.transpose"(%arg0, %0) <{dim0 = 1 : si32, dim1 = 0 : si32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<64x16xbf16>, tensor<16x64xbf16>) -> tensor<16x64xbf16>
+    %1 = "ttir.transpose"(%arg0, %0) <{dim0 = 1 : si32, dim1 = 0 : si32}> : (tensor<64x16xbf16>, tensor<16x64xbf16>) -> tensor<16x64xbf16>
     return %1 : tensor<16x64xbf16>
   }
 }
diff --git a/test/ttmlir/Dialect/TTNN/transpose/simple_transpose_8x8.mlir b/test/ttmlir/Dialect/TTNN/transpose/simple_transpose_8x8.mlir
index b9cedf2269..3cfddd7eea 100644
--- a/test/ttmlir/Dialect/TTNN/transpose/simple_transpose_8x8.mlir
+++ b/test/ttmlir/Dialect/TTNN/transpose/simple_transpose_8x8.mlir
@@ -1,10 +1,9 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
-#any_device = #tt.operand_constraint<any_device>
 module attributes {} {
   func.func @forward(%arg0: tensor<32x32xbf16>) -> tensor<32x32xbf16> {
     %0 = tensor.empty() : tensor<32x32xbf16>
     // CHECK: %[[C:.*]] = "ttnn.transpose"[[C:.*]]
-    %1 = "ttir.transpose"(%arg0, %0) <{dim0 = 0 : si32, dim1 = 1 : si32, operand_constraints = [#any_device, #any_device]}> : (tensor<32x32xbf16>, tensor<32x32xbf16>) -> tensor<32x32xbf16>
+    %1 = "ttir.transpose"(%arg0, %0) <{dim0 = 0 : si32, dim1 = 1 : si32}> : (tensor<32x32xbf16>, tensor<32x32xbf16>) -> tensor<32x32xbf16>
     return %1 : tensor<32x32xbf16>
   }
 }
diff --git a/test/ttmlir/Dialect/TTNN/transpose/simple_transpose_negative_dims.mlir b/test/ttmlir/Dialect/TTNN/transpose/simple_transpose_negative_dims.mlir
index 035475bc42..74506b10c8 100644
--- a/test/ttmlir/Dialect/TTNN/transpose/simple_transpose_negative_dims.mlir
+++ b/test/ttmlir/Dialect/TTNN/transpose/simple_transpose_negative_dims.mlir
@@ -1,10 +1,9 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
-#any_device_tile = #tt.operand_constraint<any_device_tile>
 module attributes {} {
   func.func @forward(%arg0: tensor<32x32xbf16>) -> tensor<32x32xbf16> {
     %0 = tensor.empty() : tensor<32x32xbf16>
     // CHECK: %[[C:.*]] = "ttnn.transpose"[[C:.*]]
-    %1 = "ttir.transpose"(%arg0, %0) <{dim0 = -1 : si32, dim1 = -2 : si32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<32x32xbf16>, tensor<32x32xbf16>) -> tensor<32x32xbf16>
+    %1 = "ttir.transpose"(%arg0, %0) <{dim0 = -1 : si32, dim1 = -2 : si32}> : (tensor<32x32xbf16>, tensor<32x32xbf16>) -> tensor<32x32xbf16>
     return %1 : tensor<32x32xbf16>
   }
 }
diff --git a/test/ttmlir/Dialect/TTNN/transpose/transpose_twice.mlir b/test/ttmlir/Dialect/TTNN/transpose/transpose_twice.mlir
index f18e0e5cbc..b78d86b015 100644
--- a/test/ttmlir/Dialect/TTNN/transpose/transpose_twice.mlir
+++ b/test/ttmlir/Dialect/TTNN/transpose/transpose_twice.mlir
@@ -1,13 +1,12 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
-#any_device_tile = #tt.operand_constraint<any_device_tile>
 module attributes {} {
   func.func @forward(%arg0: tensor<1x16x32x64xbf16>) -> tensor<1x32x64x16xbf16> {
     %0 = tensor.empty() : tensor<1x64x32x16xbf16>
     // CHECK: %[[C:.*]] = "ttnn.transpose"[[C:.*]]
-    %1 = "ttir.transpose"(%arg0, %0) <{dim0 = -3 : si32, dim1 = -1 : si32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<1x16x32x64xbf16>, tensor<1x64x32x16xbf16>) -> tensor<1x64x32x16xbf16>
+    %1 = "ttir.transpose"(%arg0, %0) <{dim0 = -3 : si32, dim1 = -1 : si32}> : (tensor<1x16x32x64xbf16>, tensor<1x64x32x16xbf16>) -> tensor<1x64x32x16xbf16>
     %2 = tensor.empty() : tensor<1x32x64x16xbf16>
     // CHECK: %[[C:.*]] = "ttnn.transpose
-    %3 = "ttir.transpose"(%1, %2) <{dim0 = -3 : si32, dim1 = -2 : si32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<1x64x32x16xbf16>, tensor<1x32x64x16xbf16>) -> tensor<1x32x64x16xbf16>
+    %3 = "ttir.transpose"(%1, %2) <{dim0 = -3 : si32, dim1 = -2 : si32}> : (tensor<1x64x32x16xbf16>, tensor<1x32x64x16xbf16>) -> tensor<1x32x64x16xbf16>
     return %3 : tensor<1x32x64x16xbf16>
   }
 }
diff --git a/test/ttmlir/Dialect/TTNN/ttir_to_ttnn_pipeline_custom_opt.mlir b/test/ttmlir/Dialect/TTNN/ttir_to_ttnn_pipeline_custom_opt.mlir
index 112a941a81..8b66eff720 100644
--- a/test/ttmlir/Dialect/TTNN/ttir_to_ttnn_pipeline_custom_opt.mlir
+++ b/test/ttmlir/Dialect/TTNN/ttir_to_ttnn_pipeline_custom_opt.mlir
@@ -1,12 +1,11 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="enable-optimizer=false" %s | FileCheck %s
-#any_device = #tt.operand_constraint<any_device>
 module attributes {} {
   func.func @forward(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
     // CHECK: #[[LAYOUT_1:.*]] = #ttnn.ttnn_layout<(d0, d1) -> (d0, d1), <1x1>, memref<64x128xf32, #dram>, <interleaved>>
     // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
     %0 = tensor.empty() : tensor<64x128xf32>
     // CHECK: %[[C:.*]] = "ttnn.multiply"[[C:.*]] -> tensor<64x128xf32, #[[LAYOUT_1:.*]]>
-    %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+    %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
     return %1 : tensor<64x128xf32>
   }
 }
diff --git a/test/ttmlir/Silicon/TTMetal/simple_constant.mlir b/test/ttmlir/Silicon/TTMetal/simple_constant.mlir
index e7556331ca..3f98250312 100644
--- a/test/ttmlir/Silicon/TTMetal/simple_constant.mlir
+++ b/test/ttmlir/Silicon/TTMetal/simple_constant.mlir
@@ -2,13 +2,11 @@
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttmetal-to-flatbuffer %t.mlir > %t.ttm
 
-#any_device = #tt.operand_constraint<any_device>
-
 func.func public @add5(%arg0: tensor<32x32xf32>) -> tensor<32x32xf32> {
   // CHECK: %[[C:.*]] = "ttmetal.alloc"[[C:.*]]
   // CHECK: %[[C:.*]] = "ttmetal.host_write"[[C:.*]]
   %0 = "ttir.constant"() <{value = dense<5.0> : tensor<32x32xf32>}> : () -> tensor<32x32xf32>
   %1 = tensor.empty() : tensor<32x32xf32>
-  %2 = "ttir.add"(%arg0, %0, %1) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<32x32xf32>, tensor<32x32xf32>, tensor<32x32xf32>) -> tensor<32x32xf32>
+  %2 = "ttir.add"(%arg0, %0, %1) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<32x32xf32>, tensor<32x32xf32>, tensor<32x32xf32>) -> tensor<32x32xf32>
   return %2 : tensor<32x32xf32>
 }
diff --git a/test/ttmlir/Silicon/TTMetal/simple_eltwise.mlir b/test/ttmlir/Silicon/TTMetal/simple_eltwise.mlir
index 4b1c3c39fa..b9b10706bf 100644
--- a/test/ttmlir/Silicon/TTMetal/simple_eltwise.mlir
+++ b/test/ttmlir/Silicon/TTMetal/simple_eltwise.mlir
@@ -1,14 +1,11 @@
 // RUN: ttmlir-opt --ttir-to-ttmetal-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttmetal-to-flatbuffer %t.mlir > %t.ttm
-
-#any_device = #tt.operand_constraint<any_device>
-
 func.func @multiply(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
   // CHECK: %[[C:.*]] = "ttmetal.alloc"[[C:.*]]
   %0 = tensor.empty() : tensor<64x128xf32>
   // CHECK: %[[C:.*]] = "ttmetal.dispatch"[[C:.*]]
-  %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   return %1 : tensor<64x128xf32>
 }
 
@@ -16,7 +13,7 @@ func.func @add(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
   // CHECK: %[[C:.*]] = "ttmetal.alloc"[[C:.*]]
   %0 = tensor.empty() : tensor<64x128xf32>
   // CHECK: %[[C:.*]] = "ttmetal.dispatch"[[C:.*]]
-  %1 = "ttir.add"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.add"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   return %1 : tensor<64x128xf32>
 }
 
@@ -24,7 +21,7 @@ func.func @exp(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
   // CHECK: %[[C:.*]] = "ttmetal.alloc"[[C:.*]]
   %0 = tensor.empty() : tensor<64x128xf32>
   // CHECK: %[[C:.*]] = "ttmetal.dispatch"[[C:.*]]
-  %1 = "ttir.exp"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.exp"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   return %1 : tensor<64x128xf32>
 }
 
@@ -32,6 +29,6 @@ func.func @div(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
   // CHECK: %[[C:.*]] = "ttmetal.alloc"[[C:.*]]
   %0 = tensor.empty() : tensor<64x128xf32>
   // CHECK: %[[C:.*]] = "ttmetal.dispatch"[[C:.*]]
-  %1 = "ttir.div"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.div"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   return %1 : tensor<64x128xf32>
 }
diff --git a/test/ttmlir/Silicon/TTMetal/simple_reduce.mlir b/test/ttmlir/Silicon/TTMetal/simple_reduce.mlir
index cdde621c2a..a6ab52acbb 100644
--- a/test/ttmlir/Silicon/TTMetal/simple_reduce.mlir
+++ b/test/ttmlir/Silicon/TTMetal/simple_reduce.mlir
@@ -1,5 +1,4 @@
 // RUN: ttmlir-opt --ttir-to-ttmetal-backend-pipeline="system-desc-path=%system_desc_path%" %s | FileCheck %s
-#any_device = #tt.operand_constraint<any_device>
 #l1_ = #tt.memory_space<l1>
 #layout1 = #tt.metal_layout<(d0, d1) -> (d0, d1), undef, <4x4>, memref<64x96xf32, #l1_>>
 #layout2 = #tt.metal_layout<(d0, d1) -> (d0, d1), undef, <4x1>, memref<64x32xf32, #l1_>>
@@ -9,8 +8,7 @@ func.func @reduceW(%arg0: tensor<256x384xf32, #layout1>) -> tensor<256x32xf32, #layout2> {
   // CHECK: %[[C:.*]] = "ttmetal.dispatch"[[C:.*]]
   %1 = "ttir.sum"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>,
         dim_arg = [-1: i32],
-        keep_dim = true,
-        operand_constraints = [#any_device, #any_device, #any_device]}> :
+        keep_dim = true}> :
     (tensor<256x384xf32, #layout1>, tensor<256x32xf32, #layout2>) -> tensor<256x32xf32, #layout2>
   return %1 : tensor<256x32xf32, #layout2>
 }
@@ -21,8 +19,7 @@ func.func @reduceH(%arg0: tensor<256x384xf32, #layout1>) -> tensor<32x384xf32, #layout3> {
   // CHECK: %[[C:.*]] = "ttmetal.dispatch"[[C:.*]]
   %1 = "ttir.sum"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>,
         dim_arg = [-2: i32],
-        keep_dim = true,
-        operand_constraints = [#any_device, #any_device, #any_device]}> :
+        keep_dim = true}> :
     (tensor<256x384xf32, #layout1>, tensor<32x384xf32, #layout3>) -> tensor<32x384xf32, #layout3>
   return %1 : tensor<32x384xf32, #layout3>
 }
@@ -33,8 +30,7 @@ func.func @reduceWH(%arg0: tensor<256x384xf32, #layout1>) -> tensor<32x32xf32, #layout4> {
   // CHECK: %[[C:.*]] = "ttmetal.dispatch"[[C:.*]]
   %1 = "ttir.sum"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>,
         dim_arg = [-1: i32, -2: i32],
-        keep_dim = true,
-        operand_constraints = [#any_device, #any_device, #any_device]}> :
+        keep_dim = true}> :
     (tensor<256x384xf32, #layout1>, tensor<32x32xf32, #layout4>) -> tensor<32x32xf32, #layout4>
   return %1 : tensor<32x32xf32, #layout4>
 }
@@ -44,8 +40,7 @@ func.func @maxReduceWH(%arg0: tensor<256x384xf32, #layout1>) -> tensor<32x32xf32, #layout4> {
   // CHECK: %[[C:.*]] = "ttmetal.dispatch"[[C:.*]]
   %1 = "ttir.max" (%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>,
         dim_arg = [-1: i32, -2: i32],
-        keep_dim = true,
-        operand_constraints = [#any_device, #any_device, #any_device]}> :
+        keep_dim = true}> :
     (tensor<256x384xf32, #layout1>, tensor<32x32xf32, #layout4>) -> tensor<32x32xf32, #layout4>
   return %1 : tensor<32x32xf32, #layout4>
 }
diff --git a/test/ttmlir/Silicon/TTMetal/simple_reduce_1x1.mlir b/test/ttmlir/Silicon/TTMetal/simple_reduce_1x1.mlir
index 2df51c9e52..2038cfa083 100644
--- a/test/ttmlir/Silicon/TTMetal/simple_reduce_1x1.mlir
+++ b/test/ttmlir/Silicon/TTMetal/simple_reduce_1x1.mlir
@@ -1,5 +1,4 @@
 // RUN: ttmlir-opt --ttir-to-ttmetal-backend-pipeline="system-desc-path=%system_desc_path%" %s | FileCheck %s
-#any_device = #tt.operand_constraint<any_device>
 #l1_ = #tt.memory_space<l1>
 
 func.func @reduceW(%arg0: tensor<64x256xf32>) -> tensor<64x32xf32> {
@@ -7,8 +6,7 @@ func.func @reduceW(%arg0: tensor<64x256xf32>) -> tensor<64x32xf32> {
   // CHECK: %[[C:.*]] = "ttmetal.dispatch"[[C:.*]]
   %1 = "ttir.sum"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>,
         dim_arg = [-1: i32],
-        keep_dim = true,
-        operand_constraints = [#any_device, #any_device, #any_device]}> :
+        keep_dim = true}> :
     (tensor<64x256xf32>, tensor<64x32xf32>) -> tensor<64x32xf32>
   return %1 : tensor<64x32xf32>
 }
@@ -18,8 +16,7 @@ func.func @reduceH(%arg0: tensor<256x64xf32>) -> tensor<32x64xf32> {
   // CHECK: %[[C:.*]] = "ttmetal.dispatch"[[C:.*]]
   %1 = "ttir.sum"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>,
         dim_arg = [-2: i32],
-        keep_dim = true,
-        operand_constraints = [#any_device, #any_device, #any_device]}> :
+        keep_dim = true}> :
     (tensor<256x64xf32>, tensor<32x64xf32>) -> tensor<32x64xf32>
   return %1 : tensor<32x64xf32>
 }
@@ -29,8 +26,7 @@ func.func @reduceWH(%arg0: tensor<256x64xf32>) -> tensor<32x32xf32> {
   // CHECK: %[[C:.*]] = "ttmetal.dispatch"[[C:.*]]
   %1 = "ttir.sum"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>,
         dim_arg = [-1: i32, -2: i32],
-        keep_dim = true,
-        operand_constraints = [#any_device, #any_device, #any_device]}> :
+        keep_dim = true}> :
     (tensor<256x64xf32>, tensor<32x32xf32>) -> tensor<32x32xf32>
   return %1 : tensor<32x32xf32>
 }
diff --git a/test/ttmlir/Silicon/TTNN/ccl/all_gather.mlir b/test/ttmlir/Silicon/TTNN/ccl/all_gather.mlir
index edf0a4eafe..9e5972e138 100644
--- a/test/ttmlir/Silicon/TTNN/ccl/all_gather.mlir
+++ b/test/ttmlir/Silicon/TTNN/ccl/all_gather.mlir
@@ -3,13 +3,10 @@
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
 // UNSUPPORTED: true
 // REQUIRES: multi-chip
-#any_device = #tt.operand_constraint<any_device>
-#any_device_tile = #tt.operand_constraint<any_device_tile>
-
 func.func @forward(%arg0: tensor<1x1x32x32xf32>) -> tensor<1x1x32x128xf32> {
   // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<1x1x32x128xf32>
   // CHECK: %[[C:.*]] = "ttnn.all_gather"[[C:.*]]
-  %1 = "ttir.all_gather"(%arg0, %0) <{dim = 3 : si32, operand_constraints = [#any_device, #any_device]}> : (tensor<1x1x32x32xf32>, tensor<1x1x32x128xf32>) -> tensor<1x1x32x128xf32>
+  %1 = "ttir.all_gather"(%arg0, %0) <{dim = 3 : si32}> : (tensor<1x1x32x32xf32>, tensor<1x1x32x128xf32>) -> tensor<1x1x32x128xf32>
   return %1 : tensor<1x1x32x128xf32>
 }
diff --git a/test/ttmlir/Silicon/TTNN/complex_conv_channel_first.mlir b/test/ttmlir/Silicon/TTNN/complex_conv_channel_first.mlir
index 8b0b0dec6b..ca773e9781 100644
--- a/test/ttmlir/Silicon/TTNN/complex_conv_channel_first.mlir
+++ b/test/ttmlir/Silicon/TTNN/complex_conv_channel_first.mlir
@@ -1,7 +1,6 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device_tile = #tt.operand_constraint<any_device_tile>
 module @jit_convolution {
   func.func public @test_NCHW_IOHW_to_NHWC_OIHW_conv2d(%arg0: tensor<1x3x100x100xbf16>, %arg1: tensor<7x3x3x3xbf16>) -> tensor<1x7x100x100xbf16> {
     %0 = tensor.empty() : tensor<1x7x100x100xbf16>
@@ -23,7 +22,7 @@
       >,
       feature_group_count = 1 : i64,
       input_dilation = array<i64: 1, 1>,
-      operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile],
       padding = array<i64: 1, 1, 1, 1>,
       weight_dilation = array<i64: 1, 1>,
       window_reversal = array<i1: false, false>,
diff --git a/test/ttmlir/Silicon/TTNN/deallocate.mlir b/test/ttmlir/Silicon/TTNN/deallocate.mlir
index 1e2f0b3c3c..cdba160160 100644
--- a/test/ttmlir/Silicon/TTNN/deallocate.mlir
+++ b/test/ttmlir/Silicon/TTNN/deallocate.mlir
@@ -1,36 +1,35 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint<any_device>
 #loc = loc("Dealloc":4294967295:0)
 module @"dealloc_test" attributes {} {
   func.func @main(%arg0: tensor<1x784xf32> loc("Dealloc":4294967295:0), %arg1: tensor<1x10xf32> loc("Dealloc":4294967295:0), %arg2: tensor<256x10xf32> loc("Dealloc":4294967295:0), %arg3: tensor<1x256xf32> loc("Dealloc":4294967295:0), %arg4: tensor<784x256xf32> loc("Dealloc":4294967295:0)) -> tensor<1x10xf32> {
     %0 = tensor.empty() : tensor<1x256xf32> loc(#loc8)
-    %1 = "ttir.matmul"(%arg0, %arg4, %0) <{operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<1x784xf32>, tensor<784x256xf32>, tensor<1x256xf32>) -> tensor<1x256xf32> loc(#loc8)
+    %1 = "ttir.matmul"(%arg0, %arg4, %0) : (tensor<1x784xf32>, tensor<784x256xf32>, tensor<1x256xf32>) -> tensor<1x256xf32> loc(#loc8)
     // CHECK: %{{.+}} = "ttnn.matmul"([[I1:%.+]], [[I2:%.+]], [[O1:%.+]]) {{.+}} -> tensor<1x256xf32, {{.+}}>
     // CHECK: "ttnn.deallocate"([[I2]]) {{.+}} : (tensor<784x256xf32, {{.+}}) -> ()
     // CHECK: "ttnn.deallocate"([[I1]]) {{.+}} : (tensor<1x784xf32, {{.+}}>) -> ()
     %2 = tensor.empty() : tensor<1x256xf32> loc(#loc9)
-    %3 = "ttir.add"(%1, %arg3, %2) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<1x256xf32>, tensor<1x256xf32>, tensor<1x256xf32>) -> tensor<1x256xf32> loc(#loc9)
+    %3 = "ttir.add"(%1, %arg3, %2) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<1x256xf32>, tensor<1x256xf32>, tensor<1x256xf32>) -> tensor<1x256xf32> loc(#loc9)
     // CHECK: %{{.+}} = "ttnn.add"([[I1:%.+]], [[I2:%.+]], [[O2:%.+]]) {{.+}} -> tensor<1x256xf32, {{.+}}>
     // CHECK: "ttnn.deallocate"([[I2]]) {{.+}} : (tensor<1x256xf32, {{.+}}>) -> ()
     // CHECK: "ttnn.deallocate"([[O1]]) {{.+}} : (tensor<1x256xf32, {{.+}}>) -> ()
     %4 = tensor.empty() : tensor<1x256xf32> loc(#loc10)
-    %5 = "ttir.relu"(%3, %4) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<1x256xf32>, tensor<1x256xf32>) -> tensor<1x256xf32> loc(#loc10)
+    %5 = "ttir.relu"(%3, %4) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<1x256xf32>, tensor<1x256xf32>) -> tensor<1x256xf32> loc(#loc10)
     // CHECK: %{{.+}} = "ttnn.relu"([[I1:%.+]], [[O3:%.+]]) {{.+}} -> tensor<1x256xf32, {{.+}}>
     // CHECK: "ttnn.deallocate"([[O2]]) {{.+}} : (tensor<1x256xf32, {{.+}}>) -> ()
     %6 = tensor.empty() : tensor<1x10xf32> loc(#loc11)
-    %7 = "ttir.matmul"(%5, %arg2, %6) <{operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<1x256xf32>, tensor<256x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32> loc(#loc11)
+    %7 = "ttir.matmul"(%5, %arg2, %6) : (tensor<1x256xf32>, tensor<256x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32> loc(#loc11)
     // CHECK: %{{.+}} = "ttnn.matmul"([[I1:%.+]], [[I2:%.+]], [[O4:%.+]]) {{.+}} -> tensor<1x10xf32, {{.+}}>
     // CHECK: "ttnn.deallocate"([[I2]]) {{.+}} : (tensor<256x10xf32, {{.+}}>) -> ()
     // CHECK: "ttnn.deallocate"([[O3]]) {{.+}} : (tensor<1x256xf32,{{.+}}>) -> ()
     %8 = tensor.empty() : tensor<1x10xf32> loc(#loc12)
-    %9 = "ttir.add"(%7, %arg1, %8) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<1x10xf32>, tensor<1x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32> loc(#loc12)
+    %9 = "ttir.add"(%7, %arg1, %8) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<1x10xf32>, tensor<1x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32> loc(#loc12)
     // CHECK: %{{.+}} = "ttnn.add"([[I1:%.+]], [[I2:%.+]], [[O5:%.+]]) {{.+}} -> tensor<1x10xf32,{{.+}}>
     // CHECK: "ttnn.deallocate"([[I2]]) {{.+}} : (tensor<1x10xf32, {{.+}}>) -> ()
     // CHECK: "ttnn.deallocate"([[O4]]) {{.+}} : (tensor<1x10xf32, {{.+}}>) -> ()
     %10 = tensor.empty() : tensor<1x10xf32> loc(#loc13)
-    %11 = "ttir.softmax"(%9, %10) <{dimension = 1 : si32, operand_constraints = [#any_device, #any_device]}> : (tensor<1x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32> loc(#loc13)
+    %11 = "ttir.softmax"(%9, %10) <{dimension = 1 : si32}> : (tensor<1x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32> loc(#loc13)
     return %11 : tensor<1x10xf32> loc(#loc7)
   } loc(#loc)
 } loc(#loc)
diff --git a/test/ttmlir/Silicon/TTNN/embedding/embedding_1d_tensor.mlir b/test/ttmlir/Silicon/TTNN/embedding/embedding_1d_tensor.mlir
index f4850e4f87..96c8609dc0 100644
--- a/test/ttmlir/Silicon/TTNN/embedding/embedding_1d_tensor.mlir
+++ b/test/ttmlir/Silicon/TTNN/embedding/embedding_1d_tensor.mlir
@@ -1,13 +1,12 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint<any_device>
 module attributes {} {
   func.func @forward(%arg0: tensor<32xbf16>, %arg1: tensor<512x128xbf16>) -> tensor<32x128xbf16> {
     // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
    %0 = tensor.empty() : tensor<32x128xbf16>
     // CHECK: %[[C:.*]] = "ttnn.embedding"[[C:.*]]
-    %1 = "ttir.embedding"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<32xbf16>, tensor<512x128xbf16>, tensor<32x128xbf16>) -> tensor<32x128xbf16>
+    %1 = "ttir.embedding"(%arg0, %arg1, %0) : (tensor<32xbf16>, tensor<512x128xbf16>, tensor<32x128xbf16>) -> tensor<32x128xbf16>
     return %1 : tensor<32x128xbf16>
   }
 }
diff --git a/test/ttmlir/Silicon/TTNN/embedding/embedding_non_tile.mlir b/test/ttmlir/Silicon/TTNN/embedding/embedding_non_tile.mlir
index c26634771d..11397f27ac 100644
--- a/test/ttmlir/Silicon/TTNN/embedding/embedding_non_tile.mlir
+++ b/test/ttmlir/Silicon/TTNN/embedding/embedding_non_tile.mlir
@@ -1,13 +1,12 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint<any_device>
 module attributes {} {
   func.func @forward(%arg0: tensor<1x32xbf16>, %arg1: tensor<512x128xbf16>) -> tensor<1x32x128xbf16> {
     // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
     %0 = tensor.empty() : tensor<1x32x128xbf16>
     // CHECK: %[[C:.*]] = "ttnn.embedding"[[C:.*]]
-    %1 = "ttir.embedding"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<1x32xbf16>, tensor<512x128xbf16>, tensor<1x32x128xbf16>) -> tensor<1x32x128xbf16>
+    %1 = "ttir.embedding"(%arg0, %arg1, %0) : (tensor<1x32xbf16>, tensor<512x128xbf16>, tensor<1x32x128xbf16>) -> tensor<1x32x128xbf16>
     return %1 : tensor<1x32x128xbf16>
   }
 }
diff --git a/test/ttmlir/Silicon/TTNN/embedding/simple_embedding.mlir b/test/ttmlir/Silicon/TTNN/embedding/simple_embedding.mlir
index 343bb5e76a..583aa82e0f 100644
--- a/test/ttmlir/Silicon/TTNN/embedding/simple_embedding.mlir
+++ b/test/ttmlir/Silicon/TTNN/embedding/simple_embedding.mlir
@@ -1,13 +1,12 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint<any_device>
 module attributes {} {
   func.func @forward(%arg0: tensor<32x32xbf16>, %arg1: tensor<512x128xbf16>) -> tensor<32x32x128xbf16> {
     // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
     %0 = tensor.empty() : tensor<32x32x128xbf16>
     // CHECK: %[[C:.*]] = "ttnn.embedding"[[C:.*]]
-    %1 = "ttir.embedding"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<32x32xbf16>, tensor<512x128xbf16>, tensor<32x32x128xbf16>) -> tensor<32x32x128xbf16>
+    %1 = "ttir.embedding"(%arg0, %arg1, %0) : (tensor<32x32xbf16>, tensor<512x128xbf16>, tensor<32x32x128xbf16>) -> tensor<32x32x128xbf16>
     return %1 : tensor<32x32x128xbf16>
   }
 }
diff --git a/test/ttmlir/Silicon/TTNN/multi_device.mlir b/test/ttmlir/Silicon/TTNN/multi_device.mlir
index 448f671696..c927c0d2b9 100644
--- a/test/ttmlir/Silicon/TTNN/multi_device.mlir
+++ b/test/ttmlir/Silicon/TTNN/multi_device.mlir
@@ -3,13 +3,10 @@
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
 // UNSUPPORTED: true
 // REQUIRES: multi-chip
-#any_device = #tt.operand_constraint<any_device>
-#any_device_tile = #tt.operand_constraint<any_device_tile>
-
 func.func @multiply(%arg0: tensor<8x64x128xf32>, %arg1: tensor<8x64x128xf32>) -> tensor<8x64x128xf32> {
   // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<8x64x128xf32>
   // CHECK: %[[C:.*]] = "ttnn.multiply"[[C:.*]]
-  %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<8x64x128xf32>, tensor<8x64x128xf32>, tensor<8x64x128xf32>) -> tensor<8x64x128xf32>
+  %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<8x64x128xf32>, tensor<8x64x128xf32>, tensor<8x64x128xf32>) -> tensor<8x64x128xf32>
   return %1 : tensor<8x64x128xf32>
 }
diff --git a/test/ttmlir/Silicon/TTNN/operand_broadcasts.mlir b/test/ttmlir/Silicon/TTNN/operand_broadcasts.mlir
index 2a985d65f4..1b919ec1de 100644
--- a/test/ttmlir/Silicon/TTNN/operand_broadcasts.mlir
+++ b/test/ttmlir/Silicon/TTNN/operand_broadcasts.mlir
@@ -1,13 +1,12 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint<any_device>
 module attributes {} {
   func.func @bcast_one_dim(%arg0: tensor<2x64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<2x64x128xf32> {
     // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
     %0 = tensor.empty() : tensor<2x64x128xf32>
     // CHECK: %[[C:.*]] = "ttnn.multiply"[[C:.*]]
-    %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<2x64x128xf32>, tensor<64x128xf32>, tensor<2x64x128xf32>) -> tensor<2x64x128xf32>
+    %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<2x64x128xf32>, tensor<64x128xf32>, tensor<2x64x128xf32>) -> tensor<2x64x128xf32>
     return %1 : tensor<2x64x128xf32>
   }
 
@@ -15,7 +14,7 @@ module attributes {} {
     // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
     %0 = tensor.empty() : tensor<17x16x15x14xf32>
     // CHECK: %[[C:.*]] = "ttnn.multiply"[[C:.*]]
-    %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<17x16x15x14xf32>, tensor<15x1xf32>, tensor<17x16x15x14xf32>) -> tensor<17x16x15x14xf32>
+    %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<17x16x15x14xf32>, tensor<15x1xf32>, tensor<17x16x15x14xf32>) -> tensor<17x16x15x14xf32>
     return %1 : tensor<17x16x15x14xf32>
   }
diff --git a/test/ttmlir/Silicon/TTNN/optimizer/simple_fork_join.mlir b/test/ttmlir/Silicon/TTNN/optimizer/simple_fork_join.mlir
index 981c26b499..e323e10249 100644
--- a/test/ttmlir/Silicon/TTNN/optimizer/simple_fork_join.mlir
+++ b/test/ttmlir/Silicon/TTNN/optimizer/simple_fork_join.mlir
@@ -2,17 +2,16 @@
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
 // UNSUPPORTED: true
-#any_device = #tt.operand_constraint<any_device>
 module attributes {} {
   func.func @forward(%arg0: tensor<64x128xbf16>, %arg1: tensor<64x128xbf16>, %arg2: tensor<64x128xbf16>, %arg3: tensor<64x128xbf16>) -> tensor<64x128xbf16> {
     %0 = tensor.empty() : tensor<64x128xbf16>
-    %1 = "ttir.add"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xbf16>, tensor<64x128xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
+    %1 = "ttir.add"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xbf16>, tensor<64x128xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
     %2 = tensor.empty() : tensor<64x128xbf16>
-    %3 = "ttir.add"(%arg2, %arg3, %2) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xbf16>, tensor<64x128xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
+    %3 = "ttir.add"(%arg2, %arg3, %2) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xbf16>, tensor<64x128xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
     %4 = tensor.empty() : tensor<64x128xbf16>
-    %5 = "ttir.add"(%1, %3, %4) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xbf16>, tensor<64x128xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
+    %5 = "ttir.add"(%1, %3, %4) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xbf16>, tensor<64x128xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
     %6 = tensor.empty() : tensor<64x128xbf16>
-    %7 = "ttir.relu"(%5, %6) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
+    %7 = "ttir.relu"(%5, %6) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
     return %7 : tensor<64x128xbf16>
   }
 }
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/mnist.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/mnist.mlir
index 0193ec36b1..8d1b6393b4 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/mnist.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/mnist.mlir
@@ -1,6 +1,5 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint<any_device>
 #loc = loc("MNISTLinear":4294967295:0)
 module @"tt-forge-graph" attributes {} {
   func.func @main(%arg0: tensor<1x784xf32> loc("MNISTLinear":4294967295:0), %arg1: tensor<1x10xf32> loc("MNISTLinear":4294967295:0), %arg2: tensor<256x10xf32> loc("MNISTLinear":4294967295:0), %arg3: tensor<1x256xf32> loc("MNISTLinear":4294967295:0), %arg4: tensor<784x256xf32> loc("MNISTLinear":4294967295:0)) -> tensor<1x10xf32> {
@@ -8,21 +7,21 @@ module @"tt-forge-graph" attributes {} {
     // CHECK: #[[LAYOUT_11:.*]] = #tt.metal_layout<(d0, d1) -> (d0, d1), undef, <1x1>, memref<1x10xf32, #l1_>, block_sharded>
     %0 = tensor.empty() : tensor<1x256xf32> loc(#loc8)
     // CHECK: %[[C:.*]] = "ttnn.matmul"[[C:.*]] -> tensor<1x256xf32, #[[LAYOUT_10]]>
-    %1 = "ttir.matmul"(%arg0, %arg4, %0) <{operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<1x784xf32>, tensor<784x256xf32>, tensor<1x256xf32>) -> tensor<1x256xf32> loc(#loc8)
+    %1 = "ttir.matmul"(%arg0, %arg4, %0) : (tensor<1x784xf32>, tensor<784x256xf32>, tensor<1x256xf32>) -> tensor<1x256xf32> loc(#loc8)
     %2 = tensor.empty() : tensor<1x256xf32> loc(#loc9)
     // CHECK: %[[C:.*]] = "ttnn.add"[[C:.*]] -> tensor<1x256xf32, #[[LAYOUT_10]]>
-    %3 = "ttir.add"(%1, %arg3, %2) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<1x256xf32>, tensor<1x256xf32>, tensor<1x256xf32>) -> tensor<1x256xf32> loc(#loc9)
+    %3 = "ttir.add"(%1, %arg3, %2) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<1x256xf32>, tensor<1x256xf32>, tensor<1x256xf32>) -> tensor<1x256xf32> loc(#loc9)
     %4 = tensor.empty() : tensor<1x256xf32> loc(#loc10)
     // CHECK: %[[C:.*]] = "ttnn.relu"[[C:.*]] -> tensor<1x256xf32, #[[LAYOUT_10]]>
-    %5 = "ttir.relu"(%3, %4) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<1x256xf32>, tensor<1x256xf32>) -> tensor<1x256xf32> loc(#loc10)
+    %5 = "ttir.relu"(%3, %4) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<1x256xf32>, tensor<1x256xf32>) -> tensor<1x256xf32> loc(#loc10)
     %6 = tensor.empty() : tensor<1x10xf32> loc(#loc11)
     // CHECK: %[[C:.*]] = "ttnn.matmul"[[C:.*]] -> tensor<1x10xf32, #[[LAYOUT_11]]>
-    %7 = "ttir.matmul"(%5, %arg2, %6) <{operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<1x256xf32>, tensor<256x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32> loc(#loc11)
+    %7 = "ttir.matmul"(%5, %arg2, %6) : (tensor<1x256xf32>, tensor<256x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32> loc(#loc11)
     %8 = tensor.empty() : tensor<1x10xf32> loc(#loc12)
     // CHECK: %[[C:.*]] = "ttnn.add"[[C:.*]] -> tensor<1x10xf32, #[[LAYOUT_11]]>
-    %9 = "ttir.add"(%7, %arg1, %8) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<1x10xf32>, tensor<1x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32> loc(#loc12)
+    %9 = "ttir.add"(%7, %arg1, %8) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<1x10xf32>, tensor<1x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32> loc(#loc12)
     %10 = tensor.empty() : tensor<1x10xf32> loc(#loc13)
-    %11 = "ttir.softmax"(%9, %10) <{dimension = 1 : si32, operand_constraints = [#any_device, #any_device]}> : (tensor<1x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32> loc(#loc13)
+    %11 = "ttir.softmax"(%9, %10) <{dimension = 1 : si32}> : (tensor<1x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32> loc(#loc13)
     return %11 : tensor<1x10xf32> loc(#loc7)
   } loc(#loc)
 } loc(#loc)
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_and.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_and.mlir
index b4569ef61f..d279685203 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_and.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_and.mlir
@@ -1,14 +1,10 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-
-#any_device = #tt.operand_constraint<any_device>
-#any_device_tile = #tt.operand_constraint<any_device_tile>
-
 func.func @logical_and(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
   %0 = tensor.empty() : tensor<64x128xf32>
   // CHECK: {{.*}} = "ttnn.empty"{{.*}}
-  %1 = "ttir.logical_and"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.logical_and"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   // CHECK: %[[C:.*]] = "ttnn.logical_and"
   // CHECK-SAME: tensor<64x128xf32,
   // CHECK-SAME: tensor<64x128xf32,
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_ceil.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_ceil.mlir
index 2e7f55428c..d554baf2e3 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_ceil.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_ceil.mlir
@@ -1,13 +1,10 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint<any_device>
-#any_device_tile = #tt.operand_constraint<any_device_tile>
-
 func.func @ceil(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
   %0 = tensor.empty() : tensor<64x128xf32>
   // CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}})
   // CHECK: %{{[0-9]+}} = "ttnn.ceil"(%{{[0-9]+}}, [[VAL0]])
-  %1 = "ttir.ceil"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.ceil"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   return %1 : tensor<64x128xf32>
 }
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_concat.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_concat.mlir
index 122364cac6..c889afec3d 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_concat.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_concat.mlir
@@ -1,13 +1,10 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint<any_device>
-#any_device_tile = #tt.operand_constraint<any_device_tile>
-
 func.func @concat(%arg0: tensor<32x32xf32>, %arg1: tensor<32x64xf32>) -> tensor<32x96xf32> {
   // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<32x96xf32>
   // CHECK: %[[C:.*]] = "ttnn.concat"[[C:.*]]
-  %1 = "ttir.concat"(%arg0, %arg1, %0) <{dim = 1 : si32, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<32x32xf32>, tensor<32x64xf32>, tensor<32x96xf32>) -> tensor<32x96xf32>
+  %1 = "ttir.concat"(%arg0, %arg1, %0) <{dim = 1 : si32}> : (tensor<32x32xf32>, tensor<32x64xf32>, tensor<32x96xf32>) -> tensor<32x96xf32>
   return %1 : tensor<32x96xf32>
 }
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_conv.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_conv.mlir
index 543f057630..13708ef16a 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_conv.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_conv.mlir
@@ -1,12 +1,11 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint<any_device>
 module attributes {} {
   func.func @forward(%arg0: tensor<1x32x32x64xbf16>, %arg1: tensor<64x64x3x3xbf16>, %arg2: tensor<1x1x1x64xbf16>) -> tensor<1x32x32x64xbf16> {
     %0 = tensor.empty() : tensor<1x32x32x64xbf16>
     // CHECK: %[[C:.*]] = "ttnn.conv2d"[[C:.*]]
-    %1 = "ttir.conv2d"(%arg0, %arg1, %arg2, %0) <{stride_height=1: si32, stride_width=1: si32, dilation_height=1: si32, dilation_width=1: si32, groups=1: si32, padding_left=1: si32, padding_right=1: si32, padding_top=1: si32, padding_bottom=1: si32, is_convtranspose2d=0: si32, output_height_transpose=0: si32, output_width_transpose=0: si32, stride_transpose=0: si32, operand_constraints = [#any_device, #any_device, #any_device, #any_device]}> : (tensor<1x32x32x64xbf16>, tensor<64x64x3x3xbf16>, tensor<1x1x1x64xbf16>, tensor<1x32x32x64xbf16>) -> tensor<1x32x32x64xbf16>
+    %1 = "ttir.conv2d"(%arg0, %arg1, %arg2, %0) <{stride_height=1: si32, stride_width=1: si32, dilation_height=1: si32, dilation_width=1: si32, groups=1: si32, padding_left=1: si32, padding_right=1: si32, padding_top=1: si32, padding_bottom=1: si32, is_convtranspose2d=0: si32, output_height_transpose=0: si32, output_width_transpose=0: si32, stride_transpose=0: si32}> : (tensor<1x32x32x64xbf16>, tensor<64x64x3x3xbf16>, tensor<1x1x1x64xbf16>, tensor<1x32x32x64xbf16>) -> tensor<1x32x32x64xbf16>
     return %1 : tensor<1x32x32x64xbf16>
   }
 }
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_cosine.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_cosine.mlir
index ede823439e..2596e4a132 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_cosine.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_cosine.mlir
@@ -1,13 +1,10 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint<any_device>
-#any_device_tile = #tt.operand_constraint<any_device_tile>
-
 func.func @cosine(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
   %0 = tensor.empty() : tensor<64x128xf32>
   // CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}})
   // CHECK: %{{[0-9]+}} = "ttnn.cos"(%{{[0-9]+}}, [[VAL0]])
-  %1 = "ttir.cos"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.cos"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   return %1 : tensor<64x128xf32>
 }
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_div.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_div.mlir
index 249c8e3143..a6b6a55a49 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_div.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_div.mlir
@@ -1,13 +1,10 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint<any_device>
-#any_device_tile = #tt.operand_constraint<any_device_tile>
-
 func.func @div(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
   // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<64x128xf32>
   // CHECK: %[[C:.*]] = "ttnn.div"[[C:.*]]
-  %1 = "ttir.div"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.div"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   return %1 : tensor<64x128xf32>
 }
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_embedding.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_embedding.mlir
index 343bb5e76a..583aa82e0f 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_embedding.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_embedding.mlir
@@ -1,13 +1,12 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint<any_device>
 module attributes {} {
   func.func @forward(%arg0: tensor<32x32xbf16>, %arg1: tensor<512x128xbf16>) -> tensor<32x32x128xbf16> {
     // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
     %0 = tensor.empty() : tensor<32x32x128xbf16>
     // CHECK: %[[C:.*]] = "ttnn.embedding"[[C:.*]]
-    %1 = "ttir.embedding"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<32x32xbf16>, tensor<512x128xbf16>, tensor<32x32x128xbf16>) -> tensor<32x32x128xbf16>
+    %1 = "ttir.embedding"(%arg0, %arg1, %0) : (tensor<32x32xbf16>, tensor<512x128xbf16>, tensor<32x32x128xbf16>) -> tensor<32x32x128xbf16>
     return %1 : tensor<32x32x128xbf16>
   }
 }
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_eq.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_eq.mlir
index 39fdcd6d10..44ff28faf9 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_eq.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_eq.mlir
@@ -1,10 +1,6 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-
-#any_device = #tt.operand_constraint<any_device>
-#any_device_tile = #tt.operand_constraint<any_device_tile>
-
 module attributes {} {
   func.func @equal(%arg0: tensor<13x31xf32>, %arg1: tensor<13x31xf32>) -> tensor<13x31xf32> {
     // CHECK: %[[C:.*]] = "ttnn.empty
@@ -15,7 +11,7 @@ module attributes {} {
     // CHECK-SAME: [[TENSOR]]
     // CHECK-SAME: [[TENSOR]]
     // CHECK-SAME: -> [[TENSOR]]
-    %1 = "ttir.eq"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32>
+    %1 = "ttir.eq"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32>
     return %1 : tensor<13x31xf32>
   }
 }
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_expm1.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_expm1.mlir
index 27cf6f80e9..7d035174c0 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_expm1.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_expm1.mlir
@@ -1,13 +1,10 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint<any_device>
-#any_device_tile = #tt.operand_constraint<any_device_tile>
-
 func.func @expm1(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
   %0 = tensor.empty() : tensor<64x128xf32>
   // CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}}) <{dtype = {{.*}}, layout = {{.*}}, memory_config = {{.*}}, <{{.*}}>>, shape = #ttnn.shape<[[TENSOR_SHAPE:[0-9]+x[0-9]+]]>}>
-  %1 = "ttir.expm1"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.expm1"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   // CHECK: %{{[0-9]+}} = "ttnn.expm1"(%{{[0-9]+}}, [[VAL0]]) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>, tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}) -> tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>
   return %1 : tensor<64x128xf32>
   // CHECK: return %{{[0-9]+}} : tensor<[[TENSOR_SHAPE]]xf32, {{.*}}>
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_floor.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_floor.mlir
index fa77817a84..d739275349 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_floor.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_floor.mlir
@@ -1,9 +1,6 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint<any_device>
-#any_device_tile = #tt.operand_constraint<any_device_tile>
-
 func.func @floor(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
   // CHECK: %{{[0-9]+}} = "ttnn.empty"
   // CHECK-SAME: [[TENSOR:tensor<64x128xf32,]]
@@ -12,6 +9,6 @@ func.func @floor(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
   // CHECK-SAME: [[TENSOR]]
   // CHECK-SAME: [[TENSOR]]
   // CHECK-SAME: -> [[TENSOR]]
-  %1 = "ttir.floor"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.floor"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   return %1 : tensor<64x128xf32>
 }
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_ge.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_ge.mlir
index 64e3b16e3c..07a6a56f12 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_ge.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_ge.mlir
@@ -1,13 +1,10 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint<any_device>
-#any_device_tile = #tt.operand_constraint<any_device_tile>
-
 func.func @ge(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
   // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<64x128xf32>
   // CHECK: %[[C:.*]] = "ttnn.ge"[[C:.*]]
-  %1 = "ttir.ge"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.ge"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   return %1 : tensor<64x128xf32>
 }
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_gt.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_gt.mlir
index 8357146605..e02ed1e954 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_gt.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_gt.mlir
@@ -1,10 +1,6 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-
-#any_device = #tt.operand_constraint<any_device>
-#any_device_tile = #tt.operand_constraint<any_device_tile>
-
 module attributes {} {
   func.func @greater_than(%arg0: tensor<13x31xf32>, %arg1: tensor<13x31xf32>) -> tensor<13x31xf32> {
     // CHECK: %[[C:.*]] = "ttnn.empty
@@ -15,7 +11,7 @@ module attributes {} {
     // CHECK-SAME: [[TENSOR]]
     // CHECK-SAME: [[TENSOR]]
     // CHECK-SAME: -> [[TENSOR]]
-    %1 = "ttir.gt"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32>
+    %1 = "ttir.gt"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32>
     return %1 : tensor<13x31xf32>
   }
 }
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_isfinite.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_isfinite.mlir
index f1489a5ebd..b8dc64fb72 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_isfinite.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_isfinite.mlir
@@ -1,8 +1,6 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint<any_device>
-#any_device_tile = #tt.operand_constraint<any_device_tile>
 
 func.func @is_finite(%arg0: tensor<64x128xbf16>) -> tensor<64x128xbf16> {
   // CHECK: %[[C:.*]] = "ttnn.empty"
@@ -12,6 +10,6 @@ func.func @is_finite(%arg0: tensor<64x128xbf16>) -> tensor<64x128xbf16> {
   // CHECK-SAME: tensor<64x128xbf16,
   // CHECK-SAME: [[TENSOR]]
   // CHECK-SAME: -> [[TENSOR]]
-  %1 = "ttir.isfinite"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
+  %1 = "ttir.isfinite"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
   return %1 : tensor<64x128xbf16>
 }
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_linear.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_linear.mlir
index 6da5d3910e..ab073ef75d 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_linear.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_linear.mlir
@@ -2,7 +2,6 @@
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
 
-#any_device_tile = #tt.operand_constraint<any_device_tile>
 module {
   func.func @linear(%arg0: tensor<64x128xbf16>, %arg1: tensor<128x64xbf16>, %bias: tensor<64x64xbf16>) -> tensor<64x64xbf16> {
     // CHECK: "ttnn.empty"
@@ -14,7 +13,7 @@ module {
     // CHECK-SAME: tensor<64x64xbf16
     // CHECK-SAME: tensor<64x64xbf16
     // CHECK-SAME: tensor<64x64xbf16
-    %1 = "ttir.linear"(%arg0, %arg1, %bias, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<64x128xbf16>, tensor<128x64xbf16>, tensor<64x64xbf16>, tensor<64x64xbf16>) -> tensor<64x64xbf16>
+    %1 = "ttir.linear"(%arg0, %arg1, %bias, %0) : (tensor<64x128xbf16>, tensor<128x64xbf16>, tensor<64x64xbf16>, tensor<64x64xbf16>) -> tensor<64x64xbf16>
     return %1 : tensor<64x64xbf16>
   }
 }
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_log.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_log.mlir
index b3de1bba4d..d4a7ed331b 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_log.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_log.mlir
@@ -1,13 +1,10 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint<any_device>
-#any_device_tile = #tt.operand_constraint<any_device_tile>
-
 func.func @log(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
   %0 = tensor.empty() : tensor<64x128xf32>
   // CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}})
   // CHECK: %{{[0-9]+}} = "ttnn.log"(%{{[0-9]+}}, [[VAL0]])
-  %1 = "ttir.log"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.log"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   return %1 : tensor<64x128xf32>
 }
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_log1p.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_log1p.mlir
index 2c32cc817b..3d50d3e88f 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_log1p.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_log1p.mlir
@@ -2,13 +2,10 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint<any_device>
-#any_device_tile = #tt.operand_constraint<any_device_tile>
-
 func.func @log1p(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
   %0 = tensor.empty() : tensor<64x128xf32>
   // CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}}) <{dtype = {{.*}}, layout = {{.*}}, memory_config = {{.*}}, <{{.*}}>>, shape = #ttnn.shape<[[TENSOR_SHAPE:[0-9]+x[0-9]+]]>}>
-  %1 = "ttir.log1p"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.log1p"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   // CHECK: %{{[0-9]+}} = "ttnn.log1p"(%{{[0-9]+}}, [[VAL0]]) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>, tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}) -> tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>
   return %1 : tensor<64x128xf32>
   // CHECK: return %{{[0-9]+}} : tensor<[[TENSOR_SHAPE]]xf32, {{.*}}>
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_lt.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_lt.mlir
index 1b3bca82cd..1f95207ba3 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_lt.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_lt.mlir
@@ -1,10 +1,6 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-
-#any_device = #tt.operand_constraint<any_device>
-#any_device_tile = #tt.operand_constraint<any_device_tile>
-
 module attributes {} {
   func.func @less_than(%arg0: tensor<13x31xf32>, %arg1: tensor<13x31xf32>) -> tensor<13x31xf32> {
     // CHECK: %[[C:.*]] = "ttnn.empty
@@ -15,7 +11,7 @@ module attributes {} {
     // CHECK-SAME: [[TENSOR]]
    // CHECK-SAME: [[TENSOR]]
     // CHECK-SAME: -> [[TENSOR]]
-    %1 = "ttir.lt"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32>
+    %1 = "ttir.lt"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32>
     return %1 : tensor<13x31xf32>
   }
 }
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_matmul.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_matmul.mlir
index 9c240b0ab7..f221001bb0 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_matmul.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_matmul.mlir
@@ -1,13 +1,12 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device_tile = #tt.operand_constraint<any_device_tile>
 // CHECK: #[[TILED_LAYOUT:.*]] = #ttnn.ttnn_layout<(d0, d1) -> (d0, d1), <1x1>, memref<2x4x!tt.tile<32x32, bf16>, #dram>, <interleaved>>
 module attributes {} {
   func.func @forward(%arg0: tensor<64x128xbf16>, %arg1: tensor<128x96xbf16>) -> tensor<64x96xbf16> {
     %0 = tensor.empty() : tensor<64x96xbf16>
     // CHECK: %[[C:.*]] = "ttnn.matmul"[[C:.*]]
-    %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<64x128xbf16>, tensor<128x96xbf16>, tensor<64x96xbf16>) -> tensor<64x96xbf16>
+    %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<64x128xbf16>, tensor<128x96xbf16>, tensor<64x96xbf16>) -> tensor<64x96xbf16>
     return %1 : tensor<64x96xbf16>
   }
 }
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_max.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_max.mlir
index fda1410054..1011fad896 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_max.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_max.mlir
@@ -1,11 +1,9 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint<any_device>
-
 func.func @max(%arg0: tensor<1x1x512x64xbf16>) -> tensor<1x1x512xbf16> {
   %0 = tensor.empty() : tensor<1x1x512xbf16>
   // CHECK: %[[C:.*]] = "ttnn.max"[[C:.*]]
-  %1 = "ttir.max"(%arg0, %0) <{dim_arg = [-1: i32], keep_dim = true, operand_constraints = [#any_device, #any_device]}> : (tensor<1x1x512x64xbf16>, tensor<1x1x512xbf16>) -> tensor<1x1x512xbf16>
+  %1 = "ttir.max"(%arg0, %0) <{dim_arg = [-1: i32], keep_dim = true}> : (tensor<1x1x512x64xbf16>, tensor<1x1x512xbf16>) -> tensor<1x1x512xbf16>
   return %1 : tensor<1x1x512xbf16>
 }
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_maximum.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_maximum.mlir
index 3642a5511c..3893bc9f0c 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_maximum.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_maximum.mlir
@@ -1,13 +1,10 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint<any_device>
-#any_device_tile = #tt.operand_constraint<any_device_tile>
-
 func.func @maximum(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
   // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<64x128xf32>
   // CHECK: %[[C:.*]] = "ttnn.maximum"[[C:.*]]
-  %1 = "ttir.maximum"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.maximum"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   return %1 : tensor<64x128xf32>
 }
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_maxpool2d.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_maxpool2d.mlir
index 4722e9c52d..4fdd836dd8 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_maxpool2d.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_maxpool2d.mlir
@@ -1,12 +1,11 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint<any_device>
 module attributes {} {
   func.func @forward(%arg0: tensor<1x128x128x32xbf16>) -> tensor<1x64x64x32xbf16> {
     %0 = tensor.empty() : tensor<1x64x64x32xbf16>
     // CHECK: %[[C:.*]] = "ttnn.max_pool2d"[[C:.*]]
-    %1 = "ttir.max_pool2d"(%arg0, %0) <{kernel_height=2: si32, kernel_width=2: si32, stride_height=2: si32, stride_width=2: si32, dilation_height=1: si32, dilation_width=1: si32, ceil_mode=false, padding_left=0: si32, padding_right=0: si32, padding_top=0: si32, padding_bottom=0: si32, operand_constraints = [#any_device, #any_device]}> : (tensor<1x128x128x32xbf16>, tensor<1x64x64x32xbf16>) -> tensor<1x64x64x32xbf16>
+    %1 = "ttir.max_pool2d"(%arg0, %0) <{kernel_height=2: si32, kernel_width=2: si32, stride_height=2: si32, stride_width=2: si32, dilation_height=1: si32, dilation_width=1: si32, ceil_mode=false, padding_left=0: si32, padding_right=0: si32, padding_top=0: si32, padding_bottom=0: si32}> : (tensor<1x128x128x32xbf16>, tensor<1x64x64x32xbf16>) -> tensor<1x64x64x32xbf16>
     return %1 : tensor<1x64x64x32xbf16>
   }
 }
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_multiply.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_multiply.mlir
index 8b53113c27..7991cbc786 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_multiply.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_multiply.mlir
@@ -1,13 +1,10 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint<any_device>
-#any_device_tile = #tt.operand_constraint<any_device_tile>
-
 func.func @multiply(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
   // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<64x128xf32>
   // CHECK: %[[C:.*]] = "ttnn.multiply"[[C:.*]]
-  %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   return %1 : tensor<64x128xf32>
 }
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_ne.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_ne.mlir
index 78e5b12458..300e66226d 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_ne.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_ne.mlir
@@ -1,10 +1,6 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-
-#any_device = #tt.operand_constraint<any_device>
-#any_device_tile = #tt.operand_constraint<any_device_tile>
-
 module attributes {} {
   func.func @not_equal(%arg0: tensor<13x31xf32>, %arg1: tensor<13x31xf32>) -> tensor<13x31xf32> {
     // CHECK: %[[C:.*]] = "ttnn.empty
@@ -15,7 +11,7 @@ module attributes {} {
     // CHECK-SAME: [[TENSOR]]
     // CHECK-SAME: [[TENSOR]]
     // CHECK-SAME: -> [[TENSOR]]
-    %1 = "ttir.ne"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32>
+    %1 = "ttir.ne"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32>
     return %1 : tensor<13x31xf32>
   }
 }
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_neg.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_neg.mlir
index b0aaaa8fda..907541764d 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_neg.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_neg.mlir
@@ -1,12 +1,9 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint<any_device>
-#any_device_tile = #tt.operand_constraint<any_device_tile>
-
 func.func @negate(%arg0: tensor<32x32xf32>) -> tensor<32x32xf32> {
   %0 = tensor.empty() : tensor<32x32xf32>
   // CHECK: %[[C:.*]] = "ttnn.neg"[[C:.*]]
-  %1 = "ttir.neg"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<32x32xf32>, tensor<32x32xf32>) -> tensor<32x32xf32>
+  %1 = "ttir.neg"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<32x32xf32>, tensor<32x32xf32>) -> tensor<32x32xf32>
   return %1 : tensor<32x32xf32>
 }
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_not.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_not.mlir
index c3429abd73..b9d07674ea 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_not.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_not.mlir
@@ -1,14 +1,10 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-
-#any_device = #tt.operand_constraint
-#any_device_tile = #tt.operand_constraint
-
 func.func @logical_not(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
   %0 = tensor.empty() : tensor<64x128xf32>
   // CHECK: {{.*}} = "ttnn.empty"{{.*}}
-  %1 = "ttir.logical_not"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.logical_not"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   // CHECK: %[[C:.*]] = "ttnn.logical_not"
   // CHECK-SAME: tensor<64x128xf32,
   // CHECK-SAME: tensor<64x128xf32,
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_or.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_or.mlir
index 21287a7397..e6c7ec5550 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_or.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_or.mlir
@@ -1,14 +1,10 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-
-#any_device = #tt.operand_constraint
-#any_device_tile = #tt.operand_constraint
-
 func.func @logical_or(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
   %0 = tensor.empty() : tensor<64x128xf32>
   // CHECK: {{.*}} = "ttnn.empty"{{.*}}
-  %1 = "ttir.logical_or"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.logical_or"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   // CHECK: %[[C:.*]] = "ttnn.logical_or"
   // CHECK-SAME: tensor<64x128xf32,
   // CHECK-SAME: tensor<64x128xf32,
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_reciprocal.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_reciprocal.mlir
index 8a5bf39f3b..d17444e4b8 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_reciprocal.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_reciprocal.mlir
@@ -1,13 +1,10 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint
-#any_device_tile = #tt.operand_constraint
-
 func.func @reciprocal(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
   // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<64x128xf32>
   // CHECK: %[[C:.*]] = "ttnn.reciprocal"[[C:.*]]
-  %1 = "ttir.reciprocal"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.reciprocal"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   return %1 : tensor<64x128xf32>
 }
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_relu.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_relu.mlir
index cec787a3b8..0ae23ec155 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_relu.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_relu.mlir
@@ -1,13 +1,10 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint
-#any_device_tile = #tt.operand_constraint
-
 func.func @relu(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
   // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<64x128xf32>
   // CHECK: %[[C:.*]] = "ttnn.relu"[[C:.*]]
-  %1 = "ttir.relu"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.relu"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   return %1 : tensor<64x128xf32>
 }
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_remainder.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_remainder.mlir
index 68375a9e06..e358d663ec 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_remainder.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_remainder.mlir
@@ -1,13 +1,10 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint
-#any_device_tile = #tt.operand_constraint
-
 func.func @remainder(%arg0: tensor<32x32xf32>, %arg1: tensor<32x32xf32>) -> tensor<32x32xf32> {
   %0 = tensor.empty() : tensor<32x32xf32>
   // CHECK: %[[EMPTY:.*]] = "ttnn.empty"{{.*}} -> tensor<32x32xf32, {{.*}}
-  %1 = "ttir.remainder"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<32x32xf32>, tensor<32x32xf32>, tensor<32x32xf32>) -> tensor<32x32xf32>
+  %1 = "ttir.remainder"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<32x32xf32>, tensor<32x32xf32>, tensor<32x32xf32>) -> tensor<32x32xf32>
   // CHECK: %[[REM:[0-9]+]] = "ttnn.remainder"({{.*}}, {{.*}}, %[[EMPTY]]){{.*}} -> tensor<32x32xf32, {{.*}}
   return %1 : tensor<32x32xf32>
   // CHECK: return {{.*}} : tensor<32x32xf32, {{.*}}
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_rsqrt.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_rsqrt.mlir
index 61a5f40556..4c85d11caf 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_rsqrt.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_rsqrt.mlir
@@ -1,13 +1,10 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint
-#any_device_tile = #tt.operand_constraint
-
 func.func @rsqrt(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
   // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<64x128xf32>
   // CHECK: %[[C:.*]] = "ttnn.rsqrt"[[C:.*]]
-  %1 = "ttir.rsqrt"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.rsqrt"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   return %1 : tensor<64x128xf32>
 }
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_sigmoid.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_sigmoid.mlir
index 1084d53216..9583be9577 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_sigmoid.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_sigmoid.mlir
@@ -1,13 +1,10 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint
-#any_device_tile = #tt.operand_constraint
-
 func.func @sigmoid(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
   // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<64x128xf32>
   // CHECK: %[[C:.*]] = "ttnn.sigmoid"[[C:.*]]
-  %1 = "ttir.sigmoid"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.sigmoid"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   return %1 : tensor<64x128xf32>
 }
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_sign.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_sign.mlir
index 543a54d3e7..26fe2b2d0e 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_sign.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_sign.mlir
@@ -1,13 +1,10 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint
-#any_device_tile = #tt.operand_constraint
-
 func.func @sign(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
   %0 = tensor.empty() : tensor<64x128xf32>
   // CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}}) <{dtype = {{.*}}, layout = {{.*}}, memory_config = {{.*}}, <{{.*}}>>, shape = #ttnn.shape<[[TENSOR_SHAPE:[0-9]+x[0-9]+]]>}>
-  %1 = "ttir.sign"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.sign"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   // CHECK: %{{[0-9]+}} = "ttnn.sign"(%{{[0-9]+}}, [[VAL0]]) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>, tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}) -> tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>
   return %1 : tensor<64x128xf32>
   // CHECK: return %{{[0-9]+}} : tensor<[[TENSOR_SHAPE]]xf32, {{.*}}>
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_sine.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_sine.mlir
index 36f71d8e6a..61fe517ead 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_sine.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_sine.mlir
@@ -1,13 +1,10 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint
-#any_device_tile = #tt.operand_constraint
-
 func.func @sine(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
   %0 = tensor.empty() : tensor<64x128xf32>
   // CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}})
   // CHECK: %{{[0-9]+}} = "ttnn.sin"(%{{[0-9]+}}, [[VAL0]])
-  %1 = "ttir.sin"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.sin"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   return %1 : tensor<64x128xf32>
 }
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_slice.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_slice.mlir
index 8d43cb7dc0..e321017815 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_slice.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_slice.mlir
@@ -1,12 +1,11 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device_tile = #tt.operand_constraint
 module attributes {} {
   func.func @forward(%arg0: tensor<4x32x32xbf16>) -> tensor<2x16x16xbf16> {
     %0 = tensor.empty() : tensor<2x16x16xbf16>
     // CHECK: %[[C:.*]] = "ttnn.slice"[[C:.*]]
-    %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 0: i32, 0: i32], ends = [2: i32, 16: i32, 16: i32], step = [1: i32, 1: i32, 1: i32], operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<4x32x32xbf16>, tensor<2x16x16xbf16>) -> tensor<2x16x16xbf16>
+    %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 0: i32, 0: i32], ends = [2: i32, 16: i32, 16: i32], step = [1: i32, 1: i32, 1: i32]}> : (tensor<4x32x32xbf16>, tensor<2x16x16xbf16>) -> tensor<2x16x16xbf16>
     return %1 : tensor<2x16x16xbf16>
   }
 }
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_softmax.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_softmax.mlir
index cdf8fae8df..34d4300192 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_softmax.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_softmax.mlir
@@ -1,17 +1,14 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint
-#any_device_tile = #tt.operand_constraint
-
 func.func @softmax(%arg0: tensor<512x1024xbf16>) -> tensor<512x1024xbf16> {
   %0 = tensor.empty() : tensor<512x1024xbf16>
   // CHECK: %[[C:.*]] = "ttnn.softmax"[[C:.*]]
   // Check for positive dimension attribute
-  %1 = "ttir.softmax"(%arg0, %0) <{dimension = 1 : si32, operand_constraints = [#any_device, #any_device]}> : (tensor<512x1024xbf16>, tensor<512x1024xbf16>) -> tensor<512x1024xbf16>
+  %1 = "ttir.softmax"(%arg0, %0) <{dimension = 1 : si32}> : (tensor<512x1024xbf16>, tensor<512x1024xbf16>) -> tensor<512x1024xbf16>
   %2 = tensor.empty() : tensor<512x1024xbf16>
   // CHECK: %[[C:.*]] = "ttnn.softmax"[[C:.*]]
   // Check for negative dimension attribute
-  %3 = "ttir.softmax"(%1, %2) <{dimension = -1 : si32, operand_constraints = [#any_device, #any_device]}> : (tensor<512x1024xbf16>, tensor<512x1024xbf16>) -> tensor<512x1024xbf16>
+  %3 = "ttir.softmax"(%1, %2) <{dimension = -1 : si32}> : (tensor<512x1024xbf16>, tensor<512x1024xbf16>) -> tensor<512x1024xbf16>
   return %3 : tensor<512x1024xbf16>
 }
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_sqrt.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_sqrt.mlir
index eeba82ec7a..72e7bb579f 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_sqrt.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_sqrt.mlir
@@ -1,13 +1,10 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint
-#any_device_tile = #tt.operand_constraint
-
 func.func @sqrt(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
   // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<64x128xf32>
   // CHECK: %[[C:.*]] = "ttnn.sqrt"[[C:.*]]
-  %1 = "ttir.sqrt"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.sqrt"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   return %1 : tensor<64x128xf32>
 }
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_subtract.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_subtract.mlir
index 75f9b0b7d7..679994dc53 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_subtract.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_subtract.mlir
@@ -1,13 +1,10 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint
-#any_device_tile = #tt.operand_constraint
-
 func.func @subtract(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
   // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<64x128xf32>
   // CHECK: %[[C:.*]] = "ttnn.subtract"[[C:.*]]
-  %1 = "ttir.subtract"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.subtract"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   return %1 : tensor<64x128xf32>
 }
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_sum.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_sum.mlir
index 432264760b..f0beb34b2a 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_sum.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_sum.mlir
@@ -1,11 +1,9 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint
-
 func.func @sum(%arg0: tensor<1x1x512x64xbf16>) -> tensor<1x1x512xbf16> {
   %0 = tensor.empty() : tensor<1x1x512xbf16>
   // CHECK: %[[C:.*]] = "ttnn.sum"[[C:.*]]
-  %1 = "ttir.sum"(%arg0, %0) <{dim_arg = [-1: i32], keep_dim = true, operand_constraints = [#any_device, #any_device]}> : (tensor<1x1x512x64xbf16>, tensor<1x1x512xbf16>) -> tensor<1x1x512xbf16>
+  %1 = "ttir.sum"(%arg0, %0) <{dim_arg = [-1: i32], keep_dim = true}> : (tensor<1x1x512x64xbf16>, tensor<1x1x512xbf16>) -> tensor<1x1x512xbf16>
   return %1 : tensor<1x1x512xbf16>
 }
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_transpose.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_transpose.mlir
index d9587863c3..0f2fc1b98d 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_transpose.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_transpose.mlir
@@ -1,12 +1,9 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint
-#any_device_tile = #tt.operand_constraint
-
 func.func @transpose(%arg0: tensor<64x128xbf16>) -> tensor<128x64xbf16> {
   %0 = tensor.empty() : tensor<128x64xbf16>
   // CHECK: %[[C:.*]] = "ttnn.transpose"[[C:.*]]
-  %1 = "ttir.transpose"(%arg0, %0) <{dim0 = 0 : si32, dim1 = 1 : si32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<64x128xbf16>, tensor<128x64xbf16>) -> tensor<128x64xbf16>
+  %1 = "ttir.transpose"(%arg0, %0) <{dim0 = 0 : si32, dim1 = 1 : si32}> : (tensor<64x128xbf16>, tensor<128x64xbf16>) -> tensor<128x64xbf16>
   return %1 : tensor<128x64xbf16>
 }
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_typecast.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_typecast.mlir
index cb4f2d64f7..82666195dd 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_typecast.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_typecast.mlir
@@ -2,12 +2,11 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint
 func.func @typecast(%arg0: tensor<64x128xf32>) -> tensor<64x128xbf16> {
   %0 = tensor.empty() : tensor<64x128xbf16>
   // CHECK: %[[C:.*]] = "ttnn.typecast"
   // CHECK-SAME: tensor<64x128xf32,
   // CHECK-SAME: tensor<64x128xbf16,
-  %1 = "ttir.typecast"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
+  %1 = "ttir.typecast"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
   return %1 : tensor<64x128xbf16>
 }
diff --git a/test/ttmlir/Silicon/TTNN/sharded/simple_eltwise_sharded.mlir b/test/ttmlir/Silicon/TTNN/sharded/simple_eltwise_sharded.mlir
index ff8caa4f7f..d74b582ede 100644
--- a/test/ttmlir/Silicon/TTNN/sharded/simple_eltwise_sharded.mlir
+++ b/test/ttmlir/Silicon/TTNN/sharded/simple_eltwise_sharded.mlir
@@ -1,14 +1,11 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path% enable-optimizer=false" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#l1_block_sharded = #tt.operand_constraint
-#l1_height_sharded = #tt.operand_constraint
-
 func.func @subtract(%arg0: tensor<224x64xf32>, %arg1: tensor<224x64xf32>) -> tensor<224x64xf32> {
   // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<224x64xf32>
   // CHECK: %[[C:.*]] = "ttnn.subtract"[[C:.*]]
-  %1 = "ttir.subtract"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#l1_block_sharded, #l1_block_sharded, #l1_block_sharded]}> : (tensor<224x64xf32>, tensor<224x64xf32>, tensor<224x64xf32>) -> tensor<224x64xf32>
+  %1 = "ttir.subtract"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<224x64xf32>, tensor<224x64xf32>, tensor<224x64xf32>) -> tensor<224x64xf32>
   return %1 : tensor<224x64xf32>
 }
@@ -16,7 +13,7 @@ func.func @div(%arg0: tensor<224x64xf32>, %arg1: tensor<224x64xf32>) -> tensor<2
   // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<224x64xf32>
   // CHECK: %[[C:.*]] = "ttnn.div"[[C:.*]]
-  %1 = "ttir.div"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#l1_block_sharded, #l1_block_sharded, #l1_block_sharded]}> : (tensor<224x64xf32>, tensor<224x64xf32>, tensor<224x64xf32>) -> tensor<224x64xf32>
+  %1 = "ttir.div"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<224x64xf32>, tensor<224x64xf32>, tensor<224x64xf32>) -> tensor<224x64xf32>
   return %1 : tensor<224x64xf32>
 }
@@ -24,7 +21,7 @@ func.func @multiply(%arg0: tensor<224x64xf32>, %arg1: tensor<224x64xf32>) -> ten
   // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<224x64xf32>
   // CHECK: %[[C:.*]] = "ttnn.multiply"[[C:.*]]
-  %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#l1_block_sharded, #l1_block_sharded, #l1_block_sharded]}> : (tensor<224x64xf32>, tensor<224x64xf32>, tensor<224x64xf32>) -> tensor<224x64xf32>
+  %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<224x64xf32>, tensor<224x64xf32>, tensor<224x64xf32>) -> tensor<224x64xf32>
   return %1 : tensor<224x64xf32>
 }
@@ -32,7 +29,7 @@ func.func @relu(%arg0: tensor<224x64xf32>) -> tensor<224x64xf32> {
   // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<224x64xf32>
   // CHECK: %[[C:.*]] = "ttnn.relu"[[C:.*]]
-  %1 = "ttir.relu"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#l1_block_sharded, #l1_block_sharded]}> : (tensor<224x64xf32>, tensor<224x64xf32>) -> tensor<224x64xf32>
+  %1 = "ttir.relu"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<224x64xf32>, tensor<224x64xf32>) -> tensor<224x64xf32>
   return %1 : tensor<224x64xf32>
 }
@@ -40,21 +37,21 @@ func.func @ge(%arg0: tensor<224x64xf32>, %arg1: tensor<224x64xf32>) -> tensor<22
   // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<224x64xf32>
   // CHECK: %[[C:.*]] = "ttnn.ge"[[C:.*]]
-  %1 = "ttir.ge"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#l1_block_sharded, #l1_block_sharded, #l1_block_sharded]}> : (tensor<224x64xf32>, tensor<224x64xf32>, tensor<224x64xf32>) -> tensor<224x64xf32>
+  %1 = "ttir.ge"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<224x64xf32>, tensor<224x64xf32>, tensor<224x64xf32>) -> tensor<224x64xf32>
   return %1 : tensor<224x64xf32>
 }
 
 func.func @reshape(%arg0: tensor<4x2x224x64xbf16>) -> tensor<2x4x224x64xbf16> {
   %0 = tensor.empty() : tensor<2x4x224x64xbf16>
   // CHECK: %[[C:.*]] = "ttnn.reshape"[[C:.*]]
-  %1 = "ttir.reshape"(%arg0, %0) <{shape = [2: i32, 4: i32, 224: i32, 64: i32] , operand_constraints = [#l1_height_sharded, #l1_height_sharded]}> : (tensor<4x2x224x64xbf16>, tensor<2x4x224x64xbf16>) -> tensor<2x4x224x64xbf16>
+  %1 = "ttir.reshape"(%arg0, %0) <{shape = [2: i32, 4: i32, 224: i32, 64: i32]}> : (tensor<4x2x224x64xbf16>, tensor<2x4x224x64xbf16>) -> tensor<2x4x224x64xbf16>
   return %1 : tensor<2x4x224x64xbf16>
 }
 
 func.func @squeeze(%arg0: tensor<1x2x1x224x64xbf16>) -> tensor<1x2x224x64xbf16> {
   %0 = tensor.empty() : tensor<1x2x224x64xbf16>
   // CHECK: %[[C:.*]] = "ttnn.reshape"[[C:.*]]
-  %1 = "ttir.squeeze"(%arg0, %0) <{dim = 2 : si32, operand_constraints = [#l1_height_sharded, #l1_height_sharded]}> : (tensor<1x2x1x224x64xbf16>, tensor<1x2x224x64xbf16>) -> tensor<1x2x224x64xbf16>
+  %1 = "ttir.squeeze"(%arg0, %0) <{dim = 2 : si32}> : (tensor<1x2x1x224x64xbf16>, tensor<1x2x224x64xbf16>) -> tensor<1x2x224x64xbf16>
   return %1 : tensor<1x2x224x64xbf16>
 }
@@ -62,7 +59,7 @@ func.func @reciprocal(%arg0: tensor<224x64xf32>) -> tensor<224x64xf32> {
   // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<224x64xf32>
   // CHECK: %[[C:.*]] = "ttnn.reciprocal"[[C:.*]]
-  %1 = "ttir.reciprocal"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#l1_block_sharded, #l1_block_sharded]}> : (tensor<224x64xf32>, tensor<224x64xf32>) -> tensor<224x64xf32>
+  %1 = "ttir.reciprocal"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<224x64xf32>, tensor<224x64xf32>) -> tensor<224x64xf32>
   return %1 : tensor<224x64xf32>
 }
@@ -70,7 +67,7 @@ func.func @sigmoid(%arg0: tensor<224x64xf32>) -> tensor<224x64xf32> {
   // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<224x64xf32>
   // CHECK: %[[C:.*]] = "ttnn.sigmoid"[[C:.*]]
-  %1 = "ttir.sigmoid"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#l1_block_sharded, #l1_block_sharded]}> : (tensor<224x64xf32>, tensor<224x64xf32>) -> tensor<224x64xf32>
+  %1 = "ttir.sigmoid"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<224x64xf32>, tensor<224x64xf32>) -> tensor<224x64xf32>
   return %1 : tensor<224x64xf32>
 }
@@ -78,7 +75,7 @@ func.func @sqrt(%arg0: tensor<224x64xf32>) -> tensor<224x64xf32> {
   // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<224x64xf32>
   // CHECK: %[[C:.*]] = "ttnn.sqrt"[[C:.*]]
-  %1 = "ttir.sqrt"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#l1_block_sharded, #l1_block_sharded]}> : (tensor<224x64xf32>, tensor<224x64xf32>) -> tensor<224x64xf32>
+  %1 = "ttir.sqrt"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<224x64xf32>, tensor<224x64xf32>) -> tensor<224x64xf32>
   return %1 : tensor<224x64xf32>
 }
diff --git a/test/ttmlir/Silicon/TTNN/simple_compare.mlir b/test/ttmlir/Silicon/TTNN/simple_compare.mlir
index f53ba7530d..5263c4fe41 100644
--- a/test/ttmlir/Silicon/TTNN/simple_compare.mlir
+++ b/test/ttmlir/Silicon/TTNN/simple_compare.mlir
@@ -1,10 +1,6 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-
-#any_device = #tt.operand_constraint
-#any_device_tile = #tt.operand_constraint
-
 module attributes {} {
   func.func @equal(%arg0: tensor<13x31xf32>, %arg1: tensor<13x31xf32>) -> tensor<13x31xf32> {
     // CHECK: %[[C:.*]] = "ttnn.empty
@@ -15,7 +11,7 @@ module attributes {} {
     // CHECK-SAME: [[TENSOR]]
    // CHECK-SAME: [[TENSOR]]
     // CHECK-SAME: -> [[TENSOR]]
-    %1 = "ttir.eq"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32>
+    %1 = "ttir.eq"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32>
     return %1 : tensor<13x31xf32>
   }
@@ -28,7 +24,7 @@ module attributes {} {
     // CHECK-SAME: [[TENSOR]]
     // CHECK-SAME: [[TENSOR]]
     // CHECK-SAME: -> [[TENSOR]]
-    %1 = "ttir.ne"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32>
+    %1 = "ttir.ne"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32>
     return %1 : tensor<13x31xf32>
   }
@@ -41,7 +37,7 @@ module attributes {} {
     // CHECK-SAME: [[TENSOR]]
     // CHECK-SAME: [[TENSOR]]
     // CHECK-SAME: -> [[TENSOR]]
-    %1 = "ttir.ge"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32>
+    %1 = "ttir.ge"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32>
     return %1 : tensor<13x31xf32>
   }
@@ -54,7 +50,7 @@ module attributes {} {
     // CHECK-SAME: [[TENSOR]]
     // CHECK-SAME: [[TENSOR]]
     // CHECK-SAME: -> [[TENSOR]]
-    %1 = "ttir.gt"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32>
+    %1 = "ttir.gt"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32>
     return %1 : tensor<13x31xf32>
   }
@@ -67,7 +63,7 @@ module attributes {} {
     // CHECK-SAME: [[TENSOR]]
     // CHECK-SAME: [[TENSOR]]
     // CHECK-SAME: -> [[TENSOR]]
-    %1 = "ttir.le"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32>
+    %1 = "ttir.le"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32>
     return %1 : tensor<13x31xf32>
   }
@@ -80,7 +76,7 @@ module attributes {} {
     // CHECK-SAME: [[TENSOR]]
     // CHECK-SAME: [[TENSOR]]
     // CHECK-SAME: -> [[TENSOR]]
-    %1 = "ttir.lt"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32>
+    %1 = "ttir.lt"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32>
     return %1 : tensor<13x31xf32>
   }
 }
diff --git a/test/ttmlir/Silicon/TTNN/simple_conv.mlir b/test/ttmlir/Silicon/TTNN/simple_conv.mlir
index 543f057630..13708ef16a 100644
--- a/test/ttmlir/Silicon/TTNN/simple_conv.mlir
+++ b/test/ttmlir/Silicon/TTNN/simple_conv.mlir
@@ -1,12 +1,11 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint
 module attributes {} {
   func.func @forward(%arg0: tensor<1x32x32x64xbf16>, %arg1: tensor<64x64x3x3xbf16>, %arg2: tensor<1x1x1x64xbf16>) -> tensor<1x32x32x64xbf16> {
     %0 = tensor.empty() : tensor<1x32x32x64xbf16>
     // CHECK: %[[C:.*]] = "ttnn.conv2d"[[C:.*]]
-    %1 = "ttir.conv2d"(%arg0, %arg1, %arg2, %0) <{stride_height=1: si32, stride_width=1: si32, dilation_height=1: si32, dilation_width=1: si32, groups=1: si32, padding_left=1: si32, padding_right=1: si32, padding_top=1: si32, padding_bottom=1: si32, is_convtranspose2d=0: si32, output_height_transpose=0: si32, output_width_transpose=0: si32, stride_transpose=0: si32, operand_constraints = [#any_device, #any_device, #any_device, #any_device]}> : (tensor<1x32x32x64xbf16>, tensor<64x64x3x3xbf16>, tensor<1x1x1x64xbf16>, tensor<1x32x32x64xbf16>) -> tensor<1x32x32x64xbf16>
+    %1 = "ttir.conv2d"(%arg0, %arg1, %arg2, %0) <{stride_height=1: si32, stride_width=1: si32, dilation_height=1: si32, dilation_width=1: si32, groups=1: si32, padding_left=1: si32, padding_right=1: si32, padding_top=1: si32, padding_bottom=1: si32, is_convtranspose2d=0: si32, output_height_transpose=0: si32, output_width_transpose=0: si32, stride_transpose=0: si32}> : (tensor<1x32x32x64xbf16>, tensor<64x64x3x3xbf16>, tensor<1x1x1x64xbf16>, tensor<1x32x32x64xbf16>) -> tensor<1x32x32x64xbf16>
     return %1 : tensor<1x32x32x64xbf16>
   }
 }
diff --git a/test/ttmlir/Silicon/TTNN/simple_eltwise.mlir b/test/ttmlir/Silicon/TTNN/simple_eltwise.mlir
index b0fb94cc6d..a0452f01f8 100644
--- a/test/ttmlir/Silicon/TTNN/simple_eltwise.mlir
+++ b/test/ttmlir/Silicon/TTNN/simple_eltwise.mlir
@@ -1,14 +1,11 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint
-#any_device_tile = #tt.operand_constraint
-
 func.func @add(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
   // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<64x128xf32>
   // CHECK: %[[C:.*]] = "ttnn.add"[[C:.*]]
-  %1 = "ttir.add"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.add"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   return %1 : tensor<64x128xf32>
 }
@@ -16,7 +13,7 @@ func.func @ceil(%arg0: tensor<32x32xf32>) -> tensor<32x32xf32> {
   %0 = tensor.empty() : tensor<32x32xf32>
   // CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}})
   // CHECK: %{{[0-9]+}} = "ttnn.ceil"(%{{[0-9]+}}, [[VAL0]])
-  %1 = "ttir.ceil"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<32x32xf32>, tensor<32x32xf32>) -> tensor<32x32xf32>
+  %1 = "ttir.ceil"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<32x32xf32>, tensor<32x32xf32>) -> tensor<32x32xf32>
   return %1 : tensor<32x32xf32>
 }
@@ -27,7 +24,7 @@ func.func @clamp(%arg0: tensor<64x128xbf16>) -> tensor<64x128xbf16> {
   // CHECK: = "ttnn.clamp"(%[[LAYOUT]])
   // CHECK-SAME: {max = 3.000000e+00 : f32, min = 2.000000e+00 : f32}
   // CHECK-SAME: [[TENSOR:tensor<64x128xbf16]], #ttnn_layout{{[0-9]+}}>) -> [[TENSOR]]
-  %1 = "ttir.clamp"(%arg0, %0) <{max = 3.000000e+00 : f32, min = 2.000000e+00 : f32, operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<64x128xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
+  %1 = "ttir.clamp"(%arg0, %0) <{max = 3.000000e+00 : f32, min = 2.000000e+00 : f32}> : (tensor<64x128xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
   return %1 : tensor<64x128xbf16>
 }
@@ -35,7 +32,7 @@ func.func @concat(%arg0: tensor<32x32xf32>, %arg1: tensor<32x64xf32>) -> tensor<
   // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<32x96xf32>
   // CHECK: %[[C:.*]] = "ttnn.concat"[[C:.*]]
-  %1 = "ttir.concat"(%arg0, %arg1, %0) <{dim = 1 : si32, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<32x32xf32>, tensor<32x64xf32>, tensor<32x96xf32>) -> tensor<32x96xf32>
+  %1 = "ttir.concat"(%arg0, %arg1, %0) <{dim = 1 : si32}> : (tensor<32x32xf32>, tensor<32x64xf32>, tensor<32x96xf32>) -> tensor<32x96xf32>
   return %1 : tensor<32x96xf32>
 }
@@ -43,7 +40,7 @@ func.func @cosine(%arg0: tensor<32x32xf32>) -> tensor<32x32xf32> {
   %0 = tensor.empty() : tensor<32x32xf32>
   // CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}})
   // CHECK: %{{[0-9]+}} = "ttnn.cos"(%{{[0-9]+}}, [[VAL0]])
-  %1 = "ttir.cos"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<32x32xf32>, tensor<32x32xf32>) -> tensor<32x32xf32>
+  %1 = "ttir.cos"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<32x32xf32>, tensor<32x32xf32>) -> tensor<32x32xf32>
   return %1 : tensor<32x32xf32>
 }
@@ -51,7 +48,7 @@ func.func @div(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<6
   // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<64x128xf32>
   // CHECK: %[[C:.*]] = "ttnn.div"[[C:.*]]
-  %1 = "ttir.div"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.div"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   return %1 : tensor<64x128xf32>
 }
@@ -63,7 +60,7 @@ func.func @floor(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
   // CHECK-SAME: [[TENSOR]]
   // CHECK-SAME: [[TENSOR]]
   // CHECK-SAME: -> [[TENSOR]]
-  %1 = "ttir.floor"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.floor"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   return %1 : tensor<64x128xf32>
 }
@@ -75,7 +72,7 @@ func.func @is_finite(%arg0: tensor<64x128xbf16>) -> tensor<64x128xbf16> {
   // CHECK-SAME: tensor<64x128xbf16,
   // CHECK-SAME: [[TENSOR]]
   // CHECK-SAME: -> [[TENSOR]]
-  %1 = "ttir.isfinite"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
+  %1 = "ttir.isfinite"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
   return %1 : tensor<64x128xbf16>
 }
@@ -88,7 +85,7 @@ func.func @minimum(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tens
   // CHECK-SAME: [[TENSOR]]
   // CHECK-SAME: [[TENSOR]]
   // CHECK-SAME: -> [[TENSOR]]
-  %1 = "ttir.minimum"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.minimum"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   return %1 : tensor<64x128xf32>
 }
@@ -96,7 +93,7 @@ func.func @ge(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64
   // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<64x128xf32>
   // CHECK: %[[C:.*]] = "ttnn.ge"[[C:.*]]
-  %1 = "ttir.ge"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.ge"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   return %1 : tensor<64x128xf32>
 }
@@ -104,7 +101,7 @@ func.func @maximum(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tens
   // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<64x128xf32>
   // CHECK: %[[C:.*]] = "ttnn.maximum"[[C:.*]]
-  %1 = "ttir.maximum"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.maximum"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   return %1 : tensor<64x128xf32>
 }
@@ -112,14 +109,14 @@ func.func @multiply(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> ten
   // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<64x128xf32>
   // CHECK: %[[C:.*]] = "ttnn.multiply"[[C:.*]]
-  %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   return %1 : tensor<64x128xf32>
 }
 
 func.func @negate(%arg0: tensor<32x32xf32>) -> tensor<32x32xf32> {
   %0 = tensor.empty() : tensor<32x32xf32>
   // CHECK: %[[C:.*]] = "ttnn.neg"[[C:.*]]
-  %1 = "ttir.neg"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<32x32xf32>, tensor<32x32xf32>) -> tensor<32x32xf32>
+  %1 = "ttir.neg"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<32x32xf32>, tensor<32x32xf32>) -> tensor<32x32xf32>
   return %1 : tensor<32x32xf32>
 }
@@ -127,7 +124,7 @@ func.func @reciprocal(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
   // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<64x128xf32>
   // CHECK: %[[C:.*]] = "ttnn.reciprocal"[[C:.*]]
-  %1 = "ttir.reciprocal"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.reciprocal"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   return %1 : tensor<64x128xf32>
 }
@@ -135,7 +132,7 @@ func.func @relu(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
   // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<64x128xf32>
   // CHECK: %[[C:.*]] = "ttnn.relu"[[C:.*]]
-  %1 = "ttir.relu"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.relu"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   return %1 : tensor<64x128xf32>
 }
@@ -143,21 +140,21 @@ func.func @leaky_relu(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
   // CHECK: %[[C:.*]] = "ttnn.empty"
   %0 = tensor.empty() : tensor<64x128xf32>
   // CHECK: %[[C:.*]] = "ttnn.leaky_relu"
-  %1 = "ttir.leaky_relu"(%arg0, %0) <{parameter = 0.01 : f32, operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.leaky_relu"(%arg0, %0) <{parameter = 0.01 : f32, operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   return %1 : tensor<64x128xf32>
 }
 
 func.func @reshape(%arg0: tensor<4x2x32x32xbf16>) -> tensor<2x4x32x32xbf16> {
   %0 = tensor.empty() : tensor<2x4x32x32xbf16>
   // CHECK: %[[C:.*]] = "ttnn.reshape"[[C:.*]]
-  %1 = "ttir.reshape"(%arg0, %0) <{shape = [2: i32, 4: i32, 32: i32, 32: i32] , operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<4x2x32x32xbf16>, tensor<2x4x32x32xbf16>) -> tensor<2x4x32x32xbf16>
+  %1 = "ttir.reshape"(%arg0, %0) <{shape = [2: i32, 4: i32, 32: i32, 32: i32]}> : (tensor<4x2x32x32xbf16>, tensor<2x4x32x32xbf16>) -> tensor<2x4x32x32xbf16>
   return %1 : tensor<2x4x32x32xbf16>
 }
 
 func.func @squeeze(%arg0: tensor<1x2x1x32x32xbf16>) -> tensor<1x2x32x32xbf16> {
   %0 = tensor.empty() : tensor<1x2x32x32xbf16>
   // CHECK: %[[C:.*]] = "ttnn.reshape"[[C:.*]]
-  %1 = "ttir.squeeze"(%arg0, %0) <{dim = 2 : si32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<1x2x1x32x32xbf16>, tensor<1x2x32x32xbf16>) -> tensor<1x2x32x32xbf16>
+  %1 = "ttir.squeeze"(%arg0, %0) <{dim = 2 : si32}> : (tensor<1x2x1x32x32xbf16>, tensor<1x2x32x32xbf16>) -> tensor<1x2x32x32xbf16>
   return %1 : tensor<1x2x32x32xbf16>
 }
@@ -165,7 +162,7 @@ func.func @subtract(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> ten
   // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<64x128xf32>
   // CHECK: %[[C:.*]] = "ttnn.subtract"[[C:.*]]
-  %1 = "ttir.subtract"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.subtract"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   return %1 : tensor<64x128xf32>
 }
@@ -173,7 +170,7 @@ func.func @rsqrt(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
   // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<64x128xf32>
   // CHECK: %[[C:.*]] = "ttnn.rsqrt"[[C:.*]]
-  %1 = "ttir.rsqrt"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.rsqrt"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   return %1 : tensor<64x128xf32>
 }
@@ -181,7 +178,7 @@ func.func @sigmoid(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
   // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<64x128xf32>
   // CHECK: %[[C:.*]] = "ttnn.sigmoid"[[C:.*]]
-  %1 = "ttir.sigmoid"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.sigmoid"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   return %1 : tensor<64x128xf32>
 }
@@ -189,7 +186,7 @@ func.func @sqrt(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
   // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<64x128xf32>
   // CHECK: %[[C:.*]] = "ttnn.sqrt"[[C:.*]]
-  %1 = "ttir.sqrt"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.sqrt"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   return %1 : tensor<64x128xf32>
 }
@@ -197,7 +194,7 @@ func.func @sine(%arg0: tensor<32x32xf32>) -> tensor<32x32xf32> {
   %0 = tensor.empty() : tensor<32x32xf32>
   // CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}})
   // CHECK: %{{[0-9]+}} = "ttnn.sin"(%{{[0-9]+}}, [[VAL0]])
-  %1 = "ttir.sin"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<32x32xf32>, tensor<32x32xf32>) -> tensor<32x32xf32>
+  %1 = "ttir.sin"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<32x32xf32>, tensor<32x32xf32>) -> tensor<32x32xf32>
   return %1 : tensor<32x32xf32>
 }
@@ -205,11 +202,11 @@ func.func @softmax(%arg0: tensor<512x1024xbf16>) -> tensor<512x1024xbf16> {
   %0 = tensor.empty() : tensor<512x1024xbf16>
   // CHECK: %[[C:.*]] = "ttnn.softmax"[[C:.*]]
   // Check for positive dimension attribute
-  %1 = "ttir.softmax"(%arg0, %0) <{dimension = 1 : si32, operand_constraints = [#any_device, #any_device]}> : (tensor<512x1024xbf16>, tensor<512x1024xbf16>) -> tensor<512x1024xbf16>
+  %1 = "ttir.softmax"(%arg0, %0) <{dimension = 1 : si32}> : (tensor<512x1024xbf16>, tensor<512x1024xbf16>) -> tensor<512x1024xbf16>
   %2 = tensor.empty() : tensor<512x1024xbf16>
   // CHECK: %[[C:.*]] = "ttnn.softmax"[[C:.*]]
   // Check for negative dimension attribute
-  %3 = "ttir.softmax"(%1, %2) <{dimension = -1 : si32, operand_constraints = [#any_device, #any_device]}> : (tensor<512x1024xbf16>, tensor<512x1024xbf16>) -> tensor<512x1024xbf16>
+  %3 = "ttir.softmax"(%1, %2) <{dimension = -1 : si32}> : (tensor<512x1024xbf16>, tensor<512x1024xbf16>) -> tensor<512x1024xbf16>
   return %3 : tensor<512x1024xbf16>
 }
@@ -217,7 +214,7 @@ func.func @cbrt(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
   // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<64x128xf32>
   // CHECK: %[[C:.*]] = "ttnn.cbrt"[[C:.*]]
-  %1 = "ttir.cbrt"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.cbrt"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   return %1 : tensor<64x128xf32>
 }
@@ -226,7 +223,7 @@ func.func @typecast(%arg0: tensor<64x128xf32>) -> tensor<64x128xbf16> {
   // CHECK: %[[C:.*]] = "ttnn.typecast"
   // CHECK-SAME: tensor<64x128xf32,
   // CHECK-SAME: tensor<64x128xbf16,
-  %1 = "ttir.typecast"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
+  %1 = "ttir.typecast"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
   return %1 : tensor<64x128xbf16>
 }
@@ -234,14 +231,14 @@ func.func @log(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<6
   // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<64x128xf32>
   // CHECK: %[[C:.*]] = "ttnn.log"[[C:.*]]
-  %1 = "ttir.log"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.log"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   return %1 : tensor<64x128xf32>
 }
 
 func.func @log1p(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
   %0 = tensor.empty() : tensor<64x128xf32>
   // CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}}) <{dtype = {{.*}}, layout = {{.*}}, memory_config = {{.*}}, <{{.*}}>>, shape = #ttnn.shape<[[TENSOR_SHAPE:[0-9]+x[0-9]+]]>}>
-  %1 = "ttir.log1p"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.log1p"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   // CHECK: %{{[0-9]+}} = "ttnn.log1p"(%{{[0-9]+}}, [[VAL0]]) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>, tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}) -> tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>
   return %1 : tensor<64x128xf32>
   // CHECK: return %{{[0-9]+}} : tensor<[[TENSOR_SHAPE]]xf32, {{.*}}>
@@ -250,7 +247,7 @@ func.func @log1p(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
 func.func @expm1(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
   %0 = tensor.empty() : tensor<64x128xf32>
   // CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}}) <{dtype = {{.*}}, layout = {{.*}}, memory_config = {{.*}}, <{{.*}}>>, shape = #ttnn.shape<[[TENSOR_SHAPE:[0-9]+x[0-9]+]]>}>
-  %1 = "ttir.expm1"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.expm1"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   // CHECK: %{{[0-9]+}} = "ttnn.expm1"(%{{[0-9]+}}, [[VAL0]]) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>, tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}) -> tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>
   return %1 : tensor<64x128xf32>
   // CHECK: return %{{[0-9]+}} : tensor<[[TENSOR_SHAPE]]xf32, {{.*}}>
@@ -259,7 +256,7 @@ func.func @expm1(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
 func.func @sign(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
   %0 = tensor.empty() : tensor<64x128xf32>
   // CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}}) <{dtype = {{.*}}, layout = {{.*}}, memory_config = {{.*}}, <{{.*}}>>, shape = #ttnn.shape<[[TENSOR_SHAPE:[0-9]+x[0-9]+]]>}>
-  %1 = "ttir.sign"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.sign"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   // CHECK: %{{[0-9]+}} = "ttnn.sign"(%{{[0-9]+}}, [[VAL0]]) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>, tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}) -> tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>
   return %1 : tensor<64x128xf32>
   // CHECK: return %{{[0-9]+}} : tensor<[[TENSOR_SHAPE]]xf32, {{.*}}>
@@ -268,7 +265,7 @@ func.func @sign(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
 func.func @remainder(%arg0: tensor<32x32xf32>, %arg1: tensor<32x32xf32>) -> tensor<32x32xf32> {
   %0 = tensor.empty() : tensor<32x32xf32>
   // CHECK: %[[EMPTY:.*]] = "ttnn.empty"{{.*}} -> tensor<32x32xf32, {{.*}}
-  %1 = "ttir.remainder"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<32x32xf32>, tensor<32x32xf32>, tensor<32x32xf32>) -> tensor<32x32xf32>
+  %1 = "ttir.remainder"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<32x32xf32>, tensor<32x32xf32>, tensor<32x32xf32>) -> tensor<32x32xf32>
   // CHECK: %[[REM:[0-9]+]] = "ttnn.remainder"({{.*}}, {{.*}}, %[[EMPTY]]){{.*}} -> tensor<32x32xf32, {{.*}}
   return %1 : tensor<32x32xf32>
   // CHECK: return {{.*}} : tensor<32x32xf32, {{.*}}
@@ -283,9 +280,9 @@ func.func @get_dimension_size(%arg0: tensor<13x21x3xf32>) -> tensor<1xi32> {
 
 func.func @test_where(%arg0: tensor<13x37xbf16>, %arg1: tensor<13x37xbf16>) -> tensor<13x37xbf16> {
   %0 = tensor.empty() : tensor<13x37xbf16>
-  %1 = "ttir.eq"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<13x37xbf16>, tensor<13x37xbf16>, tensor<13x37xbf16>) -> tensor<13x37xbf16>
+  %1 = "ttir.eq"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<13x37xbf16>, tensor<13x37xbf16>, tensor<13x37xbf16>) -> tensor<13x37xbf16>
   %2 = tensor.empty() : tensor<13x37xbf16>
-  %3 = "ttir.where"(%1, %arg0, %arg1, %2) <{operandSegmentSizes = array<i32: 3, 1>, operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<13x37xbf16>, tensor<13x37xbf16>, tensor<13x37xbf16>, tensor<13x37xbf16>) -> tensor<13x37xbf16>
+  %3 = "ttir.where"(%1, %arg0, %arg1, %2) <{operandSegmentSizes = array<i32: 3, 1>}> : (tensor<13x37xbf16>, tensor<13x37xbf16>, tensor<13x37xbf16>, tensor<13x37xbf16>) -> tensor<13x37xbf16>
   // CHECK: %[[EMPTY:.*]] = "ttnn.empty"{{.*}}
   // CHECK: %[[VAL1:[0-9]+]] = "ttnn.eq"(%{{[0-9]+}}, %{{[0-9]+}}, %[[EMPTY]])
   // CHECK: %{{[0-9]+}} = "ttnn.where"(%[[VAL1]], %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}})
@@ -300,7 +297,7 @@ func.func @gelu(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
   // CHECK-SAME: tensor<64x128xf32,
   // CHECK-SAME: tensor<64x128xf32,
   // CHECK-SAME: tensor<64x128xf32,
-  %1 = "ttir.gelu"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  %1 = "ttir.gelu"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
   return %1 : tensor<64x128xf32>
 }
@@ -308,7 +305,7 @@ func.func @tan(%arg0: tensor<64x128xbf16>) -> tensor<64x128xbf16> {
   %0 = tensor.empty() : tensor<64x128xbf16>
   // CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}})
   // CHECK: %{{[0-9]+}} = "ttnn.tan"(%{{[0-9]+}}, [[VAL0]])
-  %1 = "ttir.tan"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
+  %1 = "ttir.tan"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
   return %1 : tensor<64x128xbf16>
 }
@@ -316,20 +313,20 @@ func.func @tanh(%arg0: tensor<64x128xbf16>) -> tensor<64x128xbf16> {
   %0 = tensor.empty() : tensor<64x128xbf16>
   // CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}})
   // CHECK: %{{[0-9]+}} = "ttnn.tanh"(%{{[0-9]+}}, [[VAL0]])
-  %1 = "ttir.tanh"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
+  %1 = "ttir.tanh"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
   return %1 : tensor<64x128xbf16>
 }
 
 func.func @addint32(%arg0: tensor<64x128xi32>, %arg1: tensor<64x128xi32>) -> tensor<64x128xi32> {
   %0 = tensor.empty() : tensor<64x128xi32>
-  %1 = "ttir.add"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xi32>, tensor<64x128xi32>, tensor<64x128xi32>) -> tensor<64x128xi32>
+  %1 = "ttir.add"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xi32>, tensor<64x128xi32>, tensor<64x128xi32>) -> tensor<64x128xi32>
   return %1 : tensor<64x128xi32>
 }
 
 func.func @scatter(%arg0: tensor<1x3x320x320xf32>, %arg1: tensor<1x3x32x32xf32>) -> tensor<1x3x320x320xf32> {
   %0 = tensor.empty() : tensor<1x3x320x320xf32>
   %1 = tensor.empty() : tensor<1x1xi32>
-  %2 = "ttir.scatter"(%arg0, %1, %arg1, %0) <{index_vector_dim = 1 : i32, indices_are_sorted = false, input_batching_dims = array, inserted_window_dims = array, operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile, #any_device_tile], scatter_dims_to_operand_dims = array, scatter_indices_batching_dims = array, unique_indices = false, update_window_dims = array}> ({
array, unique_indices = false, update_window_dims = array}> ({ ^bb0(%arg3: tensor<1xf32>, %arg4: tensor<1xf32>): "ttir.yield"(%arg4) : (tensor<1xf32>) -> () }) : (tensor<1x3x320x320xf32>, tensor<1x1xi32>, tensor<1x3x32x32xf32>, tensor<1x3x320x320xf32>) -> tensor<1x3x320x320xf32> diff --git a/test/ttmlir/Silicon/TTNN/simple_index.mlir b/test/ttmlir/Silicon/TTNN/simple_index.mlir index 6e5ead92a9..fcd163dff2 100644 --- a/test/ttmlir/Silicon/TTNN/simple_index.mlir +++ b/test/ttmlir/Silicon/TTNN/simple_index.mlir @@ -1,12 +1,11 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir // RUN: FileCheck %s --input-file=%t.mlir // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn -#any_device_tile = #tt.operand_constraint module attributes {} { func.func @forward(%arg0: tensor<4x32x32xbf16>) -> tensor<4x32x16xbf16> { %0 = tensor.empty() : tensor<4x32x16xbf16> // CHECK: %[[C:.*]] = "ttnn.slice"[[C:.*]] - %1 = "ttir.index"(%arg0, %0) <{dim = 2: i32, begin = 0: i32, end = 32: i32, step = 2: i32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<4x32x32xbf16>, tensor<4x32x16xbf16>) -> tensor<4x32x16xbf16> + %1 = "ttir.index"(%arg0, %0) <{dim = 2: i32, begin = 0: i32, end = 32: i32, step = 2: i32}> : (tensor<4x32x32xbf16>, tensor<4x32x16xbf16>) -> tensor<4x32x16xbf16> return %1 : tensor<4x32x16xbf16> } } diff --git a/test/ttmlir/Silicon/TTNN/simple_linear.mlir b/test/ttmlir/Silicon/TTNN/simple_linear.mlir index f53de38cf3..b65bf99db8 100644 --- a/test/ttmlir/Silicon/TTNN/simple_linear.mlir +++ b/test/ttmlir/Silicon/TTNN/simple_linear.mlir @@ -2,7 +2,6 @@ // RUN: FileCheck %s --input-file=%t.mlir // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn -#any_device_tile = #tt.operand_constraint module { func.func @simple_linear_without_bias(%arg0: tensor<64x128xbf16>, %arg1: tensor<128x64xbf16>) -> tensor<64x64xbf16> { // CHECK: "ttnn.empty" @@ -13,7 +12,7 @@ module { // CHECK-SAME: tensor<128x64xbf16 // CHECK-SAME: tensor<64x64xbf16 // CHECK-SAME: tensor<64x64xbf16 - %1 = "ttir.linear"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<64x128xbf16>, tensor<128x64xbf16>, tensor<64x64xbf16>) -> tensor<64x64xbf16> + %1 = "ttir.linear"(%arg0, %arg1, %0) : (tensor<64x128xbf16>, tensor<128x64xbf16>, tensor<64x64xbf16>) -> tensor<64x64xbf16> return %1 : tensor<64x64xbf16> } @@ -27,7 +26,7 @@ module { // CHECK-SAME: tensor<64x64xbf16 // CHECK-SAME: tensor<64x64xbf16 // CHECK-SAME: tensor<64x64xbf16 - %1 = "ttir.linear"(%arg0, %arg1, %bias, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<64x128xbf16>, tensor<128x64xbf16>, tensor<64x64xbf16>, tensor<64x64xbf16>) -> tensor<64x64xbf16> + %1 = "ttir.linear"(%arg0, %arg1, %bias, %0) : (tensor<64x128xbf16>, tensor<128x64xbf16>, tensor<64x64xbf16>, tensor<64x64xbf16>) -> tensor<64x64xbf16> return %1 : tensor<64x64xbf16> } } diff --git a/test/ttmlir/Silicon/TTNN/simple_logical.mlir b/test/ttmlir/Silicon/TTNN/simple_logical.mlir index e5d68f5ecf..558f815c71 100644 --- a/test/ttmlir/Silicon/TTNN/simple_logical.mlir +++ b/test/ttmlir/Silicon/TTNN/simple_logical.mlir @@ -1,15 +1,11 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir // RUN: FileCheck %s --input-file=%t.mlir // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn - -#any_device = #tt.operand_constraint -#any_device_tile = 
-
 module attributes {} {
   func.func @logical_and(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
     %0 = tensor.empty() : tensor<64x128xf32>
     // CHECK: {{.*}} = "ttnn.empty"{{.*}}
-    %1 = "ttir.logical_and"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+    %1 = "ttir.logical_and"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
     // CHECK: %[[C:.*]] = "ttnn.logical_and"
     // CHECK-SAME: tensor<64x128xf32,
     // CHECK-SAME: tensor<64x128xf32,
@@ -20,7 +16,7 @@ module attributes {} {
   func.func @logical_not(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
     %0 = tensor.empty() : tensor<64x128xf32>
     // CHECK: {{.*}} = "ttnn.empty"{{.*}}
-    %1 = "ttir.logical_not"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+    %1 = "ttir.logical_not"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
     // CHECK: %[[C:.*]] = "ttnn.logical_not"
     // CHECK-SAME: tensor<64x128xf32,
     // CHECK-SAME: tensor<64x128xf32,
@@ -30,7 +26,7 @@ module attributes {} {
   func.func @logical_or(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
     %0 = tensor.empty() : tensor<64x128xf32>
     // CHECK: {{.*}} = "ttnn.empty"{{.*}}
-    %1 = "ttir.logical_or"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+    %1 = "ttir.logical_or"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
     // CHECK: %[[C:.*]] = "ttnn.logical_or"
     // CHECK-SAME: tensor<64x128xf32,
     // CHECK-SAME: tensor<64x128xf32,
@@ -46,7 +42,7 @@ module attributes {} {
     // CHECK-SAME: [[TENSOR]]
     // CHECK-SAME: [[TENSOR]]
     // CHECK-SAME: -> [[TENSOR]]
-    %1 = "ttir.logical_xor"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xbf16>, tensor<64x128xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
+    %1 = "ttir.logical_xor"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xbf16>, tensor<64x128xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
     return %1 : tensor<64x128xbf16>
   }
 }
diff --git a/test/ttmlir/Silicon/TTNN/simple_matmul.mlir b/test/ttmlir/Silicon/TTNN/simple_matmul.mlir
index 9c240b0ab7..f221001bb0 100644
--- a/test/ttmlir/Silicon/TTNN/simple_matmul.mlir
+++ b/test/ttmlir/Silicon/TTNN/simple_matmul.mlir
@@ -1,13 +1,12 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device_tile = #tt.operand_constraint<any_device_tile>
 // CHECK: #[[TILED_LAYOUT:.*]] = #ttnn.ttnn_layout<(d0, d1) -> (d0, d1), <1x1>, memref<2x4x!tt.tile<32x32, bf16>, #dram>, <interleaved>>
 module attributes {} {
   func.func @forward(%arg0: tensor<64x128xbf16>, %arg1: tensor<128x96xbf16>) -> tensor<64x96xbf16> {
     %0 = tensor.empty() : tensor<64x96xbf16>
     // CHECK: %[[C:.*]] = "ttnn.matmul"[[C:.*]]
-    %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<64x128xbf16>, tensor<128x96xbf16>, tensor<64x96xbf16>) -> tensor<64x96xbf16>
+    %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<64x128xbf16>, tensor<128x96xbf16>, tensor<64x96xbf16>) -> tensor<64x96xbf16>
     return %1 : tensor<64x96xbf16>
   }
 }
diff --git a/test/ttmlir/Silicon/TTNN/simple_maxpool2d.mlir b/test/ttmlir/Silicon/TTNN/simple_maxpool2d.mlir
index 4722e9c52d..4fdd836dd8 100644
--- a/test/ttmlir/Silicon/TTNN/simple_maxpool2d.mlir
+++ b/test/ttmlir/Silicon/TTNN/simple_maxpool2d.mlir
@@ -1,12 +1,11 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint<any_device>
 module attributes {} {
   func.func @forward(%arg0: tensor<1x128x128x32xbf16>) -> tensor<1x64x64x32xbf16> {
     %0 = tensor.empty() : tensor<1x64x64x32xbf16>
     // CHECK: %[[C:.*]] = "ttnn.max_pool2d"[[C:.*]]
-    %1 = "ttir.max_pool2d"(%arg0, %0) <{kernel_height=2: si32, kernel_width=2: si32, stride_height=2: si32, stride_width=2: si32, dilation_height=1: si32, dilation_width=1: si32, ceil_mode=false, padding_left=0: si32, padding_right=0: si32, padding_top=0: si32, padding_bottom=0: si32, operand_constraints = [#any_device, #any_device]}> : (tensor<1x128x128x32xbf16>, tensor<1x64x64x32xbf16>) -> tensor<1x64x64x32xbf16>
+    %1 = "ttir.max_pool2d"(%arg0, %0) <{kernel_height=2: si32, kernel_width=2: si32, stride_height=2: si32, stride_width=2: si32, dilation_height=1: si32, dilation_width=1: si32, ceil_mode=false, padding_left=0: si32, padding_right=0: si32, padding_top=0: si32, padding_bottom=0: si32}> : (tensor<1x128x128x32xbf16>, tensor<1x64x64x32xbf16>) -> tensor<1x64x64x32xbf16>
     return %1 : tensor<1x64x64x32xbf16>
   }
 }
diff --git a/test/ttmlir/Silicon/TTNN/simple_mean.mlir b/test/ttmlir/Silicon/TTNN/simple_mean.mlir
index f8ca09f6c5..0a3250936b 100644
--- a/test/ttmlir/Silicon/TTNN/simple_mean.mlir
+++ b/test/ttmlir/Silicon/TTNN/simple_mean.mlir
@@ -2,12 +2,11 @@
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
 // UNSUPPORTED: true
-#any_device = #tt.operand_constraint<any_device>
 module {
   func.func @forward(%arg0: tensor<512x1024xbf16>) -> tensor<512x32xbf16> {
     %0 = tensor.empty() : tensor<512x32xbf16>
     // CHECK: %[[C:.*]] = "ttnn.mean"[[C:.*]]
-    %1 = "ttir.mean"(%arg0, %0) <{dim_arg = [-1: i32], keep_dim = true, operand_constraints = [#any_device, #any_device]}> : (tensor<512x1024xbf16>, tensor<512x32xbf16>) -> tensor<512x32xbf16>
+    %1 = "ttir.mean"(%arg0, %0) <{dim_arg = [-1: i32], keep_dim = true}> : (tensor<512x1024xbf16>, tensor<512x32xbf16>) -> tensor<512x32xbf16>
     return %1 : tensor<512x32xbf16>
   }
 }
diff --git a/test/ttmlir/Silicon/TTNN/simple_reductions.mlir b/test/ttmlir/Silicon/TTNN/simple_reductions.mlir
index 908a2c67f9..28eaf47fa5 100644
--- a/test/ttmlir/Silicon/TTNN/simple_reductions.mlir
+++ b/test/ttmlir/Silicon/TTNN/simple_reductions.mlir
@@ -1,52 +1,50 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint<any_device>
-
 func.func @sum(%arg0: tensor<1x1x512x64xbf16>) -> tensor<1x1x512xbf16> {
   %0 = tensor.empty() : tensor<1x1x512xbf16>
   // CHECK: %[[C:.*]] = "ttnn.sum"[[C:.*]]
-  %1 = "ttir.sum"(%arg0, %0) <{dim_arg = [-1: i32], keep_dim = true, operand_constraints = [#any_device, #any_device]}> : (tensor<1x1x512x64xbf16>, tensor<1x1x512xbf16>) -> tensor<1x1x512xbf16>
+  %1 = "ttir.sum"(%arg0, %0) <{dim_arg = [-1: i32], keep_dim = true}> : (tensor<1x1x512x64xbf16>, tensor<1x1x512xbf16>) -> tensor<1x1x512xbf16>
   return %1 : tensor<1x1x512xbf16>
 }
 
 func.func @sum_last_2_dims(%arg0: tensor<1x32x512x64xbf16>) -> tensor<1x32xbf16> {
   %0 = tensor.empty() : tensor<1x32xbf16>
   // CHECK: %[[C:.*]] = "ttnn.sum"[[C:.*]]
-  %1 = "ttir.sum"(%arg0, %0) <{dim_arg = [-1: i32, -2: i32], keep_dim = true, operand_constraints = [#any_device, #any_device]}> : (tensor<1x32x512x64xbf16>, tensor<1x32xbf16>) -> tensor<1x32xbf16>
+  %1 = "ttir.sum"(%arg0, %0) <{dim_arg = [-1: i32, -2: i32], keep_dim = true}> : (tensor<1x32x512x64xbf16>, tensor<1x32xbf16>) -> tensor<1x32xbf16>
   return %1 : tensor<1x32xbf16>
 }
 
 func.func @sum_first_dim(%arg0: tensor<64x10xf32>) -> tensor<1x10xf32> {
   %0 = tensor.empty() : tensor<1x10xf32>
-  %1 = "ttir.sum"(%arg0, %0) <{dim_arg = [-2 : i32], keep_dim = true, operand_constraints = [#any_device, #any_device]}> : (tensor<64x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32>
+  %1 = "ttir.sum"(%arg0, %0) <{dim_arg = [-2 : i32], keep_dim = true}> : (tensor<64x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32>
   return %1: tensor<1x10xf32>
 }
 
 func.func @mean(%arg0: tensor<1x1x512x64xbf16>) -> tensor<1x1x512xbf16> {
   %0 = tensor.empty() : tensor<1x1x512xbf16>
   // CHECK: %[[C:.*]] = "ttnn.mean"[[C:.*]]
-  %1 = "ttir.mean"(%arg0, %0) <{dim_arg = [-1: i32], keep_dim = true, operand_constraints = [#any_device, #any_device]}> : (tensor<1x1x512x64xbf16>, tensor<1x1x512xbf16>) -> tensor<1x1x512xbf16>
+  %1 = "ttir.mean"(%arg0, %0) <{dim_arg = [-1: i32], keep_dim = true}> : (tensor<1x1x512x64xbf16>, tensor<1x1x512xbf16>) -> tensor<1x1x512xbf16>
   return %1 : tensor<1x1x512xbf16>
 }
 
 func.func @mean_last_2_dims(%arg0: tensor<1x32x512x64xbf16>) -> tensor<1x32xbf16> {
   %0 = tensor.empty() : tensor<1x32xbf16>
   // CHECK: %[[C:.*]] = "ttnn.mean"[[C:.*]]
-  %1 = "ttir.mean"(%arg0, %0) <{dim_arg = [-1: i32, -2: i32], keep_dim = true, operand_constraints = [#any_device, #any_device]}> : (tensor<1x32x512x64xbf16>, tensor<1x32xbf16>) -> tensor<1x32xbf16>
+  %1 = "ttir.mean"(%arg0, %0) <{dim_arg = [-1: i32, -2: i32], keep_dim = true}> : (tensor<1x32x512x64xbf16>, tensor<1x32xbf16>) -> tensor<1x32xbf16>
   return %1 : tensor<1x32xbf16>
 }
 
 func.func @max(%arg0: tensor<1x1x512x64xbf16>) -> tensor<1x1x512xbf16> {
   %0 = tensor.empty() : tensor<1x1x512xbf16>
   // CHECK: %[[C:.*]] = "ttnn.max"[[C:.*]]
-  %1 = "ttir.max"(%arg0, %0) <{dim_arg = [-1: i32], keep_dim = true, operand_constraints = [#any_device, #any_device]}> : (tensor<1x1x512x64xbf16>, tensor<1x1x512xbf16>) -> tensor<1x1x512xbf16>
+  %1 = "ttir.max"(%arg0, %0) <{dim_arg = [-1: i32], keep_dim = true}> : (tensor<1x1x512x64xbf16>, tensor<1x1x512xbf16>) -> tensor<1x1x512xbf16>
   return %1 : tensor<1x1x512xbf16>
 }
 
 func.func @max_last_2_dims(%arg0: tensor<1x32x512x64xbf16>) -> tensor<1x32xbf16> {
   %0 = tensor.empty() : tensor<1x32xbf16>
   // CHECK: %[[C:.*]] = "ttnn.max"[[C:.*]]
-  %1 = "ttir.max"(%arg0, %0) <{dim_arg = [-1: i32, -2: i32], keep_dim = true, operand_constraints = [#any_device, #any_device]}> : (tensor<1x32x512x64xbf16>, tensor<1x32xbf16>) -> tensor<1x32xbf16>
+  %1 = "ttir.max"(%arg0, %0) <{dim_arg = [-1: i32, -2: i32], keep_dim = true}> : (tensor<1x32x512x64xbf16>, tensor<1x32xbf16>) -> tensor<1x32xbf16>
   return %1 : tensor<1x32xbf16>
 }
diff --git a/test/ttmlir/Silicon/TTNN/simple_slice.mlir b/test/ttmlir/Silicon/TTNN/simple_slice.mlir
index 8d43cb7dc0..e321017815 100644
--- a/test/ttmlir/Silicon/TTNN/simple_slice.mlir
+++ b/test/ttmlir/Silicon/TTNN/simple_slice.mlir
@@ -1,12 +1,11 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device_tile = #tt.operand_constraint<any_device_tile>
 module attributes {} {
   func.func @forward(%arg0: tensor<4x32x32xbf16>) -> tensor<2x16x16xbf16> {
     %0 = tensor.empty() : tensor<2x16x16xbf16>
     // CHECK: %[[C:.*]] = "ttnn.slice"[[C:.*]]
-    %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 0: i32, 0: i32], ends = [2: i32, 16: i32, 16: i32], step = [1: i32, 1: i32, 1: i32], operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<4x32x32xbf16>, tensor<2x16x16xbf16>) -> tensor<2x16x16xbf16>
+    %1 = "ttir.slice"(%arg0, %0) <{begins = [0: i32, 0: i32, 0: i32], ends = [2: i32, 16: i32, 16: i32], step = [1: i32, 1: i32, 1: i32]}> : (tensor<4x32x32xbf16>, tensor<2x16x16xbf16>) -> tensor<2x16x16xbf16>
     return %1 : tensor<2x16x16xbf16>
   }
 }
diff --git a/test/ttmlir/Silicon/TTNN/simple_typecast.mlir b/test/ttmlir/Silicon/TTNN/simple_typecast.mlir
index cb4f2d64f7..82666195dd 100644
--- a/test/ttmlir/Silicon/TTNN/simple_typecast.mlir
+++ b/test/ttmlir/Silicon/TTNN/simple_typecast.mlir
@@ -2,12 +2,11 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint<any_device>
 func.func @typecast(%arg0: tensor<64x128xf32>) -> tensor<64x128xbf16> {
   %0 = tensor.empty() : tensor<64x128xbf16>
   // CHECK: %[[C:.*]] = "ttnn.typecast"
   // CHECK-SAME: tensor<64x128xf32,
   // CHECK-SAME: tensor<64x128xbf16,
-  %1 = "ttir.typecast"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
+  %1 = "ttir.typecast"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
   return %1 : tensor<64x128xbf16>
 }
diff --git a/test/ttmlir/Silicon/TTNN/transpose.mlir b/test/ttmlir/Silicon/TTNN/transpose.mlir
index 184b6b8076..b9805dd3c7 100644
--- a/test/ttmlir/Silicon/TTNN/transpose.mlir
+++ b/test/ttmlir/Silicon/TTNN/transpose.mlir
@@ -1,33 +1,30 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
-#any_device = #tt.operand_constraint<any_device>
-#any_device_tile = #tt.operand_constraint<any_device_tile>
-
 func.func @transpose(%arg0: tensor<64x128xbf16>) -> tensor<128x64xbf16> {
   %0 = tensor.empty() : tensor<128x64xbf16>
   // CHECK: %[[C:.*]] = "ttnn.transpose"[[C:.*]]
-  %1 = "ttir.transpose"(%arg0, %0) <{dim0 = 0 : si32, dim1 = 1 : si32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<64x128xbf16>, tensor<128x64xbf16>) -> tensor<128x64xbf16>
+  %1 = "ttir.transpose"(%arg0, %0) <{dim0 = 0 : si32, dim1 = 1 : si32}> : (tensor<64x128xbf16>, tensor<128x64xbf16>) -> tensor<128x64xbf16>
   return %1 : tensor<128x64xbf16>
 }
 
 func.func @transpose_8x8(%arg0: tensor<32x32xbf16>) -> tensor<32x32xbf16> {
   %0 = tensor.empty() : tensor<32x32xbf16>
   // CHECK: %[[C:.*]] = "ttnn.transpose"[[C:.*]]
-  %1 = "ttir.transpose"(%arg0, %0) <{dim0 = 0 : si32, dim1 = 1 : si32, operand_constraints = [#any_device, #any_device]}> : (tensor<32x32xbf16>, tensor<32x32xbf16>) -> tensor<32x32xbf16>
+  %1 = "ttir.transpose"(%arg0, %0) <{dim0 = 0 : si32, dim1 = 1 : si32}> : (tensor<32x32xbf16>, tensor<32x32xbf16>) -> tensor<32x32xbf16>
   return %1 : tensor<32x32xbf16>
 }
 
 func.func @transpose_8x16_reverse_dims(%arg0: tensor<64x16xbf16>) -> tensor<16x64xbf16> {
   %0 = tensor.empty() : tensor<16x64xbf16>
   // CHECK: %[[C:.*]] = "ttnn.transpose"[[C:.*]]
-  %1 = "ttir.transpose"(%arg0, %0) <{dim0 = 1 : si32, dim1 = 0 : si32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<64x16xbf16>, tensor<16x64xbf16>) -> tensor<16x64xbf16>
+  %1 = "ttir.transpose"(%arg0, %0) <{dim0 = 1 : si32, dim1 = 0 : si32}> : (tensor<64x16xbf16>, tensor<16x64xbf16>) -> tensor<16x64xbf16>
   return %1 : tensor<16x64xbf16>
 }
 
 func.func @transpose_negative_dims(%arg0: tensor<32x32xbf16>) -> tensor<32x32xbf16> {
   %0 = tensor.empty() : tensor<32x32xbf16>
   // CHECK: %[[C:.*]] = "ttnn.transpose"[[C:.*]]
-  %1 = "ttir.transpose"(%arg0, %0) <{dim0 = -1 : si32, dim1 = -2 : si32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<32x32xbf16>, tensor<32x32xbf16>) -> tensor<32x32xbf16>
+  %1 = "ttir.transpose"(%arg0, %0) <{dim0 = -1 : si32, dim1 = -2 : si32}> : (tensor<32x32xbf16>, tensor<32x32xbf16>) -> tensor<32x32xbf16>
   return %1 : tensor<32x32xbf16>
 }
diff --git a/test/ttmlir/Translate/TTNN/1d_tensor.mlir b/test/ttmlir/Translate/TTNN/1d_tensor.mlir
index 6958127374..5752be5cee 100644
--- a/test/ttmlir/Translate/TTNN/1d_tensor.mlir
+++ b/test/ttmlir/Translate/TTNN/1d_tensor.mlir
@@ -1,8 +1,6 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | ttmlir-translate --ttnn-to-flatbuffer
-#any_device = #tt.operand_constraint<any_device>
-
 func.func @embedding_1d_tensor(%arg0: tensor<32xf32>, %arg1: tensor<512x128xf32>) -> tensor<32x128xf32> {
   %0 = tensor.empty() : tensor<32x128xf32>
-  %1 = "ttir.embedding"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 1, 1, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<32xf32>, tensor<512x128xf32>, tensor<32x128xf32>) -> tensor<32x128xf32>
+  %1 = "ttir.embedding"(%arg0, %arg1, %0) : (tensor<32xf32>, tensor<512x128xf32>, tensor<32x128xf32>) -> tensor<32x128xf32>
   return %1 : tensor<32x128xf32>
 }
diff --git a/test/unittests/TestScheduler/TestScheduler.cpp b/test/unittests/TestScheduler/TestScheduler.cpp
index 7d44bb0794..8a4f89a929 100644
--- a/test/unittests/TestScheduler/TestScheduler.cpp
+++ b/test/unittests/TestScheduler/TestScheduler.cpp
@@ -119,11 +119,9 @@ TEST_F(SchedulerBase, FixedSchedule) {
   mlir::Value lhs = func.getBody().getBlocks().front().getArgument(0);
   mlir::Value rhs = func.getBody().getBlocks().front().getArgument(1);
 
-  mlir::ArrayAttr attrs = builder.getArrayAttr(createOperandConstraints());
-
   // First operation has arg1 and arg2 and %0 as dps operand
-  ttir::TTIROp op = builder.create<ttir::AddOp>(builder.getUnknownLoc(), lhs,
-                                                rhs, dest, attrs);
+  ttir::TTIROp op =
+      builder.create<ttir::AddOp>(builder.getUnknownLoc(), lhs, rhs, dest);
 
   // Create a chain of operations by using the result of the previous operation
   llvm::SmallVector<mlir::Value> operands = {rhs,
@@ -137,8 +135,7 @@
     mlir::Value lhs = operands[operands.size() - 2];
     mlir::Value rhs = operands[operands.size() - 1];
     dest = createEmptyTensor();
-    op = builder.create<ttir::AddOp>(builder.getUnknownLoc(), lhs, rhs, dest,
-                                     attrs);
+    op = builder.create<ttir::AddOp>(builder.getUnknownLoc(), lhs, rhs, dest);
     operands.push_back(op.getOperation()->getResult(0));
     ops.push_back(op);
   }
@@ -172,11 +169,9 @@ TEST_F(SchedulerBase, SingleOp) {
   mlir::Value lhs = func.getBody().getBlocks().front().getArgument(0);
   mlir::Value rhs = func.getBody().getBlocks().front().getArgument(1);
 
-  mlir::ArrayAttr attrs = builder.getArrayAttr(createOperandConstraints());
-
   // First operation has arg1 and arg2 and %0 as dps operand
-  ttir::TTIROp op = builder.create<ttir::AddOp>(builder.getUnknownLoc(), lhs,
-                                                rhs, dest, attrs);
+  ttir::TTIROp op =
+      builder.create<ttir::AddOp>(builder.getUnknownLoc(), lhs, rhs, dest);
 
   mlir::tt::scheduler::Scheduler scheduler(&func);
   ASSERT_TRUE(scheduler.hasUnscheduledOps());
@@ -199,9 +194,8 @@ TEST_F(SchedulerBase, VerifyFork) {
   mlir::Value dest = createEmptyTensor();
   mlir::Value lhs = func.getBody().getBlocks().front().getArgument(0);
   mlir::Value rhs = func.getBody().getBlocks().front().getArgument(1);
-  mlir::ArrayAttr attrs = builder.getArrayAttr(createOperandConstraints());
-  ttir::TTIROp op = builder.create<ttir::AddOp>(builder.getUnknownLoc(), lhs,
-                                                rhs, dest, attrs);
+  ttir::TTIROp op =
+      builder.create<ttir::AddOp>(builder.getUnknownLoc(), lhs, rhs, dest);
 
   std::vector<ttir::TTIROp> ops;
   ops.push_back(op);
@@ -212,12 +206,10 @@
   // Create the second operation which works on the result of the first
   // operation and arg1
   dest = createEmptyTensor();
-  op = builder.create<ttir::AddOp>(builder.getUnknownLoc(), lhs, rhs, dest,
-                                   attrs);
+  op = builder.create<ttir::AddOp>(builder.getUnknownLoc(), lhs, rhs, dest);
   ops.push_back(op);
   dest = createEmptyTensor();
-  op = builder.create<ttir::AddOp>(builder.getUnknownLoc(), lhs, rhs, dest,
-                                   attrs);
+  op = builder.create<ttir::AddOp>(builder.getUnknownLoc(), lhs, rhs, dest);
   ops.push_back(op);
 
   // Create the third operation which works on the result of the second and
@@ -225,8 +217,7 @@
   lhs = ops[ops.size() - 2].getOperation()->getResult(0);
   rhs = ops[ops.size() - 1].getOperation()->getResult(0);
   dest = createEmptyTensor();
-  op = builder.create<ttir::AddOp>(builder.getUnknownLoc(), lhs, rhs, dest,
-                                   attrs);
+  op = builder.create<ttir::AddOp>(builder.getUnknownLoc(), lhs, rhs, dest);
   ops.push_back(op);
 
   mlir::tt::scheduler::Scheduler scheduler(&func);
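With the `operand_constraints` argument gone, C++ callers build TTIR ops from just a location, the input values, and the DPS destination, as the TestScheduler hunks above show. A minimal sketch of the updated call site, assuming an already-positioned `mlir::OpBuilder` and pre-created `lhs`/`rhs`/`dest` values; the helper name `buildAdd` and the include path are illustrative, not part of this patch:

    // Hypothetical helper demonstrating the post-change builder signature:
    // no trailing ArrayAttr of operand constraints is passed anymore.
    #include "ttmlir/Dialect/TTIR/IR/TTIROps.h" // assumed header for generated ops

    static mlir::Value buildAdd(mlir::OpBuilder &builder, mlir::Value lhs,
                                mlir::Value rhs, mlir::Value dest) {
      // The result type is inferred from the DPS destination `dest`.
      auto op = builder.create<mlir::tt::ttir::AddOp>(builder.getUnknownLoc(),
                                                      lhs, rhs, dest);
      return op->getResult(0);
    }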