Skip to content

Commit

Permalink
Merge branch 'llvm:main' into main
Browse files Browse the repository at this point in the history
  • Loading branch information
sjarus authored Jul 2, 2024
2 parents 35fa6b2 + ca0e906 commit 607a8b7
Show file tree
Hide file tree
Showing 55 changed files with 4,490 additions and 828 deletions.
14 changes: 14 additions & 0 deletions docs/development.md
Original file line number Diff line number Diff line change
Expand Up @@ -429,6 +429,20 @@ cd projects/pt1
python -m e2e_testing.main -f 'AtenEmbeddingBag'
```

By default, the tests run under the multi-processing framework, which does not
tolerate certain kinds of failures. If you encounter native crashes or hangs,
set these environment variables to run the tests sequentially, in-process, and
with more verbose output:

```shell
export TORCH_MLIR_TEST_CONCURRENCY=1
export TORCH_MLIR_TEST_VERBOSE=1
```

Running this way makes it possible to debug the tests under `gdb` or similar
tools and get useful results. Because these settings are environment variables,
they are also easy to set in GitHub Actions workflow files. Note that the
verbose flag produces very detailed output; basic sequential progress reports
are printed regardless whenever the tests are not run in parallel.

## Running unit tests.

To run all of the unit tests, run:
Expand Down
2 changes: 1 addition & 1 deletion externals/llvm-project
Submodule llvm-project updated 3473 files
10 changes: 10 additions & 0 deletions include/torch-mlir/Conversion/TorchOnnxToTorch/Patterns.h
Original file line number Diff line number Diff line change
Expand Up @@ -209,6 +209,16 @@ struct OpBinder {
return success();
}

// Collects the type of every operand of the bound op into `typeList`,
// requiring each one to be a valid Torch tensor type (as decided by
// toValidTensorType). Returns failure on the first operand whose type
// is not a valid tensor type; in that case no diagnostic is emitted and
// `typeList` may have been partially populated.
ParseResult tensorOperandTypes(llvm::SmallVector<mlir::Type> &typeList) {
  for (mlir::Type rawType : op->getOperandTypes()) {
    auto tensorType = toValidTensorType(rawType);
    if (!tensorType)
      return failure();
    typeList.push_back(tensorType);
  }
  return success();
}

// The importer imports Onnx.GraphProto attributes as regions attached to the
// op.
ParseResult getRegionAtIndex(mlir::Region *&region, int64_t idx) {
Expand Down
301 changes: 301 additions & 0 deletions include/torch-mlir/Dialect/Torch/IR/GeneratedTorchOps.td
Original file line number Diff line number Diff line change
Expand Up @@ -4671,6 +4671,54 @@ def Torch_AtenMinimumOp : Torch_Op<"aten.minimum", [
}];
}

// Generated ODS registration for `aten::fmax`: two tensor operands
// (`self`, `other`), one tensor result. Value-semantic and read-only.
// Assembly parsing/printing delegates to the generic default Torch-op
// helpers with (numOperands, numResults) = (2, 1).
def Torch_AtenFmaxOp : Torch_Op<"aten.fmax", [
AllowsTypeRefinement,
HasValueSemantics,
ReadOnly
]> {
let summary = "Generated op for `aten::fmax : (Tensor, Tensor) -> (Tensor)`";
let arguments = (ins
AnyTorchTensorType:$self,
AnyTorchTensorType:$other
);
let results = (outs
AnyTorchOptionalTensorType:$result
);
let hasCustomAssemblyFormat = 1;
let extraClassDefinition = [{
ParseResult AtenFmaxOp::parse(OpAsmParser &parser, OperationState &result) {
return parseDefaultTorchOp(parser, result, 2, 1);
}
void AtenFmaxOp::print(OpAsmPrinter &printer) {
printDefaultTorchOp(printer, *this, 2, 1);
}
}];
}

// Generated ODS registration for `aten::fmin`: two tensor operands
// (`self`, `other`), one tensor result. Mirrors AtenFmaxOp above; the
// default Torch-op assembly helpers are used with counts (2, 1).
def Torch_AtenFminOp : Torch_Op<"aten.fmin", [
AllowsTypeRefinement,
HasValueSemantics,
ReadOnly
]> {
let summary = "Generated op for `aten::fmin : (Tensor, Tensor) -> (Tensor)`";
let arguments = (ins
AnyTorchTensorType:$self,
AnyTorchTensorType:$other
);
let results = (outs
AnyTorchOptionalTensorType:$result
);
let hasCustomAssemblyFormat = 1;
let extraClassDefinition = [{
ParseResult AtenFminOp::parse(OpAsmParser &parser, OperationState &result) {
return parseDefaultTorchOp(parser, result, 2, 1);
}
void AtenFminOp::print(OpAsmPrinter &printer) {
printDefaultTorchOp(printer, *this, 2, 1);
}
}];
}

def Torch_AtenMishOp : Torch_Op<"aten.mish", [
AllowsTypeRefinement,
HasValueSemantics,
Expand Down Expand Up @@ -8586,6 +8634,54 @@ def Torch_AtenLinalgQrOp : Torch_Op<"aten.linalg_qr", [
}];
}

// Generated ODS registration for `aten::linalg_det`: one tensor operand
// (`A`), one tensor result. Default Torch-op assembly with counts (1, 1).
def Torch_AtenLinalgDetOp : Torch_Op<"aten.linalg_det", [
AllowsTypeRefinement,
HasValueSemantics,
ReadOnly
]> {
let summary = "Generated op for `aten::linalg_det : (Tensor) -> (Tensor)`";
let arguments = (ins
AnyTorchTensorType:$A
);
let results = (outs
AnyTorchOptionalTensorType:$result
);
let hasCustomAssemblyFormat = 1;
let extraClassDefinition = [{
ParseResult AtenLinalgDetOp::parse(OpAsmParser &parser, OperationState &result) {
return parseDefaultTorchOp(parser, result, 1, 1);
}
void AtenLinalgDetOp::print(OpAsmPrinter &printer) {
printDefaultTorchOp(printer, *this, 1, 1);
}
}];
}

// Generated ODS registration for the internal `aten::_linalg_det` variant:
// one tensor operand (`A`) and three tensor results (`result`, `LU`,
// `pivots`). Default Torch-op assembly with counts (1, 3).
// NOTE(review): the summary's type signature lists three plain Tensor
// results, matching the three result declarations below.
def Torch_Aten_LinalgDetOp : Torch_Op<"aten._linalg_det", [
AllowsTypeRefinement,
HasValueSemantics,
ReadOnly
]> {
let summary = "Generated op for `aten::_linalg_det : (Tensor) -> (Tensor, Tensor, Tensor)`";
let arguments = (ins
AnyTorchTensorType:$A
);
let results = (outs
AnyTorchOptionalTensorType:$result,
AnyTorchOptionalTensorType:$LU,
AnyTorchOptionalTensorType:$pivots
);
let hasCustomAssemblyFormat = 1;
let extraClassDefinition = [{
ParseResult Aten_LinalgDetOp::parse(OpAsmParser &parser, OperationState &result) {
return parseDefaultTorchOp(parser, result, 1, 3);
}
void Aten_LinalgDetOp::print(OpAsmPrinter &printer) {
printDefaultTorchOp(printer, *this, 1, 3);
}
}];
}

def Torch_AtenFrobeniusNormDimOp : Torch_Op<"aten.frobenius_norm.dim", [
AllowsTypeRefinement,
HasValueSemantics,
Expand Down Expand Up @@ -11367,6 +11463,32 @@ def Torch_AtenAminOp : Torch_Op<"aten.amin", [
}];
}

// Generated ODS registration for `aten::aminmax`: tensor `self`, optional
// int `dim`, bool `keepdim`; produces two tensor results named `min` and
// `max`. Default Torch-op assembly with counts (3, 2).
def Torch_AtenAminmaxOp : Torch_Op<"aten.aminmax", [
AllowsTypeRefinement,
HasValueSemantics,
ReadOnly
]> {
let summary = "Generated op for `aten::aminmax : (Tensor, int?, bool) -> (Tensor, Tensor)`";
let arguments = (ins
AnyTorchTensorType:$self,
AnyTorchOptionalIntType:$dim,
Torch_BoolType:$keepdim
);
let results = (outs
AnyTorchOptionalTensorType:$min,
AnyTorchOptionalTensorType:$max
);
let hasCustomAssemblyFormat = 1;
let extraClassDefinition = [{
ParseResult AtenAminmaxOp::parse(OpAsmParser &parser, OperationState &result) {
return parseDefaultTorchOp(parser, result, 3, 2);
}
void AtenAminmaxOp::print(OpAsmPrinter &printer) {
printDefaultTorchOp(printer, *this, 3, 2);
}
}];
}

def Torch_AtenToDtypeOp : Torch_Op<"aten.to.dtype", [
AllowsTypeRefinement,
ReadOnly
Expand Down Expand Up @@ -12370,6 +12492,32 @@ def Torch_AtenFftFftOp : Torch_Op<"aten.fft_fft", [
}];
}

// Generated ODS registration for `aten::fft_ifft`: tensor `self`, optional
// int `n`, int `dim`, optional str `norm`; one tensor result. Default
// Torch-op assembly with counts (4, 1).
def Torch_AtenFftIfftOp : Torch_Op<"aten.fft_ifft", [
AllowsTypeRefinement,
HasValueSemantics,
ReadOnly
]> {
let summary = "Generated op for `aten::fft_ifft : (Tensor, int?, int, str?) -> (Tensor)`";
let arguments = (ins
AnyTorchTensorType:$self,
AnyTorchOptionalIntType:$n,
Torch_IntType:$dim,
AnyTorchOptionalStringType:$norm
);
let results = (outs
AnyTorchOptionalTensorType:$result
);
let hasCustomAssemblyFormat = 1;
let extraClassDefinition = [{
ParseResult AtenFftIfftOp::parse(OpAsmParser &parser, OperationState &result) {
return parseDefaultTorchOp(parser, result, 4, 1);
}
void AtenFftIfftOp::print(OpAsmPrinter &printer) {
printDefaultTorchOp(printer, *this, 4, 1);
}
}];
}

def Torch_AtenFmodTensorOp : Torch_Op<"aten.fmod.Tensor", [
AllowsTypeRefinement,
HasValueSemantics,
Expand Down Expand Up @@ -12533,6 +12681,36 @@ def Torch_AtenKthvalueOp : Torch_Op<"aten.kthvalue", [
let hasVerifier = 1;
}

// Generated ODS registration for `aten::stft` (short-time Fourier
// transform entry point): eight operands — tensor `self`, int `n_fft`,
// optional ints `hop_length`/`win_length`, optional tensor `window`,
// bool `normalized`, optional bools `onesided`/`return_complex` — and one
// tensor result. Default Torch-op assembly with counts (8, 1).
def Torch_AtenStftOp : Torch_Op<"aten.stft", [
AllowsTypeRefinement,
HasValueSemantics,
ReadOnly
]> {
let summary = "Generated op for `aten::stft : (Tensor, int, int?, int?, Tensor?, bool, bool?, bool?) -> (Tensor)`";
let arguments = (ins
AnyTorchTensorType:$self,
Torch_IntType:$n_fft,
AnyTorchOptionalIntType:$hop_length,
AnyTorchOptionalIntType:$win_length,
AnyTorchOptionalTensorType:$window,
Torch_BoolType:$normalized,
AnyTorchOptionalBoolType:$onesided,
AnyTorchOptionalBoolType:$return_complex
);
let results = (outs
AnyTorchOptionalTensorType:$result
);
let hasCustomAssemblyFormat = 1;
let extraClassDefinition = [{
ParseResult AtenStftOp::parse(OpAsmParser &parser, OperationState &result) {
return parseDefaultTorchOp(parser, result, 8, 1);
}
void AtenStftOp::print(OpAsmPrinter &printer) {
printDefaultTorchOp(printer, *this, 8, 1);
}
}];
}

def Torch_AtenAliasCopyOp : Torch_Op<"aten.alias_copy", [
AllowsTypeRefinement,
HasValueSemantics,
Expand Down Expand Up @@ -15517,6 +15695,36 @@ def Torch_AtenScalarImplicitOp : Torch_Op<"aten.ScalarImplicit", [
let hasCanonicalizer = 1;
}

// Generated ODS registration for `aten::triu_indices`: seven non-tensor
// operands (ints `row`/`col`/`offset`, optional ints `dtype`/`layout`,
// optional device, optional bool `pin_memory`) producing one tensor
// result. Default Torch-op assembly with counts (7, 1). Unlike its
// generated siblings, this op also declares a custom verifier
// (`hasVerifier = 1`), implemented elsewhere in the dialect.
def Torch_AtenTriuIndicesOp : Torch_Op<"aten.triu_indices", [
AllowsTypeRefinement,
HasValueSemantics,
ReadOnly
]> {
let summary = "Generated op for `aten::triu_indices : (int, int, int, int?, int?, Device?, bool?) -> (Tensor)`";
let arguments = (ins
Torch_IntType:$row,
Torch_IntType:$col,
Torch_IntType:$offset,
AnyTorchOptionalIntType:$dtype,
AnyTorchOptionalIntType:$layout,
AnyTorchOptionalDeviceType:$device,
AnyTorchOptionalBoolType:$pin_memory
);
let results = (outs
AnyTorchOptionalTensorType:$result
);
let hasCustomAssemblyFormat = 1;
let extraClassDefinition = [{
ParseResult AtenTriuIndicesOp::parse(OpAsmParser &parser, OperationState &result) {
return parseDefaultTorchOp(parser, result, 7, 1);
}
void AtenTriuIndicesOp::print(OpAsmPrinter &printer) {
printDefaultTorchOp(printer, *this, 7, 1);
}
}];
let hasVerifier = 1;
}

def Torch_Aten_SoftmaxBackwardDataOp : Torch_Op<"aten._softmax_backward_data", [
AllowsTypeRefinement,
HasValueSemantics,
Expand Down Expand Up @@ -16600,3 +16808,96 @@ def Torch_QuantizedLinearOp : Torch_Op<"quantized.linear", [
}];
}

// Generated ODS registration for `torchvision::deform_conv2d`: fourteen
// operands — five tensors (`input`, `weight`, `offset`, `mask`, `bias`),
// eight ints (per-axis stride/pad/dilation plus `groups` and
// `offset_groups`), and bool `use_mask` — with one tensor result.
// Default Torch-op assembly with counts (14, 1).
def Torch_TorchvisionDeformConv2dOp : Torch_Op<"torchvision.deform_conv2d", [
AllowsTypeRefinement,
HasValueSemantics,
ReadOnly
]> {
let summary = "Generated op for `torchvision::deform_conv2d : (Tensor, Tensor, Tensor, Tensor, Tensor, int, int, int, int, int, int, int, int, bool) -> (Tensor)`";
let arguments = (ins
AnyTorchTensorType:$input,
AnyTorchTensorType:$weight,
AnyTorchTensorType:$offset,
AnyTorchTensorType:$mask,
AnyTorchTensorType:$bias,
Torch_IntType:$stride_h,
Torch_IntType:$stride_w,
Torch_IntType:$pad_h,
Torch_IntType:$pad_w,
Torch_IntType:$dilation_h,
Torch_IntType:$dilation_w,
Torch_IntType:$groups,
Torch_IntType:$offset_groups,
Torch_BoolType:$use_mask
);
let results = (outs
AnyTorchOptionalTensorType:$result
);
let hasCustomAssemblyFormat = 1;
let extraClassDefinition = [{
ParseResult TorchvisionDeformConv2dOp::parse(OpAsmParser &parser, OperationState &result) {
return parseDefaultTorchOp(parser, result, 14, 1);
}
void TorchvisionDeformConv2dOp::print(OpAsmPrinter &printer) {
printDefaultTorchOp(printer, *this, 14, 1);
}
}];
}

// Generated ODS registration for `torchvision::roi_align`: tensors
// `input` and `rois`, float `spatial_scale`, ints `pooled_height`,
// `pooled_width`, `sampling_ratio`, and bool `aligned`; one tensor
// result. Default Torch-op assembly with counts (7, 1).
def Torch_TorchvisionRoiAlignOp : Torch_Op<"torchvision.roi_align", [
AllowsTypeRefinement,
HasValueSemantics,
ReadOnly
]> {
let summary = "Generated op for `torchvision::roi_align : (Tensor, Tensor, float, int, int, int, bool) -> (Tensor)`";
let arguments = (ins
AnyTorchTensorType:$input,
AnyTorchTensorType:$rois,
Torch_FloatType:$spatial_scale,
Torch_IntType:$pooled_height,
Torch_IntType:$pooled_width,
Torch_IntType:$sampling_ratio,
Torch_BoolType:$aligned
);
let results = (outs
AnyTorchOptionalTensorType:$result
);
let hasCustomAssemblyFormat = 1;
let extraClassDefinition = [{
ParseResult TorchvisionRoiAlignOp::parse(OpAsmParser &parser, OperationState &result) {
return parseDefaultTorchOp(parser, result, 7, 1);
}
void TorchvisionRoiAlignOp::print(OpAsmPrinter &printer) {
printDefaultTorchOp(printer, *this, 7, 1);
}
}];
}

// Generated ODS registration for `torchvision::roi_pool`: tensors
// `input` and `rois`, float `spatial_scale`, ints `pooled_height` and
// `pooled_width`; two tensor results (`result0`, `result1`). Default
// Torch-op assembly with counts (5, 2).
def Torch_TorchvisionRoiPoolOp : Torch_Op<"torchvision.roi_pool", [
AllowsTypeRefinement,
HasValueSemantics,
ReadOnly
]> {
let summary = "Generated op for `torchvision::roi_pool : (Tensor, Tensor, float, int, int) -> (Tensor, Tensor)`";
let arguments = (ins
AnyTorchTensorType:$input,
AnyTorchTensorType:$rois,
Torch_FloatType:$spatial_scale,
Torch_IntType:$pooled_height,
Torch_IntType:$pooled_width
);
let results = (outs
AnyTorchOptionalTensorType:$result0,
AnyTorchOptionalTensorType:$result1
);
let hasCustomAssemblyFormat = 1;
let extraClassDefinition = [{
ParseResult TorchvisionRoiPoolOp::parse(OpAsmParser &parser, OperationState &result) {
return parseDefaultTorchOp(parser, result, 5, 2);
}
void TorchvisionRoiPoolOp::print(OpAsmPrinter &printer) {
printDefaultTorchOp(printer, *this, 5, 2);
}
}];
}

Loading

0 comments on commit 607a8b7

Please sign in to comment.