Merge branch 'llvm:main' into main
sjarus authored Aug 8, 2024
2 parents 50f0589 + f91f816 commit 8c7b91d
Showing 55 changed files with 3,531 additions and 494 deletions.
8 changes: 4 additions & 4 deletions .github/workflows/RollPyTorch.yml
@@ -53,19 +53,19 @@ jobs:
sudo apt-get install unzip
# Fetch the most recent nightly torchvision release
- VISION_RELEASE=$(python -m pip index versions -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html --pre torchvision | grep "Available versions" | tr ' ' '\n' | grep "^[0-9]" | sort --version-sort --reverse | head -n1 | tr -d ',' | sed 's/\([^+]*\).*/\1/')
+ VISION_RELEASE=$(python -m pip index versions -f https://download.pytorch.org/whl/nightly/cpu/torchvision/ --pre torchvision | grep "Available versions" | tr ' ' '\n' | grep "^[0-9]" | sort --version-sort --reverse | head -n1 | tr -d ',' | sed 's/\([^+]*\).*/\1/')
echo "Found torchvision release ${VISION_RELEASE}"
# Fetch the whl file associated with the nightly torchvision release
rm -f torch*.whl
- python -m pip download -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html --pre "torchvision==${VISION_RELEASE}"
+ python -m pip download -f https://download.pytorch.org/whl/nightly/cpu/torchvision/ --pre "torchvision==${VISION_RELEASE}"
# Downloading the torchvision WHL also downloads the PyTorch WHL file
# Read the version from the downloaded whl file without extracting it
PT_RELEASE=$(unzip -p torch-*.whl 'torch-*/METADATA' | grep "^Version:" | awk '{ print $2 }' | sed 's/\([^+]*\).*/\1/')
echo "Found torch release ${PT_RELEASE}"
printf -- "-f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html\n--pre\ntorch==%s\n" "${PT_RELEASE}" > pytorch-requirements.txt
printf -- "-f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html\n--pre\ntorchvision==%s\n" "${VISION_RELEASE}" > torchvision-requirements.txt
printf -- "-f https://download.pytorch.org/whl/nightly/cpu/torch\n--pre\ntorch==%s\n" "${PT_RELEASE}" > pytorch-requirements.txt
printf -- "-f https://download.pytorch.org/whl/nightly/cpu/torchvision\n--pre\ntorchvision==%s\n" "${VISION_RELEASE}" > torchvision-requirements.txt
# Read the commit hash from the downloaded whl file without extracting it
PT_HASH=$(unzip -p torch-"${PT_RELEASE}"*.whl torch/version.py | grep git_version | tail -1 | awk '{ print $3 }' | tr -d "'")
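
For context, the two printf commands above regenerate the pinned requirement files that the rest of the build consumes. With the new per-package index URLs, the generated pytorch-requirements.txt would look roughly like this (the version string below is illustrative only, not taken from this commit):

    -f https://download.pytorch.org/whl/nightly/cpu/torch
    --pre
    torch==2.5.0.dev20240808
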
4 changes: 2 additions & 2 deletions build_tools/python_deploy/build_linux_packages.sh
@@ -439,11 +439,11 @@ function build_torch_mlir() {
nightly)
echo ":::: Using nightly dependencies"
python -m pip install --no-cache-dir -r /main_checkout/torch-mlir/requirements.txt \
- --extra-index-url https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html
+ --extra-index-url https://download.pytorch.org/whl/nightly/cpu/torch/
CMAKE_GENERATOR=Ninja \
TORCH_MLIR_PYTHON_PACKAGE_VERSION=${TORCH_MLIR_PYTHON_PACKAGE_VERSION} \
python -m pip wheel -v --no-build-isolation -w /wheelhouse /main_checkout/torch-mlir \
- -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html \
+ -f https://download.pytorch.org/whl/nightly/cpu/torch/ \
-r /main_checkout/torch-mlir/whl-requirements.txt
;;
stable)
2 changes: 1 addition & 1 deletion build_tools/python_deploy/build_windows.ps1
@@ -21,7 +21,7 @@ Write-Host "Build Deps installation completed successfully"
Write-Host "Building torch-mlir"
$env:CMAKE_GENERATOR='Ninja'
$env:TORCH_MLIR_ENABLE_LTC='0'
- python -m pip wheel -v -w wheelhouse ./ -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html -r whl-requirements.txt
+ python -m pip wheel -v -w wheelhouse ./ -f https://download.pytorch.org/whl/nightly/cpu/torch/ -r whl-requirements.txt

Write-Host "Build completed successfully"

2 changes: 1 addition & 1 deletion externals/llvm-project
Submodule llvm-project updated 14370 files
2 changes: 1 addition & 1 deletion externals/stablehlo
Submodule stablehlo updated 144 files
10 changes: 10 additions & 0 deletions include/torch-mlir/Conversion/TorchOnnxToTorch/Patterns.h
@@ -284,6 +284,16 @@ struct OpBinder {
return failure();
}

ParseResult optionalS64IntegerAttr(int64_t &value, StringRef nameSuffix) {
SmallString<64> name("torch.onnx.");
name.append(nameSuffix);
auto attr = op->getAttr(name);
if (!attr) {
return failure();
}
return s64IntegerAttr(value, nameSuffix);
}

ParseResult f32FloatAttr(float &value, StringRef nameSuffix,
float defaultValue = 0.0f) {
SmallString<64> name("torch.onnx.");
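
As a hedged sketch of how the new optionalS64IntegerAttr binder could be used from a TorchOnnxToTorch lowering (the attribute name and default below are placeholders, not part of this commit): unlike s64IntegerAttr, a failure here just means the attribute is absent or unparsable, so the caller can keep its own default instead of aborting the match.

    // Hypothetical usage inside a pattern body.
    int64_t axis = 0; // fallback when "torch.onnx.axis" is omitted
    if (failed(binder.optionalS64IntegerAttr(axis, "axis"))) {
      // Attribute not present; proceed with the default value of `axis`.
    }
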
6 changes: 6 additions & 0 deletions include/torch-mlir/Conversion/TorchOnnxToTorch/Utils.h
@@ -90,6 +90,12 @@ struct onnx_list_of_constant_ints_op_binder {
}
return true;
}
if (ElementsAttr attr = dyn_cast_or_null<ElementsAttr>(
constOp->getAttr("torch.onnx.value"))) {
for (auto axis : attr.getValues<llvm::APInt>())
bind_values.push_back(axis.getSExtValue());
return true;
}
return false;
}
};
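
A hedged sketch of how this binder is typically consumed from a lowering (the value names are placeholders, and m_OnnxListOfConstantInts is assumed to be the matcher wrapper defined alongside this struct): after this change the same match also succeeds when the onnx.Constant stores its integers as a dense ElementsAttr under torch.onnx.value rather than as torch.onnx.value_ints.

    // Illustrative only: recover the integer list behind an onnx.Constant,
    // whichever attribute form it uses.
    SmallVector<int64_t> axes;
    if (!matchPattern(axesValue, m_OnnxListOfConstantInts(axes)))
      return rewriter.notifyMatchFailure(binder.op, "expected a constant list of ints");
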
Changed file (path not captured in this view)
@@ -52,8 +52,13 @@ Value scalarToStablehloTensor(ConversionPatternRewriter &rewriter,
Value promoteType(PatternRewriter &rewriter, Location loc, Value input,
Type outElementType);

+ FailureOr<Value> getBroadcastResultShape(PatternRewriter &rewriter,
+                                          Operation *op, ArrayRef<Value> tensors,
+                                          size_t dimSizeIndexBits);

Value promoteAndBroadcast(ConversionPatternRewriter &rewriter, Value input,
- TensorType outType);
+ TensorType outType,
+ std::optional<Value> bcastSizeTensor);

SmallVector<int64_t> toPositiveDims(ArrayRef<int64_t> dims, int64_t rank);

192 changes: 192 additions & 0 deletions include/torch-mlir/Dialect/Torch/IR/GeneratedTorchOps.td
@@ -4650,6 +4650,35 @@ def Torch_AtenFakeQuantizePerTensorAffineTensorQparamsOp : Torch_Op<"aten.fake_q
}];
}

def Torch_Aten_FakeQuantizePerTensorAffineCachemaskTensorQparamsOp : Torch_Op<"aten._fake_quantize_per_tensor_affine_cachemask_tensor_qparams", [
AllowsTypeRefinement,
HasValueSemantics,
ReadOnly
]> {
let summary = "Generated op for `aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams : (Tensor, Tensor, Tensor, Tensor, int, int) -> (Tensor, Tensor)`";
let arguments = (ins
AnyTorchTensorType:$self,
AnyTorchTensorType:$scale,
AnyTorchTensorType:$zero_point,
AnyTorchTensorType:$fake_quant_enabled,
Torch_IntType:$quant_min,
Torch_IntType:$quant_max
);
let results = (outs
AnyTorchOptionalTensorType:$output,
AnyTorchOptionalTensorType:$mask
);
let hasCustomAssemblyFormat = 1;
let extraClassDefinition = [{
ParseResult Aten_FakeQuantizePerTensorAffineCachemaskTensorQparamsOp::parse(OpAsmParser &parser, OperationState &result) {
return parseDefaultTorchOp(parser, result, 6, 2);
}
void Aten_FakeQuantizePerTensorAffineCachemaskTensorQparamsOp::print(OpAsmPrinter &printer) {
printDefaultTorchOp(printer, *this, 6, 2);
}
}];
}

def Torch_AtenFakeQuantizePerChannelAffineOp : Torch_Op<"aten.fake_quantize_per_channel_affine", [
AllowsTypeRefinement,
HasValueSemantics,
@@ -4678,6 +4707,35 @@ def Torch_AtenFakeQuantizePerChannelAffineOp : Torch_Op<"aten.fake_quantize_per_
}];
}

def Torch_AtenFakeQuantizePerChannelAffineCachemaskOp : Torch_Op<"aten.fake_quantize_per_channel_affine_cachemask", [
AllowsTypeRefinement,
HasValueSemantics,
ReadOnly
]> {
let summary = "Generated op for `aten::fake_quantize_per_channel_affine_cachemask : (Tensor, Tensor, Tensor, int, int, int) -> (Tensor, Tensor)`";
let arguments = (ins
AnyTorchTensorType:$self,
AnyTorchTensorType:$scale,
AnyTorchTensorType:$zero_point,
Torch_IntType:$axis,
Torch_IntType:$quant_min,
Torch_IntType:$quant_max
);
let results = (outs
AnyTorchOptionalTensorType:$output,
AnyTorchOptionalTensorType:$mask
);
let hasCustomAssemblyFormat = 1;
let extraClassDefinition = [{
ParseResult AtenFakeQuantizePerChannelAffineCachemaskOp::parse(OpAsmParser &parser, OperationState &result) {
return parseDefaultTorchOp(parser, result, 6, 2);
}
void AtenFakeQuantizePerChannelAffineCachemaskOp::print(OpAsmPrinter &printer) {
printDefaultTorchOp(printer, *this, 6, 2);
}
}];
}

def Torch_AtenMaximumOp : Torch_Op<"aten.maximum", [
AllowsTypeRefinement,
HasValueSemantics,
@@ -7671,6 +7729,7 @@ def Torch_Aten_AdaptiveAvgPool2dOp : Torch_Op<"aten._adaptive_avg_pool2d", [
printDefaultTorchOp(printer, *this, 2, 1);
}
}];
let hasCanonicalizer = 1;
}

def Torch_Aten_AdaptiveAvgPool2dBackwardOp : Torch_Op<"aten._adaptive_avg_pool2d_backward", [
@@ -8737,6 +8796,30 @@ def Torch_Aten_LinalgDetOp : Torch_Op<"aten._linalg_det", [
}];
}

def Torch_AtenLinalgSlogdetOp : Torch_Op<"aten.linalg_slogdet", [
AllowsTypeRefinement,
HasValueSemantics,
ReadOnly
]> {
let summary = "Generated op for `aten::linalg_slogdet : (Tensor) -> (Tensor, Tensor)`";
let arguments = (ins
AnyTorchTensorType:$A
);
let results = (outs
AnyTorchOptionalTensorType:$sign,
AnyTorchOptionalTensorType:$logabsdet
);
let hasCustomAssemblyFormat = 1;
let extraClassDefinition = [{
ParseResult AtenLinalgSlogdetOp::parse(OpAsmParser &parser, OperationState &result) {
return parseDefaultTorchOp(parser, result, 1, 2);
}
void AtenLinalgSlogdetOp::print(OpAsmPrinter &printer) {
printDefaultTorchOp(printer, *this, 1, 2);
}
}];
}

def Torch_AtenFrobeniusNormDimOp : Torch_Op<"aten.frobenius_norm.dim", [
AllowsTypeRefinement,
HasValueSemantics,
@@ -12702,6 +12785,35 @@ def Torch_AtenUniqueConsecutiveOp : Torch_Op<"aten.unique_consecutive", [
}];
}

def Torch_AtenUniqueDimOp : Torch_Op<"aten.unique_dim", [
AllowsTypeRefinement,
HasValueSemantics,
ReadOnly
]> {
let summary = "Generated op for `aten::unique_dim : (Tensor, int, bool, bool, bool) -> (Tensor, Tensor, Tensor)`";
let arguments = (ins
AnyTorchTensorType:$self,
Torch_IntType:$dim,
Torch_BoolType:$sorted,
Torch_BoolType:$return_inverse,
Torch_BoolType:$return_counts
);
let results = (outs
AnyTorchOptionalTensorType:$result0,
AnyTorchOptionalTensorType:$result1,
AnyTorchOptionalTensorType:$result2
);
let hasCustomAssemblyFormat = 1;
let extraClassDefinition = [{
ParseResult AtenUniqueDimOp::parse(OpAsmParser &parser, OperationState &result) {
return parseDefaultTorchOp(parser, result, 5, 3);
}
void AtenUniqueDimOp::print(OpAsmPrinter &printer) {
printDefaultTorchOp(printer, *this, 5, 3);
}
}];
}

def Torch_AtenLinspaceOp : Torch_Op<"aten.linspace", [
AllowsTypeRefinement,
HasValueSemantics,
@@ -15437,6 +15549,31 @@ def Torch_Aten__Not__Op : Torch_Op<"aten.__not__", [
let hasFolder = 1;
}

def Torch_Aten__Or__BoolOp : Torch_Op<"aten.__or__.bool", [
AllowsTypeRefinement,
HasValueSemantics,
ReadOnly
]> {
let summary = "Generated op for `aten::__or__.bool : (bool, bool) -> (bool)`";
let arguments = (ins
Torch_BoolType:$a,
Torch_BoolType:$b
);
let results = (outs
Torch_BoolType:$result
);
let hasCustomAssemblyFormat = 1;
let extraClassDefinition = [{
ParseResult Aten__Or__BoolOp::parse(OpAsmParser &parser, OperationState &result) {
return parseDefaultTorchOp(parser, result, 2, 1);
}
void Aten__Or__BoolOp::print(OpAsmPrinter &printer) {
printDefaultTorchOp(printer, *this, 2, 1);
}
}];
let hasFolder = 1;
}

def Torch_AtenLenTOp : Torch_Op<"aten.len.t", [
AllowsTypeRefinement,
HasValueSemantics,
@@ -15857,6 +15994,36 @@ def Torch_AtenTriuIndicesOp : Torch_Op<"aten.triu_indices", [
let hasVerifier = 1;
}

def Torch_AtenTrilIndicesOp : Torch_Op<"aten.tril_indices", [
AllowsTypeRefinement,
HasValueSemantics,
ReadOnly
]> {
let summary = "Generated op for `aten::tril_indices : (int, int, int, int?, int?, Device?, bool?) -> (Tensor)`";
let arguments = (ins
Torch_IntType:$row,
Torch_IntType:$col,
Torch_IntType:$offset,
AnyTorchOptionalIntType:$dtype,
AnyTorchOptionalIntType:$layout,
AnyTorchOptionalDeviceType:$device,
AnyTorchOptionalBoolType:$pin_memory
);
let results = (outs
AnyTorchOptionalTensorType:$result
);
let hasCustomAssemblyFormat = 1;
let extraClassDefinition = [{
ParseResult AtenTrilIndicesOp::parse(OpAsmParser &parser, OperationState &result) {
return parseDefaultTorchOp(parser, result, 7, 1);
}
void AtenTrilIndicesOp::print(OpAsmPrinter &printer) {
printDefaultTorchOp(printer, *this, 7, 1);
}
}];
let hasVerifier = 1;
}

def Torch_Aten_SoftmaxBackwardDataOp : Torch_Op<"aten._softmax_backward_data", [
AllowsTypeRefinement,
HasValueSemantics,
@@ -17033,3 +17200,28 @@ def Torch_TorchvisionRoiPoolOp : Torch_Op<"torchvision.roi_pool", [
}];
}

def Torch_TorchvisionNmsOp : Torch_Op<"torchvision.nms", [
AllowsTypeRefinement,
HasValueSemantics,
ReadOnly
]> {
let summary = "Generated op for `torchvision::nms : (Tensor, Tensor, float) -> (Tensor)`";
let arguments = (ins
AnyTorchTensorType:$dets,
AnyTorchTensorType:$scores,
Torch_FloatType:$iou_threshold
);
let results = (outs
AnyTorchOptionalTensorType:$result
);
let hasCustomAssemblyFormat = 1;
let extraClassDefinition = [{
ParseResult TorchvisionNmsOp::parse(OpAsmParser &parser, OperationState &result) {
return parseDefaultTorchOp(parser, result, 3, 1);
}
void TorchvisionNmsOp::print(OpAsmPrinter &printer) {
printDefaultTorchOp(printer, *this, 3, 1);
}
}];
}

8 changes: 5 additions & 3 deletions lib/Conversion/TorchOnnxToTorch/DefaultDomainAtoF.cpp
@@ -759,6 +759,8 @@ void mlir::torch::onnx_c::populateDefaultDomainAtoF(
Value cstTwo = rewriter.create<Torch::ConstantIntOp>(
binder.getLoc(), rewriter.getI64IntegerAttr(2));
auto scalarTensorType = rewriter.getType<Torch::ValueTensorType>(
ArrayRef<int64_t>{}, rewriter.getIntegerType(64, /*signed*/ 1));
+ auto selectTensorType = rewriter.getType<Torch::ValueTensorType>(
+     ArrayRef<int64_t>{1}, rewriter.getIntegerType(64, /*signed*/ 1));

int64_t lastChangeDim = 0;
@@ -790,7 +792,7 @@ void mlir::torch::onnx_c::populateDefaultDomainAtoF(
Value kTensor = rewriter.create<Torch::PrimNumToTensorScalarOp>(
binder.getLoc(), scalarTensorType, k);
Value sel = rewriter.create<Torch::AtenIndexSelectOp>(
- binder.getLoc(), scalarTensorType, shape, cstZero, kTensor);
+ binder.getLoc(), selectTensorType, shape, cstZero, kTensor);
Value outputDimSize = rewriter.create<Torch::AtenItemOp>(
binder.getLoc(), rewriter.getType<Torch::IntType>(), sel);
Value inputDimSize = rewriter.create<Torch::AtenSizeIntOp>(
@@ -2340,7 +2342,7 @@ void mlir::torch::onnx_c::populateDefaultDomainAtoF(
Value input;
float alpha;
if (binder.tensorOperand(input) ||
binder.f32FloatAttr(alpha, "alpha") ||
binder.f32FloatAttr(alpha, "alpha", 1.0) ||
binder.tensorResultType(resultType))
return failure();
Value cstAlpha = rewriter.create<Torch::ConstantFloatOp>(
@@ -2535,7 +2537,7 @@ void mlir::torch::onnx_c::populateDefaultDomainAtoF(
return success();
});
patterns.onOp(
"Flatten", 13, [](OpBinder binder, ConversionPatternRewriter &rewriter) {
"Flatten", 11, [](OpBinder binder, ConversionPatternRewriter &rewriter) {
// Flatten means to partition the input tensor's dimensions
// into a "left range" spanning 0 to axis - 1 and a "right range"
// spanning axis to rank - 1. Each range is then collapsed
(The remaining changed files are not rendered in this view.)
