diff --git a/docker/onnx-mlir.py b/docker/onnx-mlir.py index a039e97237..cdb61e2d21 100755 --- a/docker/onnx-mlir.py +++ b/docker/onnx-mlir.py @@ -1,23 +1,22 @@ #!/usr/bin/env python3 -import os -import re -import shutil -import stat -import subprocess -import sys +# SPDX-License-Identifier: Apache-2.0 -DOCKER_SOCKET = '/var/run/docker.sock' -ONNX_MLIR_IMAGE = 'onnxmlirczar/onnx-mlir' -WORK_DIR = '/workdir' -OUTPUT_DIR = '/output' -EMIT_IR_OPTS = [ '--EmitONNXBasic', - '--EmitONNXIR', - '--EmitMLIR', - '--EmitLLVMIR' ] -EMIT_BIN_OPTS = [ '--EmitLib', - '--EmitObj', - '--EmitJNI' ] +########################## onnx-mlir.py ######################################## + +# +# Copyright 2022 The IBM Research Authors. +# +################################################################################ +# +# This script is a convenience wrapper around the onnxmlirczar/onnx-mlir +# docker image. It pulls the latest image, mounts the host directories that +# contain the input model and the output files into the container, and runs +# onnx-mlir inside the container with the given arguments. +# +# The result is as if onnx-mlir were run directly on the host. +# Invoke with the `-h` argument to list the onnx-mlir options. +# +################################################################################ # When running onnx-mlir inside a docker container, the directory # containing the input ONNX model file must be mounted into the @@ -25,16 +24,48 @@ # # This convenient script will do that automatically to make it # as if you are running onnx-mlir directly on the host. 
+ +import os +import re +import shutil +import stat +import subprocess +import sys + +DOCKER_SOCKET = '/var/run/docker.sock' +ONNX_MLIR_IMAGE = 'onnxmlirczar/onnx-mlir' +KNOWN_INPUT_TYPE = ( '.onnx', '.json', '.mlir' ) + +mount_dirs = [] +mount_args = [] +onnx_mlir_args = [] + +# mount host path into container +def mount_path(path): + global mount_dirs, mount_args, onnx_mlir_args + + p = os.path.abspath(path) + d = os.path.dirname(p) + f = os.path.basename(p) + + # Haven't seen this directory before + if not d in mount_dirs: + mount_dirs += [ d ] + mount_args += [ '-v', d + ':' + d ] + onnx_mlir_args += [ p ] + else: + onnx_mlir_args += [ path ] + def main(): # Make sure docker client is installed if not shutil.which('docker'): print('docker client not found') - return + sys.exit(1) # Make sure docker daemon is running if not stat.S_ISSOCK(os.stat(DOCKER_SOCKET).st_mode): print('docker daemon not running') - return + sys.exit(1) # Pull the latest onnxmlirczar/onnx-mlir image, if image # is already up-to-date, pull will do nothing. @@ -48,112 +79,59 @@ def main(): print(line if re.match('^([0-9a-f]{12})|Error', line) else '', end='', flush=True) proc.wait() - if (proc.returncode != 0): + if proc.returncode: print("docker pull failed") - return - - # Go through the command line options and locate the - # input ONNX model file. - argi = 0 - ionnx = None - argo = 0 - obase = None - argv = sys.argv - argc = len(sys.argv) - - for i, arg in enumerate(argv): - # File specified on the first argument, defaults to --EmitLib - if i == 1 and not argv[i].startswith('-'): - argi = i - ionnx = argv[i] - # If a file is not specified on the first argument, it must be - # specified after a valid --EmitXXX option. 
- elif (arg in EMIT_IR_OPTS+EMIT_BIN_OPTS and i < argc-1 and - not argv[i+1].startswith('-')): - # File specified more than once, treat as not specified - if ionnx: - sys.exit("Too many --EmitXXX options") - if (arg in EMIT_BIN_OPTS and sys.platform != 'linux'): - print(('Warning: host {} is not linux, ' + - 'output not directly usable').format(sys.platform)) - argi = i + 1 - ionnx = argv[argi] - elif (arg == "-o" and i < argc-1 and - not argv[i+1].startswith('-')): - if obase: - sys.exit("Too many -o options") - argo = i + 1 - obase = argv[argo] + sys.exit(proc.returncode) # Prepare the arguments for docker run args = [ 'docker', 'run', '--rm', '-ti' ] - # Construct the mount option if an input ONNX model file is found - if ionnx: - p = os.path.abspath(ionnx) - d = os.path.dirname(p) - f = os.path.basename(p) - - # Add the mount option, directory containing the input - # ONNX model file will be mounted under /workdir inside - # the container. If /workdir doesn't exist, it will be - # created. - args.append('-v') - args.append(d + ':' + WORK_DIR + d) - - # Change directory into /workdir - #args.append('-w') - #args.append(WORK_DIR) - - # Rewrite the original input ONNX model file, which will - # reside under /workdir inside the container. - argv[argi] = WORK_DIR + p - - # Construct the mount option if -o is specified - if obase: - # Check invalid -o values such as ".", "..", "/.", "./", etc. - if re.match('(.*/)*\.*$', obase): - sys.exit("Invalid value for -o option") - - p = os.path.abspath(obase) - d = os.path.dirname(p) - f = os.path.basename(p) - - # Add the mount option, directory containing the output - # files will be mounted under /output inside the container. - # If /output/... doesn't exist, it will be - # created. - args.append('-v') - args.append(d + ':' + OUTPUT_DIR + d) - - # Rewrite the original output basename, which will - # reside under /output inside the container. 
- argv[argo] = OUTPUT_DIR + p + # Go through the command line options and locate the known + # file types. For each file located, construct a docker mount + # option that mounts the host directory into the container. + # + # Also do the same for the output path specified by the -o + # option. + argv = sys.argv + argc = len(sys.argv) + + global mount_dirs, mount_args, onnx_mlir_args + + verbose = False + for i in range(1, argc): + if argv[i].endswith(KNOWN_INPUT_TYPE): + mount_path(argv[i]) + elif argv[i-1] == '-o' and not argv[i].startswith('-'): + mount_path(argv[i]) + elif argv[i] == '-v': + verbose = True + onnx_mlir_args += [ argv[i] ] + else: + onnx_mlir_args += [ argv[i] ] # Add effective uid and gid - args.append('-u') - args.append(str(os.geteuid()) + ':' + str(os.getegid())) + args += [ '-u', str(os.geteuid()) + ':' + str(os.getegid()) ] + + # Add mount options + args += mount_args # Add image name - args.append(ONNX_MLIR_IMAGE) + args += [ ONNX_MLIR_IMAGE ] # Pass in all the original arguments - argv.remove(argv[0]) - args.extend(argv) - # print(args) #debug only + args += onnx_mlir_args + + if verbose: + print(args) # Run onnx-mlir in the container proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) for line in proc.stdout: - # Remove first occurrence of /workdir or /output before printing - print(re.sub(WORK_DIR + '|' + OUTPUT_DIR, '', - line.decode('utf-8'), 1), - end='', flush=True) + print(line.decode('utf-8'), end='', flush=True) proc.wait() - if (proc.returncode != 0): - print(os.strerror(proc.returncode)) + sys.exit(proc.returncode) if __name__ == "__main__": main() diff --git a/src/Builder/CMakeLists.txt b/src/Builder/CMakeLists.txt index e39a07f29a..324ec19e4c 100644 --- a/src/Builder/CMakeLists.txt +++ b/src/Builder/CMakeLists.txt @@ -7,6 +7,7 @@ add_onnx_mlir_library(OMBuilder FrontendDialectHelper.cpp FrontendDialectTransformer.cpp + ModelInputShaper.cpp LINK_LIBS PUBLIC OMHasOnnxSubgraphOpInterface diff --git 
a/src/Builder/FrontendDialectTransformer.cpp b/src/Builder/FrontendDialectTransformer.cpp index 3f6ec0be86..a91e6a08ec 100644 --- a/src/Builder/FrontendDialectTransformer.cpp +++ b/src/Builder/FrontendDialectTransformer.cpp @@ -27,6 +27,7 @@ #include "include/onnx-mlir/Compiler/OMCompilerTypes.h" #include "src/Builder/FrontendDialectTransformer.hpp" +#include "src/Builder/ModelInputShaper.hpp" #include "src/Builder/SymbolTable.hpp" #include "src/Dialect/ONNX/ONNXOps.hpp" #include "src/Dialect/ONNX/ONNXOpsHelper.hpp" @@ -43,13 +44,10 @@ SUPPRESS_WARNINGS_POP #include -#include +#include #include -#include #include -#include -#include -#include +#include #include #define DEBUG_TYPE "frontend_dialect_transformer" @@ -69,42 +67,16 @@ using SymbolToOnnxTypeMapping = SymbolMapping; class FrontendGenImpl { public: explicit FrontendGenImpl(MLIRContext &context) - : context_(context), builder_(&context), - force_dim_dynamic_enabled_(false) { + : context_(context), builder_(&context) { module_ = ModuleOp::create(UnknownLoc::get(&context)); InitHandlerMap(); - if (const char *envInputString = std::getenv("IMPORTER_FORCE_DYNAMIC")) { - force_dim_dynamic_enabled_ = true; - std::stringstream envString; - envString << envInputString; - std::string dynamicInput; - while (getline(envString, dynamicInput, '|')) { - size_t pos = dynamicInput.find(':'); - std::string inputString = dynamicInput.substr(0, pos); - std::string dimString = dynamicInput.substr(pos + 1); - - std::stringstream dimIndices(dimString); - std::string dimIndex; - std::vector dims; - while (getline(dimIndices, dimIndex, ',')) { - dims.emplace_back(stoi(dimIndex)); - } - // Default to the all dimensions if dims are not specified. - if (dims.empty()) - dims.emplace_back(-1); - forced_inputs_dims.insert(std::make_pair(stoi(inputString), dims)); - } - // Default to the all inputs and dimensions. 
- if (forced_inputs_dims.empty()) - forced_inputs_dims.insert(std::make_pair(-1, std::vector(1, -1))); - } } ModuleOp ImportONNXModel( const onnx::ModelProto &model, ImportOptions options) { options_ = options; + modelInputShaper_.setShapeInformation(options_.shapeInformation); SetOpSetImport(model); // Determines which opsets to use. - SetCustomShapeInfo(); // Set custom shapes for the inputs if available. importGraph(model.graph()); return module_; } @@ -123,75 +95,7 @@ class FrontendGenImpl { // mapping between string name and symbol ValueSymbolMapping frontend_symbols_; - // Flag to change the inputs of function to unknown dimension. - // Temporarily added to use the test cases with static shape to test. - // The values are set by enviroment variable IMPORTER_FORCE_DYNAMIC - // The Backus–Naur Form (BNF) for IMPORTER_FORCE_DYNAMIC is as follows. - // - // :== `'` `'` - // ::= | `|` - // `:` - // ::= | `,` - // ::= - // ::= - // ::= -1 | - // ::= | - // ::= 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 - // - // Value `-1` semantically represents all inputs or all dimensions, and it - // has the highest priority. E.g. `'0: -1, 0'` means all dimensions of the - // first input will be changed. Input and dimension indices start from 0. - // - // Examples: - // 1. IMPORTER_FORCE_DYNAMIC='-1:-1' - // - change all dimensions in all inputs to unknown dimensions. - // 2. IMPORTER_FORCE_DYNAMIC='-1:0' - // - change the first dimension in all inputs to unknown dimensions. - // 3. IMPORTER_FORCE_DYNAMIC='1:-1' - // - change all dimensions in the second input to unknown dimensions. - // 4. IMPORTER_FORCE_DYNAMIC='1:0,1' - // - change the first and second dimensions in the second input to unknown - // dimensions. - // 5. 
IMPORTER_FORCE_DYNAMIC='0:1|1:0,1' - // - change the second dimension in the first input to unknown dimensions, - // and - // - change the first and second dimensions in the second input to unknown - // dimensions, - - bool force_dim_dynamic_enabled_; - // A map from an input index to a list of dim indices those are changed to - // dynamic. Default value corresponds to IMPORTER_FORCE_DYNAMIC='-1:-1' - std::map> forced_inputs_dims; - - // Custom shape information for the graph inputs. - std::map> inputs_shape_information; - void SetCustomShapeInfo() { - // Use the custom shape for the inputs if avaiable. - if (options_.shapeInformation.empty()) { - return; - } - - std::stringstream shapeInfoString(options_.shapeInformation); - std::string shapeString; - while (getline(shapeInfoString, shapeString, ',')) { - size_t pos = shapeString.find(':'); - std::string inputString = shapeString.substr(0, pos); - std::string dimString = shapeString.substr(pos + 1); - - int64_t inputID = std::stoi(inputString); - assert(inputID >= 0 && "input_id must be >= 0"); - - std::stringstream dimSizes(dimString); - std::string dimStr; - std::vector dims; - while (getline(dimSizes, dimStr, 'x')) { - int64_t dimSize = std::stoi(dimStr); - assert((dimSize == -1 || dimSize > 0) && "dim must be -1 or > 0"); - dims.emplace_back(dimSize); - } - inputs_shape_information.insert(std::make_pair(inputID, dims)); - } - } + ModelInputShaper modelInputShaper_; using ImportHandlerType = void (onnx_mlir::detail::FrontendGenImpl::*)( const onnx::NodeProto &); @@ -426,47 +330,19 @@ class FrontendGenImpl { llvm::SmallVector outputNames; // Import the input tensor types that are not constant and not initialized. 
- int numInputs = 0; + int inputIndex = 0; for (const auto &input : graph.input()) { AddValueInfo(input); if (initializerNames.count(input.name()) == 0) { inputNames.push_back(input.name()); - auto argTy = ImportType(input.type()); - auto shapedTy = argTy.dyn_cast(); - // Change the first dimension to unknown (-1) for test purpose only - if (shapedTy && force_dim_dynamic_enabled_ && - ((forced_inputs_dims.find(-1) != forced_inputs_dims.end()) || - (forced_inputs_dims.find(numInputs) != - forced_inputs_dims.end()))) { - std::vector forced_dims; - if (forced_inputs_dims.find(-1) != forced_inputs_dims.end()) - forced_dims = forced_inputs_dims.at(-1); - else - forced_dims = forced_inputs_dims.at(numInputs); - auto argShape = shapedTy.getShape(); - llvm::SmallVector newDims; - for (unsigned int i = 0; i < argShape.size(); i++) { - if (llvm::is_contained(forced_dims, -1) || - llvm::is_contained(forced_dims, i)) { - newDims.push_back(-1); - } else { - newDims.push_back(argShape[i]); - } - } - argTy = RankedTensorType::get(newDims, shapedTy.getElementType()); - } else if (shapedTy && !inputs_shape_information.empty() && - (inputs_shape_information.find(numInputs) != - inputs_shape_information.end())) { - // Change to the custom shape if users provide. 
- std::vector shape = inputs_shape_information.at(numInputs); - argTy = RankedTensorType::get(shape, shapedTy.getElementType()); - } + Type argTy = ImportType(input.type()); + argTy = modelInputShaper_.reshape(inputIndex, argTy); argTypes.emplace_back(argTy); // numInputs is the number of graph inputs not contained within the // initializer - ++numInputs; + ++inputIndex; } } diff --git a/src/Builder/ModelInputShaper.cpp b/src/Builder/ModelInputShaper.cpp new file mode 100644 index 0000000000..a5cafc564f --- /dev/null +++ b/src/Builder/ModelInputShaper.cpp @@ -0,0 +1,118 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + */ + +//===--------------------- ModelInputShaper.cpp ---------------------------===// +// +// Helper class to override ONNX model input shapes. +// +//===----------------------------------------------------------------------===// + +#include "src/Builder/ModelInputShaper.hpp" + +#include "mlir/IR/BuiltinTypes.h" +#include "llvm/ADT/STLExtras.h" + +#include +#include +#include + +using namespace mlir; + +namespace onnx_mlir { + +ModelInputShaper::ModelInputShaper() : force_dim_dynamic_enabled_(false) { + if (const char *envInputString = std::getenv("IMPORTER_FORCE_DYNAMIC")) { + force_dim_dynamic_enabled_ = true; + std::stringstream envString; + envString << envInputString; + std::string dynamicInput; + while (std::getline(envString, dynamicInput, '|')) { + size_t pos = dynamicInput.find(':'); + std::string inputString = dynamicInput.substr(0, pos); + std::string dimString = dynamicInput.substr(pos + 1); + + std::stringstream dimIndices(dimString); + std::string dimIndex; + std::vector dims; + while (std::getline(dimIndices, dimIndex, ',')) { + dims.emplace_back(stoi(dimIndex)); + } + // Default to the all dimensions if dims are not specified. + if (dims.empty()) + dims.emplace_back(-1); + forced_inputs_dims_.emplace(std::stoi(inputString), dims); + } + // Default to the all inputs and dimensions. 
+ if (forced_inputs_dims_.empty()) + forced_inputs_dims_.emplace(-1, std::vector(1, -1)); + } +} + +void ModelInputShaper::setShapeInformation( + const std::string &shapeInformation) { + if (!shapeInformation.empty()) { + std::stringstream shapeInfoString(shapeInformation); + std::string shapeString; + while (std::getline(shapeInfoString, shapeString, ',')) { + size_t pos = shapeString.find(':'); + std::string inputString = shapeString.substr(0, pos); + std::string dimString = shapeString.substr(pos + 1); + + int64_t inputID = std::stoi(inputString); + assert(inputID >= 0 && "input_id must be >= 0"); + + std::stringstream dimSizes(dimString); + std::string dimStr; + std::vector dims; + while (std::getline(dimSizes, dimStr, 'x')) { + int64_t dimSize = std::stoi(dimStr); + assert((dimSize == -1 || dimSize > 0) && "dim must be -1 or > 0"); + dims.emplace_back(dimSize); + } + inputs_shape_information_.insert(std::make_pair(inputID, dims)); + } + } +} + +namespace { +RankedTensorType forceShape( + RankedTensorType tensorTy, const std::vector &forcedDims) { + auto shape = tensorTy.getShape(); + llvm::SmallVector newDims; + for (unsigned int i = 0; i < shape.size(); i++) { + if (llvm::is_contained(forcedDims, -1) || + llvm::is_contained(forcedDims, i)) { + newDims.push_back(-1); + } else { + newDims.push_back(shape[i]); + } + } + return RankedTensorType::get(newDims, tensorTy.getElementType()); +} +} // namespace + +Type ModelInputShaper::reshape(int inputIndex, Type inputType) const { + if (auto tensorTy = inputType.dyn_cast()) { + // Make dims unknown (-1) if applicable. 
+ if (force_dim_dynamic_enabled_ && tensorTy.hasRank()) { + auto rankedTensorTy = tensorTy.cast(); + auto it = forced_inputs_dims_.find(-1); + if (it != forced_inputs_dims_.end()) + return forceShape(rankedTensorTy, it->second); + it = forced_inputs_dims_.find(inputIndex); + if (it != forced_inputs_dims_.end()) + return forceShape(rankedTensorTy, it->second); + } + + // Change to the custom shape if users provide. + auto it = inputs_shape_information_.find(inputIndex); + if (it != inputs_shape_information_.end()) + return RankedTensorType::get(it->second, tensorTy.getElementType()); + } + + // Default to not reshape. + return inputType; +} + +} // namespace onnx_mlir diff --git a/src/Builder/ModelInputShaper.hpp b/src/Builder/ModelInputShaper.hpp new file mode 100644 index 0000000000..7910262687 --- /dev/null +++ b/src/Builder/ModelInputShaper.hpp @@ -0,0 +1,84 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + */ + +//===--------------------- ModelInputShaper.hpp ---------------------------===// +// +// Helper class to override ONNX model input shapes. +// +//===----------------------------------------------------------------------===// + +#pragma once + +#include +#include +#include + +#include "mlir/IR/Types.h" + +namespace onnx_mlir { + +// Sets shapes of ONNX model inputs. +// +// Reads environment variable IMPORTER_FORCE_DYNAMIC to change input +// shapes to unknown dimension. +// Temporarily added to use the test cases with static shape to test. +// The Backus–Naur Form (BNF) for IMPORTER_FORCE_DYNAMIC is as follows. +// +// :== `'` `'` +// ::= | `|` +// `:` +// ::= | `,` +// ::= +// ::= +// ::= -1 | +// ::= | +// ::= 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 +// +// Value `-1` semantically represents all inputs or all dimensions, and it +// has the highest priority. E.g. `'0: -1, 0'` means all dimensions of the +// first input will be changed. Input and dimension indices start from 0. +// +// Examples: +// 1. 
IMPORTER_FORCE_DYNAMIC='-1:-1' +// - change all dimensions in all inputs to unknown dimensions. +// 2. IMPORTER_FORCE_DYNAMIC='-1:0' +// - change the first dimension in all inputs to unknown dimensions. +// 3. IMPORTER_FORCE_DYNAMIC='1:-1' +// - change all dimensions in the second input to unknown dimensions. +// 4. IMPORTER_FORCE_DYNAMIC='1:0,1' +// - change the first and second dimensions in the second input to unknown +// dimensions. +// 5. IMPORTER_FORCE_DYNAMIC='0:1|1:0,1' +// - change the second dimension in the first input to unknown dimensions, +// and +// - change the first and second dimensions in the second input to unknown +// dimensions +class ModelInputShaper { +public: + ModelInputShaper(); + + // shapeInformation specifies custom shapes for the inputs of the ONNX model, + // e.g. setting static shapes for dynamic inputs. + // See the documentation of the shapeInformation flag in CompilerOptions.cpp. + void setShapeInformation(const std::string &shapeInformation); + + // Takes the input type at the given input index and + // returns the input type with any changes to the shape specified by + // the environment variable IMPORTER_FORCE_DYNAMIC + // or any shapeInformation set in setShapeInformation. + mlir::Type reshape(int inputIndex, mlir::Type inputType) const; + +private: + // Whether environment variable IMPORTER_FORCE_DYNAMIC is set. + bool force_dim_dynamic_enabled_; + + // A map from an input index to a list of dim indices those are changed to + // dynamic. Default value corresponds to IMPORTER_FORCE_DYNAMIC='-1:-1'. + std::map> forced_inputs_dims_; + + // Custom shape information for the graph inputs. 
+ std::map> inputs_shape_information_; +}; + +} // namespace onnx_mlir diff --git a/src/Builder/OpBuildTable.inc b/src/Builder/OpBuildTable.inc index fdf3302cf9..b985594909 100644 --- a/src/Builder/OpBuildTable.inc +++ b/src/Builder/OpBuildTable.inc @@ -69,6 +69,7 @@ op_dialect_version_map_["GlobalMaxPool"] = {1}; op_dialect_version_map_["Gradient"] = {1}; op_dialect_version_map_["Greater"] = {13}; op_dialect_version_map_["GreaterOrEqual"] = {16}; +op_dialect_version_map_["GridSample"] = {16}; op_dialect_version_map_["HardSigmoid"] = {6}; op_dialect_version_map_["Hardmax"] = {13}; op_dialect_version_map_["HardSwish"] = {14}; @@ -143,14 +144,14 @@ op_dialect_version_map_["Relu"] = {14}; op_dialect_version_map_["Reshape"] = {14}; op_dialect_version_map_["Resize"] = {13, 11, 10}; op_dialect_version_map_["ReverseSequence"] = {10}; -op_dialect_version_map_["RoiAlign"] = {10}; +op_dialect_version_map_["RoiAlign"] = {16}; op_dialect_version_map_["Round"] = {11}; op_dialect_version_map_["SVMClassifier"] = {1}; op_dialect_version_map_["SVMRegressor"] = {1}; op_dialect_version_map_["Scaler"] = {1}; op_dialect_version_map_["Scan"] = {16}; op_dialect_version_map_["Scatter"] = {11}; -op_dialect_version_map_["ScatterElements"] = {13}; +op_dialect_version_map_["ScatterElements"] = {16}; op_dialect_version_map_["ScatterND"] = {16}; op_dialect_version_map_["Selu"] = {6}; op_dialect_version_map_["SequenceAt"] = {11}; @@ -313,6 +314,8 @@ import_handler_map_["Greater"] = &onnx_mlir::detail::FrontendGenImpl::buildOperation; import_handler_map_["GreaterOrEqual"] = &onnx_mlir::detail::FrontendGenImpl::buildOperation; +import_handler_map_["GridSample"] = + &onnx_mlir::detail::FrontendGenImpl::buildOperation; import_handler_map_["HardSigmoid"] = &onnx_mlir::detail::FrontendGenImpl::buildOperation; import_handler_map_["HardSwish"] = @@ -659,6 +662,7 @@ op_dialect_top_version_map_["GlobalLpPool"] = 2; op_dialect_top_version_map_["GlobalMaxPool"] = 1; op_dialect_top_version_map_["Greater"] = 
13; op_dialect_top_version_map_["GreaterOrEqual"] = 16; +op_dialect_top_version_map_["GridSample"] = 16; op_dialect_top_version_map_["HardSigmoid"] = 6; op_dialect_top_version_map_["HardSwish"] = 14; op_dialect_top_version_map_["Hardmax"] = 13; @@ -726,11 +730,11 @@ op_dialect_top_version_map_["Relu"] = 14; op_dialect_top_version_map_["Reshape"] = 14; op_dialect_top_version_map_["Resize"] = 10; op_dialect_top_version_map_["ReverseSequence"] = 10; -op_dialect_top_version_map_["RoiAlign"] = 10; +op_dialect_top_version_map_["RoiAlign"] = 16; op_dialect_top_version_map_["Round"] = 11; op_dialect_top_version_map_["Scan"] = 16; op_dialect_top_version_map_["Scatter"] = 11; -op_dialect_top_version_map_["ScatterElements"] = 13; +op_dialect_top_version_map_["ScatterElements"] = 16; op_dialect_top_version_map_["ScatterND"] = 16; op_dialect_top_version_map_["Selu"] = 6; op_dialect_top_version_map_["SequenceAt"] = 11; diff --git a/src/Dialect/ONNX/ONNXOps.cpp b/src/Dialect/ONNX/ONNXOps.cpp index d4666c62fb..30ede78252 100644 --- a/src/Dialect/ONNX/ONNXOps.cpp +++ b/src/Dialect/ONNX/ONNXOps.cpp @@ -5613,6 +5613,11 @@ LogicalResult ONNXZipMapOp::inferShapes( return emitError(NOT_IMPLEMENTED_MESSAGE); } +LogicalResult ONNXGridSampleOp::inferShapes( + std::function doShapeInference) { + return emitError(NOT_IMPLEMENTED_MESSAGE); +} + #define NOT_IMPLEMENTED_INFERSHAPE(T) \ LogicalResult T::inferShapes( \ std::function doShapeInference) { \ diff --git a/src/Dialect/ONNX/ONNXOps.td.inc b/src/Dialect/ONNX/ONNXOps.td.inc index ef43608862..394396a22f 100644 --- a/src/Dialect/ONNX/ONNXOps.td.inc +++ b/src/Dialect/ONNX/ONNXOps.td.inc @@ -2152,6 +2152,38 @@ def ONNXGreaterOrEqualOp:ONNX_Op<"GreaterOrEqual", }]; } +def ONNXGridSampleOp:ONNX_Op<"GridSample", + [NoSideEffect, DeclareOpInterfaceMethods]> { + let summary = "ONNX GridSample operation"; + let description = [{ + Given an `input` and a flow-field `grid`, computes the `output` using `input` values and pixel locations from `grid`. 
+ Currently, only spatial (4-D) inputs are supported. For `input` with shape (N, C, H, W) and `grid` with shape (N, H_out, W_out, 2), + the `output` will have shape (N, C, H_out, W_out). + For each output location `output[N, C, H_out, W_out]`, the size-2 vector `grid[N, H_out, W_out]` specifies `input` pixel locations `x` and `y`, + which are used to interpolate the output value `output[N, C, H_out, W_out]`. + + The GridSample operator is often used in doing grid generator and sampler in the [Spatial Transformer Networks](https://arxiv.org/abs/1506.02025). + See also in [torch.nn.functional.grid_sample](https://pytorch.org/docs/master/generated/torch.nn.functional.grid_sample.html#torch-nn-functional-grid-sample). + }]; + let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$X, + AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$grid, + DefaultValuedAttr:$align_corners, + DefaultValuedStrAttr:$mode, + DefaultValuedStrAttr:$padding_mode); + let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$Y); + let extraClassDeclaration = [{ + static int getNumberOfOperands() { + return 2; + } + static int getNumberOfResults() { + return 1; + } + static std::vector getTypeMap() { + return {-1}; + } + }]; +} + def ONNXHardSigmoidOp:ONNX_Op<"HardSigmoid", [NoSideEffect, DeclareOpInterfaceMethods]> { let summary = "ONNX HardSigmoid operation"; @@ -4933,6 +4965,7 @@ def ONNXRoiAlignOp:ONNX_Op<"RoiAlign", let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, 
TensorOf<[F64]>]>:$X, AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$rois, TensorOf<[I64]>:$batch_indices, + DefaultValuedStrAttr:$coordinate_transformation_mode, DefaultValuedStrAttr:$mode, DefaultValuedAttr:$output_height, DefaultValuedAttr:$output_width, @@ -5235,12 +5268,25 @@ def ONNXScatterElementsOp:ONNX_Op<"ScatterElements", entry in `indices` and the index-value for dimension != axis is obtained from the index of the entry itself. - For instance, in a 2-D tensor case, the update corresponding to the [i][j] entry - is performed as below: + `reduction` allows specification of an optional reduction operation, which is applied to all values in `updates` + tensor into `output` at the specified `indices`. + In cases where `reduction` is set to \"none\", indices should not have duplicate entries: that is, if idx1 != idx2, + then indices[idx1] != indices[idx2]. For instance, in a 2-D tensor case, the update + corresponding to the [i][j] entry is performed as below: ``` output[indices[i][j]][j] = updates[i][j] if axis = 0, output[i][indices[i][j]] = updates[i][j] if axis = 1, ``` + When `reduction` is set to \"add\", the update corresponding to the [i][j] entry is performed as below: + ``` + output[indices[i][j]][j] += updates[i][j] if axis = 0, + output[i][indices[i][j]] += updates[i][j] if axis = 1, + ``` + When `reduction` is set to \"mul\", the update corresponding to the [i][j] entry is performed as below: + ``` + output[indices[i][j]][j] *= updates[i][j] if axis = 0, + output[i][indices[i][j]] *= updates[i][j] if axis = 1, + ``` This operator is the inverse of GatherElements. It is similar to Torch's Scatter operation. 
@@ -5277,7 +5323,8 @@ def ONNXScatterElementsOp:ONNX_Op<"ScatterElements", let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$data, AnyTypeOf<[TensorOf<[I32]>, TensorOf<[I64]>]>:$indices, AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$updates, - DefaultValuedAttr:$axis); + DefaultValuedAttr:$axis, + DefaultValuedStrAttr:$reduction); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$output); let extraClassDeclaration = [{ static int getNumberOfOperands() { diff --git a/src/Transform/ONNX/Decompose.td b/src/Transform/ONNX/Decompose.td index 8c1d46d410..fdd8212d78 100644 --- a/src/Transform/ONNX/Decompose.td +++ b/src/Transform/ONNX/Decompose.td @@ -374,7 +374,7 @@ def UnsqueezeV11Pattern : Pat< // Express Scatter (deprecated) using ScatterElements. 
def ScatterPattern : Pat< (ONNXScatterOp $data, $indices, $updates, $axis), - (ONNXScatterElementsOp $data, $indices, $updates, $axis) + (ONNXScatterElementsOp $data, $indices, $updates, $axis, (GetNullStringAttr)) >; #endif // ONNX_DECOMPOSE diff --git a/test/mlir/onnx/parse/test_gridsample.json b/test/mlir/onnx/parse/test_gridsample.json deleted file mode 100644 index 29f81feeee..0000000000 --- a/test/mlir/onnx/parse/test_gridsample.json +++ /dev/null @@ -1,127 +0,0 @@ -// RUN: onnx-mlir --EmitONNXBasic --printIR %s 2> test_gridsample.log; cat test_gridsample.log | FileCheck --check-prefix=FAILED %s - -// test_gridsample.json is an onnx model from the onnx backend test -// third_party/onnx/onnx/backend/test/case/node/gridsample.py -// -// Parsing fails because GridSample was added in ONNX v17 and not yet added to onnx-mlir - -// json is generated with: -// utils/onnx2json.py third_party/onnx/onnx/backend/test/data/node/test_gridsample/model.onnx -{ - "irVersion": "8", - "producerName": "backend-test", - "graph": { - "node": [ - { - "input": [ - "X", - "Grid" - ], - "output": [ - "Y" - ], - "opType": "GridSample", - "attribute": [ - { - "name": "align_corners", - "i": "0", - "type": "INT" - }, - { - "name": "mode", - "s": "YmlsaW5lYXI=", - "type": "STRING" - }, - { - "name": "padding_mode", - "s": "emVyb3M=", - "type": "STRING" - } - ] - } - ], - "name": "test_gridsample", - "input": [ - { - "name": "X", - "type": { - "tensorType": { - "elemType": 1, - "shape": { - "dim": [ - { - "dimValue": "1" - }, - { - "dimValue": "1" - }, - { - "dimValue": "4" - }, - { - "dimValue": "4" - } - ] - } - } - } - }, - { - "name": "Grid", - "type": { - "tensorType": { - "elemType": 1, - "shape": { - "dim": [ - { - "dimValue": "1" - }, - { - "dimValue": "6" - }, - { - "dimValue": "6" - }, - { - "dimValue": "2" - } - ] - } - } - } - } - ], - "output": [ - { - "name": "Y", - "type": { - "tensorType": { - "elemType": 1, - "shape": { - "dim": [ - { - "dimValue": "1" - }, - { - 
"dimValue": "1" - }, - { - "dimValue": "6" - }, - { - "dimValue": "6" - } - ] - } - } - } - } - ] - }, - "opsetImport": [ - { - "version": "16" - } - ] -} -// FAILED: GridSample this Op is not supported by onnx-mlir's onnx dialect diff --git a/utils/gen_onnx_mlir.py b/utils/gen_onnx_mlir.py index 92c99a7e0b..da34d55015 100755 --- a/utils/gen_onnx_mlir.py +++ b/utils/gen_onnx_mlir.py @@ -133,6 +133,7 @@ 'Gradient': [1], 'Greater': [13], 'GreaterOrEqual': [16], + 'GridSample': [16], 'HardSigmoid': [6], 'Hardmax': [13], 'HardSwish': [14], @@ -207,14 +208,14 @@ 'Reshape': [14], 'Resize': [13, 11, 10], 'ReverseSequence': [10], - 'RoiAlign': [10], + 'RoiAlign': [16], 'Round': [11], 'SVMClassifier': [1], 'SVMRegressor': [1], 'Scaler': [1], 'Scan': [16], 'Scatter': [11], - 'ScatterElements': [13], + 'ScatterElements': [16], 'ScatterND': [16], 'Selu': [6], 'SequenceAt': [11],