diff --git a/core/conversion/converters/impl/interpolate.cpp b/core/conversion/converters/impl/interpolate.cpp index a81ad8832c..58052bd46b 100644 --- a/core/conversion/converters/impl/interpolate.cpp +++ b/core/conversion/converters/impl/interpolate.cpp @@ -15,7 +15,6 @@ namespace { /* * Helper functions */ -#if NV_TENSORRT_MAJOR < 7 || (NV_TENSORRT_MAJOR == 7 && NV_TENSORRT_MINOR < 1) void create_plugin( ConversionCtx* ctx, const torch::jit::Node* n, @@ -24,11 +23,14 @@ void create_plugin( std::vector in_shape, std::vector out_shape, std::vector out_size, - std::string mode) { + std::vector scales, + std::string mode, + bool align_corners, + bool use_scales=false) { LOG_WARNING("Interpolation layer will be run through ATen, not TensorRT. Performance may be lower than expected"); auto creator = new plugins::InterpolatePluginCreator(); - auto plugin = creator->createPlugin(name, in_shape, out_shape, out_size, mode, false); + auto plugin = creator->createPlugin(name, in_shape, out_shape, out_size, scales, mode, align_corners, use_scales); auto resize_layer = ctx->net->addPluginV2(reinterpret_cast(&in), 1, *plugin); TRTORCH_CHECK(resize_layer, "Unable to create interpolation plugin from node" << *n); @@ -39,7 +41,6 @@ void create_plugin( LOG_DEBUG("Output tensor shape: " << layer_output->getDimensions()); } -#endif void resize_layer_size( ConversionCtx* ctx, @@ -89,8 +90,8 @@ void resize_layer_size( // if interpolation mode is linear, align corners must have been set to true. // else, don't use align corners. if (mode == nvinfer1::ResizeMode::kLINEAR) { -#if NV_TENSORRT_MAJOR < 7 || (NV_TENSORRT_MAJOR == 7 && NV_TENSORRT_MINOR < 1) - TRTORCH_CHECK(align_corners, "resize layer only support align_corner with TensorRT <= 7.0"); +#if NV_TENSORRT_MAJOR < 7 || (NV_TENSORRT_MAJOR == 7 && NV_TENSORRT_MINOR < 1) // IF TRT VERSION <= 7.0 + TRTORCH_CHECK(align_corners, "resize layer (linear) only supports align_corners=True in TensorRT <= 7.0"); resize_layer->setAlignCorners(true); #else resize_layer->setAlignCorners(align_corners); @@ -102,58 +103,6 @@ void resize_layer_size( LOG_DEBUG("Output tensor shape: " << layer_output->getDimensions()); } -bool upsample_triilinear3d(ConversionCtx* ctx, const torch::jit::Node* n, args& args) { - auto in = args[0].ITensor(); - auto in_shape = util::toVec(in->getDimensions()); - bool align_corners = args[2].unwrapToBool(); - - if (args[1].IValue()->isNone() && (args[3].IValue()->isNone() || args[4].IValue()->isNone() || args[5].IValue()->isNone())) { - TRTORCH_THROW_ERROR("Unable to convert node: " << util::node_info(n) - << "\nOne of size or scale_factor should be defined"); - } else if (!args[3].IValue()->isNone() && !args[4].IValue()->isNone() && !args[5].IValue()->isNone()) { - // Case 1: user uses scales - float scale_d = args[3].IValue()->toDouble(); - float scale_h = args[4].IValue()->toDouble(); - float scale_w = args[5].IValue()->toDouble(); - std::vector padded_scales(in_shape.size(), 1); - padded_scales[padded_scales.size() - 3] = scale_d; - padded_scales[padded_scales.size() - 2] = scale_h; - padded_scales[padded_scales.size() - 1] = scale_w; -#if NV_TENSORRT_MAJOR < 7 || (NV_TENSORRT_MAJOR == 7 && NV_TENSORRT_MINOR < 1) - if (!align_corners) { - TRTORCH_THROW_ERROR("Unable to convert node: " << util::node_info(n) - << "\nupsample_linear3d only supports align_corner with TensorRT <= 7.0."); - } else { - resize_layer_size(ctx, n, in, {}, padded_scales, nvinfer1::ResizeMode::kLINEAR, true); - } -#else - resize_layer_size(ctx, n, in, {}, padded_scales, 
nvinfer1::ResizeMode::kLINEAR, align_corners); -#endif - } else { - // Case 2: user uses output size - auto out_size = util::toVec(util::toDims(args[1].unwrapToIntList())); - TRTORCH_ASSERT( - out_size.size() == 3, - "aten::upsample_trilinear3d input Tensor and output size dimension mismatch"); - - auto out_shape = in_shape; - std::copy(out_size.begin(), out_size.end(), out_shape.begin() + (in_shape.size() - out_size.size())); -#if NV_TENSORRT_MAJOR < 7 || (NV_TENSORRT_MAJOR == 7 && NV_TENSORRT_MINOR < 1) - if (!align_corners) { - // align_corners not supported in TensorRT, create plugin and - // run layer through PyTorch - create_plugin(ctx, n, in, "trilinear3d", in_shape, out_shape, out_size, std::string("trilinear")); - } else { - resize_layer_size(ctx, n, in, out_shape, {}, nvinfer1::ResizeMode::kLINEAR, true); - } -#else - resize_layer_size(ctx, n, in, out_shape, {}, nvinfer1::ResizeMode::kLINEAR, align_corners); -#endif - } - - return true; -} - /* * Interpolate Converter */ @@ -362,7 +311,7 @@ auto interpolate_registrations TRTORCH_UNUSED = float scale = args[3].IValue()->toDouble(); std::vector padded_scales(in_shape.size(), 1); padded_scales[padded_scales.size() - 1] = scale; - #if NV_TENSORRT_MAJOR < 7 || (NV_TENSORRT_MAJOR == 7 && NV_TENSORRT_MINOR < 1) + #if NV_TENSORRT_MAJOR < 7 || (NV_TENSORRT_MAJOR == 7 && NV_TENSORRT_MINOR < 1) // IF TRT VERSION <= 7.0 if (!align_corners) { TRTORCH_THROW_ERROR("Unable to convert node: " << util::node_info(n) << "\nupsample_linear1d only supports align_corner with TensorRT <= 7.0."); @@ -370,7 +319,16 @@ auto interpolate_registrations TRTORCH_UNUSED = resize_layer_size(ctx, n, in, {}, padded_scales, nvinfer1::ResizeMode::kLINEAR, true); } #else - resize_layer_size(ctx, n, in, {}, padded_scales, nvinfer1::ResizeMode::kLINEAR, align_corners); + TRTORCH_CHECK(!(align_corners && ctx->input_is_dynamic), + "TRTorch currently does not support the compilation of dynamic engines from code using PyTorch [bi/tri]linear interpolation via scale factor and align_corners=True"); + if (align_corners) { + // Align corners and scale factor behave slightly differently together in TRT and PyTorch, so run the + // layer in ATen to maintain consistency between TRTorch and PyTorch + // https://pytorch.org/docs/stable/nn.functional.html#torch.nn.functional.interpolate + create_plugin(ctx, n, in, "linear1d", in_shape, {}, {}, {scale}, std::string("linear"), align_corners, true); + } else { + resize_layer_size(ctx, n, in, {}, padded_scales, nvinfer1::ResizeMode::kLINEAR, align_corners); + } #endif } else { // Case 2: user uses output size @@ -380,11 +338,11 @@ auto interpolate_registrations TRTORCH_UNUSED = auto out_shape = in_shape; std::copy(out_size.begin(), out_size.end(), out_shape.begin() + (in_shape.size() - out_size.size())); - #if NV_TENSORRT_MAJOR < 7 || (NV_TENSORRT_MAJOR == 7 && NV_TENSORRT_MINOR < 1) + #if NV_TENSORRT_MAJOR < 7 || (NV_TENSORRT_MAJOR == 7 && NV_TENSORRT_MINOR < 1) // IF TRT VERSION <= 7.0 if (!align_corners) { // align_corners not supported in TensorRT, create plugin and // run layer through PyTorch - create_plugin(ctx, n, in, "linear1d", in_shape, out_shape, out_size, std::string("linear")); + create_plugin(ctx, n, in, "linear1d", in_shape, out_shape, out_size, {}, std::string("linear"), align_corners); } else { resize_layer_size(ctx, n, in, out_shape, {}, nvinfer1::ResizeMode::kLINEAR, true); } @@ -412,7 +370,7 @@ auto interpolate_registrations TRTORCH_UNUSED = float scale = scale_factors[0]; std::vector padded_scales(in_shape.size(), 
1); padded_scales[padded_scales.size() - 1] = scale; - #if NV_TENSORRT_MAJOR < 7 || (NV_TENSORRT_MAJOR == 7 && NV_TENSORRT_MINOR < 1) + #if NV_TENSORRT_MAJOR < 7 || (NV_TENSORRT_MAJOR == 7 && NV_TENSORRT_MINOR < 1) // IF TRT VERSION <= 7.0 if (!align_corners) { TRTORCH_THROW_ERROR("Unable to convert node: " << util::node_info(n) << "\nupsample_linear1d only supports align_corner with TensorRT <= 7.0."); @@ -420,7 +378,16 @@ auto interpolate_registrations TRTORCH_UNUSED = resize_layer_size(ctx, n, in, {}, padded_scales, nvinfer1::ResizeMode::kLINEAR, true); } #else - resize_layer_size(ctx, n, in, {}, padded_scales, nvinfer1::ResizeMode::kLINEAR, align_corners); + TRTORCH_CHECK(!(align_corners && ctx->input_is_dynamic), + "TRTorch currently does not support the compilation of dynamic engines from code using PyTorch [bi/tri]linear interpolation via scale factor and align_corners=True"); + if (align_corners) { + // Align corners and scale factor behave slightly differently together in TRT and PyTorch, so run the + // layer in ATen to maintain consistency between TRTorch and PyTorch + // https://pytorch.org/docs/stable/nn.functional.html#torch.nn.functional.interpolate + create_plugin(ctx, n, in, "linear1d", in_shape, {}, {}, {scale}, std::string("linear"), align_corners, true); + } else { + resize_layer_size(ctx, n, in, {}, padded_scales, nvinfer1::ResizeMode::kLINEAR, align_corners); + } #endif } else { // Case 2: user uses output size @@ -430,11 +397,11 @@ auto interpolate_registrations TRTORCH_UNUSED = auto out_shape = in_shape; std::copy(out_size.begin(), out_size.end(), out_shape.begin() + (in_shape.size() - out_size.size())); - #if NV_TENSORRT_MAJOR < 7 || (NV_TENSORRT_MAJOR == 7 && NV_TENSORRT_MINOR < 1) + #if NV_TENSORRT_MAJOR < 7 || (NV_TENSORRT_MAJOR == 7 && NV_TENSORRT_MINOR < 1) // IF TRT VERSION <= 7.0 if (!align_corners) { // align_corners not supported in TensorRT, create plugin and // run layer through PyTorch - create_plugin(ctx, n, in, "linear1d", in_shape, out_shape, out_size, std::string("linear")); + create_plugin(ctx, n, in, "linear1d", in_shape, out_shape, out_size, {}, std::string("linear"), align_corners); } else { resize_layer_size(ctx, n, in, out_shape, {}, nvinfer1::ResizeMode::kLINEAR, true); } @@ -462,7 +429,7 @@ auto interpolate_registrations TRTORCH_UNUSED = std::vector padded_scales(in_shape.size(), 1); padded_scales[padded_scales.size() - 2] = scale_h; padded_scales[padded_scales.size() - 1] = scale_w; - #if NV_TENSORRT_MAJOR < 7 || (NV_TENSORRT_MAJOR == 7 && NV_TENSORRT_MINOR < 1) + #if NV_TENSORRT_MAJOR < 7 || (NV_TENSORRT_MAJOR == 7 && NV_TENSORRT_MINOR < 1) // IF TRT VERSION <= 7.0 if (!align_corners) { TRTORCH_THROW_ERROR("Unable to convert node: " << util::node_info(n) << "\nupsample_linear2d only supports align_corner with TensorRT <= 7.0."); @@ -470,7 +437,16 @@ auto interpolate_registrations TRTORCH_UNUSED = resize_layer_size(ctx, n, in, {}, padded_scales, nvinfer1::ResizeMode::kLINEAR, true); } #else - resize_layer_size(ctx, n, in, {}, padded_scales, nvinfer1::ResizeMode::kLINEAR, align_corners); + TRTORCH_CHECK(!(align_corners && ctx->input_is_dynamic), + "TRTorch currently does not support the compilation of dynamic engines from code using PyTorch [bi/tri]linear interpolation via scale factor and align_corners=True"); + if (align_corners) { + // Align corners and scale factor behave slightly differently together in TRT and PyTorch, so run the + // layer in ATen to maintain consistency between TRTorch and PyTorch + // 
https://pytorch.org/docs/stable/nn.functional.html#torch.nn.functional.interpolate + create_plugin(ctx, n, in, "bilinear2d", in_shape, {}, {}, {scale_h, scale_w}, std::string("bilinear"), align_corners, true); + } else { + resize_layer_size(ctx, n, in, {}, padded_scales, nvinfer1::ResizeMode::kLINEAR, align_corners); + } #endif } else { // Case 2: user uses output size @@ -482,11 +458,11 @@ auto interpolate_registrations TRTORCH_UNUSED = auto out_shape = in_shape; std::copy(out_size.begin(), out_size.end(), out_shape.begin() + (in_shape.size() - out_size.size())); - #if NV_TENSORRT_MAJOR < 7 || (NV_TENSORRT_MAJOR == 7 && NV_TENSORRT_MINOR < 1) + #if NV_TENSORRT_MAJOR < 7 || (NV_TENSORRT_MAJOR == 7 && NV_TENSORRT_MINOR < 1) // IF TRT VERSION <= 7.0 if (!align_corners) { // align_corners not supported in TensorRT, create plugin and // run layer through PyTorch - create_plugin(ctx, n, in, "bilinear2d", in_shape, out_shape, out_size, std::string("bilinear")); + create_plugin(ctx, n, in, "bilinear2d", in_shape, out_shape, out_size, {}, std::string("bilinear"), align_corners); } else { resize_layer_size(ctx, n, in, out_shape, {}, nvinfer1::ResizeMode::kLINEAR, true); } @@ -516,7 +492,7 @@ auto interpolate_registrations TRTORCH_UNUSED = std::vector padded_scales(in_shape.size(), 1); padded_scales[padded_scales.size() - 2] = scale_h; padded_scales[padded_scales.size() - 1] = scale_w; - #if NV_TENSORRT_MAJOR < 7 || (NV_TENSORRT_MAJOR == 7 && NV_TENSORRT_MINOR < 1) + #if NV_TENSORRT_MAJOR < 7 || (NV_TENSORRT_MAJOR == 7 && NV_TENSORRT_MINOR < 1) // IF TRT VERSION <= 7.0 if (!align_corners) { TRTORCH_THROW_ERROR("Unable to convert node: " << util::node_info(n) << "\nupsample_linear2d only supports align_corner with TensorRT <= 7.0."); @@ -524,7 +500,16 @@ auto interpolate_registrations TRTORCH_UNUSED = resize_layer_size(ctx, n, in, {}, padded_scales, nvinfer1::ResizeMode::kLINEAR, true); } #else - resize_layer_size(ctx, n, in, {}, padded_scales, nvinfer1::ResizeMode::kLINEAR, align_corners); + TRTORCH_CHECK(!(align_corners && ctx->input_is_dynamic), + "TRTorch currently does not support the compilation of dynamic engines from code using PyTorch [bi/tri]linear interpolation via scale factor and align_corners=True"); + if (align_corners) { + // Align corners and scale factor behave slightly differently together in TRT and PyTorch, so run the + // layer in ATen to maintain consistency between TRTorch and PyTorch + // https://pytorch.org/docs/stable/nn.functional.html#torch.nn.functional.interpolate + create_plugin(ctx, n, in, "bilinear2d", in_shape, {}, {}, scale_factors.vec(), std::string("bilinear"), align_corners, true); + } else { + resize_layer_size(ctx, n, in, {}, padded_scales, nvinfer1::ResizeMode::kLINEAR, align_corners); + } #endif } else { // Case 2: user uses output size @@ -536,11 +521,11 @@ auto interpolate_registrations TRTORCH_UNUSED = auto out_shape = in_shape; std::copy(out_size.begin(), out_size.end(), out_shape.begin() + (in_shape.size() - out_size.size())); - #if NV_TENSORRT_MAJOR < 7 || (NV_TENSORRT_MAJOR == 7 && NV_TENSORRT_MINOR < 1) + #if NV_TENSORRT_MAJOR < 7 || (NV_TENSORRT_MAJOR == 7 && NV_TENSORRT_MINOR < 1) // IF TRT VERSION <= 7.0 if (!align_corners) { // align_corners not supported in TensorRT, create plugin and // run layer through PyTorch - create_plugin(ctx, n, in, "bilinear2d", in_shape, out_shape, out_size, std::string("bilinear")); + create_plugin(ctx, n, in, "bilinear2d", in_shape, out_shape, out_size, {}, std::string("bilinear"), align_corners); } else { 
resize_layer_size(ctx, n, in, out_shape, {}, nvinfer1::ResizeMode::kLINEAR, true); } @@ -570,7 +555,7 @@ auto interpolate_registrations TRTORCH_UNUSED = padded_scales[padded_scales.size() - 3] = scale_d; padded_scales[padded_scales.size() - 2] = scale_h; padded_scales[padded_scales.size() - 1] = scale_w; - #if NV_TENSORRT_MAJOR < 7 || (NV_TENSORRT_MAJOR == 7 && NV_TENSORRT_MINOR < 1) + #if NV_TENSORRT_MAJOR < 7 || (NV_TENSORRT_MAJOR == 7 && NV_TENSORRT_MINOR < 1) // IF TRT VERSION <= 7.0 if (!align_corners) { TRTORCH_THROW_ERROR("Unable to convert node: " << util::node_info(n) << "\nupsample_linear3d only supports align_corner with TensorRT <= 7.0."); @@ -578,7 +563,16 @@ auto interpolate_registrations TRTORCH_UNUSED = resize_layer_size(ctx, n, in, {}, padded_scales, nvinfer1::ResizeMode::kLINEAR, true); } #else - resize_layer_size(ctx, n, in, {}, padded_scales, nvinfer1::ResizeMode::kLINEAR, align_corners); + TRTORCH_CHECK(!(align_corners && ctx->input_is_dynamic), + "TRTorch currently does not support the compilation of dynamic engines from code using PyTorch [bi/tri]linear interpolation via scale factor and align_corners=True"); + if (align_corners) { + // Align corners and scale factor behave slightly differently together in TRT and PyTorch, so run the + // layer in ATen to maintain consistency between TRTorch and PyTorch + // https://pytorch.org/docs/stable/nn.functional.html#torch.nn.functional.interpolate + create_plugin(ctx, n, in, "trilinear3d", in_shape, {}, {}, {scale_d, scale_h, scale_w}, std::string("trilinear"), align_corners, true); + } else { + resize_layer_size(ctx, n, in, {}, padded_scales, nvinfer1::ResizeMode::kLINEAR, align_corners); + } #endif } else { // Case 2: user uses output size @@ -589,11 +583,11 @@ auto interpolate_registrations TRTORCH_UNUSED = auto out_shape = in_shape; std::copy(out_size.begin(), out_size.end(), out_shape.begin() + (in_shape.size() - out_size.size())); - #if NV_TENSORRT_MAJOR < 7 || (NV_TENSORRT_MAJOR == 7 && NV_TENSORRT_MINOR < 1) + #if NV_TENSORRT_MAJOR < 7 || (NV_TENSORRT_MAJOR == 7 && NV_TENSORRT_MINOR < 1) // IF TRT VERSION <= 7.0 if (!align_corners) { // align_corners not supported in TensorRT, create plugin and // run layer through PyTorch - create_plugin(ctx, n, in, "trilinear3d", in_shape, out_shape, out_size, std::string("trilinear")); + create_plugin(ctx, n, in, "trilinear3d", in_shape, out_shape, out_size, {}, std::string("trilinear"), align_corners); } else { resize_layer_size(ctx, n, in, out_shape, {}, nvinfer1::ResizeMode::kLINEAR, true); } @@ -625,7 +619,7 @@ auto interpolate_registrations TRTORCH_UNUSED = padded_scales[padded_scales.size() - 3] = scale_d; padded_scales[padded_scales.size() - 2] = scale_h; padded_scales[padded_scales.size() - 1] = scale_w; - #if NV_TENSORRT_MAJOR < 7 || (NV_TENSORRT_MAJOR == 7 && NV_TENSORRT_MINOR < 1) + #if NV_TENSORRT_MAJOR < 7 || (NV_TENSORRT_MAJOR == 7 && NV_TENSORRT_MINOR < 1) // IF TRT VERSION <= 7.0 if (!align_corners) { TRTORCH_THROW_ERROR("Unable to convert node: " << util::node_info(n) << "\nupsample_linear3d only supports align_corner with TensorRT <= 7.0."); @@ -633,7 +627,16 @@ auto interpolate_registrations TRTORCH_UNUSED = resize_layer_size(ctx, n, in, {}, padded_scales, nvinfer1::ResizeMode::kLINEAR, true); } #else - resize_layer_size(ctx, n, in, {}, padded_scales, nvinfer1::ResizeMode::kLINEAR, align_corners); + TRTORCH_CHECK(!(align_corners && ctx->input_is_dynamic), + "TRTorch currently does not support the compilation of dynamic engines from code using PyTorch [bi/tri]linear 
interpolation via scale factor and align_corners=True"); + if (align_corners) { + // Align corners and scale factor behave slightly differently together in TRT and PyTorch, so run the + // layer in ATen to maintain consistency between TRTorch and PyTorch + // https://pytorch.org/docs/stable/nn.functional.html#torch.nn.functional.interpolate + create_plugin(ctx, n, in, "trilinear3d", in_shape, {}, {}, scale_factors.vec(), std::string("trilinear"), align_corners, true); + } else { + resize_layer_size(ctx, n, in, {}, padded_scales, nvinfer1::ResizeMode::kLINEAR, align_corners); + } #endif } else { // Case 2: user uses output size @@ -644,11 +647,11 @@ auto interpolate_registrations TRTORCH_UNUSED = auto out_shape = in_shape; std::copy(out_size.begin(), out_size.end(), out_shape.begin() + (in_shape.size() - out_size.size())); - #if NV_TENSORRT_MAJOR < 7 || (NV_TENSORRT_MAJOR == 7 && NV_TENSORRT_MINOR < 1) + #if NV_TENSORRT_MAJOR < 7 || (NV_TENSORRT_MAJOR == 7 && NV_TENSORRT_MINOR < 1) // IF TRT VERSION <= 7.0 if (!align_corners) { // align_corners not supported in TensorRT, create plugin and // run layer through PyTorch - create_plugin(ctx, n, in, "trilinear3d", in_shape, out_shape, out_size, std::string("trilinear")); + create_plugin(ctx, n, in, "trilinear3d", in_shape, out_shape, out_size, {}, std::string("trilinear"), align_corners); } else { resize_layer_size(ctx, n, in, out_shape, {}, nvinfer1::ResizeMode::kLINEAR, true); } diff --git a/core/conversion/converters/impl/plugins/interpolate_plugin.cpp b/core/conversion/converters/impl/plugins/interpolate_plugin.cpp index 9abf6ced54..1308c9fb2d 100644 --- a/core/conversion/converters/impl/plugins/interpolate_plugin.cpp +++ b/core/conversion/converters/impl/plugins/interpolate_plugin.cpp @@ -17,9 +17,31 @@ InterpolatePlugin::InterpolatePlugin( std::vector in_shape, std::vector out_shape, std::vector size, + std::vector scales, std::string mode, - bool align_corners) - : in_shape_(in_shape), out_shape_(out_shape), size_(size), mode_(mode), align_corners_(align_corners) {} + bool align_corners, + bool use_scales) + : in_shape_(in_shape), out_shape_(out_shape), size_(size), scales_(scales), mode_(mode), align_corners_(align_corners), use_scales_(use_scales) { + if (use_scales) { + TRTORCH_ASSERT(mode_ != "adaptive_pool2d", "use_scales is not valid for adaptive_pool2d"); + TRTORCH_ASSERT(scales_.size() != 0, "Attempted to use interpolate plugin without providing scales while use_scales=true"); + at::Tensor input = at::randint(1, 10, in_shape, {at::kCUDA}); + at::Tensor output; + + if (mode_ == "linear") { + output = at::upsample_linear1d(input, c10::nullopt, align_corners_, scales_[0]); + } else if (mode_ == "bilinear") { + output = at::upsample_bilinear2d(input, c10::nullopt, align_corners_, scales_); + std::cout << output.sizes() << std::endl; + } else if (mode_ == "trilinear") { + output = at::upsample_trilinear3d(input, c10::nullopt, align_corners_, scales_); + } + + out_shape_ = output.sizes().vec(); + } else { + TRTORCH_ASSERT((size_.size() != 0 && out_shape_.size() != 0), "Attempted to use interpolate plugin without providing output size while use_scales=false"); + } + } InterpolatePlugin::InterpolatePlugin(const char* data, size_t length) { std::istringstream data_stream(std::string(data, length)); @@ -42,6 +64,11 @@ InterpolatePlugin::InterpolatePlugin(const char* data, size_t length) { input_archive.read("size", value); size_ = value.toIntVector(); } + { + torch::IValue value; + input_archive.read("scales", value); + scales_ = 
value.toDoubleVector(); + } { torch::IValue value; input_archive.read("mode", value); @@ -52,6 +79,11 @@ InterpolatePlugin::InterpolatePlugin(const char* data, size_t length) { input_archive.read("align_corners", value); align_corners_ = value.toBool(); } + { + torch::IValue value; + input_archive.read("use_scales", value); + use_scales_ = value.toBool(); + } } std::vector InterpolatePlugin::getInputShape() { @@ -83,7 +115,7 @@ const char* InterpolatePlugin::getPluginNamespace() const { } nvinfer1::IPluginV2DynamicExt* InterpolatePlugin::clone() const { - return new InterpolatePlugin(in_shape_, out_shape_, size_, mode_, align_corners_); + return new InterpolatePlugin(in_shape_, out_shape_, size_, scales_, mode_, align_corners_, use_scales_); } nvinfer1::DimsExprs InterpolatePlugin::getOutputDimensions( @@ -93,9 +125,27 @@ nvinfer1::DimsExprs InterpolatePlugin::getOutputDimensions( nvinfer1::IExprBuilder& exprBuilder) { nvinfer1::DimsExprs output(inputs[0]); - for (unsigned int i = 0; i < out_shape_.size(); i++) { - output.d[i] = exprBuilder.constant(out_shape_[i]); - } + // TODO: This should enable the case of using this plugin with dynamic shape, scale factor and align corners == true to cover + // the different implementations between PyTorch and TRT. However TRT currently does not support doubles + // for ExprBuilder constants. Once that is possible enable this code and remove the code in the constructor + // if (use_scales_) { + // auto input_dimsexprs = inputs[0]; + // output.d[0] = exprBuilder.operation(DimensionOperation::kMAX, *input_dimsexprs.d[0], *exprBuilder.constant(0)); + // if (mode_ == "linear") { + // output.d[1] = exprBuilder.operation(DimensionOperation::kPROD, *input_dimsexprs.d[1], *exprBuilder.constant(scales_[1])); + // } else if (mode_ == "bilinear") { + // output.d[1] = exprBuilder.operation(DimensionOperation::kPROD, *input_dimsexprs.d[1], *exprBuilder.constant(scales_[1])); + // output.d[2] = exprBuilder.operation(DimensionOperation::kPROD, *input_dimsexprs.d[2], *exprBuilder.constant(scales_[2])); + // } else if (mode_ == "trilinear") { + // output.d[1] = exprBuilder.operation(DimensionOperation::kPROD, *input_dimsexprs.d[1], *exprBuilder.constant(scales_[1])); + // output.d[2] = exprBuilder.operation(DimensionOperation::kPROD, *input_dimsexprs.d[2], *exprBuilder.constant(scales_[2])); + // output.d[3] = exprBuilder.operation(DimensionOperation::kPROD, *input_dimsexprs.d[3], *exprBuilder.constant(scales_[3])); + // } + // } else { + for (unsigned int i = 0; i < out_shape_.size(); i++) { + output.d[i] = exprBuilder.constant(out_shape_[i]); + } + //} return output; } @@ -131,8 +181,10 @@ std::string InterpolatePlugin::serializeToString() const { output_archive.write("in_shape", torch::IValue(in_shape_)); output_archive.write("out_shape", torch::IValue(out_shape_)); output_archive.write("size", torch::IValue(size_)); + output_archive.write("scales", torch::IValue(scales_)); output_archive.write("mode", torch::IValue(mode_)); output_archive.write("align_corners", torch::IValue(align_corners_)); + output_archive.write("use_scales", torch::IValue(use_scales_)); std::ostringstream data_str; output_archive.save_to(data_str); @@ -201,14 +253,24 @@ int InterpolatePlugin::enqueue( cudaStreamWaitEvent(torch_stream.stream(), event, 0); - if (mode_ == "linear") { - at::upsample_linear1d_out(output, input, {size_[0]}, align_corners_); - } else if (mode_ == "bilinear") { - at::upsample_bilinear2d_out(output, input, {size_[0], size_[1]}, align_corners_); - } else if (mode_ == 
"trilinear") { - at::upsample_trilinear3d_out(output, input, {size_[0], size_[1], size_[2]}, align_corners_); - } else if (mode_ == "adaptive_pool2d") { - at::adaptive_avg_pool2d_out(output, input, {size_[0], size_[1]}); + if (use_scales_) { + if (mode_ == "linear") { + at::upsample_linear1d_out(output, input, {}, align_corners_, scales_[0]); + } else if (mode_ == "bilinear") { + at::upsample_bilinear2d_out(output, input, {}, align_corners_, scales_[0], scales_[1]); + } else if (mode_ == "trilinear") { + at::upsample_trilinear3d_out(output, input, {}, align_corners_, scales_[0], scales_[1], scales_[2]); + } + } else { + if (mode_ == "linear") { + at::upsample_linear1d_out(output, input, {size_[0]}, align_corners_); + } else if (mode_ == "bilinear") { + at::upsample_bilinear2d_out(output, input, {size_[0], size_[1]}, align_corners_); + } else if (mode_ == "trilinear") { + at::upsample_trilinear3d_out(output, input, {size_[0], size_[1], size_[2]}, align_corners_); + } else if (mode_ == "adaptive_pool2d") { + at::adaptive_avg_pool2d_out(output, input, {size_[0], size_[1]}); + } } cudaEvent_t torch_event; @@ -234,11 +296,27 @@ int InterpolatePlugin::enqueue( stream); cudaStreamSynchronize(stream); - at::Tensor input = at::from_blob((void*)input_blob, util::toVec(inputDesc->dims), tensor_options_); + at::Tensor input = at::from_blob((void*)input_blob, util::toVec(inputDesc->dims), tensor_options_); at::Tensor output; - if (mode_ == "adaptive_pool2d") { - output = at::adaptive_avg_pool2d(input, {size_[0], size_[1]}); + if (use_scales_) { + if (mode_ == "linear") { + output = at::upsample_linear1d(input, c10::nullopt, align_corners_, {scales_[0]}); + } else if (mode_ == "bilinear") { + output = at::upsample_bilinear2d(input, c10::nullopt, align_corners_, scales_); + } else if (mode_ == "trilinear") { + output = at::upsample_trilinear3d(input, c10::nullopt, align_corners_, scales_); + } + } else { + if (mode_ == "linear") { + output = at::upsample_linear1d(input, {size_[0]}, align_corners_); + } else if (mode_ == "bilinear") { + output = at::upsample_bilinear2d(input, {size_[0], size_[1]}, align_corners_); + } else if (mode_ == "trilinear") { + output = at::upsample_trilinear3d(input, {size_[0], size_[1], size_[2]}, align_corners_); + } else if (mode_ == "adaptive_pool2d") { + output = at::adaptive_avg_pool2d(input, {size_[0], size_[1]}); + } } cudaMemcpyAsync( @@ -277,10 +355,12 @@ InterpolatePlugin* InterpolatePluginCreator::createPlugin( std::vector in_shape, std::vector out_shape, std::vector size, + std::vector scales, std::string mode, - bool align_corners) { + bool align_corners, + bool use_scales) { name_ = name; - return new InterpolatePlugin(in_shape, out_shape, size, mode, align_corners); + return new InterpolatePlugin(in_shape, out_shape, size, scales, mode, align_corners, use_scales); } nvinfer1::IPluginV2* InterpolatePluginCreator::deserializePlugin( diff --git a/core/conversion/converters/impl/plugins/interpolate_plugin.h b/core/conversion/converters/impl/plugins/interpolate_plugin.h index 746ed14238..bc8ea11c9b 100644 --- a/core/conversion/converters/impl/plugins/interpolate_plugin.h +++ b/core/conversion/converters/impl/plugins/interpolate_plugin.h @@ -31,8 +31,10 @@ class InterpolatePlugin : public nvinfer1::IPluginV2DynamicExt { std::vector in_shape_; std::vector out_shape_; std::vector size_; + std::vector scales_; std::string mode_; bool align_corners_; + bool use_scales_; protected: // To prevent compiler warnings @@ -49,8 +51,10 @@ class InterpolatePlugin : public 
nvinfer1::IPluginV2DynamicExt { std::vector in_shape, std::vector out_shape, std::vector size, + std::vector scales, std::string mode, - bool align_corners); + bool align_corners, + bool use_scales); InterpolatePlugin(const char* data, size_t length); @@ -136,12 +140,14 @@ class InterpolatePluginCreator : public nvinfer1::IPluginCreator { nvinfer1::IPluginV2* createPlugin(const char* name, const nvinfer1::PluginFieldCollection* fc) override; InterpolatePlugin* createPlugin( - const char* name, - std::vector in_shape, - std::vector out_shape, - std::vector size, - std::string mode, - bool align_corners); + const char* name, + std::vector in_shape, + std::vector out_shape, + std::vector size, + std::vector scales, + std::string mode, + bool align_corners, + bool use_scales); nvinfer1::IPluginV2* deserializePlugin(const char* name, const void* serialData, size_t serialLength) override; diff --git a/core/conversion/converters/impl/pooling.cpp b/core/conversion/converters/impl/pooling.cpp index 6e32f69acb..4da1d411fa 100644 --- a/core/conversion/converters/impl/pooling.cpp +++ b/core/conversion/converters/impl/pooling.cpp @@ -317,7 +317,7 @@ auto pooling_registrations TRTORCH_UNUSED = auto creator = new plugins::InterpolatePluginCreator(); auto plugin = creator->createPlugin( - "adaptive_pool2d", in_shape, out_shape, out_size, std::string("adaptive_pool2d"), false); + "adaptive_pool2d", in_shape, out_shape, out_size, {}, std::string("adaptive_pool2d"), false, false); auto pooling_layer = ctx->net->addPluginV2(reinterpret_cast(&in), 1, *plugin); diff --git a/tests/core/conversion/converters/test_interpolate.cpp b/tests/core/conversion/converters/test_interpolate.cpp index a3d3aee5e5..59d97312ab 100644 --- a/tests/core/conversion/converters/test_interpolate.cpp +++ b/tests/core/conversion/converters/test_interpolate.cpp @@ -5,7 +5,7 @@ #include "torch/csrc/jit/ir/irparser.h" -#define ATEN_UPSAMPLE_TESTS(name, graph_src, input_shape) \ +#define ATEN_INTERPOLATE_TESTS(name, graph_src, input_shape) \ TEST(Converters, name##StaticConvertsCorrectly) { \ const auto graph = graph_src; \ \ @@ -44,7 +44,27 @@ ASSERT_TRUE(trtorch::tests::util::almostEqual(jit_results[0], trt, 2e-6)); \ } -ATEN_UPSAMPLE_TESTS(ATenUpsampleNearest1dOutputSize, +#define ATEN_INTERPOLATE_STATIC_ONLY_TEST(name, graph_src, input_shape) \ + TEST(Converters, name##StaticConvertsCorrectly) { \ + const auto graph = graph_src; \ + \ + auto g = std::make_shared(); \ + torch::jit::parseIR(graph, &*g); \ + \ + auto in = at::randint(1, 10, input_shape, {at::kCUDA}); \ + auto jit_in = at::clone(in); \ + auto params = trtorch::core::conversion::get_named_params(g->inputs(), {}); \ + auto jit_results = trtorch::tests::util::RunGraph(g, params, {jit_in}); \ + \ + auto trt_in = at::clone(in); \ + params = trtorch::core::conversion::get_named_params(g->inputs(), {}); \ + \ + auto trt_results = trtorch::tests::util::RunGraphEngine(g, params, {trt_in}); \ + auto trt = trt_results[0].reshape(jit_results[0].sizes()); \ + ASSERT_TRUE(trtorch::tests::util::almostEqual(jit_results[0], trt, 2e-6)); \ + } + +ATEN_INTERPOLATE_TESTS(ATenUpsampleNearest1dOutputSize, R"IR( graph(%0 : Tensor): %2 : int = prim::Constant[value=10]() @@ -54,7 +74,7 @@ ATEN_UPSAMPLE_TESTS(ATenUpsampleNearest1dOutputSize, return (%5))IR", std::vector({10, 2, 2})); -ATEN_UPSAMPLE_TESTS(ATenUpsampleNearest1dScales, +ATEN_INTERPOLATE_TESTS(ATenUpsampleNearest1dScales, R"IR( graph(%0 : Tensor): %1 : int = prim::Constant[value=8]() @@ -64,7 +84,7 @@ 
ATEN_UPSAMPLE_TESTS(ATenUpsampleNearest1dScales, return (%5))IR", std::vector({10, 2, 2})); -ATEN_UPSAMPLE_TESTS(ATenUpsampleNearest1dVecScaleFactors, +ATEN_INTERPOLATE_TESTS(ATenUpsampleNearest1dVecScaleFactors, R"IR( graph(%0 : Tensor): %2 : None = prim::Constant() @@ -74,7 +94,7 @@ ATEN_UPSAMPLE_TESTS(ATenUpsampleNearest1dVecScaleFactors, return (%5))IR", std::vector({10, 2, 2})); -ATEN_UPSAMPLE_TESTS(ATenUpsampleNearest2dOutputSize, +ATEN_INTERPOLATE_TESTS(ATenUpsampleNearest2dOutputSize, R"IR( graph(%0 : Tensor): %2 : int = prim::Constant[value=10]() @@ -84,7 +104,7 @@ ATEN_UPSAMPLE_TESTS(ATenUpsampleNearest2dOutputSize, return (%5))IR", std::vector({10, 2, 2, 2})); -ATEN_UPSAMPLE_TESTS(ATenUpsampleNearest2dScales, +ATEN_INTERPOLATE_TESTS(ATenUpsampleNearest2dScales, R"IR( graph(%0 : Tensor): %2 : int = prim::Constant[value=8]() @@ -94,7 +114,7 @@ ATEN_UPSAMPLE_TESTS(ATenUpsampleNearest2dScales, return (%5))IR", std::vector({10, 2, 2, 2})); -ATEN_UPSAMPLE_TESTS(ATenUpsampleNearest2dVecScaleFactors, +ATEN_INTERPOLATE_TESTS(ATenUpsampleNearest2dVecScaleFactors, R"IR( graph(%0 : Tensor): %2 : None = prim::Constant() @@ -104,7 +124,7 @@ ATEN_UPSAMPLE_TESTS(ATenUpsampleNearest2dVecScaleFactors, return (%5))IR", std::vector({10, 2, 2, 2})); -ATEN_UPSAMPLE_TESTS(ATenUpsampleNearest3dOutputSize, +ATEN_INTERPOLATE_TESTS(ATenUpsampleNearest3dOutputSize, R"IR( graph(%0 : Tensor): %2 : int = prim::Constant[value=10]() @@ -114,7 +134,7 @@ ATEN_UPSAMPLE_TESTS(ATenUpsampleNearest3dOutputSize, return (%5))IR", std::vector({10, 2, 2, 2, 2})); -ATEN_UPSAMPLE_TESTS(ATenUpsampleNearest3dScales, +ATEN_INTERPOLATE_TESTS(ATenUpsampleNearest3dScales, R"IR( graph(%0 : Tensor): %2 : int = prim::Constant[value=8]() @@ -124,7 +144,7 @@ ATEN_UPSAMPLE_TESTS(ATenUpsampleNearest3dScales, return (%5))IR", std::vector({10, 2, 2, 2, 2})); -ATEN_UPSAMPLE_TESTS(ATenUpsampleNearest3dVecScaleFactors, +ATEN_INTERPOLATE_TESTS(ATenUpsampleNearest3dVecScaleFactors, R"IR( graph(%0 : Tensor): %2 : None = prim::Constant() @@ -134,7 +154,7 @@ ATEN_UPSAMPLE_TESTS(ATenUpsampleNearest3dVecScaleFactors, return (%5))IR", std::vector({10, 2, 2, 2, 2})); -ATEN_UPSAMPLE_TESTS(ATenUpsampleLinear1dOutputSizeWithAlignCorners, +ATEN_INTERPOLATE_TESTS(ATenUpsampleLinear1dOutputSizeWithAlignCorners, R"IR( graph(%0 : Tensor): %2 : int = prim::Constant[value=10]() @@ -145,7 +165,7 @@ ATEN_UPSAMPLE_TESTS(ATenUpsampleLinear1dOutputSizeWithAlignCorners, return (%6))IR", std::vector({10, 2, 2})); -ATEN_UPSAMPLE_TESTS(ATenUpsampleLinear1dOutputSizeWithoutAlignCorners, +ATEN_INTERPOLATE_TESTS(ATenUpsampleLinear1dOutputSizeWithoutAlignCorners, R"IR( graph(%0 : Tensor): %2 : int = prim::Constant[value=10]() @@ -156,7 +176,7 @@ ATEN_UPSAMPLE_TESTS(ATenUpsampleLinear1dOutputSizeWithoutAlignCorners, return (%6))IR", std::vector({10, 2, 2})); -ATEN_UPSAMPLE_TESTS(ATenUpsampleLinear1dScalesWithoutAlignCorners, +ATEN_INTERPOLATE_TESTS(ATenUpsampleLinear1dScalesWithoutAlignCorners, R"IR( graph(%0 : Tensor): %2 : int = prim::Constant[value=8]() @@ -167,40 +187,40 @@ ATEN_UPSAMPLE_TESTS(ATenUpsampleLinear1dScalesWithoutAlignCorners, return (%6))IR", std::vector({10, 2, 2})); -// ATEN_UPSAMPLE_TESTS(ATenUpsampleLinear1dScalesWithAlignCorners, -// R"IR( -// graph(%0 : Tensor): -// %2 : int = prim::Constant[value=8]() -// %3 : int[] = prim::ListConstruct(%2) -// %4 : bool = prim::Constant[value=1]() -// %5 : float = prim::Constant[value=4.0]() -// %6 : Tensor = aten::upsample_linear1d(%0, %3, %4, %5) -// return (%6))IR", -// std::vector({10, 2, 2})); 
+ATEN_INTERPOLATE_STATIC_ONLY_TEST(ATenUpsampleLinear1dScalesWithAlignCorners, + R"IR( + graph(%0 : Tensor): + %2 : int = prim::Constant[value=8]() + %3 : int[] = prim::ListConstruct(%2) + %4 : bool = prim::Constant[value=1]() + %5 : float = prim::Constant[value=4.0]() + %6 : Tensor = aten::upsample_linear1d(%0, %3, %4, %5) + return (%6))IR", +std::vector({10, 2, 2})); -ATEN_UPSAMPLE_TESTS(ATenUpsampleLinear1dVecScaleFactorsWithoutAlignCorners, +ATEN_INTERPOLATE_TESTS(ATenUpsampleLinear1dVecScaleFactorsWithoutAlignCorners, R"IR( graph(%0 : Tensor): %3 : None = prim::Constant() %4 : bool = prim::Constant[value=0]() %5 : float = prim::Constant[value=4.0]() %6 : float[] = prim::ListConstruct(%5) - %6 : Tensor = aten::upsample_linear1d(%0, %3, %4, %6) - return (%6))IR", + %7 : Tensor = aten::upsample_linear1d(%0, %3, %4, %6) + return (%7))IR", std::vector({10, 2, 2})); -// ATEN_UPSAMPLE_TESTS(ATenUpsampleLinear1dVecScaleFactorsWithAlignCorners, - // R"IR( - // graph(%0 : Tensor): - // %3 : None = prim::Constant() - // %4 : bool = prim::Constant[value=1]() - // %5 : float = prim::Constant[value=4.0]() - // %6 : float[] = prim::ListConstruct(%5) - // %6 : Tensor = aten::upsample_linear1d(%0, %3, %4, %6) - // return (%6))IR", - // std::vector({10, 2, 2})); +ATEN_INTERPOLATE_STATIC_ONLY_TEST(ATenUpsampleLinear1dVecScaleFactorsWithAlignCorners, + R"IR( + graph(%0 : Tensor): + %3 : None = prim::Constant() + %4 : bool = prim::Constant[value=1]() + %5 : float = prim::Constant[value=4.0]() + %6 : float[] = prim::ListConstruct(%5) + %7 : Tensor = aten::upsample_linear1d(%0, %3, %4, %6) + return (%7))IR", + std::vector({10, 2, 2})); -ATEN_UPSAMPLE_TESTS(ATenUpsampleBilinear2dOutputSizeWithAlignCorners, +ATEN_INTERPOLATE_TESTS(ATenUpsampleBilinear2dOutputSizeWithAlignCorners, R"IR( graph(%0 : Tensor): %2 : int = prim::Constant[value=10]() @@ -211,7 +231,7 @@ ATEN_UPSAMPLE_TESTS(ATenUpsampleBilinear2dOutputSizeWithAlignCorners, return (%6))IR", std::vector({10, 2, 2, 2})); -ATEN_UPSAMPLE_TESTS(ATenUpsampleBilinear2dOutputSizeWithoutAlignCorners, +ATEN_INTERPOLATE_TESTS(ATenUpsampleBilinear2dOutputSizeWithoutAlignCorners, R"IR( graph(%0 : Tensor): %2 : int = prim::Constant[value=10]() @@ -222,7 +242,7 @@ ATEN_UPSAMPLE_TESTS(ATenUpsampleBilinear2dOutputSizeWithoutAlignCorners, return (%6))IR", std::vector({10, 2, 2, 2})); -ATEN_UPSAMPLE_TESTS(ATenUpsampleBilinear2dScalesWithoutAlignCorners, +ATEN_INTERPOLATE_TESTS(ATenUpsampleBilinear2dScalesWithoutAlignCorners, R"IR( graph(%0 : Tensor): %2 : int = prim::Constant[value=8]() @@ -233,40 +253,40 @@ ATEN_UPSAMPLE_TESTS(ATenUpsampleBilinear2dScalesWithoutAlignCorners, return (%6))IR", std::vector({10, 2, 2, 2})); -// ATEN_UPSAMPLE_TESTS(ATenUpsampleBilinear2dScalesWithAlignCorners, - // R"IR( - // graph(%0 : Tensor): - // %2 : int = prim::Constant[value=8]() - // %3 : int[] = prim::ListConstruct(%2, %2) - // %4 : bool = prim::Constant[value=1]() - // %5 : float = prim::Constant[value=4.0]() - // %6 : Tensor = aten::upsample_bilinear2d(%0, %3, %4, %5, %5) - // return (%6))IR", -// std::vector({10, 2, 2, 2})); +ATEN_INTERPOLATE_STATIC_ONLY_TEST(ATenUpsampleBilinear2dScalesWithAlignCorners, + R"IR( + graph(%0 : Tensor): + %2 : int = prim::Constant[value=8]() + %3 : int[] = prim::ListConstruct(%2, %2) + %4 : bool = prim::Constant[value=1]() + %5 : float = prim::Constant[value=4.0]() + %6 : Tensor = aten::upsample_bilinear2d(%0, %3, %4, %5, %5) + return (%6))IR", + std::vector({10, 2, 2, 2})); 
-ATEN_UPSAMPLE_TESTS(ATenUpsampleBilinear2dVecScaleFactorsWithoutAlignCorners, +ATEN_INTERPOLATE_TESTS(ATenUpsampleBilinear2dVecScaleFactorsWithoutAlignCorners, R"IR( graph(%0 : Tensor): %3 : None = prim::Constant() %4 : bool = prim::Constant[value=0]() %5 : float = prim::Constant[value=4.0]() %6 : float[] = prim::ListConstruct(%5, %5) - %6 : Tensor = aten::upsample_bilinear2d(%0, %3, %4, %6) - return (%6))IR", + %7 : Tensor = aten::upsample_bilinear2d(%0, %3, %4, %6) + return (%7))IR", std::vector({10, 2, 2, 2})); -// ATEN_UPSAMPLE_TESTS(ATenUpsampleBilinear2dVecScaleFactorsWithAlignCorners, -// R"IR( -// graph(%0 : Tensor): -// %3 : None = prim::Constant() -// %4 : bool = prim::Constant[value=1]() -// %5 : float = prim::Constant[value=4.0]() -// %6 : float[] = prim::ListConstruct(%5, %5) -// %6 : Tensor = aten::upsample_bilinear2d(%0, %3, %4, %6) -// return (%6))IR", -// std::vector({10, 2, 2, 2})); +ATEN_INTERPOLATE_STATIC_ONLY_TEST(ATenUpsampleBilinear2dVecScaleFactorsWithAlignCorners, + R"IR( + graph(%0 : Tensor): + %3 : None = prim::Constant() + %4 : bool = prim::Constant[value=1]() + %5 : float = prim::Constant[value=4.0]() + %6 : float[] = prim::ListConstruct(%5, %5) + %7 : Tensor = aten::upsample_bilinear2d(%0, %3, %4, %6) + return (%7))IR", + std::vector({10, 2, 2, 2})); -ATEN_UPSAMPLE_TESTS(ATenUpsampleTrilinear3dOutputSizeWithAlignCorners, +ATEN_INTERPOLATE_TESTS(ATenUpsampleTrilinear3dOutputSizeWithAlignCorners, R"IR( graph(%0 : Tensor): %2 : int = prim::Constant[value=10]() @@ -277,7 +297,7 @@ ATEN_UPSAMPLE_TESTS(ATenUpsampleTrilinear3dOutputSizeWithAlignCorners, return (%6))IR", std::vector({10, 2, 2, 2, 2})); -ATEN_UPSAMPLE_TESTS(ATenUpsampleTrilinear3dOutputSizeWithoutAlignCorners, +ATEN_INTERPOLATE_TESTS(ATenUpsampleTrilinear3dOutputSizeWithoutAlignCorners, R"IR( graph(%0 : Tensor): %2 : int = prim::Constant[value=10]() @@ -288,7 +308,7 @@ ATEN_UPSAMPLE_TESTS(ATenUpsampleTrilinear3dOutputSizeWithoutAlignCorners, return (%6))IR", std::vector({10, 2, 2, 2, 2})); -ATEN_UPSAMPLE_TESTS(ATenUpsampleTrilinear3dScalesWithoutAlignCorners, +ATEN_INTERPOLATE_TESTS(ATenUpsampleTrilinear3dScalesWithoutAlignCorners, R"IR( graph(%0 : Tensor): %2 : int = prim::Constant[value=8]() @@ -299,35 +319,35 @@ ATEN_UPSAMPLE_TESTS(ATenUpsampleTrilinear3dScalesWithoutAlignCorners, return (%6))IR", std::vector({10, 2, 2, 2, 2})); -// ATEN_UPSAMPLE_TESTS(ATenUpsampleTrilinear3dScalesWithAlignCorners, - // R"IR( - // graph(%0 : Tensor): - // %2 : int = prim::Constant[value=8]() - // %3 : int[] = prim::ListConstruct(%2, %2) - // %4 : bool = prim::Constant[value=1]() - // %5 : float = prim::Constant[value=4.0]() - // %6 : Tensor = aten::upsample_bilinear2d(%0, %3, %4, %5, %5) - // return (%6))IR", -// std::vector({10, 2, 2, 2})); +ATEN_INTERPOLATE_STATIC_ONLY_TEST(ATenUpsampleTrilinear3dScalesWithAlignCorners, + R"IR( + graph(%0 : Tensor): + %2 : int = prim::Constant[value=8]() + %3 : int[] = prim::ListConstruct(%2, %2, %2) + %4 : bool = prim::Constant[value=1]() + %5 : float = prim::Constant[value=4.0]() + %6 : Tensor = aten::upsample_trilinear3d(%0, %3, %4, %5, %5, %5) + return (%6))IR", + std::vector({10, 2, 2, 2, 2})); -ATEN_UPSAMPLE_TESTS(ATenUpsampleTrilinear3dVecScaleFactorsWithoutAlignCorners, +ATEN_INTERPOLATE_TESTS(ATenUpsampleTrilinear3dVecScaleFactorsWithoutAlignCorners, R"IR( graph(%0 : Tensor): %3 : None = prim::Constant() %4 : bool = prim::Constant[value=0]() %5 : float = prim::Constant[value=4.0]() %6 : float[] = prim::ListConstruct(%5, %5, %5) - %6 : Tensor = 
aten::upsample_trilinear3d(%0, %3, %4, %6) - return (%6))IR", + %7 : Tensor = aten::upsample_trilinear3d(%0, %3, %4, %6) + return (%7))IR", std::vector({10, 2, 2, 2, 2})); -// ATEN_UPSAMPLE_TESTS(ATenUpsampleTrilinear3dVecScaleFactorsWithAlignCorners, -// R"IR( -// graph(%0 : Tensor): -// %3 : None = prim::Constant() -// %4 : bool = prim::Constant[value=1]() -// %5 : float = prim::Constant[value=4.0]() -// %6 : float[] = prim::ListConstruct(%5, %5, %5) -// %6 : Tensor = aten::upsample_trilinear3d(%0, %3, %4, %6) -// return (%6))IR", -// std::vector({10, 2, 2, 2})); \ No newline at end of file +ATEN_INTERPOLATE_STATIC_ONLY_TEST(ATenUpsampleTrilinear3dVecScaleFactorsWithAlignCorners, + R"IR( + graph(%0 : Tensor): + %3 : None = prim::Constant() + %4 : bool = prim::Constant[value=1]() + %5 : float = prim::Constant[value=4.0]() + %6 : float[] = prim::ListConstruct(%5, %5, %5) + %7 : Tensor = aten::upsample_trilinear3d(%0, %3, %4, %6) + return (%7))IR", + std::vector({10, 2, 2, 2, 2})); \ No newline at end of file diff --git a/tests/util/util.cpp b/tests/util/util.cpp index 17169bd229..89720728c2 100644 --- a/tests/util/util.cpp +++ b/tests/util/util.cpp @@ -16,7 +16,7 @@ bool checkRtol(const at::Tensor& diff, const std::vector inputs, flo } bool almostEqual(const at::Tensor& a, const at::Tensor& b, float threshold) { - LOG_DEBUG(a << std::endl << b << std::endl); + LOG_GRAPH(a << std::endl << b << std::endl); auto a_float = a.toType(at::kFloat); auto b_float = b.toType(at::kFloat); return checkRtol(a_float - b_float, {a_float, b_float}, threshold);
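
For reference, the scale-factor-with-align_corners path added above determines the plugin's static output shape by running the matching ATen upsample op once on a dummy tensor inside the InterpolatePlugin constructor. The sketch below illustrates that idea in isolation; it is only an illustrative example, not part of the patch: the helper name, the input sizes, and the scale values are made up here, while the at::upsample_bilinear2d overload is the same one the patch calls.

#include <ATen/ATen.h>
#include <iostream>
#include <vector>

// Sketch: infer the output shape the interpolate plugin would report when
// use_scales=true by evaluating the ATen op once on a dummy tensor of the
// input shape (mirrors the logic added to InterpolatePlugin's constructor).
std::vector<int64_t> infer_bilinear_out_shape(
    const std::vector<int64_t>& in_shape,  // e.g. {N, C, H, W}
    const std::vector<double>& scales,     // {scale_h, scale_w}
    bool align_corners) {
  at::Tensor dummy = at::zeros(in_shape, {at::kCPU});  // CPU is enough for shape inference
  at::Tensor out = at::upsample_bilinear2d(dummy, c10::nullopt, align_corners, scales);
  return out.sizes().vec();
}

int main() {
  // The output size is computed from the scale factor regardless of align_corners,
  // so 10 * 2.5 gives a 25-element spatial dimension here.
  auto out_shape = infer_bilinear_out_shape({1, 3, 10, 10}, {2.5, 2.5}, /*align_corners=*/true);
  for (auto d : out_shape) {
    std::cout << d << " ";  // expected: 1 3 25 25
  }
  std::cout << std::endl;
  return 0;
}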