diff --git a/core/conversion/converters/impl/interpolate.cpp b/core/conversion/converters/impl/interpolate.cpp
index 6d83b1f8f1..3adcd42a56 100755
--- a/core/conversion/converters/impl/interpolate.cpp
+++ b/core/conversion/converters/impl/interpolate.cpp
@@ -15,24 +15,29 @@ auto interpolate_registrations = RegisterNodeConversionPatterns()
     .pattern({
         "aten::upsample_nearest1d(Tensor self, int[1] output_size, float? scales=None) -> (Tensor)",
         [](ConversionCtx* ctx, const torch::jit::Node*n, args& args) -> bool {
-            TRTORCH_ASSERT(args[0].IValue()->isTensor(), "Input expected to be of type Tensor");
-
             auto in = args[0].ITensor();
             auto in_shape = util::toVec(in->getDimensions());
 
             // Case 1: user uses output size and not scales
             if (!args[1].IValue()->isNone() && args[2].IValue()->isNone()) {
-                auto output_size = util::toDims(args[1].unwrapToIntList());
+                auto out_size = util::toVec(util::toDims(args[1].unwrapToIntList()));
 
-                TRTORCH_ASSERT(output_size.nbDims == 1, "aten::upsample_nearest1d input Tensor and output size dimension mismatch");
+                TRTORCH_ASSERT(out_size.size() == 1, "aten::upsample_nearest1d input Tensor and output size dimension mismatch");
+
+                auto out_shape = in_shape;
+                std::copy(out_size.begin(), out_size.end(), out_shape.begin() + (in_shape.size() - out_size.size()));
 
                 auto resize_layer = ctx->net->addResize(*in);
                 TRTORCH_CHECK(resize_layer, "Unable to create interpolation (resizing) layer from node" << *n);
 
-                resize_layer->setOutputDimensions(output_size);
+                resize_layer->setOutputDimensions(util::toDims(out_shape));
                 resize_layer->setResizeMode(nvinfer1::ResizeMode::kNEAREST);
+                resize_layer->setName(util::node_info(n).c_str());
+
+                auto layer_output = ctx->AssociateValueAndTensor(n->outputs()[0], resize_layer->getOutput(0));
+                LOG_DEBUG("Output tensor shape: " << layer_output->getDimensions());
             } else {
-                LOG_DEBUG("scale factor parameters not supported yet.");
+                LOG_DEBUG("scale factor parameter not supported yet.");
             }
 
             return true;
@@ -40,24 +45,27 @@ auto interpolate_registrations = RegisterNodeConversionPatterns()
     }).pattern({
         "aten::upsample_nearest2d(Tensor self, int[2] output_size, float? scales_h=None, float? scales_w=None) -> (Tensor)",
         [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
-            // std::raise(SIGINT);
-
-            TRTORCH_ASSERT(args[0].IValue()->isTensor(), "Input expected to be of type Tensor");
-
             auto in = args[0].ITensor();
             auto in_shape = util::toVec(in->getDimensions());
 
             // Case 1: user uses output_size and not scales_h, scales_w
             if (!args[1].IValue()->isNone() && args[2].IValue()->isNone() && args[3].IValue()->isNone()){
-                auto output_size = util::toDims(args[1].unwrapToIntList());
-
-                TRTORCH_ASSERT( (output_size.nbDims == 1 || output_size.nbDims == 2), "aten::upsample_nearest2d input Tensor and output size dimension mismatch");
+                auto out_size = util::toVec(util::toDims(args[1].unwrapToIntList()));
+                TRTORCH_ASSERT(out_size.size() == 2, "aten::upsample_nearest2d input Tensor and output size dimension mismatch");
+
+                auto out_shape = in_shape;
+                std::copy(out_size.begin(), out_size.end(), out_shape.begin() + (in_shape.size() - out_size.size()));
+
                 auto resize_layer = ctx->net->addResize(*in);
                 TRTORCH_CHECK(resize_layer, "Unable to create interpolation (resizing) layer from node" << *n);
 
-                resize_layer->setOutputDimensions(output_size);
+                resize_layer->setOutputDimensions(util::toDims(out_shape));
                 resize_layer->setResizeMode(nvinfer1::ResizeMode::kNEAREST);
+                resize_layer->setName(util::node_info(n).c_str());
+
+                auto layer_output = ctx->AssociateValueAndTensor(n->outputs()[0], resize_layer->getOutput(0));
+                LOG_DEBUG("Output tensor shape: " << layer_output->getDimensions());
             } else {
                 LOG_DEBUG("scale factor parameters not supported yet.");
             }
 
@@ -67,22 +75,27 @@ auto interpolate_registrations = RegisterNodeConversionPatterns()
     }).pattern({
         "aten::upsample_nearest3d(Tensor self, int[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> (Tensor)",
         [](ConversionCtx* ctx, const torch::jit::Node*n, args& args) -> bool {
-            TRTORCH_ASSERT(args[0].IValue()->isTensor(), "Input expected to be of type Tensor");
-
             auto in = args[0].ITensor();
             auto in_shape = util::toVec(in->getDimensions());
 
             // Case 1: user uses output size and not scales_d, scales_h, scales_w
             if (!args[1].IValue()->isNone() && args[2].IValue()->isNone() && args[3].IValue()->isNone() && args[4].IValue()->isNone()) {
-                auto output_size = util::toDims(args[1].unwrapToIntList());
+                auto out_size = util::toVec(util::toDims(args[1].unwrapToIntList()));
 
-                TRTORCH_ASSERT( (output_size.nbDims == 1 || output_size.nbDims == 3), "aten::upsample_nearest3d input Tensor and output size dimension mismatch");
+                TRTORCH_ASSERT(out_size.size() == 3, "aten::upsample_nearest3d input Tensor and output size dimension mismatch");
+
+                auto out_shape = in_shape;
+                std::copy(out_size.begin(), out_size.end(), out_shape.begin() + (in_shape.size() - out_size.size()));
 
                 auto resize_layer = ctx->net->addResize(*in);
                 TRTORCH_CHECK(resize_layer, "Unable to create interpolation (resizing) layer from node" << *n);
 
-                resize_layer->setOutputDimensions(output_size);
+                resize_layer->setOutputDimensions(util::toDims(out_shape));
                 resize_layer->setResizeMode(nvinfer1::ResizeMode::kNEAREST);
+                resize_layer->setName(util::node_info(n).c_str());
+
+                auto layer_output = ctx->AssociateValueAndTensor(n->outputs()[0], resize_layer->getOutput(0));
+                LOG_DEBUG("Output tensor shape: " << layer_output->getDimensions());
             } else {
                 LOG_DEBUG("scale factor parameters not supported yet.");
             }
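
Note: all three converters share the same shape arithmetic. The requested output_size only covers the spatial dims, so it is copied over the trailing entries of the input shape before being handed to resize_layer->setOutputDimensions(). The sketch below isolates that step as plain C++ with no TensorRT dependency; the std::vector<int64_t> shapes and the example NCHW values are assumptions for illustration (in the converter the vectors come from util::toVec and go back through util::toDims).

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    int main() {
        // Assumed example: an NCHW input being resized by aten::upsample_nearest2d.
        std::vector<int64_t> in_shape{1, 3, 16, 16};
        // output_size as the user passes it: one entry per spatial dim.
        std::vector<int64_t> out_size{32, 32};

        // Same std::copy trick as the converters: overwrite only the trailing
        // dims, leaving batch and channel untouched.
        auto out_shape = in_shape;
        std::copy(out_size.begin(), out_size.end(),
                  out_shape.begin() + (in_shape.size() - out_size.size()));

        for (auto d : out_shape) {
            std::cout << d << " ";  // prints: 1 3 32 32
        }
        std::cout << std::endl;
        return 0;
    }

The same code path serves the 1d/2d/3d cases because only out_size.size() changes; the TRTORCH_ASSERT on that size is what keeps the std::copy offset in bounds.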