fix(//core/conversion/converters/impl): code works for interpolate2d/3d, doesn't work for 1d yet

Signed-off-by: Abhiram Iyer <[email protected]>
Signed-off-by: Abhiram Iyer <[email protected]>
abhi-iyer committed Jun 8, 2020
1 parent 7f12160 commit e4cb117
Showing 1 changed file with 37 additions and 19 deletions.
core/conversion/converters/impl/interpolate.cpp: 37 additions & 19 deletions
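The substance of the change: each converter now builds the full output shape by copying the requested output size over the trailing dimensions of the input shape, and passes that merged shape to TensorRT's resize layer instead of handing it the spatial output size alone. A minimal, self-contained sketch of that shape-merge step, using plain std::vector in place of TRTorch's util::toVec/util::toDims helpers (merge_output_shape is an illustrative name, not part of the commit):

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

// Overwrite the trailing dims of in_shape with out_size, keeping the
// leading (batch/channel) dims intact -- the same std::copy pattern used
// in the converters in the diff below.
std::vector<int64_t> merge_output_shape(const std::vector<int64_t>& in_shape,
                                        const std::vector<int64_t>& out_size) {
  std::vector<int64_t> out_shape = in_shape;
  std::copy(out_size.begin(), out_size.end(),
            out_shape.begin() + (in_shape.size() - out_size.size()));
  return out_shape;
}

int main() {
  // e.g. an NCHW input of (1, 3, 32, 32) upsampled to a 64x64 spatial size
  auto out_shape = merge_output_shape({1, 3, 32, 32}, {64, 64});
  for (auto d : out_shape) std::cout << d << " ";  // prints: 1 3 64 64
  std::cout << std::endl;
  return 0;
}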
@@ -15,49 +15,62 @@ auto interpolate_registrations = RegisterNodeConversionPatterns()
.pattern({
"aten::upsample_nearest1d(Tensor self, int[1] output_size, float? scales=None) -> (Tensor)",
[](ConversionCtx* ctx, const torch::jit::Node*n, args& args) -> bool {
TRTORCH_ASSERT(args[0].IValue()->isTensor(), "Input expected to be of type Tensor");
std::cout << "GOT IN HERE!!!!!!" << std::endl;

auto in = args[0].ITensor();
auto in_shape = util::toVec(in->getDimensions());

// Case 1: user uses output size and not scales
if (!args[1].IValue()->isNone() && args[2].IValue()->isNone()) {
- auto output_size = util::toDims(args[1].unwrapToIntList());
+ auto out_size = util::toVec(util::toDims(args[1].unwrapToIntList()));

- TRTORCH_ASSERT(output_size.nbDims == 1, "aten::upsample_nearest1d input Tensor and output size dimension mismatch");
+ TRTORCH_ASSERT(out_size.size() == 1, "aten::upsample_nearest1d input Tensor and output size dimension mismatch");

+ auto out_shape = in_shape;
+ std::copy(out_size.begin(), out_size.end(), out_shape.begin() + (in_shape.size() - out_size.size()));

+ // remove padding that TensorRT adds automatically
+ // out_shape.erase(out_shape.begin(), out_shape.begin()+1);

auto resize_layer = ctx->net->addResize(*in);
TRTORCH_CHECK(resize_layer, "Unable to create interpolation (resizing) layer from node" << *n);

- resize_layer->setOutputDimensions(output_size);
+ resize_layer->setOutputDimensions(util::toDims(out_shape));
resize_layer->setResizeMode(nvinfer1::ResizeMode::kNEAREST);
resize_layer->setName(util::node_info(n).c_str());

auto layer_output = ctx->AssociateValueAndTensor(n->outputs()[0], resize_layer->getOutput(0));
LOG_DEBUG("Output tensor shape: " << layer_output->getDimensions());
} else {
- LOG_DEBUG("scale factor parameters not supported yet.");
+ LOG_DEBUG("scale factor parameter not supported yet.");
}

return true;
}
}).pattern({
"aten::upsample_nearest2d(Tensor self, int[2] output_size, float? scales_h=None, float? scales_w=None) -> (Tensor)",
[](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
// std::raise(SIGINT);

TRTORCH_ASSERT(args[0].IValue()->isTensor(), "Input expected to be of type Tensor");

auto in = args[0].ITensor();
auto in_shape = util::toVec(in->getDimensions());

// Case 1: user uses output_size and not scales_h, scales_w
if (!args[1].IValue()->isNone() && args[2].IValue()->isNone() && args[3].IValue()->isNone()){
- auto output_size = util::toDims(args[1].unwrapToIntList());

- TRTORCH_ASSERT( (output_size.nbDims == 1 || output_size.nbDims == 2), "aten::upsample_nearest2d input Tensor and output size dimension mismatch");
+ auto out_size = util::toVec(util::toDims(args[1].unwrapToIntList()));

+ TRTORCH_ASSERT(out_size.size() == 2, "aten::upsample_nearest2d input Tensor and output size dimension mismatch");

+ auto out_shape = in_shape;
+ std::copy(out_size.begin(), out_size.end(), out_shape.begin() + (in_shape.size() - out_size.size()));

auto resize_layer = ctx->net->addResize(*in);
TRTORCH_CHECK(resize_layer, "Unable to create interpolation (resizing) layer from node" << *n);

- resize_layer->setOutputDimensions(output_size);
+ resize_layer->setOutputDimensions(util::toDims(out_shape));
resize_layer->setResizeMode(nvinfer1::ResizeMode::kNEAREST);
resize_layer->setName(util::node_info(n).c_str());

auto layer_output = ctx->AssociateValueAndTensor(n->outputs()[0], resize_layer->getOutput(0));
LOG_DEBUG("Output tensor shape: " << layer_output->getDimensions());
} else {
LOG_DEBUG("scale factor parameters not supported yet.");
}
@@ -67,22 +80,27 @@
}).pattern({
"aten::upsample_nearest3d(Tensor self, int[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> (Tensor)",
[](ConversionCtx* ctx, const torch::jit::Node*n, args& args) -> bool {
TRTORCH_ASSERT(args[0].IValue()->isTensor(), "Input expected to be of type Tensor");

auto in = args[0].ITensor();
auto in_shape = util::toVec(in->getDimensions());

// Case 1: user uses output size and not scales_d, scales_h, scales_w
if (!args[1].IValue()->isNone() && args[2].IValue()->isNone() && args[3].IValue()->isNone() && args[4].IValue()->isNone()) {
- auto output_size = util::toDims(args[1].unwrapToIntList());
+ auto out_size = util::toVec(util::toDims(args[1].unwrapToIntList()));

- TRTORCH_ASSERT( (output_size.nbDims == 1 || output_size.nbDims == 3), "aten::upsample_nearest3d input Tensor and output size dimension mismatch");
+ TRTORCH_ASSERT(out_size.size() == 3, "aten::upsample_nearest3d input Tensor and output size dimension mismatch");

+ auto out_shape = in_shape;
+ std::copy(out_size.begin(), out_size.end(), out_shape.begin() + (in_shape.size() - out_size.size()));

auto resize_layer = ctx->net->addResize(*in);
TRTORCH_CHECK(resize_layer, "Unable to create interpolation (resizing) layer from node" << *n);

- resize_layer->setOutputDimensions(output_size);
+ resize_layer->setOutputDimensions(util::toDims(out_shape));
resize_layer->setResizeMode(nvinfer1::ResizeMode::kNEAREST);
resize_layer->setName(util::node_info(n).c_str());

auto layer_output = ctx->AssociateValueAndTensor(n->outputs()[0], resize_layer->getOutput(0));
LOG_DEBUG("Output tensor shape: " << layer_output->getDimensions());
} else {
LOG_DEBUG("scale factor parameters not supported yet.");
}
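The else branches in all three patterns still only log that the scale-factor variants are unhandled. For context, PyTorch derives the output size from a scale factor as floor(input_size * scale); a hedged sketch of how that size could be computed once the scale path is wired up (scaled_output_size is a hypothetical helper, not part of this commit):

#include <cmath>
#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical helper: derive the output spatial size from per-dimension
// scale factors, mirroring PyTorch's floor(input_size * scale) rule.
// The scale-factor path is still unsupported in this commit.
std::vector<int64_t> scaled_output_size(const std::vector<int64_t>& spatial_dims,
                                        const std::vector<double>& scales) {
  std::vector<int64_t> out_size;
  for (size_t i = 0; i < spatial_dims.size(); ++i) {
    out_size.push_back(static_cast<int64_t>(
        std::floor(static_cast<double>(spatial_dims[i]) * scales[i])));
  }
  return out_size;
}

int main() {
  // e.g. a 32x32 feature map with scale factors (2.0, 2.0)
  auto out_size = scaled_output_size({32, 32}, {2.0, 2.0});
  for (auto d : out_size) std::cout << d << " ";  // prints: 64 64
  std::cout << std::endl;
  return 0;
}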
