
Commit

feat(//core/conversion/var): created ITensorOrFreeze() method, to replace functionality of Var::ITensor()

Signed-off-by: Abhiram Iyer <[email protected]>
Signed-off-by: Abhiram Iyer <[email protected]>

fix(): updates to some comments on the PR
Signed-off-by: Abhiram Iyer <[email protected]>

Signed-off-by: Abhiram Iyer <[email protected]>

fix(): addressed PR comment
Signed-off-by: Abhiram Iyer <[email protected]>

Signed-off-by: Abhiram Iyer <[email protected]>

fix(): addressed PR comments
Signed-off-by: Abhiram Iyer <[email protected]>

Signed-off-by: Abhiram Iyer <[email protected]>

fix(): addressed PR comments
Signed-off-by: Abhiram Iyer <[email protected]>

Signed-off-by: Abhiram Iyer <[email protected]>

fix(): addressing PR comments
Signed-off-by: Abhiram Iyer <[email protected]>

Signed-off-by: Abhiram Iyer <[email protected]>

fix(): addressing PR comments
Signed-off-by: Abhiram Iyer <[email protected]>

Signed-off-by: Abhiram Iyer <[email protected]>

fix(): Addressed PR comments
Signed-off-by: Abhiram Iyer <[email protected]>

Signed-off-by: Abhiram Iyer <[email protected]>

fix(): bug in test_serialization, need to fix

Signed-off-by: Abhiram Iyer <[email protected]>

Delete converters.h.orig

delete .orig file

Signed-off-by: Abhiram Iyer <[email protected]>
Signed-off-by: Abhiram Iyer <[email protected]>

Update activation.cpp

addressing PR comments

Signed-off-by: Abhiram Iyer <[email protected]>
Signed-off-by: Abhiram Iyer <[email protected]>

Delete converters.h.orig

delete .orig file

Signed-off-by: Abhiram Iyer <[email protected]>

Update activation.cpp

addressing PR comments

Signed-off-by: Abhiram Iyer <[email protected]>
abhi-iyer authored and narendasan committed Jul 28, 2020
1 parent 362c932 commit 2ccf8d0
Showing 17 changed files with 167 additions and 95 deletions.
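
The headline change replaces the ad-hoc freezing logic that individual converters carried (see matrix_multiply.cpp below) with a single Var::ITensorOrFreeze(ctx) call. The Var.cpp diff itself is among the 17 changed files but is not rendered in this view, so the following is only a hedged sketch of what the helper presumably does, reconstructed from the branches it replaces: freeze a constant IValue tensor through tensor_to_const(), or pass an existing network ITensor straight through.

// Hypothetical sketch only: not the actual Var.cpp diff, which is not shown here.
nvinfer1::ITensor* Var::ITensorOrFreeze(ConversionCtx* ctx) {
  if (isIValue()) {
    // Constant weight captured from the TorchScript graph: freeze it into the
    // TensorRT network as an IConstantLayer and return the layer output.
    return converters::tensor_to_const(ctx, unwrapToTensor());
  }
  // Already a dynamic TensorRT tensor: return it unchanged.
  return ITensor();
}
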
25 changes: 23 additions & 2 deletions core/conversion/converters/BUILD
@@ -7,14 +7,32 @@ config_setting(
}
)

cc_library(
name = "weights",
hdrs = [
"Weights.h"
],
srcs = [
"Weights.cpp"
],
deps = [
"@tensorrt//:nvinfer",
"//core/util:prelude",
"//core/conversion/conversionctx"
] + select({
":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
"//conditions:default": ["@libtorch//:libtorch"],
}),
alwayslink = True,
)

cc_library(
name = "converters",
hdrs = [
"converters.h"
],
srcs = [
"NodeConverterRegistry.cpp",
"Weights.cpp",
"impl/activation.cpp",
"impl/batch_norm.cpp",
"impl/concat.cpp",
@@ -51,5 +69,8 @@ load("@rules_pkg//:pkg.bzl", "pkg_tar")
pkg_tar(
name = "include",
package_dir = "core/conversion/converters/",
srcs = ["converters.h"],
srcs = [
"converters.h",
"Weights.h"
],
)
31 changes: 22 additions & 9 deletions core/conversion/converters/Weights.cpp
@@ -1,12 +1,11 @@
#include "core/util/prelude.h"
#include "core/conversion/converters/converters.h"
#include "core/conversion/converters/Weights.h"

namespace trtorch {
namespace core {
namespace conversion {
namespace converters {


Weights::Weights() {
this->num_input_maps = 0;
this->num_output_maps = 0;
@@ -18,20 +17,36 @@ Weights::Weights() {
Weights::Weights(ConversionCtx* ctx, float val) {
this->num_input_maps = 1;
this->num_output_maps = 1;

this->data.type = nvinfer1::DataType::kFLOAT;
float* buf = reinterpret_cast<float*>(malloc(1 * sizeof(float)));
buf[0] = val;
this->data.values = buf;
this->data.count = 1;
ctx->builder_resources.push_back(buf);
this->kernel_shape.nbDims = 1;
this->kernel_shape.d[0] = 1;

this->shape.nbDims = 0;
this->kernel_shape.nbDims = 0;
}

Weights::Weights(ConversionCtx* ctx, int32_t val) {
this->num_input_maps = 1;
this->num_output_maps = 1;

this->data.type = nvinfer1::DataType::kINT32;
int32_t* buf = reinterpret_cast<int32_t*>(malloc(1 * sizeof(int32_t)));
buf[0] = val;
this->data.values = buf;
this->data.count = 1;
ctx->builder_resources.push_back(buf);

this->shape.nbDims = 0;
this->kernel_shape.nbDims = 0;
}

Weights::Weights(ConversionCtx* ctx, at::Tensor t) {
if (t.sizes().size() > nvinfer1::Dims::MAX_DIMS) {
//TODO: Handle this with exceptions or whatever
LOG_INTERNAL_ERROR("The tensor requested to be converted to nvinfer1::Weights exceeds the max number of dimensions for TensorRT");
TRTORCH_THROW_ERROR("The tensor requested to be converted to nvinfer1::Weights exceeds the max number of dimensions for TensorRT");
}
this->shape = util::toDims(t.sizes());
if (t.sizes().size() >= 2) {
@@ -59,9 +74,7 @@ Weights::Weights(ConversionCtx* ctx, at::Tensor t) {
t_cpu = t_cpu.contiguous();
auto dtype_optional = util::toTRTDataType(t_cpu.dtype());
if (!dtype_optional) {
//TODO: Handle this with exceptions or whatever
//TODO: Implement handling for the Torch Types
LOG_INTERNAL_ERROR("The tensor requested to be converted to nvinfer1::Weights is of an unsupported type");
TRTORCH_THROW_ERROR("The tensor requested to be converted to nvinfer1::Weights is of an unsupported type");
}

// Store the data in the conversion context so it remains until building is complete
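
The reworked scalar constructors record a rank-0 weight (shape.nbDims == 0 and kernel_shape.nbDims == 0) instead of the old length-1 kernel shape, and the new int32_t overload mirrors the float one. A minimal, self-contained sketch of that scalar representation using only public nvinfer1 types follows; the surrounding main() is purely illustrative and not part of the diff:

#include "NvInfer.h"

int main() {
  static float value = 1.0f;                                  // storage must outlive engine building
  nvinfer1::Weights w{nvinfer1::DataType::kFLOAT, &value, 1}; // type, values pointer, element count

  nvinfer1::Dims shape{};
  shape.nbDims = 0;  // rank-0 scalar, rather than the previous {1}-shaped vector

  return (w.count == 1 && shape.nbDims == 0) ? 0 : 1;
}
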
45 changes: 45 additions & 0 deletions core/conversion/converters/Weights.h
@@ -0,0 +1,45 @@
#pragma once

#include "core/util/prelude.h"
#include "core/conversion/conversionctx/ConversionCtx.h"

namespace trtorch {
namespace core {
namespace conversion {
namespace converters {

struct Weights {
nvinfer1::Weights data;
nvinfer1::Dims kernel_shape;
nvinfer1::Dims shape;
int64_t num_input_maps;
int64_t num_output_maps;

Weights();
Weights(ConversionCtx* ctx, at::Tensor t);
Weights(ConversionCtx* ctx, float val);
Weights(ConversionCtx* ctx, int32_t val);
friend std::ostream& operator<<(std::ostream& os, const Weights& w);
};

inline nvinfer1::ITensor* tensor_to_const(ConversionCtx* ctx, at::Tensor t) {
auto t_weights = Weights(ctx, t);
auto const_layer = ctx->net->addConstant(t_weights.shape, t_weights.data);
TRTORCH_CHECK(const_layer, "Unable to freeze tensor");

auto out = const_layer->getOutput(0);

std::ostringstream tensor_id;
tensor_id << reinterpret_cast<int*>(out);

LOG_DEBUG(ctx->logger, "Freezing tensor " << tensor_id.str() << " as an IConstantLayer");
const_layer->setName(("[Freeze Tensor " + tensor_id.str() + " ]").c_str());

return out;
}


} // namespace converters
} // namespace conversion
} // namespace core
} // namespace trtorch
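
With Weights split into its own header, converter code can reach the struct and the tensor_to_const() helper by including core/conversion/converters/Weights.h alone. A hedged usage sketch follows; example_freeze and its arguments are illustrative names, not part of the diff:

#include "core/conversion/converters/Weights.h"

namespace trtorch {
namespace core {
namespace conversion {
namespace converters {

// Illustrative only: exercises the three Weights constructors and tensor_to_const().
nvinfer1::ITensor* example_freeze(ConversionCtx* ctx, at::Tensor t) {
  Weights as_tensor(ctx, t);      // weights built from an at::Tensor (shape and kernel_shape filled in)
  Weights one(ctx, 1.0f);         // rank-0 float scalar
  Weights zero(ctx, (int32_t)0);  // rank-0 int32 scalar, added by this commit
  (void)as_tensor; (void)one; (void)zero;

  return tensor_to_const(ctx, t); // freeze t as an IConstantLayer and return its output tensor
}

} // namespace converters
} // namespace conversion
} // namespace core
} // namespace trtorch
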
23 changes: 1 addition & 22 deletions core/conversion/converters/converters.h
@@ -9,6 +9,7 @@
#include "core/util/prelude.h"
#include "core/conversion/var/Var.h"
#include "core/conversion/conversionctx/ConversionCtx.h"
#include "core/conversion/converters/Weights.h"

namespace trtorch {
namespace core {
@@ -39,28 +40,6 @@ class RegisterNodeConversionPatterns {
bool node_is_convertable(const torch::jit::Node* n);
OpConverter get_node_converter_for(const torch::jit::FunctionSchema* signature);

struct Weights {
//TODO: Rebuild this in a way that makes sense for more than just conv2/3D and linear
nvinfer1::Weights data;
nvinfer1::Dims kernel_shape;
nvinfer1::Dims shape;
int64_t num_input_maps;
int64_t num_output_maps;

Weights();
Weights(ConversionCtx* ctx, at::Tensor t);
Weights(ConversionCtx* ctx, float val);
friend std::ostream& operator<<(std::ostream& os, const Weights& w);
};

inline nvinfer1::ITensor* tensor_to_const(ConversionCtx* ctx, at::Tensor t) {
auto t_weights = Weights(ctx, t);
auto const_layer = ctx->net->addConstant(t_weights.shape, t_weights.data);
TRTORCH_CHECK(const_layer, "Unable to freeze tensor");
const_layer->setName("[Freeze Tensor]");
return const_layer->getOutput(0);
}

} // namespace converters
} // namespace conversion
} // namespace core
6 changes: 3 additions & 3 deletions core/conversion/converters/impl/activation.cpp
@@ -10,7 +10,7 @@ namespace {

#define convert(act, trt_type) \
bool act(ConversionCtx* ctx, const torch::jit::Node* n, args& args) { \
auto in = args[0].ITensor(); \
auto in = args[0].ITensorOrFreeze(ctx); \
\
auto new_layer = \
ctx->net->addActivation(*in, nvinfer1::ActivationType::trt_type); \
@@ -46,7 +46,7 @@ auto acthardtanh TRTORCH_UNUSED = RegisterNodeConversionPatterns()
.pattern({
"aten::hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> (Tensor)",
[](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
auto in = args[0].ITensor();
auto in = args[0].ITensorOrFreeze(ctx);
auto min = args[1].unwrapToDouble();
auto max = args[2].unwrapToDouble();

Expand All @@ -66,7 +66,7 @@ auto acthardtanh TRTORCH_UNUSED = RegisterNodeConversionPatterns()
//TODO: Remove after functionalization
"aten::hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -> (Tensor(a!))",
[](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
auto in = args[0].ITensor();
auto in = args[0].ITensorOrFreeze(ctx);
auto min = args[1].unwrapToDouble();
auto max = args[2].unwrapToDouble();

2 changes: 1 addition & 1 deletion core/conversion/converters/impl/batch_norm.cpp
@@ -15,7 +15,7 @@ auto batch_norm_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns()
Tensor? mean, Tensor? var,
bool training, float momentum, float eps, bool cudnn_enabled) -> (Tensor))SIG",
[](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
auto input = args[0].ITensor();
auto input = args[0].ITensor(); // assumes non-static input Tensor
auto orig_shape = input->getDimensions();
auto shape = util::toVec(orig_shape);
auto options = torch::TensorOptions().dtype(torch::kFloat32);
2 changes: 1 addition & 1 deletion core/conversion/converters/impl/conv_deconv.cpp
@@ -17,7 +17,7 @@ auto conv_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns()
int[] output_padding, int groups, bool benchmark,
bool deterministic, bool cudnn_enabled) -> (Tensor))SIG",
[](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
auto in = args[0].ITensor();
auto in = args[0].ITensor(); // assumes non-static input Tensor

auto w = Weights(ctx, args[1].unwrapToTensor());
auto stride = util::toDims(args[3].unwrapToIntList());
29 changes: 14 additions & 15 deletions core/conversion/converters/impl/element_wise.cpp
@@ -26,7 +26,6 @@ nvinfer1::ILayer* add_elementwise(ConversionCtx* ctx, nvinfer1::ElementWiseOpera
self = self_shuffle->getOutput(0);
}


nvinfer1::ILayer* ele;
if (scalar != 1) {
LOG_WARNING("Please verify scalar handling in add converter, channel axis set to 3 but scaling is uniform");
@@ -73,8 +72,8 @@ auto element_wise_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns(
"aten::add.Tensor(Tensor self, Tensor other, Scalar alpha=1) -> Tensor",
[](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
// Should implement self + alpha * other
auto self = args[0].ITensor();
auto other = args[1].ITensor();
auto self = args[0].ITensorOrFreeze(ctx);
auto other = args[1].ITensorOrFreeze(ctx);
auto scalar = args[2].unwrapToScalar().to<float>();
auto add = add_elementwise(ctx, nvinfer1::ElementWiseOperation::kSUM, self, other, util::node_info(n), scalar);

@@ -90,8 +89,8 @@ auto element_wise_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns(
"aten::add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> (Tensor(a!))",
[](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
// Should implement self + alpha * other
auto self = args[0].ITensor();
auto other = args[1].ITensor();
auto self = args[0].ITensorOrFreeze(ctx);
auto other = args[1].ITensorOrFreeze(ctx);
auto scalar = args[2].unwrapToScalar().to<float>();
auto add = add_elementwise(ctx, nvinfer1::ElementWiseOperation::kSUM, self, other, util::node_info(n), scalar);

@@ -107,8 +106,8 @@ auto element_wise_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns(
"aten::sub.Tensor(Tensor self, Tensor other, Scalar alpha=1) -> Tensor",
[](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
// Should implement self - alpha * other
auto self = args[0].ITensor();
auto other = args[1].ITensor();
auto self = args[0].ITensorOrFreeze(ctx);
auto other = args[1].ITensorOrFreeze(ctx);
auto scalar = args[2].unwrapToScalar().to<float>();
auto sub = add_elementwise(ctx, nvinfer1::ElementWiseOperation::kSUB, self, other, util::node_info(n), scalar);

@@ -124,8 +123,8 @@ auto element_wise_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns(
"aten::div.Tensor(Tensor self, Tensor other) -> Tensor",
[](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
// Should implement self / other
auto self = args[0].ITensor();
auto other = args[1].ITensor();
auto self = args[0].ITensorOrFreeze(ctx);
auto other = args[1].ITensorOrFreeze(ctx);
auto div = add_elementwise(ctx, nvinfer1::ElementWiseOperation::kDIV, self, other, util::node_info(n));

TRTORCH_CHECK(div, "Unable to create div layer from node: " << *n);
@@ -140,8 +139,8 @@ auto element_wise_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns(
"aten::div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)",
[](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
// TODO: Remove with functionalization
auto self = args[0].ITensor();
auto other = args[1].ITensor();
auto self = args[0].ITensorOrFreeze(ctx);
auto other = args[1].ITensorOrFreeze(ctx);
auto div = add_elementwise(ctx, nvinfer1::ElementWiseOperation::kDIV, self, other, util::node_info(n));

TRTORCH_CHECK(div, "Unable to create div layer from node: " << *n);
@@ -156,8 +155,8 @@ auto element_wise_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns(
"aten::mul.Tensor(Tensor self, Tensor other) -> Tensor",
[](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
// Should implement self * other
auto self = args[0].ITensor();
auto other = args[1].ITensor();
auto self = args[0].ITensorOrFreeze(ctx);
auto other = args[1].ITensorOrFreeze(ctx);
auto mul = add_elementwise(ctx, nvinfer1::ElementWiseOperation::kPROD, self, other, util::node_info(n));

TRTORCH_CHECK(mul, "Unable to create mul layer from node: " << *n);
@@ -172,8 +171,8 @@ auto element_wise_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns(
"aten::mul_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)",
[](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
// TODO: Remove with functionalization
auto self = args[0].ITensor();
auto other = args[1].ITensor();
auto self = args[0].ITensorOrFreeze(ctx);
auto other = args[1].ITensorOrFreeze(ctx);
auto mul = add_elementwise(ctx, nvinfer1::ElementWiseOperation::kPROD, self, other, util::node_info(n));

TRTORCH_CHECK(mul, "Unable to create mul layer from node: " << *n);
2 changes: 1 addition & 1 deletion core/conversion/converters/impl/linear.cpp
@@ -14,7 +14,7 @@ auto linear_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns()
[](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
// PyTorch follows in: Nx*xIN, W: OUTxIN, B: OUT, out: Nx*xOUT
// TensorRT inserts a flatten in when following conv
auto in = args[0].ITensor();
auto in = args[0].ITensorOrFreeze(ctx);
auto shape = util::toVec(in->getDimensions());

LOG_DEBUG("Input tensor shape: " << in->getDimensions());
24 changes: 2 additions & 22 deletions core/conversion/converters/impl/matrix_multiply.cpp
@@ -12,30 +12,10 @@ auto mm_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns()
.pattern({
"aten::matmul(Tensor self, Tensor other) -> (Tensor)",
[](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
nvinfer1::ITensor* self;
if (args[0].isIValue()) {
auto t = args[0].unwrapToTensor();
auto t_weights = Weights(ctx, t);
auto const_layer = ctx->net->addConstant(t_weights.shape, t_weights.data);
TRTORCH_CHECK(const_layer, "Unable to freeze tensor self for node: " << *n);
const_layer->setName((util::node_info(n) + " [Freeze Tensor(self)]").c_str());
self = const_layer->getOutput(0);
} else {
self = args[0].ITensor();
}
auto self = args[0].ITensorOrFreeze(ctx);
LOG_DEBUG("self tensor shape: " << self->getDimensions());

nvinfer1::ITensor* other;
if (args[1].isIValue()) {
auto t = args[1].unwrapToTensor();
auto t_weights = Weights(ctx, t);
auto const_layer = ctx->net->addConstant(t_weights.shape, t_weights.data);
TRTORCH_CHECK(const_layer, "Unable to freeze tensor other for node: " << *n);
const_layer->setName((util::node_info(n) + " [Freeze Tensor(other)]").c_str());
other = const_layer->getOutput(0);
} else {
other = args[1].ITensor();
}
auto other = args[1].ITensorOrFreeze(ctx);
LOG_DEBUG("other tensor shape: " << other->getDimensions());

auto mm_layer = ctx->net->addMatrixMultiply(*self, nvinfer1::MatrixOperation::kNONE, *other, nvinfer1::MatrixOperation::kNONE);
(The remaining changed files in this commit are not rendered in this view.)
