Add support for importing double-typed weights as float (onnx#674)
Signed-off-by: Kevin Chen <[email protected]>
kevinch-nv committed Jul 2, 2021
1 parent 8f09ded commit 5ab7d3e
Showing 3 changed files with 66 additions and 2 deletions.
2 changes: 1 addition & 1 deletion docs/operators.md
@@ -4,7 +4,7 @@ TensorRT 7.2 supports operators up to Opset 13. Latest information of ONNX opera

TensorRT supports the following ONNX data types: FLOAT32, FLOAT16, INT8, and BOOL

-> Note: There is limited support for INT32 and INT64 types. TensorRT will attempt to cast down INT64 to INT32 where possible. If not possible, TensorRT will throw an error. See the [TensorRT layer support matrix](https://docs.nvidia.com/deeplearning/sdk/tensorrt-support-matrix/index.html#layers-precision-matrix) for more information on data type support.
+> Note: There is limited support for INT32, INT64, and DOUBLE types. TensorRT will attempt to cast down INT64 to INT32 and DOUBLE down to FLOAT where possible. If not possible, TensorRT will throw an error. See the [TensorRT layer support matrix](https://docs.nvidia.com/deeplearning/sdk/tensorrt-support-matrix/index.html#layers-precision-matrix) for more information on data type support.
## Operator Support Matrix

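Editor's aside (not part of the commit): "where possible" in the note above comes down to value ranges. A minimal C++ sketch of the bounds involved, assuming a standard 32-bit int32_t and IEEE-754 float; the constant names here are illustrative only:

#include <cstdint>
#include <limits>

// An INT64 weight survives the downcast to INT32 only if it lies in this range.
constexpr int64_t kInt32Min = std::numeric_limits<int32_t>::min(); // -2147483648
constexpr int64_t kInt32Max = std::numeric_limits<int32_t>::max(); //  2147483647

// A DOUBLE weight maps onto FLOAT (up to rounding) only within this range; the new
// convertDouble helper in onnx2trt_utils.cpp below clamps anything outside it and logs a warning.
constexpr double kFloatLowest = std::numeric_limits<float>::lowest(); // about -3.4e38
constexpr double kFloatMax = std::numeric_limits<float>::max();       // about  3.4e38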
65 changes: 64 additions & 1 deletion onnx2trt_utils.cpp
@@ -258,6 +258,46 @@ int32_t* convertINT64(const int64_t* weightValues, nvinfer1::Dims shape, IImport
    return int32Weights;
}

float* convertDouble(const double* weightValues, nvinfer1::Dims shape, IImporterContext* ctx)
{
    static bool logged = false;
    if (!logged)
    {
        LOG_WARNING(
            "Your ONNX model has been generated with double-typed weights, while TensorRT does not natively support "
            "double. "
            "Attempting to cast down to float.");
        logged = true;
    }
    const size_t nbWeights = volume(shape);
    float* floatWeights{
        reinterpret_cast<float*>(ctx->createTempWeights(::ONNX_NAMESPACE::TensorProto::FLOAT, shape).values)};

    bool outOfBounds{false};
    const double floatMax = static_cast<double>(std::numeric_limits<float>::max());
    const double floatMin = static_cast<double>(std::numeric_limits<float>::lowest());
    for (size_t i = 0; i < nbWeights; i++)
    {
        if (weightValues[i] > floatMax || weightValues[i] < floatMin)
        {
            floatWeights[i] = static_cast<float>(std::max(std::min(weightValues[i], floatMax), floatMin));
            LOG_WARNING("Weight at index " << i << ": " << weightValues[i]
                                           << " is out of range. Clamping to: " << floatWeights[i]);
            outOfBounds = true;
        }
        else
        {
            floatWeights[i] = static_cast<float>(weightValues[i]);
        }
    }
    if (outOfBounds)
    {
        LOG_WARNING("One or more weights outside the range of FLOAT was clamped");
    }

    return floatWeights;
}

bool convertOnnxPadding(const std::vector<int64_t>& onnxPadding, nvinfer1::Dims2* begPadding, nvinfer1::Dims2* endPadding)
{
    const size_t size = onnxPadding.size();
@@ -376,7 +416,7 @@ bool convertOnnxWeights(
        // For weights parsed from external files, createTempWeights is necessary to keep them in scope
        ShapedWeights externalWeights;

-        // Downcast INT64 weights to INT32 weights before copying the values to externalWeights
+        // Cast non-native TRT types to their corresponding proxy types
        if (onnxDtype == ::ONNX_NAMESPACE::TensorProto::INT64)
        {
            dataPtr = dataBuf.data();
@@ -385,6 +425,15 @@ bool convertOnnxWeights(
            onnxDtype = ::ONNX_NAMESPACE::TensorProto::INT32;
            externalWeights = ctx->createTempWeights(onnxDtype, shape);
            std::memcpy(externalWeights.values, dataPtr, nbytes);
        }
        else if (onnxDtype == ::ONNX_NAMESPACE::TensorProto::DOUBLE)
        {
            dataPtr = dataBuf.data();
            dataPtr = convertDouble(reinterpret_cast<const double*>(dataPtr), shape, ctx);
            nbytes = nbytes / (sizeof(double) / sizeof(float));
            onnxDtype = ::ONNX_NAMESPACE::TensorProto::FLOAT;
            externalWeights = ctx->createTempWeights(onnxDtype, shape);
            std::memcpy(externalWeights.values, dataPtr, nbytes);
        }
        // Copy weight values directly to externalWeights
        else
@@ -425,6 +474,20 @@ bool convertOnnxWeights(
            }
            onnxDtype = ::ONNX_NAMESPACE::TensorProto::INT32;
        }
        else if (onnxDtype == ::ONNX_NAMESPACE::TensorProto::DOUBLE)
        {
            if (onnxTensor.raw_data().size() > 0)
            {
                dataPtr = convertDouble(reinterpret_cast<const double*>(onnxTensor.raw_data().data()), shape, ctx);
                nbytes = onnxTensor.raw_data().size() / (sizeof(double) / sizeof(float));
            }
            else if (onnxTensor.double_data().size() > 0)
            {
                dataPtr = convertDouble(onnxTensor.double_data().data(), shape, ctx);
                nbytes = onnxTensor.double_data().size() * sizeof(float);
            }
            onnxDtype = ::ONNX_NAMESPACE::TensorProto::FLOAT;
        }

        // Check for supported types that can be found in the int32_data field in the TensorProto
        // https://github.com/onnx/onnx/blob/master/onnx/onnx.proto#L382-L387
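Editor's aside (not part of the commit) on the byte-count arithmetic in the hunks above: sizeof(double) / sizeof(float) is 2 on mainstream platforms, so dividing the raw byte count by it gives the size of the converted float buffer, while double_data().size() * sizeof(float) computes the same quantity from the element count. A compile-checkable sketch of that reasoning, under the usual 8-byte double / 4-byte float assumption:

#include <cstddef>

static_assert(sizeof(double) / sizeof(float) == 2, "double assumed to be twice as wide as float");

// Example: 10 double-typed weights occupy 80 bytes of raw_data but only 40 bytes once cast to float.
constexpr std::size_t kRawBytes = 10 * sizeof(double);                                   // 80
constexpr std::size_t kFloatBytesFromRaw = kRawBytes / (sizeof(double) / sizeof(float)); // 40
constexpr std::size_t kFloatBytesFromCount = 10 * sizeof(float);                         // 40
static_assert(kFloatBytesFromRaw == kFloatBytesFromCount, "both formulas give the same size");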
1 change: 1 addition & 0 deletions onnx2trt_utils.hpp
@@ -32,6 +32,7 @@

#include <cstring> // For std::memcpy
#include <iostream>
#include <limits>
#include <numeric>
#include <sstream>


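Finally, an editor's illustration (not from the commit) of the clamping that convertDouble applies to out-of-range weights, shown as a standalone program:

#include <algorithm>
#include <iostream>
#include <limits>

int main()
{
    const double floatMax = static_cast<double>(std::numeric_limits<float>::max());
    const double floatMin = static_cast<double>(std::numeric_limits<float>::lowest());
    const double weight = 1e40; // exceeds the float range (~3.4e38)

    // Same clamp-then-cast expression used in convertDouble above.
    const float clamped = static_cast<float>(std::max(std::min(weight, floatMax), floatMin));
    std::cout << clamped << "\n"; // prints roughly 3.40282e+38, i.e. the largest finite float
    return 0;
}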