diff --git a/cpp/api/include/trtorch/trtorch.h b/cpp/api/include/trtorch/trtorch.h
index 043ba41b58..cd3abf6975 100644
--- a/cpp/api/include/trtorch/trtorch.h
+++ b/cpp/api/include/trtorch/trtorch.h
@@ -12,6 +12,7 @@
 #include
 #include
 #include
+#include // Just include the .h?
 #ifndef DOXYGEN_SHOULD_SKIP_THIS
@@ -41,56 +42,6 @@ namespace trtorch {
  *
  */
 struct TRTORCH_API CompileSpec {
-  /**
-   * @brief A struct to hold an input range (used by TensorRT Optimization
-   * profile)
-   *
-   * This struct can either hold a single vector representing an input shape,
-   * signifying a static input shape or a set of three input shapes representing
-   * the min, optiminal and max input shapes allowed for the engine.
-   */
-  struct TRTORCH_API InputRange {
-    /// Minimum acceptable input size into the engine
-    std::vector<int64_t> min;
-    /// Optimal input size into the engine (gets best performace)
-    std::vector<int64_t> opt;
-    /// Maximum acceptable input size into the engine
-    std::vector<int64_t> max;
-    /**
-     * @brief Construct a new Input Range object for static input size from
-     * vector
-     *
-     * @param opt
-     */
-    InputRange(std::vector<int64_t> opt);
-    /**
-     * @brief Construct a new Input Range object static input size from
-     * c10::ArrayRef (the type produced by tensor.sizes())
-     *
-     * @param opt
-     */
-    InputRange(c10::ArrayRef<int64_t> opt);
-    /**
-     * @brief Construct a new Input Range object dynamic input size from vectors
-     * for min, opt, and max supported sizes
-     *
-     * @param min
-     * @param opt
-     * @param max
-     */
-    InputRange(std::vector<int64_t> min, std::vector<int64_t> opt, std::vector<int64_t> max);
-    /**
-     * @brief Construct a new Input Range object dynamic input size from
-     * c10::ArrayRef (the type produced by tensor.sizes()) for min, opt, and max
-     * supported sizes
-     *
-     * @param min
-     * @param opt
-     * @param max
-     */
-    InputRange(c10::ArrayRef<int64_t> min, c10::ArrayRef<int64_t> opt, c10::ArrayRef<int64_t> max);
-  };
-
   /**
    * Supported Data Types that can be used with TensorRT engines
    *
@@ -191,146 +142,6 @@ struct TRTORCH_API CompileSpec {
     Value value;
   };
 
-  /**
-   * @brief A struct to hold Input of a network.
-   * This struct has all the info (shape, dtype, name, memory_format) of an input tensor.
-   * The shape field in this struct can either hold a single vector representing an input shape,
-   * signifying a static input shape or a set of three input shapes representing
-   * the min, optiminal and max input shapes allowed for the engine.
-   * dtype : This can take values among values supported by trtorch::DataType
-   */
-  struct TRTORCH_API Input {
-    /// Minimum acceptable input size into the engine
-    std::vector<int64_t> min;
-    /// Optimal input size into the engine (gets best performace)
-    std::vector<int64_t> opt;
-    /// Maximum acceptable input size into the engine
-    std::vector<int64_t> max;
-    /// Data type of the input
-    DataType dtype;
-
-    /**
-     * @brief Construct a new Input Range object for static input size from
-     * vector
-     *
-     * @param opt
-     */
-    Input(std::vector<int64_t> opt, DataType dtype=DataType::kFloat);
-    /**
-     * @brief Construct a new Input Range object static input size from
-     * c10::ArrayRef (the type produced by tensor.sizes())
-     *
-     * @param opt
-     */
-    Input(c10::ArrayRef<int64_t> opt, DataType dtype=DataType::kFloat);
-    /**
-     * @brief Construct a new Input Range object dynamic input size from vectors
-     * for min, opt, and max supported sizes
-     *
-     * @param min
-     * @param opt
-     * @param max
-     */
-    Input(std::vector<int64_t> min, std::vector<int64_t> opt, std::vector<int64_t> max, DataType dtype=DataType::kFloat);
-    /**
-     * @brief Construct a new Input Range object dynamic input size from
-     * c10::ArrayRef (the type produced by tensor.sizes()) for min, opt, and max
-     * supported sizes
-     *
-     * @param min
-     * @param opt
-     * @param max
-     */
-    Input(c10::ArrayRef<int64_t> min, c10::ArrayRef<int64_t> opt, c10::ArrayRef<int64_t> max, DataType dtype=DataType::kFloat);
-  };
-
-  /**
-   * Emum for selecting engine capability
-   */
-  enum class EngineCapability : int8_t {
-    kDEFAULT,
-    kSAFE_GPU,
-    kSAFE_DLA,
-  };
-
-  /**
-   * @brief Construct a new Extra Info object from input ranges.
-   * Each entry in the vector represents a input and should be provided in call
-   * order.
-   *
-   * Use this constructor if you want to use dynamic shape
-   *
-   * @param input_ranges
-   */
-  CompileSpec(std::vector<InputRange> input_ranges) : input_ranges(std::move(input_ranges)) {}
-  /**
-   * @brief Construct a new Extra Info object
-   * Convienence constructor to set fixed input size from vectors describing
-   * size of input tensors. Each entry in the vector represents a input and
-   * should be provided in call order.
-   *
-   * @param fixed_sizes
-   */
-  CompileSpec(std::vector<std::vector<int64_t>> fixed_sizes);
-  /**
-   * @brief Construct a new Extra Info object
-   * Convienence constructor to set fixed input size from c10::ArrayRef's (the
-   * output of tensor.sizes()) describing size of input tensors. Each entry in
-   * the vector represents a input and should be provided in call order.
-   * @param fixed_sizes
-   */
-  CompileSpec(std::vector<c10::ArrayRef<int64_t>> fixed_sizes);
-
-  // Defaults should reflect TensorRT defaults for BuilderConfig
-
-  /**
-   * Sizes for inputs to engine, can either be a single size or a range
-   * defined by Min, Optimal, Max sizes
-   *
-   * Order is should match call order
-   */
-  std::vector<InputRange> input_ranges;
-
-  /**
-   * Default operating precision for the engine
-   */
-  DataType op_precision = DataType::kFloat;
-
-  /**
-   * Data types for input tensors
-   */
-  std::vector<DataType> input_dtypes;
-
-  /**
-   * Prevent Float32 layers from using TF32 data format
-   *
-   * TF32 computes inner products by rounding the inputs to 10-bit mantissas
-   * before multiplying, but accumulates the sum using 23-bit mantissas.
-   * This is the behavior of FP32 layers by default.
-   */
-  bool disable_tf32 = false;
-
-  /**
-   * Build a refitable engine
-   */
-  bool refit = false;
-
-  /**
-   * Build a debugable engine
-   */
-  bool debug = false;
-
-  /**
-   * Truncate long/double type to int/float type
-   */
-  bool truncate_long_and_double = false;
-
-  /**
-   * Restrict operating type to only set default operation precision
-   * (op_precision)
-   */
-  bool strict_types = false;
-
   /*
    * Setting data structure for Target device
    */
@@ -443,10 +254,225 @@ struct TRTORCH_API CompileSpec {
     Device() : device_type(DeviceType::kGPU), gpu_id(0), dla_core(0), allow_gpu_fallback(false) {}
   };
 
-  /*
-   * Target Device
+  /**
+   * Enum for selecting engine capability
    */
-  Device device;
+  enum class EngineCapability : int8_t {
+    kDEFAULT,
+    kSAFE_GPU,
+    kSAFE_DLA,
+  };
+
+  class TRTORCH_API TensorFormat {
+   public:
+    /**
+     * Underlying enum class to support the TensorFormat class
+     *
+     * In the case that you need to use the TensorFormat class itself, interface
+     * using this enum rather than normal instantiation
+     *
+     * ex. trtorch::TensorFormat type = TensorFormat::kContiguous;
+     */
+    enum Value : int8_t {
+      /// Contiguous / NCHW / Linear
+      kContiguous,
+      /// Channel Last / NHWC
+      kChannelsLast,
+    };
+
+    /**
+     * @brief Construct a new TensorFormat object
+     *
+     */
+    TensorFormat() = default;
+    /**
+     * @brief TensorFormat constructor from enum
+     *
+     */
+    constexpr TensorFormat(Value t) : value(t) {}
+    /**
+     * @brief Construct a new TensorFormat object from torch type enums
+     *
+     * @param t
+     */
+    TensorFormat(at::MemoryFormat t);
+    /**
+     * @brief Get the enum value of the TensorFormat object
+     *
+     * @return Value
+     */
+    operator Value() const {
+      return value;
+    }
+    explicit operator bool() = delete;
+    /**
+     * @brief Comparison operator for TensorFormat
+     *
+     * @param other
+     * @return true
+     * @return false
+     */
+    constexpr bool operator==(TensorFormat other) const {
+      return value == other.value;
+    }
+    /**
+     * @brief Comparison operator for TensorFormat
+     *
+     * @param other
+     * @return true
+     * @return false
+     */
+    constexpr bool operator==(TensorFormat::Value other) const {
+      return value == other;
+    }
+    /**
+     * @brief Comparison operator for TensorFormat
+     *
+     * @param other
+     * @return true
+     * @return false
+     */
+    constexpr bool operator!=(TensorFormat other) const {
+      return value != other.value;
+    }
+    /**
+     * @brief Comparison operator for TensorFormat
+     *
+     * @param other
+     * @return true
+     * @return false
+     */
+    constexpr bool operator!=(TensorFormat::Value other) const {
+      return value != other;
+    }
+
+   private:
+    Value value;
+  };
+
+  /**
+   * @brief A struct to hold the full specification of an input to the engine
+   * (shape or shape range, data type and tensor format, used to build the
+   * TensorRT optimization profile)
+   *
+   * This struct can either hold a single vector representing an input shape,
+   * signifying a static input shape, or a set of three input shapes representing
+   * the min, optimal and max input shapes allowed for the engine.
+   */
+  struct TRTORCH_API Input {
+    /// Minimum acceptable input size into the engine
+    std::vector<int64_t> min_shape;
+    /// Optimal input size into the engine (the size kernels are tuned for; any size in the min/max range is accepted)
+    std::vector<int64_t> opt_shape;
+    /// Maximum acceptable input size into the engine
+    std::vector<int64_t> max_shape;
+    /// Input shape to be fed to TensorRT. In the event of a dynamic shape, -1 holds the place of variable dimensions
+    std::vector<int64_t> shape;
+    /// Expected data type for the input
+    DataType dtype;
+    /// Expected tensor format for the input
+    TensorFormat format;
+    /**
+     * @brief Construct a new Input spec object for a static input size from a
+     * vector; optional arguments allow the user to configure the expected input
+     * data type and tensor format
+     *
+     * @param shape Input tensor shape
+     * @param dtype Expected data type for the input (Defaults to Float32)
+     * @param format Expected tensor format for the input (Defaults to contiguous)
+     */
+    Input(std::vector<int64_t> shape, DataType dtype=DataType::kFloat, TensorFormat format=TensorFormat::kContiguous);
+    /**
+     * @brief Construct a new Input spec object for a static input size from a
+     * c10::ArrayRef (the type produced by tensor.sizes()); optional arguments
+     * allow the user to configure the expected input data type and tensor format
+     *
+     * @param shape Input tensor shape
+     * @param dtype Expected data type for the input (Defaults to Float32)
+     * @param format Expected tensor format for the input (Defaults to contiguous)
+     */
+    Input(c10::ArrayRef<int64_t> shape, DataType dtype=DataType::kFloat, TensorFormat format=TensorFormat::kContiguous);
+    /**
+     * @brief Construct a new Input spec object for a dynamic input size from
+     * vectors for the minimum, optimal and maximum supported shapes; optional
+     * arguments allow the user to configure the expected input data type and tensor format
+     *
+     * @param min_shape Minimum shape for input tensor
+     * @param opt_shape Target optimization shape for input tensor
+     * @param max_shape Maximum acceptable shape for input tensor
+     * @param dtype Expected data type for the input (Defaults to Float32)
+     * @param format Expected tensor format for the input (Defaults to contiguous)
+     */
+    Input(std::vector<int64_t> min_shape, std::vector<int64_t> opt_shape, std::vector<int64_t> max_shape, DataType dtype=DataType::kFloat, TensorFormat format=TensorFormat::kContiguous);
+    /**
+     * @brief Construct a new Input spec object for a dynamic input size from
+     * c10::ArrayRef (the type produced by tensor.sizes()) for the minimum,
+     * optimal and maximum supported shapes
+     *
+     * @param min_shape Minimum shape for input tensor
+     * @param opt_shape Target optimization shape for input tensor
+     * @param max_shape Maximum acceptable shape for input tensor
+     * @param dtype Expected data type for the input (Defaults to Float32)
+     * @param format Expected tensor format for the input (Defaults to contiguous)
+     */
+    Input(c10::ArrayRef<int64_t> min_shape, c10::ArrayRef<int64_t> opt_shape, c10::ArrayRef<int64_t> max_shape, DataType dtype=DataType::kFloat, TensorFormat format=TensorFormat::kContiguous);
+
+   private:
+    bool input_is_dynamic;
+  };
+
+  /**
+   * @brief A struct to hold an input range (used by TensorRT Optimization
+   * profile)
+   *
+   * This struct can either hold a single vector representing an input shape,
+   * signifying a static input shape, or a set of three input shapes representing
+   * the min, optimal and max input shapes allowed for the engine.
+   */
+  struct TRTORCH_API InputRange {
+    /// Minimum acceptable input size into the engine
+    std::vector<int64_t> min;
+    /// Optimal input size into the engine (gets best performance)
+    std::vector<int64_t> opt;
+    /// Maximum acceptable input size into the engine
+    std::vector<int64_t> max;
+    /**
+     * @brief Construct a new Input Range object for a static input size from
+     * vector
+     *
+     * @param opt
+     */
+    [[deprecated("trtorch::CompileSpec::InputRange is being deprecated in favor of trtorch::CompileSpec::Input. trtorch::CompileSpec::InputRange will be removed in TRTorch v0.5.0")]]
+    InputRange(std::vector<int64_t> opt);
+    /**
+     * @brief Construct a new Input Range object for a static input size from
+     * c10::ArrayRef (the type produced by tensor.sizes())
+     *
+     * @param opt
+     */
+    [[deprecated("trtorch::CompileSpec::InputRange is being deprecated in favor of trtorch::CompileSpec::Input. trtorch::CompileSpec::InputRange will be removed in TRTorch v0.5.0")]]
+    InputRange(c10::ArrayRef<int64_t> opt);
+    /**
+     * @brief Construct a new Input Range object for a dynamic input size from
+     * vectors for min, opt, and max supported sizes
+     *
+     * @param min
+     * @param opt
+     * @param max
+     */
+    [[deprecated("trtorch::CompileSpec::InputRange is being deprecated in favor of trtorch::CompileSpec::Input. trtorch::CompileSpec::InputRange will be removed in TRTorch v0.5.0")]]
+    InputRange(std::vector<int64_t> min, std::vector<int64_t> opt, std::vector<int64_t> max);
+    /**
+     * @brief Construct a new Input Range object for a dynamic input size from
+     * c10::ArrayRef (the type produced by tensor.sizes()) for min, opt, and max
+     * supported sizes
+     *
+     * @param min
+     * @param opt
+     * @param max
+     */
+    [[deprecated("trtorch::CompileSpec::InputRange is being deprecated in favor of trtorch::CompileSpec::Input. trtorch::CompileSpec::InputRange will be removed in TRTorch v0.5.0")]]
+    InputRange(c10::ArrayRef<int64_t> min, c10::ArrayRef<int64_t> opt, c10::ArrayRef<int64_t> max);
+  };
 
   /**
    * @brief A struct to hold fallback info
   *
@@ -477,6 +503,106 @@ struct TRTORCH_API CompileSpec {
     TorchFallback(bool enabled, uint64_t min_size) : enabled(enabled), min_block_size(min_size) {}
   };
 
+  /**
+   * @brief Construct a new CompileSpec object from input ranges.
+   * Each entry in the vector represents an input and should be provided in call
+   * order.
+   *
+   * Use this constructor if you want to use dynamic shape
+   *
+   * @param input_ranges
+   */
+  [[deprecated("trtorch::CompileSpec::CompileSpec(std::vector<InputRange> input_ranges) is being deprecated in favor of trtorch::CompileSpec::CompileSpec(std::vector<Input> inputs). trtorch::CompileSpec::CompileSpec(std::vector<InputRange> input_ranges) will be removed in TRTorch v0.5.0")]]
+  CompileSpec(std::vector<InputRange> input_ranges) : input_ranges(std::move(input_ranges)) {}
+  /**
+   * @brief Construct a new CompileSpec object
+   * Convenience constructor to set fixed input size from vectors describing
+   * size of input tensors. Each entry in the vector represents an input and
+   * should be provided in call order.
+   *
+   * @param fixed_sizes
+   */
+  [[deprecated("trtorch::CompileSpec::InputRange is being deprecated in favor of trtorch::CompileSpec::Input. trtorch::CompileSpec::InputRange will be removed in TRTorch v0.5.0")]]
+  CompileSpec(std::vector<std::vector<int64_t>> fixed_sizes);
+  /**
+   * @brief Construct a new CompileSpec object
+   * Convenience constructor to set fixed input size from c10::ArrayRef's (the
+   * output of tensor.sizes()) describing size of input tensors. Each entry in
+   * the vector represents an input and should be provided in call order.
+   * @param fixed_sizes
+   */
+  [[deprecated("trtorch::CompileSpec::InputRange is being deprecated in favor of trtorch::CompileSpec::Input. trtorch::CompileSpec::InputRange will be removed in TRTorch v0.5.0")]]
+  CompileSpec(std::vector<c10::ArrayRef<int64_t>> fixed_sizes);
+
+  // Defaults should reflect TensorRT defaults for BuilderConfig
+
+  /**
+   * @brief Specifications for inputs to the engine; each can be either a single
+   * size or a range defined by min, opt and max sizes
+   * Users can also specify the expected input data type as well as tensor memory format
+   *
+   * Order in vector should match call order for the function
+   */
+  std::vector<Input> inputs;
+
+  /**
+   * Sizes for inputs to engine, can either be a single size or a range
+   * defined by Min, Optimal, Max sizes
+   *
+   * Order should match call order
+   */
+  [[deprecated("trtorch::CompileSpec::input_ranges is being deprecated in favor of trtorch::CompileSpec::inputs. trtorch::CompileSpec::input_ranges will be removed in TRTorch v0.5.0")]]
+  std::vector<InputRange> input_ranges;
+
+  /**
+   * Default operating precision for the engine
+   */
+  [[deprecated("trtorch::CompileSpec::op_precision is being deprecated in favor of trtorch::CompileSpec::enabled_precisions, a set of all enabled precisions to use during compilation, trtorch::CompileSpec::op_precision will be removed in TRTorch v0.5.0")]]
+  DataType op_precision = DataType::kFloat;
+
+  /**
+   * @brief The set of precisions TensorRT is allowed to use for kernels during compilation
+   *
+   */
+  std::set<DataType> enabled_precisions = {DataType::kFloat};
+
+  /**
+   * Prevent Float32 layers from using TF32 data format
+   *
+   * TF32 computes inner products by rounding the inputs to 10-bit mantissas
+   * before multiplying, but accumulates the sum using 23-bit mantissas.
+   * This is the behavior of FP32 layers by default.
+   */
+  bool disable_tf32 = false;
+
+  /**
+   * Build a refittable engine
+   */
+  bool refit = false;
+
+  /**
+   * Build a debuggable engine
+   */
+  bool debug = false;
+
+  /**
+   * Truncate long/double type to int/float type
+   */
+  bool truncate_long_and_double = false;
+
+  /**
+   * Restrict operating type to only set default operation precision
+   * (op_precision)
+   */
+  bool strict_types = false;
+
+  /**
+   * Target Device
+   */
+  Device device;
+
+  /**
+   * @brief Settings related to partial compilation
+   */
   TorchFallback torch_fallback;
 
   /**
diff --git a/cpp/api/src/compile_spec.cpp b/cpp/api/src/compile_spec.cpp
index 29a6a9adea..fb54547a32 100644
--- a/cpp/api/src/compile_spec.cpp
+++ b/cpp/api/src/compile_spec.cpp
@@ -1,3 +1,5 @@
+#include
+
 #include "torch/csrc/jit/api/module.h"
 
 #include "core/compiler.h"
@@ -6,6 +8,33 @@
 #include "trtorch/trtorch.h"
 
 namespace trtorch {
+
+nvinfer1::DataType toTRTDataType(CompileSpec::DataType value) {
+  switch (value) {
+    case CompileSpec::DataType::kChar:
+      return nvinfer1::DataType::kINT8;
+    case CompileSpec::DataType::kHalf:
+      return nvinfer1::DataType::kHALF;
+    case CompileSpec::DataType::kInt32:
+      return nvinfer1::DataType::kINT32;
+    case CompileSpec::DataType::kBool:
+      return nvinfer1::DataType::kBOOL;
+    case CompileSpec::DataType::kFloat:
+    default:
+      return nvinfer1::DataType::kFLOAT;
+  }
+}
+
+nvinfer1::TensorFormat toTRTTensorFormat(CompileSpec::TensorFormat value) {
+  switch (value) {
+    case CompileSpec::TensorFormat::kChannelsLast:
+      return nvinfer1::TensorFormat::kHWC;
+    case CompileSpec::TensorFormat::kContiguous:
+    default:
+      return nvinfer1::TensorFormat::kLINEAR;
+  }
+}
+
 CompileSpec::DataType::DataType(c10::ScalarType t) {
   TRTORCH_CHECK(
       t == at::kHalf || t == at::kFloat || t == at::kChar || t == at::kInt || t == at::kBool,
@@ -30,6 +59,21 @@ CompileSpec::DataType::DataType(c10::ScalarType t) {
   }
 }
 
+CompileSpec::TensorFormat::TensorFormat(at::MemoryFormat t) {
+  TRTORCH_CHECK(
+      t == at::MemoryFormat::Contiguous || t == at::MemoryFormat::ChannelsLast, "Tensor format is unsupported"
+  );
+
+  switch (t) {
+    case at::MemoryFormat::ChannelsLast:
+      value = TensorFormat::kChannelsLast;
+      break;
+    case at::MemoryFormat::Contiguous:
+    default:
+      value = TensorFormat::kContiguous;
+      break;
+  }
+}
+
 CompileSpec::Device::DeviceType::DeviceType(c10::DeviceType t) {
   TRTORCH_CHECK(t == at::kCUDA, "Device type when specified using torch device enum must be torch::kCUDA");
   value = DeviceType::kGPU;
@@ -72,66 +116,87 @@ CompileSpec::CompileSpec(std::vector<c10::ArrayRef<int64_t>> fixed_sizes) {
 }
 
 /* ====== DEFINE INPUTS CLASS MEMBERS ======*/
-CompileSpec::Input::Input(std::vector<int64_t> opt) {
-  this->opt = opt;
-  this->min = opt;
-  this->max = opt;
+CompileSpec::Input::Input(std::vector<int64_t> shape, DataType dtype, TensorFormat format) {
+  this->opt_shape = shape;
+  this->min_shape = shape;
+  this->max_shape = shape;
+  this->shape = shape;
+  this->dtype = dtype;
+  this->format = format;
+  this->input_is_dynamic = false;
+}
+
+CompileSpec::Input::Input(c10::IntArrayRef shape, DataType dtype, TensorFormat format) {
+  this->opt_shape = core::util::toVec(shape);
+  this->min_shape = core::util::toVec(shape);
+  this->max_shape = core::util::toVec(shape);
+  this->shape = core::util::toVec(shape);
+  this->dtype = dtype;
+  this->format = format;
+  this->input_is_dynamic = false;
+}
+
+CompileSpec::Input::Input(std::vector<int64_t> min_shape, std::vector<int64_t> opt_shape, std::vector<int64_t> max_shape, DataType dtype, TensorFormat format) {
+  this->opt_shape = opt_shape;
+  this->min_shape = min_shape;
+  this->max_shape = max_shape;
+  this->shape = core::util::toVec(core::ir::Input(this->min_shape, this->opt_shape, this->max_shape).input_shape);
+  this->dtype = dtype;
+  this->format = format;
+  this->input_is_dynamic = true;
+}
+
+CompileSpec::Input::Input(c10::IntArrayRef min_shape, c10::IntArrayRef opt_shape, c10::IntArrayRef max_shape, DataType dtype, TensorFormat format) {
+  this->opt_shape = core::util::toVec(opt_shape);
+  this->min_shape = core::util::toVec(min_shape);
+  this->max_shape = core::util::toVec(max_shape);
+  this->shape = core::util::toVec(core::ir::Input(this->min_shape, this->opt_shape, this->max_shape).input_shape);
+  this->dtype = dtype;
+  this->format = format;
+  this->input_is_dynamic = true;
 }
 
-CompileSpec::Input::Input(c10::IntArrayRef opt) {
-  this->opt = core::util::toVec(opt);
-  this->min = core::util::toVec(opt);
-  this->max = core::util::toVec(opt);
-}
-
-CompileSpec::Input::Input(std::vector<int64_t> min, std::vector<int64_t> opt, std::vector<int64_t> max) {
-  this->opt = opt;
-  this->min = min;
-  this->max = max;
-}
+/* ==========================================*/
 
-CompileSpec::Input::Input(c10::IntArrayRef min, c10::IntArrayRef opt, c10::IntArrayRef max) {
-  this->opt = core::util::toVec(opt);
-  this->min = core::util::toVec(min);
-  this->max = core::util::toVec(max);
+core::ir::Input to_internal_input(CompileSpec::InputRange& i) {
+  return core::ir::Input(i.min, i.opt, i.max);
 }
 
-/* ==========================================*/
-
-core::ir::InputRange to_internal_input_range(CompileSpec::InputRange i) {
-  return core::ir::InputRange(i.min, i.opt, i.max);
+core::ir::Input to_internal_input(CompileSpec::Input& i) {
+  return core::ir::Input(i.min_shape, i.opt_shape, i.max_shape, toTRTDataType(i.dtype), toTRTTensorFormat(i.format));
 }
 
-std::vector<core::ir::InputRange> to_vec_internal_input_ranges(std::vector<CompileSpec::InputRange> external) {
-  std::vector<core::ir::InputRange> internal;
+std::vector<core::ir::Input> to_vec_internal_inputs(std::vector<CompileSpec::InputRange>& external) {
+  std::vector<core::ir::Input> internal;
   for (auto range : external) {
-    internal.push_back(to_internal_input_range(range));
+    internal.push_back(to_internal_input(range));
   }
   return internal;
 }
 
-nvinfer1::DataType toTRTDataType(CompileSpec::DataType value) {
-  switch (value) {
-    case CompileSpec::DataType::kChar:
-      return nvinfer1::DataType::kINT8;
-    case CompileSpec::DataType::kHalf:
-      return nvinfer1::DataType::kHALF;
-    case CompileSpec::DataType::kInt32:
-      return nvinfer1::DataType::kINT32;
-    case CompileSpec::DataType::kBool:
-      return nvinfer1::DataType::kBOOL;
-    case CompileSpec::DataType::kFloat:
-    default:
-      return nvinfer1::DataType::kFLOAT;
+std::vector<core::ir::Input> to_vec_internal_inputs(std::vector<CompileSpec::Input>& external) {
+  std::vector<core::ir::Input> internal;
+  for (auto range : external) {
+    internal.push_back(to_internal_input(range));
   }
+  return internal;
 }
 
 core::CompileSpec to_internal_compile_spec(CompileSpec external) {
-  core::CompileSpec internal(to_vec_internal_input_ranges(external.input_ranges));
+  core::CompileSpec internal(to_vec_internal_inputs(external.inputs));
+  if (external.input_ranges.size() > 0) {
+    internal = core::CompileSpec(to_vec_internal_inputs(external.input_ranges));
+  } else {
+    TRTORCH_CHECK(external.inputs.size() > 0, "Compilation requires at least one input specification");
+    internal = core::CompileSpec(to_vec_internal_inputs(external.inputs));
+  }
 
-  internal.convert_info.engine_settings.op_precision = toTRTDataType(external.op_precision);
-  for (auto dtype : external.input_dtypes) {
-    internal.convert_info.engine_settings.input_dtypes.push_back(toTRTDataType(dtype));
+  if (external.enabled_precisions.size() <= 1 &&
+      toTRTDataType(external.op_precision) != nvinfer1::DataType::kFLOAT) {
+    internal.convert_info.engine_settings.enabled_precisions.insert(toTRTDataType(external.op_precision));
+  } else {
+    for (auto p : external.enabled_precisions) {
+      internal.convert_info.engine_settings.enabled_precisions.insert(toTRTDataType(p));
+    }
   }
 
   internal.convert_info.engine_settings.disable_tf32 = external.disable_tf32;
@@ -172,7 +237,7 @@ core::CompileSpec to_internal_compile_spec(CompileSpec external) {
   internal.convert_info.engine_settings.num_avg_timing_iters = external.num_avg_timing_iters;
   internal.convert_info.engine_settings.workspace_size = external.workspace_size;
 
-  if (internal.convert_info.engine_settings.op_precision == nvinfer1::DataType::kINT8) {
+  if (internal.convert_info.engine_settings.enabled_precisions.find(nvinfer1::DataType::kINT8) != internal.convert_info.engine_settings.enabled_precisions.end()) {
     internal.convert_info.engine_settings.calibrator = external.ptq_calibrator;
   } else {
     internal.convert_info.engine_settings.calibrator = nullptr;
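For migration context, the sketch below shows how call sites might move from the deprecated `InputRange` / `op_precision` fields to the `Input` struct and `enabled_precisions` set introduced in this diff. It is a minimal example, not part of the change itself: it assumes the `CompileSpec(std::vector<Input>)` constructor referenced in the deprecation messages, and that `torch::jit::load` and `trtorch::CompileGraph` are unchanged; the model path and shapes are placeholders.

```cpp
#include "torch/script.h"
#include "trtorch/trtorch.h"

int main() {
  // Load a TorchScript module (placeholder path).
  auto mod = torch::jit::load("model.ts");

  // New-style spec: one Input per module argument, in call order.
  // Shape range, expected dtype and tensor format travel together.
  trtorch::CompileSpec::Input in(
      /*min_shape=*/{1, 3, 224, 224},
      /*opt_shape=*/{8, 3, 224, 224},
      /*max_shape=*/{32, 3, 224, 224},
      trtorch::CompileSpec::DataType::kHalf,
      trtorch::CompileSpec::TensorFormat::kContiguous);

  // Assumes the std::vector<Input> constructor named in the deprecation notes.
  trtorch::CompileSpec spec({in});

  // Replaces the single op_precision field: list every precision
  // TensorRT may select kernels from during compilation.
  spec.enabled_precisions.insert(trtorch::CompileSpec::DataType::kFloat);
  spec.enabled_precisions.insert(trtorch::CompileSpec::DataType::kHalf);

  auto trt_mod = trtorch::CompileGraph(mod, spec);
  return 0;
}
```

A static-shape input works the same way by passing a single shape, e.g. `trtorch::CompileSpec::Input({1, 3, 224, 224})`, with the dtype and format arguments left at their defaults.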