fix: Partial compilation translation to internal settings was incorrect
Signed-off-by: Naren Dasan <[email protected]>
narendasan committed Nov 3, 2021
1 parent 540e135 commit 648bad3
Showing 2 changed files with 5 additions and 5 deletions.
2 changes: 1 addition & 1 deletion cpp/src/compile_spec.cpp
@@ -54,7 +54,7 @@ torchtrt::core::CompileSpec to_internal_compile_spec(CompileSpec external) {
       "require_full_compilation is enabled however the list of modules to run in torch is not empty (Found "
           << external.torch_executed_modules.size() << " modules)");
 
-  internal.partition_info.enabled = external.require_full_compilation;
+  internal.partition_info.enabled = !external.require_full_compilation;
   internal.partition_info.min_block_size = external.min_block_size;
   internal.partition_info.forced_fallback_operators = std::move(external.torch_executed_ops);
   internal.lower_info.forced_fallback_modules = std::move(external.torch_executed_modules);
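For context, the corrected assignment above means the partitioning (partial compilation) pass is enabled exactly when the caller does not require full compilation; the old code inverted that behavior. A minimal Python sketch of the same translation logic, using hypothetical ExternalSpec and PartitionInfo stand-ins that are not the library's actual classes, might look like this:

from dataclasses import dataclass, field
from typing import List

# Hypothetical stand-ins for the C++ CompileSpec structs; illustration only.
@dataclass
class ExternalSpec:
    require_full_compilation: bool = False
    min_block_size: int = 3
    torch_executed_ops: List[str] = field(default_factory=list)

@dataclass
class PartitionInfo:
    enabled: bool = False
    min_block_size: int = 1
    forced_fallback_operators: List[str] = field(default_factory=list)

def to_internal_partition_info(external: ExternalSpec) -> PartitionInfo:
    # Partitioning runs only when full compilation is NOT required;
    # the bug fixed here was dropping this negation.
    return PartitionInfo(
        enabled=not external.require_full_compilation,
        min_block_size=external.min_block_size,
        forced_fallback_operators=list(external.torch_executed_ops),
    )

assert to_internal_partition_info(ExternalSpec(require_full_compilation=True)).enabled is False
assert to_internal_partition_info(ExternalSpec(require_full_compilation=False)).enabled is True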
8 changes: 4 additions & 4 deletions py/torch_tensorrt/ts/_compiler.py
@@ -95,8 +95,7 @@ def compile(module: torch.jit.ScriptModule,
     spec = {
         "inputs": inputs,
         "device": device,
-        "disable_tf32":
-            disable_tf32, # Force FP32 layers to use traditional as FP32 format vs the default behavior of rounding the inputs to 10-bit mantissas before multiplying, but accumulates the sum using 23-bit mantissas
+        "disable_tf32": disable_tf32, # Force FP32 layers to use traditional as FP32 format
         "sparse_weights": sparse_weights, #Enable sparsity for convolution and fully connected layers.
         "enabled_precisions": enabled_precisions, # Enabling FP16 kernels
         "refit": refit, # enable refit
@@ -111,8 +110,9 @@ def compile(module: torch.jit.ScriptModule,
         "truncate_long_and_double": truncate_long_and_double,
         "torch_fallback": {
             "enabled": not require_full_compilation,
-            "force_fallback_ops": torch_executed_ops,
-            "force_fallback_modules": torch_executed_modules
+            "forced_fallback_ops": torch_executed_ops,
+            "forced_fallback_modules": torch_executed_modules,
"min_block_size": min_block_sizexs
         }
     }
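A hedged usage sketch of the Python front end that builds this spec dictionary; the partial-compilation parameter names mirror the diff above, but exact signatures and defaults should be checked against the torch_tensorrt version in use:

import torch
import torch.nn as nn
import torch_tensorrt

# Small illustrative model; any scripted module would do.
model = nn.Sequential(nn.Conv2d(3, 16, 3), nn.ReLU(), nn.Conv2d(16, 16, 3)).eval()
scripted = torch.jit.script(model)

trt_module = torch_tensorrt.ts.compile(
    scripted,
    inputs=[torch_tensorrt.Input((1, 3, 224, 224))],
    enabled_precisions={torch.float},
    # Partial compilation settings whose translation this commit fixes:
    require_full_compilation=False,      # allow graph partitioning / Torch fallback
    min_block_size=3,                    # minimum contiguous ops per TensorRT segment
    torch_executed_ops=["aten::relu"],   # ops forced to run in PyTorch
    torch_executed_modules=[],           # module types forced to run in PyTorch
)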

Expand Down
