From 906333abd41c8be8a6f097da42c1931ea3bb37d5 Mon Sep 17 00:00:00 2001
From: Kaihui-intel
Date: Sat, 14 Sep 2024 16:17:46 +0800
Subject: [PATCH] Replace FORCE_DEVICE with INC_TARGET_DEVICE [transformers] (#2005)

Signed-off-by: Kaihui-intel
---
 neural_compressor/transformers/quantization/utils.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/neural_compressor/transformers/quantization/utils.py b/neural_compressor/transformers/quantization/utils.py
index 8dba085a553..6f209344348 100644
--- a/neural_compressor/transformers/quantization/utils.py
+++ b/neural_compressor/transformers/quantization/utils.py
@@ -353,9 +353,9 @@ def convert_to_quantized_model(model, config, device="cpu"):
         import intel_extension_for_pytorch

         assert hasattr(torch, "xpu") and torch.xpu.is_available(), "There is no xpu device in this system!"
-        os.environ["FORCE_DEVICE"] = "cpu"
+        os.environ["INC_TARGET_DEVICE"] = "cpu"
         logger.info(
-            "Set the environment variable FORCE_DEVICE='cpu' to ensure the quantization process occurs on the CPU."
+            "Set the environment variable INC_TARGET_DEVICE='cpu' to ensure the quantization process occurs on the CPU."
         )

     orig_dtype = torch.float32
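
Note (not part of the patch): the sketch below illustrates how downstream code that previously exported FORCE_DEVICE before quantizing for an XPU target might adapt to the renamed variable. The helper name pin_quantization_to_cpu is hypothetical, and keeping the legacy name in sync is only an assumption for older tooling, not something this patch requires.

    import os

    def pin_quantization_to_cpu() -> None:
        # New variable name introduced by this patch; convert_to_quantized_model()
        # sets it itself when targeting XPU, so exporting it up front is optional.
        os.environ["INC_TARGET_DEVICE"] = "cpu"
        # Assumption: older tooling may still read the legacy name, so mirror it
        # during a migration window. Not required by this patch.
        os.environ.setdefault("FORCE_DEVICE", "cpu")

    pin_quantization_to_cpu()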