From c903ac5f58730dc769d8a87efef7f0a4006e8ee0 Mon Sep 17 00:00:00 2001
From: "Lu, Yintong"
Date: Wed, 13 Dec 2023 16:13:36 +0800
Subject: [PATCH 1/2] [Bug] fix auto-space generation

Signed-off-by: Lu, Yintong
---
 .../adaptor/torch_utils/smooth_quant.py | 15 ++++++---------
 1 file changed, 6 insertions(+), 9 deletions(-)

diff --git a/neural_compressor/adaptor/torch_utils/smooth_quant.py b/neural_compressor/adaptor/torch_utils/smooth_quant.py
index 0cf32183084..2e0983c7edf 100644
--- a/neural_compressor/adaptor/torch_utils/smooth_quant.py
+++ b/neural_compressor/adaptor/torch_utils/smooth_quant.py
@@ -33,6 +33,7 @@ from collections import UserDict, defaultdict


 from tqdm import tqdm
+import numpy


 def enough_memo_store_scale(device, need_space):
@@ -976,15 +977,12 @@ def _auto_tune_alpha(
         :return:
         """
         logger.info("start sq auto tuning")
-        alpha_scale = 100
-        alpha_space = list(
-            range(
-                round(alpha_min * alpha_scale),
-                round((alpha_max + alpha_step) * alpha_scale),
-                round(alpha_step * alpha_scale),
-            )
+        round_num = max(
+            len(str(alpha_min).split(".")[1]),
+            len(str(alpha_max).split(".")[1]),
+            len(str(alpha_step).split(".")[1])
         )
-        alpha_space = [alpha / alpha_scale for alpha in alpha_space]
+        alpha_space = numpy.round(numpy.arange(alpha_min, alpha_max + alpha_step, alpha_step), round_num).tolist()
         ##wrapper new module
         self._qdq_model_wrapper_for_auto(save_q_input=True)
         ##set alpha to 0.5 as default
@@ -1189,7 +1187,6 @@ def transform(
             self.insert_mul, self.allow_absorb = True, False
         if isinstance(alpha, float) and (alpha < 0 or alpha > 1):
             logger.warning("reset alpha to in range [0.0, 1.0]")
-            import numpy
             alpha = numpy.clip(alpha, 0.0, 1.0)

From 6d11b2ebb28f299eb42e65a0150f34452918256b Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Thu, 14 Dec 2023 01:09:14 +0000
Subject: [PATCH 2/2] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 neural_compressor/adaptor/torch_utils/smooth_quant.py | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/neural_compressor/adaptor/torch_utils/smooth_quant.py b/neural_compressor/adaptor/torch_utils/smooth_quant.py
index 2e0983c7edf..e0fd3d7d23f 100644
--- a/neural_compressor/adaptor/torch_utils/smooth_quant.py
+++ b/neural_compressor/adaptor/torch_utils/smooth_quant.py
@@ -32,8 +32,8 @@ logger = logging.getLogger()
 from collections import UserDict, defaultdict


-from tqdm import tqdm
 import numpy
+from tqdm import tqdm


 def enough_memo_store_scale(device, need_space):
@@ -978,9 +978,7 @@ def _auto_tune_alpha(
         """
         logger.info("start sq auto tuning")
         round_num = max(
-            len(str(alpha_min).split(".")[1]),
-            len(str(alpha_max).split(".")[1]),
-            len(str(alpha_step).split(".")[1])
+            len(str(alpha_min).split(".")[1]), len(str(alpha_max).split(".")[1]), len(str(alpha_step).split(".")[1])
         )
         alpha_space = numpy.round(numpy.arange(alpha_min, alpha_max + alpha_step, alpha_step), round_num).tolist()
         ##wrapper new module
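
Note on the behavioral change (illustration only, not part of either commit): the first commit replaces the fixed alpha_scale = 100 integer range() construction with numpy.arange over the raw float bounds, rounded to the longest decimal precision found among alpha_min, alpha_max, and alpha_step. With the old code, any alpha_step finer than 0.01 is distorted or collapses to a zero step after round(alpha_step * 100), which is presumably the bug named in the subject line. The sketch below mirrors the patched logic as a standalone function; the helper name build_alpha_space and the sample values are illustrative assumptions, and it assumes the three bounds are plain floats that print with a decimal part.

import numpy


def build_alpha_space(alpha_min, alpha_max, alpha_step):
    # Mirror of the patched _auto_tune_alpha logic: round to the longest decimal
    # precision among the three inputs so arange's float noise is trimmed without
    # losing fine-grained steps. Assumes each value prints with a decimal point.
    round_num = max(
        len(str(alpha_min).split(".")[1]),
        len(str(alpha_max).split(".")[1]),
        len(str(alpha_step).split(".")[1]),
    )
    # arange excludes its stop value, so alpha_step is added to keep alpha_max in
    # the space; float stepping may still add or drop one edge value.
    return numpy.round(numpy.arange(alpha_min, alpha_max + alpha_step, alpha_step), round_num).tolist()


if __name__ == "__main__":
    print(build_alpha_space(0.3, 0.7, 0.05))        # roughly [0.3, 0.35, ..., 0.7]
    print(len(build_alpha_space(0.0, 1.0, 0.001)))  # a fine step the old alpha_scale=100 code could not generate

The second commit is formatting only (import reordering and argument joining from the pre-commit hooks) and does not change this behavior.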