From c7808e44d5ccc2390503a1517e1d8b01e6f4db14 Mon Sep 17 00:00:00 2001
From: xin3he
Date: Fri, 21 Jun 2024 14:55:58 +0800
Subject: [PATCH] fix bug

Signed-off-by: xin3he
---
 test/3x/torch/quantization/weight_only/test_rtn.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/3x/torch/quantization/weight_only/test_rtn.py b/test/3x/torch/quantization/weight_only/test_rtn.py
index 1c6c0a2c9d5..206bd20aa10 100644
--- a/test/3x/torch/quantization/weight_only/test_rtn.py
+++ b/test/3x/torch/quantization/weight_only/test_rtn.py
@@ -241,7 +241,7 @@ def test_double_quant_params(self, dtype, double_quant_bits, double_quant_group_
         out = model(self.example_inputs)[0]
         atol_true = (out - self.q_label).amax()
         # compare atol, this case is an ideal case.
-        if not (dtype, double_quant_bits, double_quant_group_size) == (256, 6, "nf4"):
+        if not (dtype, double_quant_bits, double_quant_group_size) == ("nf4", 6, 256):
             assert (
                 atol_false < atol_true
             ), "asym for double quant should have smaller atol because scales is bigger than zero, please double check."
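
Note on the fix: the compared variables are ordered (dtype, double_quant_bits, double_quant_group_size), so a matching literal must be ordered ("nf4", 6, 256). The old literal (256, 6, "nf4") compared the string dtype against an int and the int group size against a string, so it presumably could never match and the skip branch never fired. A minimal standalone sketch of the mismatch follows; the sample values are illustrative assumptions, not values taken from the test's parametrization:

    # Hypothetical sample values mirroring the test's parameter order.
    dtype, double_quant_bits, double_quant_group_size = "nf4", 6, 256

    # Tuple equality is positional: each element must equal its counterpart.
    print((dtype, double_quant_bits, double_quant_group_size) == (256, 6, "nf4"))   # False: "nf4" != 256
    print((dtype, double_quant_bits, double_quant_group_size) == ("nf4", 6, 256))   # True: order matches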