diff --git a/python/llm/test/inference_gpu/test_transformers_api_attention.py b/python/llm/test/inference_gpu/test_transformers_api_attention.py
index 84bdcf8e8cb..c18a52bb201 100644
--- a/python/llm/test/inference_gpu/test_transformers_api_attention.py
+++ b/python/llm/test/inference_gpu/test_transformers_api_attention.py
@@ -151,7 +151,7 @@ def Llama2_7B_gpu_model(self, Name, Model, Tokenizer, model_path):
         # currently only compare the output of the last self-attention layer.
         layer_norm = "model.layers.31.input_layernorm"
         self_attn = "model.layers.31.self_attn"
-        lower_bound = 8e-3
+        lower_bound = 2e-2
         self.run_optimize_gpu_model(Name, Model, Tokenizer, model_path, self_attn, layer_norm, lower_bound)

     def Falcon_7B_gpu_model(self, Name, Model, Tokenizer, model_path):
@@ -165,7 +165,7 @@ def Chatglm2_gpu_model(self, Name, Model, Tokenizer, model_path):
         # currently only need to compare the output of one self-attention layer.
         layer_norm = "transformer.encoder.layers.27.input_layernorm"
         self_attn = "transformer.encoder.layers.27.self_attention"
-        lower_bound = 4e-2
+        lower_bound = 1e-1
         self.run_optimize_gpu_model(Name, Model, Tokenizer, model_path, self_attn, layer_norm, lower_bound)

     def Mistral_gpu_model(self, Name, Model, Tokenizer, model_path):
@@ -182,7 +182,7 @@ def Baichuan_gpu_model(self, Name, Model, Tokenizer, model_path):
         # currently only need to compare the output of one self-attention layer.
         layer_norm = "model.layers.31.input_layernorm"
         self_attn = "model.layers.31.self_attn"
-        lower_bound = 8e-3
+        lower_bound = 2e-2
         self.run_optimize_gpu_model(Name, Model, Tokenizer, model_path, self_attn, layer_norm, lower_bound)

     def Qwen_gpu_model(self, Name, Model, Tokenizer, model_path):
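
Note (not part of the diff): each hunk relaxes the `lower_bound` tolerance passed to `run_optimize_gpu_model`, which compares the output of the named self-attention layer between the optimized and reference models. The helper itself is not shown in this diff; the sketch below is a hypothetical illustration of such a tolerance check. The function name `compare_layer_outputs` and the mean-absolute-difference metric are assumptions for illustration, not the repository's actual implementation.

    import torch

    def compare_layer_outputs(opt_out: torch.Tensor,
                              ref_out: torch.Tensor,
                              lower_bound: float) -> None:
        # Assumed metric: mean absolute difference between the optimized
        # and reference layer outputs must stay within the bound.
        diff = (opt_out - ref_out).abs().mean().item()
        assert diff <= lower_bound, (
            f"attention output drifted by {diff:.4e}, bound is {lower_bound:.0e}"
        )

    # Example: noise on the order of 1e-2 passes the relaxed 2e-2 bound
    # used for the Llama2 and Baichuan checks above.
    ref = torch.randn(1, 32, 4096)
    opt = ref + 1e-2 * torch.randn_like(ref)
    compare_layer_outputs(opt, ref, lower_bound=2e-2)

Under this reading, the change trades strictness for stability: low-precision GPU kernels can legitimately drift more than the old 8e-3 / 4e-2 bounds allowed, so the tests now tolerate roughly 2.5x larger per-element error rather than failing intermittently.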