From f359b967fe7b78a90d336cb093b46e7f790b1e3f Mon Sep 17 00:00:00 2001
From: Yishuo Wang
Date: Tue, 17 Dec 2024 10:31:32 +0800
Subject: [PATCH] update

---
 .../test_transformers_api_layernorm.py | 38 +++++++++----------
 1 file changed, 19 insertions(+), 19 deletions(-)

diff --git a/python/llm/test/inference_gpu/test_transformers_api_layernorm.py b/python/llm/test/inference_gpu/test_transformers_api_layernorm.py
index 68a15d8a579..b0cd8a178f7 100644
--- a/python/llm/test/inference_gpu/test_transformers_api_layernorm.py
+++ b/python/llm/test/inference_gpu/test_transformers_api_layernorm.py
@@ -13,39 +13,39 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-
+
 import os
 import pytest
 import gc
-
+
 import torch
 from ipex_llm.transformers import AutoModelForCausalLM, AutoModel
 from transformers import LlamaTokenizer, AutoTokenizer
-
+
 device = os.environ['DEVICE']
 print(f'Running on {device}')
-
+
 PROMPT = "Once upon a time, there existed a little girl who liked to have adventures. She wanted to go to places and meet new people, and have fun"
 TEST_MODEL_LIST = [
     ("Falcon-7B", AutoModelForCausalLM, AutoTokenizer, os.environ.get('FALCON_7B_ORIGIN_PATH'))
 ]
-
+
 class Test_Optimize_Gpu_Model:
     def setup_method(self):
         self.layer_outputs = []
         self.pre_layer_outputs = []
-
+
     def run_optimize_gpu_model(self, Name, Model, Tokenizer, model_path, LayerNorm_layer, layer_before_LayerNorm, lower_bound):
         with torch.inference_mode():
             def pre_forward_hook(module, input, output, layer_name):
                 self.pre_layer_outputs.append(output)
-
+
             def forward_hook(module, input, output, layer_name):
                 self.layer_outputs.append(output)
-
+
             tokenizer = Tokenizer.from_pretrained(model_path, trust_remote_code=True)
             input_ids = tokenizer.encode(PROMPT, return_tensors="pt").to(device)
-
+
             model = Model.from_pretrained(model_path,
                                           load_in_4bit=True,
                                           optimize_model=False,
@@ -64,18 +64,18 @@ def forward_hook(module, input, output, layer_name):
             # the list `layer_output` has only one element.
             layer_tensor = self.layer_outputs.pop()
             model.to('cpu')
-
+
             opt_model = Model.from_pretrained(model_path,
                                               load_in_4bit=True,
                                               optimize_model=True,
                                               trust_remote_code=True)
             opt_model = opt_model.to(device)
-
-
+
+
             def replace_forward_hook(module, input, output, layer_name):
                 output = self.pre_layer_outputs[0]
                 return output
-
+
             for layer_name, layer_module in opt_model.named_modules():
                 if layer_name == layer_before_LayerNorm:
                     layer_module.register_forward_hook(
@@ -89,12 +89,12 @@ def replace_forward_hook(module, input, output, layer_name):
             # the list `layer_output` has only one element.
             opt_layer_tensor = self.layer_outputs[0]
             opt_model.to('cpu')
-
-
+
+
             LayerNorm_output_diff = []
             for i, (t1, t2) in enumerate(zip(layer_tensor, opt_layer_tensor)):
                 LayerNorm_output_diff.append(t1 - t2)
-
+
             max_diff_tensor = [torch.max(item).item() for item in LayerNorm_output_diff]
             print(max_diff_tensor)
             torch.xpu.empty_cache()
@@ -102,16 +102,16 @@ def replace_forward_hook(module, input, output, layer_name):
             del opt_model
             gc.collect()
             assert all(max_diff <= lower_bound for max_diff in max_diff_tensor)
-
+
     @pytest.mark.parametrize('Name, Model, Tokenizer, model_path',TEST_MODEL_LIST)
     def test_dynamic_functions(self, Name, Model, Tokenizer, model_path):
         if Name == "Falcon-7B":
             self.Falcon_7B_gpu_model(Name, Model, Tokenizer, model_path)
-
+
     def Falcon_7B_gpu_model(self, Name, Model, Tokenizer, model_path):
         # currently only compare the output of the last LayerNorm layer.
         layer_before_LayerNorm = "transformer.h.30"
         LayerNorm_layer = "transformer.h.31.input_layernorm"
-        lower_bound = 0
+        lower_bound = 1e-5
         self.run_optimize_gpu_model(Name, Model, Tokenizer, model_path, LayerNorm_layer, layer_before_LayerNorm, lower_bound)
\ No newline at end of file
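
For reference, the test this patch touches compares a LayerNorm layer between an unoptimized and an optimized copy of the same model: forward hooks capture the layer's output in both copies, and a hook on the layer just before the LayerNorm returns the unoptimized model's activation, so the optimized LayerNorm receives identical input. The patch relaxes the comparison bound from 0 to 1e-5, since the two code paths need not be bit-identical. Below is a minimal, self-contained sketch of that hook pattern; the toy two-layer CPU model, the names used, and the 1e-5 tolerance are illustrative assumptions, not the Falcon-7B setup from the test.

import torch
import torch.nn as nn


def make_capture_hook(store):
    # Forward hook that records the module's output for later comparison.
    def hook(module, inputs, output):
        store.append(output)
    return hook


def make_replace_hook(replacement):
    # Returning a value from a forward hook overrides the module's output,
    # so every downstream layer sees `replacement` instead of the real
    # activation; this is how the test aligns the two models' inputs.
    def hook(module, inputs, output):
        return replacement
    return hook


torch.manual_seed(0)
reference = nn.Sequential(nn.Linear(16, 16), nn.LayerNorm(16))
optimized = nn.Sequential(nn.Linear(16, 16), nn.LayerNorm(16))
optimized.load_state_dict(reference.state_dict())  # stand-in for an "optimized" copy

x = torch.randn(2, 16)
pre_outputs, ref_outputs, opt_outputs = [], [], []

with torch.inference_mode():
    # Reference pass: capture the pre-LayerNorm activation and the
    # LayerNorm output.
    h1 = reference[0].register_forward_hook(make_capture_hook(pre_outputs))
    h2 = reference[1].register_forward_hook(make_capture_hook(ref_outputs))
    reference(x)
    h1.remove()
    h2.remove()

    # Optimized pass: force the layer before the LayerNorm to emit the
    # reference activation, so any difference in the captured output is
    # attributable to the LayerNorm implementation alone.
    h3 = optimized[0].register_forward_hook(make_replace_hook(pre_outputs[0]))
    h4 = optimized[1].register_forward_hook(make_capture_hook(opt_outputs))
    optimized(x)
    h3.remove()
    h4.remove()

# Compare with an absolute tolerance, mirroring the patch's relaxed bound.
max_diff = (ref_outputs[0] - opt_outputs[0]).abs().max().item()
assert max_diff <= 1e-5, f"LayerNorm output diverged by {max_diff}"

One design note: the sketch compares the absolute element-wise difference, while the test's max_diff_tensor takes the signed torch.max of t1 - t2. The absolute form also flags the case where the optimized output exceeds the reference everywhere, which a signed max would report as a negative, and therefore passing, difference.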