You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
Traceback (most recent call last):
File "/home/jerome/github/LLaMA-Efficient-Tuning/src/train_sft.py", line 98, in <module>
main()
File "/home/jerome/github/LLaMA-Efficient-Tuning/src/train_sft.py", line 26, in main
model, tokenizer = load_pretrained(model_args, finetuning_args, training_args.do_train, stage="sft")
File "/home/jerome/github/LLaMA-Efficient-Tuning/src/utils/common.py", line 216, in load_pretrained
model = _init_adapter(model, model_args, finetuning_args, is_trainable, is_mergeable)
File "/home/jerome/github/LLaMA-Efficient-Tuning/src/utils/common.py", line 133, in _init_adapter
model = get_peft_model(model, lora_config)
File "/home/jerome/anaconda3/envs/left/lib/python3.10/site-packages/peft/mapping.py", line 120, in get_peft_model
return MODEL_TYPE_TO_PEFT_MODEL_MAPPING[peft_config.task_type](model, peft_config)
File "/home/jerome/anaconda3/envs/left/lib/python3.10/site-packages/peft/peft_model.py", line 662, in __init__
super().__init__(model, peft_config, adapter_name)
File "/home/jerome/anaconda3/envs/left/lib/python3.10/site-packages/peft/peft_model.py", line 99, in __init__
self.base_model = PEFT_TYPE_TO_MODEL_MAPPING[peft_config.peft_type](
File "/home/jerome/anaconda3/envs/left/lib/python3.10/site-packages/peft/tuners/lora.py", line 154, in __init__
self.add_adapter(adapter_name, self.peft_config[adapter_name])
File "/home/jerome/anaconda3/envs/left/lib/python3.10/site-packages/peft/tuners/lora.py", line 161, in add_adapter
self._find_and_replace(adapter_name)
File "/home/jerome/anaconda3/envs/left/lib/python3.10/site-packages/peft/tuners/lora.py", line 254, in _find_and_replace
raise ValueError(
ValueError: Target modules ['q_proj', 'v_proj'] not found in the base model. Please check the target modules and try again.
The text was updated successfully, but these errors were encountered:
是不是还不支持baichuan-7B基座模型,还是说我PEFT的版本有问题?(Translation: Is the baichuan-7B base model not supported yet, or is there a problem with my PEFT version?)
Traceback (most recent call last):
File "/home/jerome/github/LLaMA-Efficient-Tuning/src/train_sft.py", line 98, in <module>
main()
File "/home/jerome/github/LLaMA-Efficient-Tuning/src/train_sft.py", line 26, in main
model, tokenizer = load_pretrained(model_args, finetuning_args, training_args.do_train, stage="sft")
File "/home/jerome/github/LLaMA-Efficient-Tuning/src/utils/common.py", line 216, in load_pretrained
model = _init_adapter(model, model_args, finetuning_args, is_trainable, is_mergeable)
File "/home/jerome/github/LLaMA-Efficient-Tuning/src/utils/common.py", line 133, in _init_adapter
model = get_peft_model(model, lora_config)
File "/home/jerome/anaconda3/envs/left/lib/python3.10/site-packages/peft/mapping.py", line 120, in get_peft_model
return MODEL_TYPE_TO_PEFT_MODEL_MAPPING[peft_config.task_type](model, peft_config)
File "/home/jerome/anaconda3/envs/left/lib/python3.10/site-packages/peft/peft_model.py", line 662, in __init__
super().__init__(model, peft_config, adapter_name)
File "/home/jerome/anaconda3/envs/left/lib/python3.10/site-packages/peft/peft_model.py", line 99, in __init__
self.base_model = PEFT_TYPE_TO_MODEL_MAPPING[peft_config.peft_type](
File "/home/jerome/anaconda3/envs/left/lib/python3.10/site-packages/peft/tuners/lora.py", line 154, in __init__
self.add_adapter(adapter_name, self.peft_config[adapter_name])
File "/home/jerome/anaconda3/envs/left/lib/python3.10/site-packages/peft/tuners/lora.py", line 161, in add_adapter
self._find_and_replace(adapter_name)
File "/home/jerome/anaconda3/envs/left/lib/python3.10/site-packages/peft/tuners/lora.py", line 254, in _find_and_replace
raise ValueError(
ValueError: Target modules ['q_proj', 'v_proj'] not found in the base model. Please check the target modules and try again.
The text was updated successfully, but these errors were encountered: