From dd1a50910d2afa9d4d49b90eeb8491950465add4 Mon Sep 17 00:00:00 2001
From: egortolmachev <150433814+egortolmachev@users.noreply.github.com>
Date: Mon, 8 Apr 2024 17:59:38 +0300
Subject: [PATCH] [Bugfix] Added Command-R GPTQ support (#3849)

Co-authored-by: Egor Tolmachev
---
 vllm/model_executor/models/commandr.py | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/vllm/model_executor/models/commandr.py b/vllm/model_executor/models/commandr.py
index 620d631351900..4674dcbc14da6 100644
--- a/vllm/model_executor/models/commandr.py
+++ b/vllm/model_executor/models/commandr.py
@@ -349,11 +349,21 @@ def load_weights(
                 if shard_name not in name:
                     continue
                 name = name.replace(shard_name, param_name)
+                # Skip loading extra bias for GPTQ models.
+                if name.endswith(".bias") and name not in params_dict:
+                    continue
                 param = params_dict[name]
                 weight_loader = param.weight_loader
                 weight_loader(param, loaded_weight, shard_id)
                 break
             else:
+                # lm_head is not used in vllm as it is tied with embed_token.
+                # To prevent errors, skip loading lm_head.weight.
+                if "lm_head.weight" in name:
+                    continue
+                # Skip loading extra bias for GPTQ models.
+                if name.endswith(".bias") and name not in params_dict:
+                    continue
                 param = params_dict[name]
                 weight_loader = getattr(param, "weight_loader",
                                         default_weight_loader)