[BugFix] gemma loading after quantization or LoRA. (#3553)
parent c188ecb080
commit b7050ca7df
@@ -340,6 +340,10 @@ class GemmaForCausalLM(nn.Module):
                 weight_loader(param, loaded_weight, shard_id)
                 break
             else:
+                # lm_head is not used in vllm as it is tied with embed_token.
+                # To prevent errors, skip loading lm_head.weight.
+                if "lm_head.weight" in name:
+                    continue
                 # Skip loading extra bias for GPTQ models.
                 if name.endswith(".bias") and name not in params_dict:
                     continue
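The added guard matters because Gemma ties lm_head to embed_tokens, so the vLLM model registers no separate lm_head.weight parameter; checkpoints produced after quantization or LoRA merging may still serialize that tensor, and looking it up in params_dict would raise an error. The following standalone sketch is not the vLLM loader itself (the TinyGemmaLike module and load_weights helper are hypothetical stand-ins); it only illustrates the same skip logic over a mock checkpoint:

# Minimal, self-contained sketch of the skip logic in the hunk above.
# TinyGemmaLike and load_weights are hypothetical, not vLLM code.
import torch


class TinyGemmaLike(torch.nn.Module):
    def __init__(self, vocab_size: int = 8, hidden_size: int = 4):
        super().__init__()
        self.embed_tokens = torch.nn.Embedding(vocab_size, hidden_size)
        # lm_head is tied to embed_tokens, so no separate parameter is registered.


def load_weights(model: torch.nn.Module, checkpoint: dict) -> None:
    params_dict = dict(model.named_parameters())
    for name, loaded_weight in checkpoint.items():
        # lm_head is tied with embed_tokens; skip it to avoid a KeyError below.
        if "lm_head.weight" in name:
            continue
        # Skip extra biases that quantized (e.g. GPTQ) checkpoints may carry.
        if name.endswith(".bias") and name not in params_dict:
            continue
        params_dict[name].data.copy_(loaded_weight)


model = TinyGemmaLike()
checkpoint = {
    "embed_tokens.weight": torch.randn(8, 4),
    "lm_head.weight": torch.randn(8, 4),  # present in some quantized/merged checkpoints
}
load_weights(model, checkpoint)  # succeeds; lm_head.weight is ignored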