[Bugfix] Specify device when loading LoRA and embedding tensors (#7129)
Co-authored-by: Jacob Schein <jacobschein@Jacobs-MacBook-Pro-2.local>
This commit is contained in:
parent
789937af2e
commit
89b8db6bb2
@@ -248,7 +248,7 @@ class LoRAModel(AdapterModel):
                    f" target modules in {expected_lora_modules}"
                    f" but received {unexpected_modules}."
                    f" Please verify that the loaded LoRA module is correct")
-            tensors = torch.load(lora_bin_file_path)
+            tensors = torch.load(lora_bin_file_path, map_location=device)
        else:
            raise ValueError(f"{lora_dir} doesn't contain tensors")
@@ -257,7 +257,8 @@ class LoRAModel(AdapterModel):
            embeddings = safetensors.torch.load_file(
                new_embeddings_tensor_path)
        elif os.path.isfile(new_embeddings_bin_file_path):
-            embeddings = torch.load(new_embeddings_bin_file_path)
+            embeddings = torch.load(new_embeddings_bin_file_path,
+                                    map_location=device)

        rank = config["r"]
        lora_alpha = config["lora_alpha"]
|
|||||||
Loading…
Reference in New Issue
Block a user