[Bugfix] Specify device when loading LoRA and embedding tensors (#7129)

Co-authored-by: Jacob Schein <jacobschein@Jacobs-MacBook-Pro-2.local>
This commit is contained in:
Jacob Schein 2024-08-05 16:35:47 -07:00 committed by GitHub
parent 789937af2e
commit 89b8db6bb2
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

View File

@@ -248,7 +248,7 @@ class LoRAModel(AdapterModel):
f" target modules in {expected_lora_modules}"
f" but received {unexpected_modules}."
f" Please verify that the loaded LoRA module is correct")
-                tensors = torch.load(lora_bin_file_path)
+                tensors = torch.load(lora_bin_file_path, map_location=device)
else:
raise ValueError(f"{lora_dir} doesn't contain tensors")
@@ -257,7 +257,8 @@ class LoRAModel(AdapterModel):
embeddings = safetensors.torch.load_file(
new_embeddings_tensor_path)
elif os.path.isfile(new_embeddings_bin_file_path):
-            embeddings = torch.load(new_embeddings_bin_file_path)
+            embeddings = torch.load(new_embeddings_bin_file_path,
+                                    map_location=device)
rank = config["r"]
lora_alpha = config["lora_alpha"]