Fix a bug in tying OPT embeddings (#1)

Woosuk Kwon 2023-02-24 16:29:36 -08:00 committed by GitHub
parent c84c708a1d
commit cbf8779afa
2 changed files with 25 additions and 2 deletions


@@ -25,7 +25,8 @@ def get_model(
         torch_dtype = STR_DTYPE_TO_TORCH_DTYPE[dtype.lower()]
     else:
         torch_dtype = dtype
-    for model_class, model in MODEL_CLASSES.items():
+    for model_class, hf_model in MODEL_CLASSES.items():
         if model_class in model_name:
-            return model.from_pretrained(model_name, torch_dtype=torch_dtype)
+            model = hf_model.from_pretrained(model_name, torch_dtype=torch_dtype)
+            return model.eval()
     raise ValueError(f'Invalid model name: {model_name}')
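
For context: the pre-fix loop bound the loop variable to the name `model` and returned the result of `from_pretrained` directly, so the model was never put into eval mode. Below is a minimal sketch of the fixed dispatch, assuming a MODEL_CLASSES table keyed by substrings of the model name; the key and class here are illustrative stand-ins, not the repository's exact table.

import torch
from transformers import OPTForCausalLM as HFOPTForCausalLM

# Illustrative table: substring of the model name -> model class.
MODEL_CLASSES = {
    'opt': HFOPTForCausalLM,
}

def get_model(model_name: str, dtype: torch.dtype) -> torch.nn.Module:
    for model_class, hf_model in MODEL_CLASSES.items():
        if model_class in model_name:
            # Instantiate, then disable dropout etc. for inference via eval().
            model = hf_model.from_pretrained(model_name, torch_dtype=dtype)
            return model.eval()
    raise ValueError(f'Invalid model name: {model_name}')

# Example usage: model = get_model('facebook/opt-125m', torch.float16)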


@@ -232,6 +232,28 @@ class OPTForCausalLM(OPTPreTrainedModel):
         # Initialize weights and apply final processing
         self.post_init()
 
+    # NOTE(woosuk): While the following methods are not called in the model code,
+    # they may be internally used by the transformers library.
+    # For example, tie_weights() does not work without these methods.
+    # Thus, do not delete these methods.
+    def get_input_embeddings(self):
+        return self.model.decoder.embed_tokens
+
+    def set_input_embeddings(self, value):
+        self.model.decoder.embed_tokens = value
+
+    def get_output_embeddings(self):
+        return self.lm_head
+
+    def set_output_embeddings(self, new_embeddings):
+        self.lm_head = new_embeddings
+
+    def set_decoder(self, decoder):
+        self.model.decoder = decoder
+
+    def get_decoder(self):
+        return self.model.decoder
+
     def forward(
         self,
         input_ids: torch.LongTensor,
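
The NOTE above matters because transformers ties the input and output embeddings through exactly these accessors: PreTrainedModel.tie_weights() fetches both embedding modules via get_output_embeddings()/get_input_embeddings() and points the output weight at the input weight when config.tie_word_embeddings is set. A quick check of the mechanism (a sketch, not part of the commit, using the upstream facebook/opt-125m checkpoint as an illustrative stand-in for this class):

import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained('facebook/opt-125m')

# tie_weights() works only if get_output_embeddings()/get_input_embeddings()
# are defined; in essence it performs:
#   output_embeddings.weight = input_embeddings.weight
model.tie_weights()

# With tie_word_embeddings=True (the OPT default), lm_head now shares
# storage with embed_tokens.
assert (model.get_output_embeddings().weight.data_ptr()
        == model.get_input_embeddings().weight.data_ptr())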