Fix sampler

Woosuk Kwon 2023-02-23 20:30:12 +00:00
parent fdd0f2f472
commit de0fabbc5c
2 changed files with 6 additions and 8 deletions


@@ -227,7 +227,7 @@ class OPTForCausalLM(OPTPreTrainedModel):
         self.model = OPTModel(config)
         # the lm_head weight is automatically tied to the embed tokens weight
         self.lm_head = nn.Linear(config.word_embed_proj_dim, config.vocab_size, bias=False)
-        self.sampler = Sampler(embedding=self.lm_head.weight)
+        self.sampler = Sampler()
 
         # Initialize weights and apply final processing
         self.post_init()

@@ -242,5 +242,6 @@ class OPTForCausalLM(OPTPreTrainedModel):
     ) -> Dict[int, Tuple[int, int]]:
         hidden_states = self.model(
             input_ids, positions, kv_caches, input_metadata, cache_events)
-        next_tokens = self.sampler(hidden_states, input_metadata)
+        next_tokens = self.sampler(
+            self.lm_head.weight, hidden_states, input_metadata)
         return next_tokens
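The tensor handed to the sampler here is the weight of the bias-free lm_head projection, which has shape [vocab_size, hidden_size] (the layout noted in the old Sampler comment). A minimal sketch of the projection the sampler performs with it, using made-up toy dimensions rather than the real OPT config values:

import torch
import torch.nn as nn

# Toy dimensions standing in for config.vocab_size and the hidden size
# (config.word_embed_proj_dim); not the real OPT values.
vocab_size, hidden_size = 32000, 512

# Same construction as in the diff: a bias-free linear layer whose
# weight tensor has shape [vocab_size, hidden_size].
lm_head = nn.Linear(hidden_size, vocab_size, bias=False)

# Hidden states for a few "last" tokens, one row per sequence.
hidden_states = torch.randn(4, hidden_size)

# Projecting back onto the vocabulary gives next-token logits,
# matching the matmul in Sampler.forward.
logits = torch.matmul(hidden_states, lm_head.weight.t())
print(logits.shape)  # torch.Size([4, 32000])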


@@ -8,15 +8,12 @@ from cacheflow.models import InputMetadata
 class Sampler(nn.Module):
 
-    def __init__(
-        self,
-        embedding: torch.Tensor,
-    ) -> None:
+    def __init__(self) -> None:
         super().__init__()
-        self.embedding = embedding  # [vocab_size, hidden_size]
 
     def forward(
         self,
+        embedding: torch.Tensor,
         hidden_states: torch.Tensor,
         input_metadata: InputMetadata,
     ) -> Dict[int, Tuple[int, int]]:

@@ -31,7 +28,7 @@ class Sampler(nn.Module):
         hidden_states = hidden_states[last_token_indicies]
 
         # Get the logits for the next tokens.
-        logits = torch.matmul(hidden_states, self.embedding.t())
+        logits = torch.matmul(hidden_states, embedding.t())
 
         # Sample the next tokens.
         # TODO(woosuk): Implement other sampling methods.
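After this change the Sampler holds no state of its own; the embedding weight arrives as the first argument of forward on every call. A self-contained sketch of the new calling convention follows; the ToySampler class, the last_token_indices tensor, and the greedy argmax are placeholders for cacheflow's InputMetadata bookkeeping and sampling logic, which this diff does not show.

import torch
import torch.nn as nn


class ToySampler(nn.Module):
    """Stateless sampler: the embedding is an argument, not an attribute."""

    def forward(
        self,
        embedding: torch.Tensor,      # [vocab_size, hidden_size]
        hidden_states: torch.Tensor,  # [num_tokens, hidden_size]
        last_token_indices: torch.Tensor,
    ) -> torch.Tensor:
        # Keep only the hidden state of each sequence's last token.
        hidden_states = hidden_states[last_token_indices]
        # Project back onto the vocabulary to get next-token logits.
        logits = torch.matmul(hidden_states, embedding.t())
        # Greedy argmax as a stand-in for the real sampling methods.
        return torch.argmax(logits, dim=-1)


vocab_size, hidden_size = 32, 16
lm_head = nn.Linear(hidden_size, vocab_size, bias=False)
sampler = ToySampler()

hidden_states = torch.randn(10, hidden_size)   # 10 tokens in the batch
last_token_indices = torch.tensor([3, 9])      # two sequences
next_tokens = sampler(lm_head.weight, hidden_states, last_token_indices)
print(next_tokens)  # one token id per sequence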