diff --git a/vllm/sequence.py b/vllm/sequence.py
index 92362a9a..7dcacab6 100644
--- a/vllm/sequence.py
+++ b/vllm/sequence.py
@@ -160,7 +160,7 @@ class SequenceData:
         self._stage = SequenceStage.PREFILL
 
     def get_num_uncomputed_tokens(self) -> int:
-        """Return the number of prefil tokens that are not computed."""
+        """Return the number of prefill tokens that are not computed."""
         # we use `get_len()` which includes prompt_len + output_len instead
         # of prompt_len here. This is because during recompute we need to
         # prefill for both prompt and output.
@@ -345,12 +345,9 @@ class Sequence:
     def get_num_new_tokens(self) -> int:
         """Get the number of new tokens to be computed.
 
-        Args:
-            remainig_token_budget: The remaining token budgets.
         Returns:
-            The new number of tokens to be computed. I.e., 1 for decode, prompt
-            size for prefill. If there's not enough remainig_token_budget, it
-            can return the chunked number of new tokens.
+            The new number of tokens to be computed. I.e., 1 for decode, or
+            the remaining prompt size for prefill.
         """
         if self.data.stage == SequenceStage.DECODE:
             return 1
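
For context (not part of the diff itself): a minimal, self-contained sketch of the behavior the corrected docstrings describe, i.e. 1 new token in the decode stage, otherwise the remaining uncomputed prefill tokens, where the uncomputed count is based on `get_len()` (prompt_len + output_len) as noted in the first hunk. The class below is hypothetical and collapses the `SequenceData`/`Sequence` split for brevity; it is not the vLLM implementation.

```python
from enum import Enum


class SequenceStage(Enum):
    PREFILL = "prefill"
    DECODE = "decode"


class SequenceSketch:
    """Hypothetical stand-in illustrating the docstring semantics above."""

    def __init__(self, prompt_token_ids, output_token_ids=None):
        self.prompt_token_ids = prompt_token_ids
        self.output_token_ids = output_token_ids or []
        self.num_computed_tokens = 0
        self.stage = SequenceStage.PREFILL

    def get_len(self):
        # prompt_len + output_len, so recompute can prefill both prompt
        # and previously generated output (per the comment in the diff).
        return len(self.prompt_token_ids) + len(self.output_token_ids)

    def get_num_uncomputed_tokens(self):
        # Prefill tokens that have not been computed yet.
        return self.get_len() - self.num_computed_tokens

    def get_num_new_tokens(self):
        # 1 for decode, or the remaining prompt size for prefill,
        # matching the corrected docstring.
        if self.stage == SequenceStage.DECODE:
            return 1
        return self.get_num_uncomputed_tokens()
```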