From 20044cab7aa6e884e13460506b0e0b6a12722b5d Mon Sep 17 00:00:00 2001
From: Lily Liu
Date: Wed, 2 Aug 2023 13:35:10 -0700
Subject: [PATCH] Fix log message in scheduler (#652)

---
 vllm/core/scheduler.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/vllm/core/scheduler.py b/vllm/core/scheduler.py
index bff35774..058e3ef4 100644
--- a/vllm/core/scheduler.py
+++ b/vllm/core/scheduler.py
@@ -190,13 +190,13 @@ class Scheduler:
                     break
 
             num_prompt_tokens = seq_group.get_seqs()[0].get_len()
-            if num_prompt_tokens > min(
-                    self.scheduler_config.max_model_len,
-                    self.scheduler_config.max_num_batched_tokens):
+            prompt_limit = min(
+                self.scheduler_config.max_model_len,
+                self.scheduler_config.max_num_batched_tokens)
+            if num_prompt_tokens > prompt_limit:
                 logger.warning(
                     f"Input prompt ({num_prompt_tokens} tokens) is too long"
-                    " and exceeds limit of "
-                    f"{self.scheduler_config.max_model_len}")
+                    f" and exceeds limit of {prompt_limit}")
                 for seq in seq_group.get_seqs():
                     seq.status = SequenceStatus.FINISHED_IGNORED
                 ignored_seq_groups.append(seq_group)
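
Note (not part of the patch): a minimal runnable sketch of the admission check after this change. The SchedulerConfig stub, SequenceStatus enum, and the standalone check_prompt() helper are illustrative assumptions, not vLLM's actual API; only the prompt_limit computation and the warning text mirror the diff above.

    # Illustrative sketch only -- not part of the patch.
    import logging
    from dataclasses import dataclass
    from enum import Enum, auto

    logger = logging.getLogger("scheduler-sketch")
    logging.basicConfig(level=logging.WARNING)


    @dataclass
    class SchedulerConfig:
        # Hypothetical stand-in for the two scheduler_config fields used here.
        max_model_len: int
        max_num_batched_tokens: int


    class SequenceStatus(Enum):
        WAITING = auto()
        FINISHED_IGNORED = auto()


    def check_prompt(num_prompt_tokens: int, config: SchedulerConfig) -> bool:
        """Return True if the prompt fits; warn and reject otherwise."""
        # After the patch, the limit is computed once and reused in the log
        # message, so the reported limit matches the value actually enforced.
        prompt_limit = min(config.max_model_len, config.max_num_batched_tokens)
        if num_prompt_tokens > prompt_limit:
            logger.warning(
                f"Input prompt ({num_prompt_tokens} tokens) is too long"
                f" and exceeds limit of {prompt_limit}")
            return False
        return True


    if __name__ == "__main__":
        config = SchedulerConfig(max_model_len=2048, max_num_batched_tokens=1024)
        # Before the patch the warning reported max_model_len (2048) even though
        # the limit actually enforced here is max_num_batched_tokens (1024).
        check_prompt(1500, config)   # warns: exceeds limit of 1024
        check_prompt(512, config)    # accepted, no warning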