From 0603379863cd98aac0b56a32eed49c6f0c8bff46 Mon Sep 17 00:00:00 2001
From: metacryptom <98044045+metacryptom@users.noreply.github.com>
Date: Sun, 25 Jun 2023 13:00:24 +0800
Subject: [PATCH] fix wrong use of getattr to get dict value (#232)

---
 vllm/engine/tokenizer_utils.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/vllm/engine/tokenizer_utils.py b/vllm/engine/tokenizer_utils.py
index 1a0115e6..2b082c33 100644
--- a/vllm/engine/tokenizer_utils.py
+++ b/vllm/engine/tokenizer_utils.py
@@ -22,7 +22,7 @@ def get_tokenizer(
         logger.info(
             "OpenLLaMA models do not support the fast tokenizer. "
             "Using the slow tokenizer instead.")
-    elif config.model_type == "llama" and getattr(kwargs, "use_fast", True):
+    elif config.model_type == "llama" and kwargs.get("use_fast", True):
         # LLaMA fast tokenizer causes protobuf errors in some environments.
         # However, we found that the below LLaMA fast tokenizer works well in
         # most environments.
@@ -31,7 +31,7 @@ def get_tokenizer(
             f"Using the LLaMA fast tokenizer in '{model_name}' to avoid "
             "potential protobuf errors.")
     elif config.model_type in _MODEL_TYPES_WITH_SLOW_TOKENIZER:
-        if getattr(kwargs, "use_fast", False) == True:
+        if kwargs.get("use_fast", False) == True:
             raise ValueError(
                 f"Cannot use the fast tokenizer for {config.model_type} due to "
                 "bugs in the fast tokenizer.")
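
Note: getattr() performs attribute lookup, so on a plain dict it never sees the stored keys and always falls back to the default, whereas dict.get() looks up the key itself. A minimal sketch of the behavior the patch corrects (the kwargs name mirrors the patched code):

    # getattr() consults attributes, not dict keys, so the stored
    # value is never found and the default is always returned.
    kwargs = {"use_fast": False}

    print(getattr(kwargs, "use_fast", True))  # True  (no such attribute, default used)
    print(kwargs.get("use_fast", True))       # False (key found, stored value used)

With the original getattr() calls, an explicit use_fast=False passed to get_tokenizer() was silently ignored for LLaMA models, and the guard rejecting use_fast=True for models in _MODEL_TYPES_WITH_SLOW_TOKENIZER could never fire.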