Disable CUDA graph for SqueezeLLM (#2161)

commit 6f41f0e377 (parent 2c9b638065)
Author: Woosuk Kwon (committed via GitHub)
Date: 2023-12-17 10:24:25 -08:00

@@ -185,10 +185,11 @@ class ModelConfig:
             self.max_context_len_to_capture = self.max_model_len
         self.max_context_len_to_capture = min(self.max_context_len_to_capture,
                                               self.max_model_len)
-        if self.quantization == "gptq" and not self.enforce_eager:
+        if (self.quantization in ["gptq", "squeezellm"]
+                and not self.enforce_eager):
             # Related issue: https://github.com/vllm-project/vllm/issues/2147
-            logger.warning("GPTQ does not support CUDA graph yet. Disabling "
-                           "CUDA graph.")
+            logger.warning(f"{self.quantization} does not support CUDA graph "
+                           "yet. Disabling CUDA graph.")
             self.enforce_eager = True
 
     def verify_with_parallel_config(
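
For reference, a minimal standalone sketch of the guard this hunk introduces. The ModelConfigSketch class below is illustrative only (not vLLM's actual ModelConfig); it shows that a config created with quantization="squeezellm" now falls back to eager execution instead of CUDA graph capture:

    import logging

    logger = logging.getLogger("vllm.sketch")
    logging.basicConfig(level=logging.WARNING)


    class ModelConfigSketch:
        """Illustrative stand-in for the ModelConfig logic patched above."""

        def __init__(self, quantization=None, enforce_eager=False):
            self.quantization = quantization
            self.enforce_eager = enforce_eager
            # GPTQ and SqueezeLLM kernels are not yet CUDA-graph compatible
            # (see https://github.com/vllm-project/vllm/issues/2147), so
            # fall back to eager execution rather than capturing graphs.
            if (self.quantization in ["gptq", "squeezellm"]
                    and not self.enforce_eager):
                logger.warning(f"{self.quantization} does not support CUDA "
                               "graph yet. Disabling CUDA graph.")
                self.enforce_eager = True


    config = ModelConfigSketch(quantization="squeezellm")
    assert config.enforce_eager  # CUDA graph capture is skipped

Using the quantization name in the f-string keeps the warning accurate for both methods without duplicating the branch.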