Disable CUDA graph for SqueezeLLM (#2161)
commit 6f41f0e377 (parent 2c9b638065)
@@ -185,10 +185,11 @@ class ModelConfig:
             self.max_context_len_to_capture = self.max_model_len
         self.max_context_len_to_capture = min(self.max_context_len_to_capture,
                                               self.max_model_len)
-        if self.quantization == "gptq" and not self.enforce_eager:
+        if (self.quantization in ["gptq", "squeezellm"]
+                and not self.enforce_eager):
             # Related issue: https://github.com/vllm-project/vllm/issues/2147
-            logger.warning("GPTQ does not support CUDA graph yet. Disabling "
-                           "CUDA graph.")
+            logger.warning(f"{self.quantization} does not support CUDA graph "
+                           "yet. Disabling CUDA graph.")
             self.enforce_eager = True
 
     def verify_with_parallel_config(
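The effect of this hunk, shown as a standalone sketch: when quantization is "gptq" or "squeezellm" and eager mode is not already requested, the config logs a warning and forces eager execution. The class below is a simplified stand-in for vllm.config.ModelConfig, not the actual vLLM implementation; only the patched check is reproduced.

import logging

logger = logging.getLogger(__name__)


class ModelConfigSketch:
    """Simplified stand-in for vllm.config.ModelConfig (illustration only)."""

    def __init__(self, quantization: str | None = None,
                 enforce_eager: bool = False) -> None:
        self.quantization = quantization
        self.enforce_eager = enforce_eager
        # Mirrors the patched check: GPTQ and SqueezeLLM kernels do not
        # support CUDA graph capture yet, so fall back to eager execution.
        if (self.quantization in ["gptq", "squeezellm"]
                and not self.enforce_eager):
            logger.warning(f"{self.quantization} does not support CUDA graph "
                           "yet. Disabling CUDA graph.")
            self.enforce_eager = True


# SqueezeLLM now falls back to eager mode instead of attempting CUDA graphs.
cfg = ModelConfigSketch(quantization="squeezellm", enforce_eager=False)
assert cfg.enforce_eager is True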