[V1] Do not use inductor for piecewise CUDA graphs (#10225)

Signed-off-by: Woosuk Kwon <woosuk.kwon@berkeley.edu>
This commit is contained in:
Woosuk Kwon 2024-11-11 11:05:57 -08:00 committed by GitHub
parent f9dadfbee3
commit d7a4f2207b
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

View File

@ -404,15 +404,14 @@ class GPUModelRunner:
def load_model(self) -> None:
if self.use_cuda_graph:
# FIXME(woosuk): Currently, the custom ops are not supported
# in the piecewise compilation mode. We rely on TorchInductor
# to optimize the model.
# FIXME(woosuk): Currently, we do not use Inductor, in order to reduce
# the compilation time and avoid any potential issues with Inductor.
os.environ["VLLM_CUSTOM_OPS"] = "none"
set_compilation_config(
CompilationConfig(
use_cudagraph=True,
non_cudagraph_ops=["vllm.unified_v1_flash_attention"],
use_inductor=True,
use_inductor=False,
))
logger.info("Starting to load model %s...", self.model_config.model)