From aa0addb39726b685522e7cf154b564b4159759ad Mon Sep 17 00:00:00 2001
From: Yongzao <532741407@qq.com>
Date: Tue, 29 Oct 2024 04:49:56 +0800
Subject: [PATCH] Adding "torch compile" annotations to moe models (#9758)

---
 vllm/model_executor/models/arctic.py  | 2 ++
 vllm/model_executor/models/mixtral.py | 2 ++
 vllm/model_executor/models/olmoe.py   | 2 ++
 vllm/model_executor/models/phimoe.py  | 2 ++
 4 files changed, 8 insertions(+)

diff --git a/vllm/model_executor/models/arctic.py b/vllm/model_executor/models/arctic.py
index 30b1f1cc..fd29d4cc 100644
--- a/vllm/model_executor/models/arctic.py
+++ b/vllm/model_executor/models/arctic.py
@@ -5,6 +5,7 @@ import torch
 from torch import nn
 
 from vllm.attention import Attention, AttentionMetadata
+from vllm.compilation.decorators import support_torch_compile
 from vllm.config import CacheConfig
 from vllm.distributed import (get_pp_group, get_tensor_model_parallel_rank,
                               get_tensor_model_parallel_world_size,
@@ -360,6 +361,7 @@ class ArcticDecoderLayer(nn.Module):
         return hidden_states
 
 
+@support_torch_compile
 class ArcticModel(nn.Module):
 
     def __init__(
diff --git a/vllm/model_executor/models/mixtral.py b/vllm/model_executor/models/mixtral.py
index dd384eee..1514243a 100644
--- a/vllm/model_executor/models/mixtral.py
+++ b/vllm/model_executor/models/mixtral.py
@@ -28,6 +28,7 @@ from torch import nn
 from transformers import MixtralConfig
 
 from vllm.attention import Attention, AttentionMetadata
+from vllm.compilation.decorators import support_torch_compile
 from vllm.config import CacheConfig, LoRAConfig
 from vllm.distributed import get_pp_group, get_tensor_model_parallel_world_size
 from vllm.model_executor.layers.fused_moe import FusedMoE
@@ -245,6 +246,7 @@ class MixtralDecoderLayer(nn.Module):
         return hidden_states, residual
 
 
+@support_torch_compile
 class MixtralModel(nn.Module):
 
     def __init__(
diff --git a/vllm/model_executor/models/olmoe.py b/vllm/model_executor/models/olmoe.py
index a1ba80e0..374cbb8d 100644
--- a/vllm/model_executor/models/olmoe.py
+++ b/vllm/model_executor/models/olmoe.py
@@ -17,6 +17,7 @@ from torch import nn
 from transformers import PretrainedConfig
 
 from vllm.attention import Attention, AttentionMetadata
+from vllm.compilation.decorators import support_torch_compile
 from vllm.config import CacheConfig
 from vllm.distributed import get_pp_group, get_tensor_model_parallel_world_size
 from vllm.model_executor.layers.fused_moe import FusedMoE
@@ -239,6 +240,7 @@ class OlmoeDecoderLayer(nn.Module):
         return hidden_states, residual
 
 
+@support_torch_compile
 class OlmoeModel(nn.Module):
 
     def __init__(
diff --git a/vllm/model_executor/models/phimoe.py b/vllm/model_executor/models/phimoe.py
index a9c81591..bb8a9327 100644
--- a/vllm/model_executor/models/phimoe.py
+++ b/vllm/model_executor/models/phimoe.py
@@ -28,6 +28,7 @@ from torch import nn
 from transformers.configuration_utils import PretrainedConfig
 
 from vllm.attention import Attention, AttentionMetadata
+from vllm.compilation.decorators import support_torch_compile
 from vllm.config import CacheConfig, LoRAConfig
 from vllm.distributed import get_pp_group, get_tensor_model_parallel_world_size
 from vllm.model_executor.layers.fused_moe import FusedMoE
@@ -429,6 +430,7 @@ class PhiMoEDecoderLayer(nn.Module):
         return hidden_states, residual
 
 
+@support_torch_compile
 class PhiMoEModel(nn.Module):
 
     def __init__(