# vllm/vllm/model_executor/layers/activation.py
"""Custom activation functions."""
from typing import Optional

import torch
import torch.nn as nn

from vllm import activation_ops
from vllm.model_executor.layers.quantization import QuantizationConfig


class SiluAndMul(nn.Module):
    """An activation function for SwiGLU.

    The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.

    Shapes:
        x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
        return: (batch_size, seq_len, d) or (num_tokens, d)
    """

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        d = x.shape[-1] // 2
        output_shape = (x.shape[:-1] + (d, ))
        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
        activation_ops.silu_and_mul(out, x)
        return out
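

# A hedged pure-PyTorch reference for the fused silu_and_mul kernel above,
# matching the semantics in the SiluAndMul docstring (SiLU on the first half
# of the last dimension, multiplied elementwise by the second half). It is a
# sanity-check sketch, not the code path vLLM actually runs.
def _silu_and_mul_reference(x: torch.Tensor) -> torch.Tensor:
    d = x.shape[-1] // 2
    return nn.functional.silu(x[..., :d]) * x[..., d:]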


class NewGELU(nn.Module):

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        out = torch.empty_like(x)
        activation_ops.gelu_new(out, x)
        return out


class FastGELU(nn.Module):

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        out = torch.empty_like(x)
        activation_ops.gelu_fast(out, x)
        return out
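

# Hedged reference formulas for the two fused GELU variants above. These follow
# the widely used tanh-based approximations (as in HuggingFace Transformers'
# NewGELUActivation / FastGELUActivation); treating them as exact matches for
# the CUDA kernels is an assumption, so use them only as sanity-check sketches.
def _gelu_new_reference(x: torch.Tensor) -> torch.Tensor:
    # 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
    return 0.5 * x * (1.0 + torch.tanh(
        0.7978845608028654 * (x + 0.044715 * torch.pow(x, 3.0))))


def _gelu_fast_reference(x: torch.Tensor) -> torch.Tensor:
    # 0.5 * x * (1 + tanh(0.7978845608 * x * (1 + 0.044715 * x * x)))
    return 0.5 * x * (1.0 + torch.tanh(
        x * 0.7978845608028654 * (1.0 + 0.044715 * x * x)))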


class ScaledActivation(nn.Module):
    """An activation function with post-scale parameters.

    This is used for some quantization methods like AWQ.
    """

    def __init__(
        self,
        act_module: nn.Module,
        hidden_size: int,
        params_dtype: torch.dtype,
    ):
        super().__init__()
        self.act = act_module
        self.scales = nn.Parameter(
            torch.empty(hidden_size, dtype=params_dtype, device="cuda"))

    def forward(self, x: torch.Tensor):
        return self.act(x) / self.scales
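

# A minimal construction sketch for ScaledActivation (illustrative values, not
# taken from a real model config). The `scales` parameter is created empty
# here and is presumably filled when the quantized checkpoint's weights are
# loaded:
#
#     scaled_act = ScaledActivation(NewGELU(),
#                                   hidden_size=11008,
#                                   params_dtype=torch.float16)
#     # scaled_act(x) computes NewGELU()(x) / scaled_act.scales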


_ACTIVATION_REGISTRY = {
    "gelu": nn.GELU(),
    "gelu_fast": FastGELU(),
    "gelu_new": NewGELU(),
    "gelu_pytorch_tanh": nn.GELU(approximate="tanh"),
    "relu": nn.ReLU(),
}


def get_act_fn(
    act_fn_name: str,
    quant_config: Optional[QuantizationConfig] = None,
    intermediate_size: Optional[int] = None,
) -> nn.Module:
    """Get an activation function by name."""
    act_fn_name = act_fn_name.lower()
    if act_fn_name not in _ACTIVATION_REGISTRY:
        raise ValueError(
            f"Activation function {act_fn_name!r} is not supported.")
    act_fn = _ACTIVATION_REGISTRY[act_fn_name]
    if quant_config is not None:
        if act_fn_name in quant_config.get_scaled_act_names():
            if intermediate_size is None:
                raise ValueError(
                    "intermediate_size must be specified for scaled "
                    "activation functions.")
            return ScaledActivation(
                act_fn,
                intermediate_size,
                params_dtype=torch.get_default_dtype(),
            )
    return act_fn
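

# A brief, hedged usage sketch of get_act_fn. The activation name and tensor
# shape are illustrative; running it needs a CUDA device and the compiled vllm
# activation_ops, since the registry entries above dispatch to those kernels.
if __name__ == "__main__":
    act_fn = get_act_fn("gelu_new")  # no quant_config, so the plain module is returned
    x = torch.randn(4, 16, dtype=torch.float16, device="cuda")
    y = act_fn(x)
    print(y.shape)  # torch.Size([4, 16])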