# flash-attention/flash_attn/flash_attention.py
import math
import torch
import torch.nn as nn
from einops import rearrange
from flash_attn.rotary import RotaryEmbedding, RotaryEmbedding2D
from flash_attn.flash_attn_interface import flash_attn_unpadded_qkvpacked_func
from flash_attn.bert_padding import unpad_input, pad_input, index_first_axis


class FlashAttention(nn.Module):
    """Implement the scaled dot product attention with softmax.
    Arguments
    ---------
        softmax_scale: The temperature to use for the softmax attention.
                       (default: 1/sqrt(d_keys) where d_keys is computed at
                       runtime)
        attention_dropout: The dropout rate to apply to the attention
                           (default: 0.0)
    """

    def __init__(self, softmax_scale=None, attention_dropout=0.0, device=None, dtype=None):
        super().__init__()
        self.softmax_scale = softmax_scale
        self.dropout_p = attention_dropout

    def forward(self, qkv, key_padding_mask=None, causal=False, cu_seqlens=None,
                max_s=None, need_weights=False):
        """Implements the multihead softmax attention.
        Arguments
        ---------
            qkv: The tensor containing the query, key, and value.
                 (B, S, 3, H, D) if key_padding_mask is None; if unpadded: (nnz, 3, h, d)
            key_padding_mask: a bool tensor of shape (B, S)
        """
        assert not need_weights
        assert qkv.dtype in [torch.float16, torch.bfloat16]
        assert qkv.is_cuda
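        # cu_seqlens is the int32 vector of cumulative sequence lengths of the packed batch,
        # e.g. sequences of lengths [3, 5] give cu_seqlens = [0, 3, 8]; sequence b occupies
        # rows cu_seqlens[b]:cu_seqlens[b + 1] of the unpadded (nnz, 3, H, D) qkv tensor.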
        if cu_seqlens is None:
            batch_size = qkv.shape[0]
            seqlen = qkv.shape[1]
            if key_padding_mask is None:
                # No padding mask: every sequence has length `seqlen`, so build trivial
                # cumulative sequence lengths and run the kernel on the flattened batch.
                qkv = rearrange(qkv, 'b s ... -> (b s) ...')
                max_s = seqlen
                cu_seqlens = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen,
                                          dtype=torch.int32, device=qkv.device)
                output = flash_attn_unpadded_qkvpacked_func(
                    qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
                    softmax_scale=self.softmax_scale, causal=causal
                )
                output = rearrange(output, '(b s) ... -> b s ...', b=batch_size)
            else:
                # Padding mask given: drop the padded tokens, run the variable-length kernel,
                # then scatter the results back into the padded (B, S, H, D) layout.
                nheads = qkv.shape[-2]
                x = rearrange(qkv, 'b s three h d -> b s (three h d)')
                x_unpad, indices, cu_seqlens, max_s = unpad_input(x, key_padding_mask)
                x_unpad = rearrange(x_unpad, 'nnz (three h d) -> nnz three h d',
                                    three=3, h=nheads)
                output_unpad = flash_attn_unpadded_qkvpacked_func(
                    x_unpad, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
                    softmax_scale=self.softmax_scale, causal=causal
                )
                output = rearrange(pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'),
                                             indices, batch_size, seqlen),
                                   'b s (h d) -> b s h d', h=nheads)
        else:
            # Caller already supplies unpadded (nnz, 3, H, D) qkv together with cu_seqlens.
            assert max_s is not None
            output = flash_attn_unpadded_qkvpacked_func(
                qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
                softmax_scale=self.softmax_scale, causal=causal
            )
        return output, None
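

# Editorial usage sketch (not part of the original module): one way to call FlashAttention on a
# packed qkv tensor. The helper name `_flash_attention_example` is hypothetical and the function
# is never invoked at import time; it needs a CUDA device and fp16/bf16 inputs.
def _flash_attention_example():
    batch, seqlen, nheads, headdim = 2, 128, 8, 64
    qkv = torch.randn(batch, seqlen, 3, nheads, headdim, device='cuda', dtype=torch.float16)
    attn = FlashAttention(attention_dropout=0.1)
    # With no key_padding_mask and no cu_seqlens, forward() flattens the batch itself.
    out, _ = attn(qkv, causal=True)  # out: (batch, seqlen, nheads, headdim)
    return out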


class FlashMHA(nn.Module):

    def __init__(self, embed_dim, num_heads, bias=True, batch_first=True, attention_dropout=0.0,
                 causal=False, use_rotary_emb=None, device=None, dtype=None, **kwargs) -> None:
        assert batch_first
        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__()
        self.embed_dim = embed_dim
        self.causal = causal
        self.num_heads = num_heads
        assert self.embed_dim % num_heads == 0, "embed_dim must be divisible by num_heads"
        self.head_dim = self.embed_dim // num_heads
        assert self.head_dim in [16, 32, 64, 128], "Only support head_dim == 16, 32, 64, or 128"

        assert use_rotary_emb in [None, '1d', '2d']
        self.use_rotary_emb = use_rotary_emb
        if self.use_rotary_emb == '1d':
            self.rotary_emb = RotaryEmbedding(self.head_dim)
        elif self.use_rotary_emb == '2d':
            self.rotary_emb = RotaryEmbedding2D(self.head_dim)

        self.Wqkv = nn.Linear(embed_dim, 3 * embed_dim, bias=bias, **factory_kwargs)
        self.inner_attn = FlashAttention(attention_dropout=attention_dropout, **factory_kwargs)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias, **factory_kwargs)

    def forward(self, x, key_padding_mask=None, need_weights=False):
        """x: (batch, seqlen, hidden_dim) (where hidden_dim = num_heads * head_dim)
        key_padding_mask: bool tensor of shape (batch, seqlen)
        """
        qkv = self.Wqkv(x)
        if self.use_rotary_emb:
            query, key, value = rearrange(qkv, 'b s (three h d) -> b s three h d', three=3,
                                          h=self.num_heads).unbind(dim=2)
            query, key = self.rotary_emb(query, key, seq_dimension=-3)
            qkv = torch.stack([query.type(x.dtype), key.type(x.dtype), value], dim=2)
        else:
            qkv = rearrange(qkv, 'b s (three h d) -> b s three h d', three=3, h=self.num_heads)
        context, attn_weights = self.inner_attn(qkv, key_padding_mask=key_padding_mask,
                                                need_weights=need_weights, causal=self.causal)
        return self.out_proj(rearrange(context, 'b s h d -> b s (h d)')), attn_weights
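

# Editorial usage sketch (not part of the original file): a minimal end-to-end FlashMHA call with
# a padding mask. Assumes a CUDA device with fp16 support; shapes follow the forward() docstring,
# and the mask uses the usual attention-mask convention (True = keep, False = padding).
if __name__ == '__main__':
    torch.manual_seed(0)
    batch, seqlen, embed_dim, num_heads = 4, 256, 1024, 16   # head_dim = 64
    mha = FlashMHA(embed_dim, num_heads, attention_dropout=0.1, causal=True,
                   device='cuda', dtype=torch.float16)
    x = torch.randn(batch, seqlen, embed_dim, device='cuda', dtype=torch.float16)
    key_padding_mask = torch.ones(batch, seqlen, dtype=torch.bool, device='cuda')
    key_padding_mask[:, -16:] = False    # treat the last 16 positions as padding
    out, attn_weights = mha(x, key_padding_mask=key_padding_mask)
    print(out.shape)      # torch.Size([4, 256, 1024])
    print(attn_weights)   # None: the fused kernel does not return attention probabilities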