From 31653980749654c80c561bff5bb697c176da3b46 Mon Sep 17 00:00:00 2001
From: Vik Paruchuri
Date: Wed, 15 Mar 2023 10:36:19 -0700
Subject: [PATCH] Remove unused kwargs in flashattention

---
 flash_attn/flash_attention.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/flash_attn/flash_attention.py b/flash_attn/flash_attention.py
index db6804e..0719d0a 100644
--- a/flash_attn/flash_attention.py
+++ b/flash_attn/flash_attention.py
@@ -18,7 +18,7 @@ class FlashAttention(nn.Module):
         attention_dropout: The dropout rate to apply to the attention
                            (default: 0.0)
     """
-    def __init__(self, softmax_scale=None, attention_dropout=0.0, device=None, dtype=None):
+    def __init__(self, softmax_scale=None, attention_dropout=0.0):
         super().__init__()
         self.softmax_scale = softmax_scale
         self.dropout_p = attention_dropout
@@ -74,7 +74,7 @@ class FlashAttention(nn.Module):
 class FlashMHA(nn.Module):
 
     def __init__(self, embed_dim, num_heads, bias=True, batch_first=True, attention_dropout=0.0,
-                 causal=False, device=None, dtype=None, **kwargs) -> None:
+                 causal=False, device=None, dtype=None) -> None:
         assert batch_first
         factory_kwargs = {'device': device, 'dtype': dtype}
         super().__init__()
@@ -87,7 +87,7 @@ class FlashMHA(nn.Module):
         assert self.head_dim % 8 == 0 and self.head_dim <= 128, "Only support head_dim <= 128 and divisible by 8"
 
         self.Wqkv = nn.Linear(embed_dim, 3 * embed_dim, bias=bias, **factory_kwargs)
-        self.inner_attn = FlashAttention(attention_dropout=attention_dropout, **factory_kwargs)
+        self.inner_attn = FlashAttention(attention_dropout=attention_dropout)
         self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias, **factory_kwargs)
         self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias, **factory_kwargs)
 
     def forward(self, x, key_padding_mask=None, need_weights=False):
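
Note (not part of the patch): a minimal usage sketch of FlashMHA after this change, assuming flash_attn is installed and a CUDA device with fp16 support is available. device/dtype are still forwarded to the internal nn.Linear layers through factory_kwargs; only the inner FlashAttention module stops receiving them. The embed_dim, num_heads, and tensor shapes below are illustrative assumptions, not values taken from the patch.

    import torch
    from flash_attn.flash_attention import FlashMHA

    # device/dtype still reach the Wqkv and out_proj Linear layers via factory_kwargs;
    # the inner FlashAttention module is constructed without them after this patch.
    mha = FlashMHA(embed_dim=1024, num_heads=16, attention_dropout=0.0,
                   causal=True, device='cuda', dtype=torch.float16)

    # Inputs are batch-first: (batch, seqlen, embed_dim); half precision on GPU is assumed.
    x = torch.randn(2, 512, 1024, device='cuda', dtype=torch.float16)
    out, _ = mha(x)   # forward returns an (output, attn_weights) tuple in this version
    print(out.shape)  # expected: torch.Size([2, 512, 1024])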