Fix OOM in attention kernel test (#1223)

commit 6f88f762bf
parent 202351d5bf
Author: Woosuk Kwon
Date: 2023-09-28 14:33:24 -07:00 (committed by GitHub)

@@ -247,8 +247,11 @@ def test_multi_query_kv_attention(
     torch.random.manual_seed(seed)
     torch.cuda.manual_seed(seed)
-    seq_lens = random.sample(range(1, MAX_SEQ_LEN), num_seqs)
-    seq_lens[-1] = MAX_SEQ_LEN
+    # MAX_SEQ_LEN sometimes causes OOM in the reference implementation.
+    # As the xformers library is already tested with its own tests, we can use
+    # a smaller MAX_SEQ_LEN here.
+    max_len = min(MAX_SEQ_LEN, 4096)
+    seq_lens = random.sample(range(1, max_len), num_seqs)
     num_tokens = sum(seq_lens)
     scale = float(1.0 / (head_size**0.5))
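
For context, the OOM risk comes from quadratic memory growth: a naive
reference attention materializes the full (seq_len, seq_len) score matrix
per head, so doubling the sequence length quadruples the memory needed.
A minimal back-of-the-envelope sketch of that arithmetic follows; the
function name, head count, and float32 element size are illustrative
assumptions, not values taken from the test.

def score_matrix_bytes(seq_len: int, num_heads: int,
                       bytes_per_elem: int = 4) -> int:
    # Bytes for one dense (num_heads, seq_len, seq_len) score tensor,
    # assuming float32 (4 bytes per element).
    return num_heads * seq_len * seq_len * bytes_per_elem

for seq_len in (2048, 4096, 8192):
    gib = score_matrix_bytes(seq_len, num_heads=16) / 2**30
    print(f"seq_len={seq_len:5d}: ~{gib:.2f} GiB")

Under these hypothetical numbers (16 heads, float32), the score matrix
alone costs ~0.25 GiB at 2048, ~1 GiB at 4096, and ~4 GiB at 8192, which
illustrates why a cap like min(MAX_SEQ_LEN, 4096) can keep the reference
implementation within memory while still exercising long sequences.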