# Run test with:
# torchrun --no_python --nproc_per_node=8 pytest -q -s tests/modules/test_mha_parallel.py
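# (8 processes are launched; each parametrized case asserts that the requested
# world_size does not exceed the launched world size)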

import math

import pytest
import torch
import torch.nn.functional as F
from apex.transformer import parallel_state, tensor_parallel
from einops import rearrange

from flash_attn.modules.mha import MHA, ParallelMHA
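
# bfloat16 support requires compute capability 8.x (Ampere) or newer.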
is_sm8x = torch.cuda.get_device_capability("cuda")[0] >= 8


@pytest.mark.parametrize("dtype", [torch.float16] + ([torch.bfloat16] if is_sm8x else []))
# @pytest.mark.parametrize('dtype', [torch.float16])
@pytest.mark.parametrize("world_size", [1, 2, 4, 8])
# @pytest.mark.parametrize('world_size', [2])
@pytest.mark.parametrize("sequence_parallel", [True, False])
# @pytest.mark.parametrize('sequence_parallel', [False])
@pytest.mark.parametrize("head_dim", [64, 128])
# @pytest.mark.parametrize('head_dim', [64])
@pytest.mark.parametrize("embed_dim", [1024, 4096])
# @pytest.mark.parametrize('embed_dim', [1024])
def test_mha_parallel(embed_dim, head_dim, sequence_parallel, world_size, dtype):
    assert embed_dim % head_dim == 0
    num_heads = embed_dim // head_dim
    assert num_heads % world_size == 0
    rtol, atol = (3e-3, 1e-2) if dtype == torch.bfloat16 else (3e-3, 1e-3)
    if not torch.distributed.is_initialized():
        torch.distributed.init_process_group(backend="nccl", init_method="env://")
    device = f"cuda:{torch.distributed.get_rank()}"
    assert world_size <= torch.distributed.get_world_size()
    parallel_state.initialize_model_parallel(tensor_model_parallel_size_=world_size)
    rank = parallel_state.get_tensor_model_parallel_rank()
    # set seed
    torch.random.manual_seed(0)
    batch_size = 2
    seqlen = 1024
    assert (batch_size * seqlen) % world_size == 0
    x_pt = torch.randn(
        batch_size * seqlen, embed_dim, device=device, dtype=dtype, requires_grad=True
    )
    # We need to generate g here so that all processes get the same gradient,
    # as rank 0 will have an extra bias that changes the RNG.
    # We also scale g down (/ 32), otherwise the gradient gets a bit too large.
    g = torch.randn_like(x_pt) / 32
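    # When sequence parallel, each rank owns a contiguous slice of the flattened
    # (batch * seqlen) dimension, so scatter the reference input across ranks.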
    if sequence_parallel:
        x = (
            tensor_parallel.scatter_to_sequence_parallel_region(x_pt)
            .detach()
            .clone()
            .requires_grad_()
        )
    else:
        x = x_pt.detach().clone().requires_grad_()

    model_pt = MHA(
        embed_dim,
        num_heads,
        rotary_emb_dim=int(head_dim // 2),
        use_flash_attn=True,
        device=device,
        dtype=dtype,
    )
    partition_dim = embed_dim // world_size
    model = ParallelMHA(
        embed_dim,
        num_heads,
        parallel_state.get_tensor_model_parallel_group(),
        rotary_emb_dim=int(head_dim // 2),
        use_flash_attn=True,
        sequence_parallel=sequence_parallel,
        device=device,
        dtype=dtype,
    )
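
    # ParallelMHA holds only this rank's shard of the weights: Wqkv is split
    # along the output (head) dimension and out_proj along the input dimension,
    # so copy the matching partition of the reference weights below.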
    with torch.no_grad():
        model.Wqkv.weight.copy_(
            rearrange(
                rearrange(model_pt.Wqkv.weight, "(three o) i -> three o i", three=3)[
                    :, rank * partition_dim : (rank + 1) * partition_dim
                ],
                "three o i -> (three o) i",
            )
        )
        model.Wqkv.bias.copy_(
            rearrange(
                rearrange(model_pt.Wqkv.bias, "(three o) -> three o", three=3)[
                    :, rank * partition_dim : (rank + 1) * partition_dim
                ],
                "three o -> (three o)",
            )
        )
        model.out_proj.weight.copy_(
            model_pt.out_proj.weight[:, rank * partition_dim : (rank + 1) * partition_dim]
        )
        if rank == 0:
            model.out_proj.bias.copy_(model_pt.out_proj.bias)
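
    # Forward pass: each rank's output should match the reference output
    # (sliced along the sequence-parallel partition when sequence_parallel is on).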
    out = model(x, seqlen=seqlen)
    out_pt = rearrange(model_pt(rearrange(x_pt, "(b s) d -> b s d", s=seqlen)), "b s d -> (b s) d")
    partition_batch_dim = batch_size * seqlen // world_size
    assert torch.allclose(
        out,
        out_pt[rank * partition_batch_dim : (rank + 1) * partition_batch_dim]
        if sequence_parallel
        else out_pt,
        rtol=rtol,
        atol=atol,
    )
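
    # Backward pass: feed the same upstream gradient g through both models,
    # slicing it per rank when sequence parallel.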
    out_pt.backward(g)
    out.backward(
        g[rank * partition_batch_dim : (rank + 1) * partition_batch_dim] if sequence_parallel else g
    )
    parallel_state.destroy_model_parallel()
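
    # Check input gradients: x.grad is this rank's slice of x_pt.grad when
    # sequence parallel, and the full gradient otherwise.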
    assert torch.allclose(
        x.grad,
        x_pt.grad[rank * partition_batch_dim : (rank + 1) * partition_batch_dim]
        if sequence_parallel
        else x_pt.grad,
        rtol=rtol,
        atol=atol / 100,  # magnitude of x.grad is quite small
    )
    # The error for d_weight and d_bias is quite a bit higher
    assert torch.allclose(
        model.Wqkv.weight.grad,
        rearrange(
            rearrange(model_pt.Wqkv.weight.grad, "(three o) i -> three o i", three=3)[
                :, rank * partition_dim : (rank + 1) * partition_dim
            ],
            "three o i -> (three o) i",
        ),
        rtol=rtol,
        atol=atol * 10,
    )
    assert torch.allclose(
        model.Wqkv.bias.grad,
        rearrange(
            rearrange(model_pt.Wqkv.bias.grad, "(three o) -> three o", three=3)[
                :, rank * partition_dim : (rank + 1) * partition_dim
            ],
            "three o -> (three o)",
        ),
        rtol=rtol,
        atol=atol * 5,
    )
    assert torch.allclose(
        model.out_proj.weight.grad,
        model_pt.out_proj.weight.grad[:, rank * partition_dim : (rank + 1) * partition_dim],
        rtol=rtol,
        atol=atol * 10,
    )
    if rank == 0:
        assert torch.allclose(
            model.out_proj.bias.grad, model_pt.out_proj.bias.grad, rtol=rtol, atol=atol * 5
        )