# Copyright (c) 2023, Tri Dao.

import pytest
import torch

from flash_attn.models.gpt import GPTLMHeadModel
from flash_attn.models.gpt_neox import gpt_neox_config_to_gpt2_config, remap_state_dict_hf_gpt_neox
from flash_attn.utils.pretrained import state_dict_from_pretrained
from transformers import GPTNeoXConfig
from transformers.models.gpt_neox.modeling_gpt_neox import GPTNeoXForCausalLM


@pytest.mark.parametrize("model_name", ["EleutherAI/gpt-neox-20b"])
def test_gpt_neox_state_dict(model_name):
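    """Check that remap_state_dict_hf_gpt_neox produces a state dict with exactly the
    parameter names and shapes that our GPTLMHeadModel expects."""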
    config = gpt_neox_config_to_gpt2_config(GPTNeoXConfig.from_pretrained(model_name))
    pretrained_state_dict = remap_state_dict_hf_gpt_neox(
        state_dict_from_pretrained(model_name), config
    )
    model = GPTLMHeadModel(config, device="meta")  # Without device="meta", init is very slow
    state_dict = model.state_dict()
    assert state_dict.keys() == pretrained_state_dict.keys()
    for k in state_dict.keys():
        assert state_dict[k].shape == pretrained_state_dict[k].shape


@pytest.mark.parametrize(
    "model_name",
    [
        "EleutherAI/pythia-1b",
        "EleutherAI/pythia-2.8b",
        "EleutherAI/gpt-neox-20b",
        "togethercomputer/RedPajama-INCITE-7B-Base",
    ],
)
def test_gpt_neox_optimized(model_name):
    """Check that our implementation of GPT-NeoX (with all optimizations enabled) matches the
    HF implementation: the output of our fp16 forward pass should be about as close to the
    HF fp32 forward pass as the HF fp16 forward pass is.
    """
    dtype = torch.float16
    device = "cuda"
    config = gpt_neox_config_to_gpt2_config(GPTNeoXConfig.from_pretrained(model_name))
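    # Turn on all the optimized paths. The fused MLP kernel only implements the
    # tanh-approximate GeLU variants, so it is gated on the activation function.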
    config.use_flash_attn = True
    config.fused_bias_fc = True
    config.fused_mlp = config.activation_function in [
        "gelu_fast",
        "gelu_new",
        "gelu_approx",
        "gelu_pytorch_tanh",
    ]
    config.fused_dropout_add_ln = True
    config.residual_in_fp32 = True

    model = GPTLMHeadModel.from_pretrained(model_name, config, device=device, dtype=dtype)
    model.eval()

    torch.manual_seed(0)
    batch_size = 2
    max_seqlen = 256
    input_ids = torch.randint(
        0, config.vocab_size, (batch_size, max_seqlen), dtype=torch.long, device=device
    )
    with torch.no_grad():
        out = model.transformer(input_ids)
        logits = model(input_ids).logits
    del model

    # Need at least 2 GPUs, otherwise we'll OOM for the 20B model
    # Without device_map, the model is loaded on the CPU, which is very slow
    model_ref = GPTNeoXForCausalLM.from_pretrained(model_name, device_map="auto")
    model_ref.eval()
    with torch.no_grad():
        out_ref = model_ref.gpt_neox(input_ids).last_hidden_state.to(device=device)
        logits_ref = model_ref(input_ids).logits.to(device=device)
    del model_ref

    model_hf = GPTNeoXForCausalLM.from_pretrained(
        model_name, torch_dtype=dtype, device_map={"": device}
    )
    model_hf.eval()
    with torch.no_grad():
        out_hf = model_hf.gpt_neox(input_ids).last_hidden_state
        logits_hf = model_hf(input_ids).logits
    del model_hf

    print(f"Output max diff: {(out - out_ref).abs().max().item()}")
    print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
    print(f"HF fp16 max diff: {(out_hf - out_ref).abs().max().item()}")
    print(f"HF fp16 mean diff: {(out_hf - out_ref).abs().mean().item()}")
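    # Our fp16 forward pass is allowed to deviate from the fp32 reference by at most
    # 2x the deviation of HF's own fp16 forward pass.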
    assert (out - out_ref).abs().max().item() < 2 * (out_hf - out_ref).abs().max().item()
    assert (out - out_ref).abs().mean().item() < 2 * (out_hf - out_ref).abs().mean().item()

    print(f"Logits max diff: {(logits - logits_ref).abs().max().item()}")
    print(f"Logits mean diff: {(logits - logits_ref).abs().mean().item()}")
    print(f"HF fp16 max diff: {(logits_hf - logits_ref).abs().max().item()}")
    print(f"HF fp16 mean diff: {(logits_hf - logits_ref).abs().mean().item()}")
    assert (logits - logits_ref).abs().max().item() < 2 * (
        logits_hf - logits_ref
    ).abs().max().item()
    assert (logits - logits_ref).abs().mean().item() < 2 * (
        logits_hf - logits_ref
    ).abs().mean().item()