flash-attention/training/configs/experiment/owt/gpt2m-flash.yaml

# @package _global_
defaults:
  - /experiment/owt/gpt2s-flash.yaml
  - override /model/gpt2model: gpt2-medium
# Enable mlp_checkpoint_lvl so batch_size 32 fits on an A100 40GB
model:
  config:
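    # Higher checkpoint levels recompute more of the MLP block in the backward
    # pass, trading extra compute for lower activation memory.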
    mlp_checkpoint_lvl: 1
datamodule:
  # batch_size: 32
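  # Scale batch_size with available GPU memory (train.gpu_mem, in GB):
  # 8 below 24GB, 16 below 40GB, 32 otherwise. The ${eval:...} resolver is
  # assumed to be registered by the training entry point, e.g. via
  # OmegaConf.register_new_resolver("eval", eval).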
  batch_size: ${eval:"8 if ${train.gpu_mem} < 24 else (16 if ${train.gpu_mem} < 40 else 32)"}
train:
  optimizer:
    lr: 1.5e-4
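
# Typical launch (assuming the repo's Hydra-based run.py entry point):
#   python run.py experiment=owt/gpt2m-flash trainer.devices=8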