# flash-attention/training/configs/experiment/pile/gpt3m-flash.yaml
# @package _global_
# Hydra experiment config: GPT-3 Medium with FlashAttention on the Pile.
# Inherits everything from the GPT-3 Small FlashAttention experiment and
# overrides only the model size, batch-size schedule, and learning rate.
defaults:
  - /experiment/pile/gpt3s-flash.yaml
  - override /model/gpt2model: gpt2-medium

# Can enable mlp_checkpoint_lvl to fit batch_size 16 to A100 40GB
# model:
#   config:
#     mlp_checkpoint_lvl: 1

datamodule:
  # Per-GPU batch size chosen at runtime from available GPU memory (GB):
  # <24 GB -> 4, <40 GB -> 8, <80 GB -> 16, otherwise 32.
  batch_size: ${eval:"4 if ${train.gpu_mem} < 24 else (8 if ${train.gpu_mem} < 40 else (16 if ${train.gpu_mem} < 80 else 32))"}

train:
  optimizer:
    lr: 3.0e-4