flash-attention/training/configs/experiment/pile/gpt3-2.7B-flash-rotary-8k.yaml

# @package _global_
defaults:
  - /experiment/pile/gpt3xl-flash-rotary-8k.yaml
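# Overrides on top of the inherited gpt3xl (1.3B) recipe. The dimensions
# below match GPT-3 2.7B (Table 2.1 of the GPT-3 paper): d_model=2560,
# 32 heads, 32 layers.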
model:
  config:
    n_embd: 2560
    n_head: 32
    n_layer: 32
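    # sqrt(2 / (5 * d_model)) "small init" (Nguyen & Salazar, 2019),
    # computed at config time via the `eval` resolver registered by the
    # training code.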
    initializer_range: ${eval:"(2 / (${.n_embd} * 5)) ** 0.5"}
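    # Activation-checkpointing level for the MLP blocks; 0 disables it
    # (presumably the 2.7B model trains without recomputing MLP activations).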
    mlp_checkpoint_lvl: 0
datamodule:
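  # Per-GPU micro-batch size picked from available GPU memory
  # (train.gpu_mem, presumably in GB): 1 below 24GB, 2 below 40GB,
  # 4 below 80GB, otherwise 8.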
  batch_size: ${eval:"1 if ${train.gpu_mem} < 24 else (2 if ${train.gpu_mem} < 40 else (4 if ${train.gpu_mem} < 80 else 8))"}
train:
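  # Peak learning rate for the 2.7B model, matching the GPT-3 paper.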
  optimizer:
    lr: 1.6e-4