# @package _global_
# Hydra experiment config: starts from the gpt2s-flash OWT experiment and
# swaps the model to gpt2-medium, with a GPU-memory-aware batch size and a
# learning rate suited to the larger model.
defaults:
  - /experiment/owt/gpt2s-flash.yaml
  - override /model/gpt2model: gpt2-medium

# Can enable mlp_checkpoint_lvl to fit batch_size 32 to A100 40GB
# model:
#   config:
#     mlp_checkpoint_lvl: 1

datamodule:
  # batch_size: 32
  # Pick per-GPU batch size from train.gpu_mem (presumably GB — confirm against
  # the resolver's caller): <24 -> 8, <40 -> 16, <80 -> 32, otherwise 64.
  batch_size: ${eval:"8 if ${train.gpu_mem} < 24 else (16 if ${train.gpu_mem} < 40 else (32 if ${train.gpu_mem} < 80 else 64))"}

train:
  optimizer:
    lr: 1.5e-4