model_name: llama-124m
model_config:
  name: ''
  hf_config: {}
  scale_embeddings: false
  block_size: 2048
  vocab_size: 32000
  padding_multiple: 64
  n_layer: 12
  n_head: 12
  n_embd: 768
  rotary_percentage: 1.0
  parallel_residual: false
  bias: false
  lm_head_bias: false
  n_query_groups: 4
  shared_attention_norm: false
  norm_class_name: RMSNorm
  post_attention_norm: false
  post_mlp_norm: false
  norm_eps: 1.0e-06
  mlp_class_name: LLaMAMLP
  gelu_approximate: none
  intermediate_size: 2048
  rope_condense_ratio: 1
  rope_base: 10000
  n_expert: 0
  n_expert_per_token: 0
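# Rough parameter count implied by the architecture above (an estimate,
# assuming untied input/output embeddings and grouped-query attention with
# 4 KV groups, i.e. KV width = 4 * (768 / 12) = 256):
#   token embeddings:  32000 * 768                                  ~= 24.6M
#   lm_head:           768 * 32000                                  ~= 24.6M
#   attention / layer: 768*768 (Q) + 2*768*256 (K, V) + 768*768 (o) ~= 1.57M
#   LLaMAMLP / layer:  3 * 768 * 2048 (gate, up, down)              ~= 4.72M
#   12 layers:         12 * ~6.29M                                  ~= 75.5M
#   total:                                                          ~= 124.7M
# which is consistent with the "llama-124m" model name.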
out_dir: out/pretrain/llama-124m
precision: bf16-mixed
resume: false
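# Note: if this config is consumed by a PyTorch Lightning / Fabric based
# trainer (as the flag format suggests), "bf16-mixed" means bfloat16 autocast
# for the forward/backward pass while weights stay in float32, as opposed to
# "bf16-true", which stores the weights themselves in bfloat16.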
train:
  save_interval: 10000
  log_interval: 1
  global_batch_size: 96
  micro_batch_size: 12
  lr_warmup_steps: 200
  max_tokens: 19632670900
  max_seq_length: 2048
  max_norm: 1.0
  min_lr: 5.0e-05
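# Back-of-the-envelope schedule math from the values above (an estimate,
# assuming a single device and sequences packed to the full max_seq_length):
#   gradient accumulation: global_batch_size / (micro_batch_size * n_devices)
#                          = 96 / 12 = 8 micro-steps per optimizer step
#   tokens per optimizer step: 96 * 2048 = 196,608
#   total optimizer steps:     19,632,670,900 / 196,608 ~= 99,857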
eval:
  interval: 1000
  max_iters: 100
  initial_validation: false
  final_validation: true
optimizer:
  class_path: torch.optim.AdamW
  init_args:
    lr: 0.0005
    weight_decay: 0.01
    betas:
    - 0.9
    - 0.99
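# The class_path / init_args pair follows the jsonargparse convention: the
# trainer imports the named class and instantiates it with these kwargs,
# roughly torch.optim.AdamW(model.parameters(), lr=5e-4, weight_decay=0.01,
# betas=(0.9, 0.99)). Note beta2 is 0.99 rather than the AdamW default of
# 0.999, a common choice for shorter small-model pretraining runs.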
devices: auto
num_nodes: 1
logger_name: wandb
seed: 42
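# This layout matches the LitGPT pretraining recipe format; if that is the
# intended consumer, a typical launch would be (hypothetical invocation,
# adjust the file name to your checkout):
#   litgpt pretrain --config llama-124m.yaml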