# data
data:
  instruct_data: "/root/data/mol_instructions_train.jsonl" # Fill this with the path to your training data
  data: "" # Optionally fill with pretraining data
  eval_instruct_data: "" # Optionally fill with evaluation data
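  # Each line of instruct_data is expected to be one standalone JSON object in
  # chat format (a minimal sketch; the example contents are placeholders):
  #   {"messages": [{"role": "user", "content": "Describe this molecule: CCO"},
  #                 {"role": "assistant", "content": "This is ethanol, ..."}]}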
# model
model_id_or_path: "/root/mistral_models/7B-v0.3" # Path to downloaded model
lora:
  rank: 64
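  # rank sets the LoRA adapter capacity: higher rank means more trainable
  # parameters and memory, lower rank is cheaper but less expressive;
  # 64 is generous, and 8-16 is a common starting point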
# optim
seq_len: 32768
batch_size: 2
# TODO: try other values
max_steps: 500
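# With these settings each GPU processes batch_size * seq_len = 65,536 tokens
# per step, roughly 33M tokens per GPU over the 500 steps (this assumes
# training examples are packed up to seq_len, as mistral-finetune does)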
optim:
  lr: 5.e-5
  weight_decay: 0.05
  pct_start: 0.05
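  # pct_start is the warmup fraction of the schedule: 0.05 * 500 = 25 warmup
  # steps before the learning rate decays from its 5e-5 peak (assuming the
  # OneCycle-style schedule the parameter name suggests)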
# other
seed: 99
log_freq: 1
eval_freq: 100
no_eval: True
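# eval_freq has no effect while no_eval is True; set no_eval to False and
# fill eval_instruct_data above to log periodic evaluation losses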
ckpt_freq: 100
ckpt_only_lora: False # If True, save only the trained LoRA adapters; False merges the adapters into the base model and saves the full fine-tuned model
run_dir: "/root/mistral-finetune/runseed99"
wandb:
  project: "CHEMISTral7b-ft"
  offline: False # Set to True if you want to use wandb in offline mode
  key: "" # Optionally set your WandB API key (never commit a real key)
  run_name: "runseed99" # Optionally name your WandB run
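# Usage (commands from the mistral-finetune README; "runseed99.yaml" is an
# assumed filename for this config, and --nproc-per-node should match your
# GPU count):
#   python -m utils.validate_data --train_yaml runseed99.yaml
#   torchrun --nproc-per-node 1 --master_port $RANDOM -m train runseed99.yaml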