exp_name: 'cosine_lr'  # Experiment name

# Training dataset (from Huggingface)
data_source: "MedCat/MedCAT-PT-v1"

# The base model (from HuggingFace model hub)
model_name: "SeaLLMs/SeaLLMs-v3-1.5B"

# Tokenizer
tokenizer_device: 'cpu'  # Device used for tokenization: 'cpu', 'cuda:0', 'cuda:1'
tokenizer_batch_size: 1_000  # Number of examples tokenized per batch
max_length: 512  # Maximum sequence length in tokens

# Checkpoints configuration
output_folder: "./checkpoints/MedCAT-PT"  # Where to save checkpoints during training
save_total_limit: 5  # Limit on number of checkpoints to keep
save_model_to: "./checkpoints/MedCAT-PT/"  # Where to save the last checkpoint + base_model + data_version
save_strategy: "steps"  # Saving strategy (either 'steps' or 'epoch')
save_steps: 10_000  # Save a checkpoint every 10,000 steps

# Logging configuration
logging_dir: "./logs"  # Directory for logs + base_model + data_version
logging_steps: 100  # Frequency of logging

# Training configuration
learning_rate: 5e-5  # Default: 5e-5
lr_scheduler_type: cosine  # Default: linear
warmup_steps: 2000  # Default: 0

per_device_train_batch_size: 4  # Training batch size
per_device_eval_batch_size: 4  # Evaluation batch size
num_train_epochs: 1  # Number of epochs
# max_steps: 500  # Total training steps (or use num_train_epochs instead)
eval_steps: 10_000  # Frequency of evaluation; can differ from logging_steps, but keeping them equal is recommended
evaluation_samples: 20_000  # Number of samples used to evaluate the model during training
evaluation_strategy: "steps"  # Evaluation strategy (either 'steps' or 'epoch')
seed: 3407  # Random seed for reproducibility
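
# --- Usage sketch (illustrative only, not part of this repository) ---
# This file is assumed to be loaded by a training script with PyYAML and mapped
# onto transformers.TrainingArguments; the mapping below is a hypothetical
# example, not the project's actual loader:
#
#   import yaml
#   from transformers import TrainingArguments
#
#   with open("config.yaml") as f:
#       cfg = yaml.safe_load(f)
#
#   args = TrainingArguments(
#       output_dir=cfg["output_folder"],
#       # PyYAML (YAML 1.1) reads 5e-5 as a string, so cast explicitly
#       learning_rate=float(cfg["learning_rate"]),
#       lr_scheduler_type=cfg["lr_scheduler_type"],
#       warmup_steps=cfg["warmup_steps"],
#       per_device_train_batch_size=cfg["per_device_train_batch_size"],
#       per_device_eval_batch_size=cfg["per_device_eval_batch_size"],
#       num_train_epochs=cfg["num_train_epochs"],
#       eval_strategy=cfg["evaluation_strategy"],  # 'evaluation_strategy' in older transformers releases
#       eval_steps=cfg["eval_steps"],
#       save_strategy=cfg["save_strategy"],
#       save_steps=cfg["save_steps"],
#       save_total_limit=cfg["save_total_limit"],
#       logging_dir=cfg["logging_dir"],
#       logging_steps=cfg["logging_steps"],
#       seed=cfg["seed"],
#   )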