exp_name: 'cosine_lr'
# Training dataset (from the Hugging Face Hub)
data_source: "MedCat/MedCAT-PT-v1"
# The base model (from the Hugging Face model hub)
model_name: "microsoft/BioGPT-Large"
# Tokenizer
tokenizer_device: 'cpu' # 'cpu', 'cuda:0', 'cuda:1'
tokenizer_batch_size: 1_000
max_length: 512
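# Illustrative sketch of how the tokenizer settings above would typically
# be applied in a batched dataset.map pass (an assumption about the
# training script, not code shipped with this config; the "text" column
# name in particular is hypothetical):
#
#   from datasets import load_dataset
#   from transformers import AutoTokenizer
#
#   ds = load_dataset("MedCat/MedCAT-PT-v1", split="train")
#   tok = AutoTokenizer.from_pretrained("microsoft/BioGPT-Large")
#   ds = ds.map(
#       lambda batch: tok(batch["text"], truncation=True, max_length=512),
#       batched=True,
#       batch_size=1_000,  # tokenizer_batch_size
#   )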
# Checkpoints configuration
output_folder: "./checkpoints/MedCAT-PT" # Where to save checkpoints during training
save_total_limit: 5 # Maximum number of checkpoints to keep (older ones are deleted)
save_model_to: "./checkpoints/MedCAT-PT/" # Where to save the last checkpoint + base_model + data_version
save_strategy: "steps" # Saving strategy (either 'steps' or 'epoch')
save_steps: 10_000 # Save a checkpoint every this many steps
# Logging configuration
logging_dir: "./logs" # Directory for logs + base_model + data_version
logging_steps: 1_000 # Frequency of logging
# Training configuration
learning_rate: 5e-5 # Default 5e-5
lr_scheduler_type: "cosine" # Default: linear
# warmup_steps: 2000 # Default: 0
per_device_train_batch_size: 16 # Training batch size
per_device_eval_batch_size: 16 # Evaluation batch size
num_train_epochs: 1 # Number of epochs
# max_steps: 500 # Total training steps (or use num_train_epochs instead)
eval_steps: 10_000 # Frequency of evaluation; can differ from logging_steps, but should be kept equal to it
evaluation_samples: 20_000 # Number of samples used to evaluate the model during training
evaluation_strategy: "steps" # Evaluation strategy (either 'steps' or 'epoch')
seed: 3407 # Random seed for reproducibility
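# Usage sketch (an assumption about how this file is consumed, not code
# shipped with it): the Trainer-level keys above map one-to-one onto
# transformers.TrainingArguments, while exp_name, data_source, model_name,
# the tokenizer_* keys, max_length, save_model_to and evaluation_samples
# would be handled by the training script itself. Note that PyYAML
# (YAML 1.1) loads 10_000 as the int 10000 but bare 5e-5 as a string,
# hence the float() guard.
#
#   import yaml
#   from transformers import TrainingArguments
#
#   with open("config.yaml") as f:
#       cfg = yaml.safe_load(f)
#
#   args = TrainingArguments(
#       output_dir=cfg["output_folder"],
#       save_total_limit=cfg["save_total_limit"],
#       save_strategy=cfg["save_strategy"],
#       save_steps=cfg["save_steps"],
#       logging_dir=cfg["logging_dir"],
#       logging_steps=cfg["logging_steps"],
#       learning_rate=float(cfg["learning_rate"]),
#       lr_scheduler_type=cfg["lr_scheduler_type"],
#       per_device_train_batch_size=cfg["per_device_train_batch_size"],
#       per_device_eval_batch_size=cfg["per_device_eval_batch_size"],
#       num_train_epochs=cfg["num_train_epochs"],
#       eval_steps=cfg["eval_steps"],
#       evaluation_strategy=cfg["evaluation_strategy"],
#       seed=cfg["seed"],
#   )
#
#   # evaluation_samples is not a TrainingArguments field; a plausible use
#   # is subsampling the eval split, e.g.:
#   #   eval_ds = eval_ds.select(range(cfg["evaluation_samples"]))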