CHATBOT / models_config.json
{
    "mon": {
        "dataset_path": "A-POR-LOS-8000/data/baby_cry_detection",
        "output_dir": "A-POR-LOS-8000/distilhubert-finetuned-cry-detector",
        "training_args": {
            "num_train_epochs": 10,
            "learning_rate": 0.00003,
            "warmup_ratio": 0.001,
            "output_dir": "A-POR-LOS-8000/distilhubert-finetuned-cry-detector",
            "eval_strategy": "epoch",
            "save_strategy": "epoch",
            "lr_scheduler_type": "cosine",
            "auto_find_batch_size": true,
            "per_device_train_batch_size": 8,
            "per_device_eval_batch_size": 8,
            "gradient_accumulation_steps": 8,
            "gradient_checkpointing": true,
            "load_best_model_at_end": true,
            "greater_is_better": true,
            "metric_for_best_model": "accuracy",
            "optim": "adamw_torch",
            "hub_strategy": "checkpoint",
            "report_to": "tensorboard",
            "full_determinism": true,
            "seed": 123,
            "data_seed": 123
        }
    },
"class": {
"dataset_path": "A-POR-LOS-8000/data/mixed_data",
"output_dir": "A-POR-LOS-8000/distilhubert-finetuned-mixed-data",
"training_args": {
"num_train_epochs": 15,
"learning_rate": 0.0003,
"warmup_ratio": 0.4,
"output_dir": "A-POR-LOS-8000/distilhubert-finetuned-mixed-data",
"eval_strategy": "epoch",
"save_strategy": "epoch",
"lr_scheduler_type": "cosine",
"auto_find_batch_size": true,
"per_device_train_batch_size": 8,
"per_device_eval_batch_size": 8,
"gradient_accumulation_steps": 8,
"gradient_checkpointing": true,
"load_best_model_at_end": true,
"greater_is_better": true,
"optim": "adamw_torch",
"hub_strategy": "checkpoint",
"report_to": "tensorboard",
"full_determinism": true,
"seed": 123,
"data_seed":123
}
}
}
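
For reference, a minimal sketch of how these entries could be consumed: the keys under "training_args" map one-to-one onto transformers.TrainingArguments fields, so each block can be unpacked directly when setting up the Trainer. The loader below is an assumption for illustration, not code from this repository; only the JSON keys themselves come from the file above.

import json

from transformers import TrainingArguments

# Hypothetical loader (not part of this repo): pick one of the two entries,
# "mon" (baby-cry detector) or "class" (mixed-data classifier), and build
# the TrainingArguments that a fine-tuning script would pass to Trainer.
with open("models_config.json") as f:
    config = json.load(f)

model_cfg = config["mon"]  # or config["class"]
training_args = TrainingArguments(**model_cfg["training_args"])

print(model_cfg["dataset_path"])    # dataset location for this model
print(training_args.learning_rate)  # 3e-05 for the cry-detection run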