# @package _group_
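# ASR fine-tuning config for the yitrans_asr model, trained with the joint
# ctc_ce (CTC + decoder cross-entropy) criterion defined below.
# common: mixed-precision (fp16) training, JSON-formatted logs every 200
# updates, TensorBoard output under tblog, and a fixed seed for reproducibility.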
common:
  fp16: true
  log_format: json
  log_interval: 200
  tensorboard_logdir: tblog
  seed: 1337
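# Checkpointing: epoch checkpoints are kept, decoder accuracy is tracked with
# higher-is-better; training resumes from checkpoint_last.pt when present.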
checkpoint:
  no_epoch_checkpoints: false
  best_checkpoint_metric: dec_accuracy
  restore_file: checkpoint_last.pt
  maximize_best_checkpoint_metric: true
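# find_unused_parameters is needed under DDP when some parameters (e.g. one of
# the CTC/decoder branches) receive no gradient in a given step.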
distributed_training:
  ddp_backend: legacy_ddp
  find_unused_parameters: true
  distributed_world_size: 1
  distributed_port: -1
  nprocs_per_node: 8
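# ??? is OmegaConf's mandatory-value marker: data and label_dir below must be
# supplied at launch time (e.g. task.data=/path/to/manifests).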
task:
  _name: iwslt_joint_pretraining
  data: ???
  fine_tuning: true
  label_dir: ???
  normalize: true  # must be consistent with pre-training
  labels: ["ltr"]
  single_target: true
  add_decoder: true
  pad_audio: true
  random_crop: false
  max_keep_size: 480000
  hubert_tokenizer: "none"
  sp_path: None
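# Batches are capped by max_tokens, counted in raw audio samples; assuming
# 16 kHz input, 1,280,000 samples is roughly 80 s of audio per batch.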
dataset:
  num_workers: 6
  max_tokens: 1280000
  skip_invalid_size_inputs_valid_test: true
  train_subset: train_100
  valid_subset: dev_other
  required_batch_size_multiple: 1
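# ctc_ce combines CTC loss on the encoder output with cross-entropy on the
# decoder output; dec_weight scales the cross-entropy term.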
criterion:
  _name: ctc_ce
  zero_infinity: true
  dec_weight: 1.0
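# 80k updates at a fixed peak LR of 3e-5; update_freq: [1] means no gradient
# accumulation, and sentence_avg normalizes the loss per sentence, not per token.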
optimization:
  max_update: 80000
  lr: [0.00003]
  sentence_avg: true
  update_freq: [1]
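# Adam with beta2 = 0.98, as is common for Transformer fine-tuning; fairseq
# parses adam_betas from this tuple-style string.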
optimizer:
  _name: adam
  adam_betas: (0.9,0.98)
  adam_eps: 1e-08
  weight_decay: 0.0
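# Tri-stage schedule: warm up over the first 10% of max_update, hold the peak
# LR for 40%, then decay over the final 50% to final_lr_scale * peak LR.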
lr_scheduler:
  _name: tri_stage
  phase_ratio: [0.1, 0.4, 0.5]
  final_lr_scale: 0.05
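# w2v_path points at the pre-trained encoder checkpoint. mask_prob /
# mask_channel_prob apply SpecAugment-style masking over time steps and
# feature channels; feature_grad_mult: 0.0 keeps the convolutional feature
# extractor frozen, while freeze_finetune_updates: 0 lets the rest of the
# encoder train from the first update.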
model:
  _name: yitrans_asr
  w2v_path: ???
  apply_mask: true
  mask_prob: 0.5
  mask_channel_prob: 0.5
  mask_channel_length: 64
  layerdrop: 0.1
  decoder_layerdrop: 0.1
  activation_dropout: 0.1
  feature_grad_mult: 0.0
  freeze_finetune_updates: 0
  add_decoder: true
  share_decoder_input_output_embed: true
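# Hydra names run/sweep subdirectories from the CLI overrides, joining each
# key=value pair with '-' and separate overrides with '__'; the listed keys
# (paths, subsets, optional WER resources) are excluded to keep names short.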
hydra:
  job:
    config:
      override_dirname:
        kv_sep: '-'
        item_sep: '__'
        exclude_keys:
          - run
          - task.data
          - task.label_dir
          - model.w2v_path
          - dataset.train_subset
          - dataset.valid_subset
          - criterion.wer_kenlm_model
          - criterion.wer_lexicon
  run:
    dir: ???
  sweep:
    dir: ???
    subdir: ${hydra.job.config_name}__${hydra.job.override_dirname}