# ################################
# Model: Fastspeech2 for TTS
# Authors: Sathvik Udupa, Yingzhi Wang
# ################################

n_symbols: 62  # fixed depending on symbols in textToSequence
n_mel_channels: 80
padding_idx: 0

# Encoder parameters
enc_num_layers: 4
enc_num_head: 2
enc_d_model: 384
enc_ffn_dim: 1024
enc_k_dim: 384
enc_v_dim: 384
enc_dropout: 0.1

# Decoder parameters
dec_num_layers: 4
dec_num_head: 2
dec_d_model: 384
dec_ffn_dim: 1024
dec_k_dim: 384
dec_v_dim: 384
dec_dropout: 0.1

# common
normalize_before: True
ffn_type: 1dcnn  # 1dcnn or ffn
dur_pred_kernel_size: 3
pitch_pred_kernel_size: 3
energy_pred_kernel_size: 3

# Model definition: every `!ref <key>` resolves to the matching
# top-level hyperparameter declared above (HyperPyYAML semantics).
model: !new:speechbrain.lobes.models.FastSpeech2.FastSpeech2
  enc_num_layers: !ref <enc_num_layers>
  enc_num_head: !ref <enc_num_head>
  enc_d_model: !ref <enc_d_model>
  enc_ffn_dim: !ref <enc_ffn_dim>
  enc_k_dim: !ref <enc_k_dim>
  enc_v_dim: !ref <enc_v_dim>
  enc_dropout: !ref <enc_dropout>
  dec_num_layers: !ref <dec_num_layers>
  dec_num_head: !ref <dec_num_head>
  dec_d_model: !ref <dec_d_model>
  dec_ffn_dim: !ref <dec_ffn_dim>
  dec_k_dim: !ref <dec_k_dim>
  dec_v_dim: !ref <dec_v_dim>
  dec_dropout: !ref <dec_dropout>
  normalize_before: !ref <normalize_before>
  ffn_type: !ref <ffn_type>
  n_char: !ref <n_symbols>
  n_mels: !ref <n_mel_channels>
  padding_idx: !ref <padding_idx>
  dur_pred_kernel_size: !ref <dur_pred_kernel_size>
  pitch_pred_kernel_size: !ref <pitch_pred_kernel_size>
  energy_pred_kernel_size: !ref <energy_pred_kernel_size>

# The lexicon file must be the same used for training
lexicon:
  - "t"
  - "?"
  - "q"
  - "j"
  - "g"
  - "p"
  - "x"
  - "("
  - "é"
  - "e"
  - "z"
  - ","
  - "o"
  - "a"
  - "m"
  - "n"
  - "u"
  - "d"
  - ":"
  - "w"
  - "à"
  - "“"
  - "."
  - "”"
  - "’"
  - "["
  - "v"
  - "h"
  - " "
  - "ê"
  - "b"
  - "'"
  - "\""
  - "f"
  - "â"
  - "!"
  - ";"
  - "l"
  - "r"
  - "è"
  - "i"
  - "]"
  - "s"
  - "k"
  - "y"
  - ")"
  - "c"
  - "ü"
  - "-"

input_encoder: !new:speechbrain.dataio.encoder.TextEncoder

modules:
  model: !ref <model>

pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer
  loadables:
    model: !ref <model>