tyanz committed on
Commit 567a136
1 Parent(s): 115cf00

change dropout_rate to float type

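The commit replaces bare integer zeros with 0.0 so the YAML loader hands these dropout options to the model as floats rather than ints, presumably to satisfy code that expects a float there. A minimal sketch of the distinction, using plain PyYAML rather than the project's actual config loader (the loader choice is an assumption for illustration only):

import yaml

# YAML parses a bare `0` as an int and `0.0` as a float; code that
# annotates or type-checks its dropout arguments as float can reject
# the integer form.
cfg_int = yaml.safe_load("attention_dropout_rate: 0")
cfg_float = yaml.safe_load("attention_dropout_rate: 0.0")

print(type(cfg_int["attention_dropout_rate"]))    # <class 'int'>
print(type(cfg_float["attention_dropout_rate"]))  # <class 'float'>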
examples/magicdata-read/cosyvoice/conf/cosyvoice.fromscratch.yaml CHANGED
@@ -31,7 +31,7 @@ llm: !new:cosyvoice.llm.llm.TransformerLM
   num_blocks: 3
   dropout_rate: 0.1
   positional_dropout_rate: 0.1
-  attention_dropout_rate: 0
+  attention_dropout_rate: 0.0
   normalize_before: True
   input_layer: 'linear'
   pos_enc_layer_type: 'rel_pos_espnet'
@@ -49,7 +49,7 @@ llm: !new:cosyvoice.llm.llm.TransformerLM
   num_blocks: 7
   dropout_rate: 0.1
   positional_dropout_rate: 0.1
-  attention_dropout_rate: 0
+  attention_dropout_rate: 0.0
   input_layer: 'linear_legacy'
   pos_enc_layer_type: 'rel_pos_espnet'
   selfattention_layer_type: 'rel_selfattn'
@@ -97,7 +97,7 @@ flow: !new:cosyvoice.flow.flow.MaskedDiffWithXvec
   in_channels: 320
   out_channels: 80
   channels: [256, 256]
-  dropout: 0
+  dropout: 0.0
   attention_head_dim: 64
   n_blocks: 4
   num_mid_blocks: 8
examples/magicdata-read/cosyvoice/conf/cosyvoice.yaml CHANGED
@@ -31,7 +31,7 @@ llm: !new:cosyvoice.llm.llm.TransformerLM
   num_blocks: 6
   dropout_rate: 0.1
   positional_dropout_rate: 0.1
-  attention_dropout_rate: 0
+  attention_dropout_rate: 0.0
   normalize_before: True
   input_layer: 'linear'
   pos_enc_layer_type: 'rel_pos_espnet'
@@ -49,7 +49,7 @@ llm: !new:cosyvoice.llm.llm.TransformerLM
   num_blocks: 14
   dropout_rate: 0.1
   positional_dropout_rate: 0.1
-  attention_dropout_rate: 0
+  attention_dropout_rate: 0.0
   input_layer: 'linear_legacy'
   pos_enc_layer_type: 'rel_pos_espnet'
   selfattention_layer_type: 'rel_selfattn'
@@ -97,7 +97,7 @@ flow: !new:cosyvoice.flow.flow.MaskedDiffWithXvec
   in_channels: 320
   out_channels: 80
   channels: [256, 256]
-  dropout: 0
+  dropout: 0.0
   attention_head_dim: 64
   n_blocks: 4
   num_mid_blocks: 12