CaiRou-Huang committed on
Commit a94413f • 1 Parent(s): 5d904b0

Upload 14 files

model_assets/jvnv-F1-jp/config.json ADDED
@@ -0,0 +1,92 @@
+ {
+   "train": {
+     "log_interval": 200,
+     "eval_interval": 1000,
+     "seed": 42,
+     "epochs": 300,
+     "learning_rate": 0.0001,
+     "betas": [0.8, 0.99],
+     "eps": 1e-9,
+     "batch_size": 4,
+     "bf16_run": true,
+     "fp16_run": false,
+     "lr_decay": 0.99996,
+     "segment_size": 16384,
+     "init_lr_ratio": 1,
+     "warmup_epochs": 0,
+     "c_mel": 45,
+     "c_kl": 1.0,
+     "c_commit": 100,
+     "skip_optimizer": true,
+     "freeze_ZH_bert": false,
+     "freeze_JP_bert": false,
+     "freeze_EN_bert": false,
+     "freeze_emo": false,
+     "freeze_style": false
+   },
+   "data": {
+     "use_jp_extra": true,
+     "training_files": "Data/jvnv-F1-jp/train.list",
+     "validation_files": "Data/jvnv-F1-jp/val.list",
+     "max_wav_value": 32768.0,
+     "sampling_rate": 44100,
+     "filter_length": 2048,
+     "hop_length": 512,
+     "win_length": 2048,
+     "n_mel_channels": 128,
+     "mel_fmin": 0.0,
+     "mel_fmax": null,
+     "add_blank": true,
+     "n_speakers": 1,
+     "cleaned_text": true,
+     "spk2id": {
+       "jvnv-F1-jp": 0
+     },
+     "num_styles": 7,
+     "style2id": {
+       "Neutral": 0,
+       "Angry": 1,
+       "Disgust": 2,
+       "Fear": 3,
+       "Happy": 4,
+       "Sad": 5,
+       "Surprise": 6
+     }
+   },
+   "model": {
+     "use_spk_conditioned_encoder": true,
+     "use_noise_scaled_mas": true,
+     "use_mel_posterior_encoder": false,
+     "use_duration_discriminator": false,
+     "use_wavlm_discriminator": true,
+     "inter_channels": 192,
+     "hidden_channels": 192,
+     "filter_channels": 768,
+     "n_heads": 2,
+     "n_layers": 6,
+     "kernel_size": 3,
+     "p_dropout": 0.1,
+     "resblock": "1",
+     "resblock_kernel_sizes": [3, 7, 11],
+     "resblock_dilation_sizes": [
+       [1, 3, 5],
+       [1, 3, 5],
+       [1, 3, 5]
+     ],
+     "upsample_rates": [8, 8, 2, 2, 2],
+     "upsample_initial_channel": 512,
+     "upsample_kernel_sizes": [16, 16, 8, 2, 2],
+     "n_layers_q": 3,
+     "use_spectral_norm": false,
+     "gin_channels": 512,
+     "slm": {
+       "model": "./slm/wavlm-base-plus",
+       "sr": 16000,
+       "hidden": 768,
+       "nlayers": 13,
+       "initial_channel": 64
+     }
+   },
+   "version": "2.0-JP-Extra",
+   "model_name": "jvnv-F1-jp"
+ }
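
For reference, a minimal sketch of reading one of these configs in Python (standard library only; the path follows this repo's layout, and the fields accessed are the ones shown in the diff above):

    import json

    # Path follows the model_assets layout uploaded in this commit.
    with open("model_assets/jvnv-F1-jp/config.json", encoding="utf-8") as f:
        config = json.load(f)

    # A few fields downstream tooling typically reads (structure as above).
    print(config["data"]["sampling_rate"])  # 44100
    print(config["data"]["style2id"])       # {"Neutral": 0, ..., "Surprise": 6}
    print(config["version"])                # "2.0-JP-Extra"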
model_assets/jvnv-F1-jp/jvnv-F1-jp_e182_s16000.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fd324b2dd04b4a3384e0dbf4a268fd8a9bbedcfe80608fbd2a3aaaa44e474abe
+ size 251150980
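
The .safetensors entries are Git LFS pointer files, not the weights themselves; each pointer records the blob's SHA-256 and byte size. A hedged sketch of verifying a downloaded blob against this pointer (standard library only):

    import hashlib
    import os

    # Values copied from the LFS pointer above.
    expected_oid = "fd324b2dd04b4a3384e0dbf4a268fd8a9bbedcfe80608fbd2a3aaaa44e474abe"
    expected_size = 251150980
    path = "model_assets/jvnv-F1-jp/jvnv-F1-jp_e182_s16000.safetensors"

    # Hash in 1 MiB chunks so the ~250 MB file never sits fully in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)

    assert os.path.getsize(path) == expected_size
    assert h.hexdigest() == expected_oid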
model_assets/jvnv-F1-jp/style_vectors.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1f959bb45ed0922efc31ff24e9147253814f42cb1d2d1e2bb10391a9df368489
+ size 7296
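
style_vectors.npy presumably holds one style-embedding vector per entry in config.json's style2id. The shape check below is an assumption inferred from the 7296-byte pointer (7 × 256 float32 plus the 128-byte .npy header), not something stated in this diff:

    import numpy as np

    # Hypothetical check: shape (7, 256) is inferred from the file size,
    # with rows assumed to follow the style2id order in config.json.
    style_vectors = np.load("model_assets/jvnv-F1-jp/style_vectors.npy")
    print(style_vectors.shape, style_vectors.dtype)  # expected: (7, 256) float32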
model_assets/jvnv-F2-jp/config.json ADDED
@@ -0,0 +1,92 @@
+ {
+   "train": {
+     "log_interval": 200,
+     "eval_interval": 1000,
+     "seed": 42,
+     "epochs": 300,
+     "learning_rate": 0.0001,
+     "betas": [0.8, 0.99],
+     "eps": 1e-9,
+     "batch_size": 4,
+     "bf16_run": false,
+     "fp16_run": false,
+     "lr_decay": 0.99996,
+     "segment_size": 16384,
+     "init_lr_ratio": 1,
+     "warmup_epochs": 0,
+     "c_mel": 45,
+     "c_kl": 1.0,
+     "c_commit": 100,
+     "skip_optimizer": true,
+     "freeze_ZH_bert": false,
+     "freeze_JP_bert": false,
+     "freeze_EN_bert": false,
+     "freeze_emo": false,
+     "freeze_style": false
+   },
+   "data": {
+     "use_jp_extra": true,
+     "training_files": "/content/drive/MyDrive/Style-Bert-VITS2/Data/jvnv-F2/train.list",
+     "validation_files": "/content/drive/MyDrive/Style-Bert-VITS2/Data/jvnv-F2/val.list",
+     "max_wav_value": 32768.0,
+     "sampling_rate": 44100,
+     "filter_length": 2048,
+     "hop_length": 512,
+     "win_length": 2048,
+     "n_mel_channels": 128,
+     "mel_fmin": 0.0,
+     "mel_fmax": null,
+     "add_blank": true,
+     "n_speakers": 1,
+     "cleaned_text": true,
+     "spk2id": {
+       "jvnv-F2-jp": 0
+     },
+     "num_styles": 7,
+     "style2id": {
+       "Neutral": 0,
+       "Angry": 1,
+       "Disgust": 2,
+       "Fear": 3,
+       "Happy": 4,
+       "Sad": 5,
+       "Surprise": 6
+     }
+   },
+   "model": {
+     "use_spk_conditioned_encoder": true,
+     "use_noise_scaled_mas": true,
+     "use_mel_posterior_encoder": false,
+     "use_duration_discriminator": false,
+     "use_wavlm_discriminator": true,
+     "inter_channels": 192,
+     "hidden_channels": 192,
+     "filter_channels": 768,
+     "n_heads": 2,
+     "n_layers": 6,
+     "kernel_size": 3,
+     "p_dropout": 0.1,
+     "resblock": "1",
+     "resblock_kernel_sizes": [3, 7, 11],
+     "resblock_dilation_sizes": [
+       [1, 3, 5],
+       [1, 3, 5],
+       [1, 3, 5]
+     ],
+     "upsample_rates": [8, 8, 2, 2, 2],
+     "upsample_initial_channel": 512,
+     "upsample_kernel_sizes": [16, 16, 8, 2, 2],
+     "n_layers_q": 3,
+     "use_spectral_norm": false,
+     "gin_channels": 512,
+     "slm": {
+       "model": "./slm/wavlm-base-plus",
+       "sr": 16000,
+       "hidden": 768,
+       "nlayers": 13,
+       "initial_channel": 64
+     }
+   },
+   "version": "2.0-JP-Extra",
+   "model_name": "jvnv-F2-jp"
+ }
model_assets/jvnv-F2-jp/jvnv-F2_e166_s20000.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f6289a6f30bb9795744815b9da764a3c8198b18652d9fddef82fff1e14f0e784
+ size 251150980
model_assets/jvnv-F2-jp/style_vectors.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:900f8cde3a336d12193fec7b7d8e6c5dc77b3a5d719a9be3f8598389cd88e643
+ size 7296
model_assets/jvnv-M1-jp/config.json ADDED
@@ -0,0 +1,92 @@
+ {
+   "train": {
+     "log_interval": 200,
+     "eval_interval": 1000,
+     "seed": 42,
+     "epochs": 300,
+     "learning_rate": 0.0001,
+     "betas": [0.8, 0.99],
+     "eps": 1e-9,
+     "batch_size": 4,
+     "bf16_run": true,
+     "fp16_run": false,
+     "lr_decay": 0.99996,
+     "segment_size": 16384,
+     "init_lr_ratio": 1,
+     "warmup_epochs": 0,
+     "c_mel": 45,
+     "c_kl": 1.0,
+     "c_commit": 100,
+     "skip_optimizer": true,
+     "freeze_ZH_bert": false,
+     "freeze_JP_bert": false,
+     "freeze_EN_bert": false,
+     "freeze_emo": false,
+     "freeze_style": false
+   },
+   "data": {
+     "use_jp_extra": true,
+     "training_files": "Data/jvnv-M1-jp/train.list",
+     "validation_files": "Data/jvnv-M1-jp/val.list",
+     "max_wav_value": 32768.0,
+     "sampling_rate": 44100,
+     "filter_length": 2048,
+     "hop_length": 512,
+     "win_length": 2048,
+     "n_mel_channels": 128,
+     "mel_fmin": 0.0,
+     "mel_fmax": null,
+     "add_blank": true,
+     "n_speakers": 1,
+     "cleaned_text": true,
+     "spk2id": {
+       "jvnv-M1-jp": 0
+     },
+     "num_styles": 7,
+     "style2id": {
+       "Neutral": 0,
+       "Angry": 1,
+       "Disgust": 2,
+       "Fear": 3,
+       "Happy": 4,
+       "Sad": 5,
+       "Surprise": 6
+     }
+   },
+   "model": {
+     "use_spk_conditioned_encoder": true,
+     "use_noise_scaled_mas": true,
+     "use_mel_posterior_encoder": false,
+     "use_duration_discriminator": false,
+     "use_wavlm_discriminator": true,
+     "inter_channels": 192,
+     "hidden_channels": 192,
+     "filter_channels": 768,
+     "n_heads": 2,
+     "n_layers": 6,
+     "kernel_size": 3,
+     "p_dropout": 0.1,
+     "resblock": "1",
+     "resblock_kernel_sizes": [3, 7, 11],
+     "resblock_dilation_sizes": [
+       [1, 3, 5],
+       [1, 3, 5],
+       [1, 3, 5]
+     ],
+     "upsample_rates": [8, 8, 2, 2, 2],
+     "upsample_initial_channel": 512,
+     "upsample_kernel_sizes": [16, 16, 8, 2, 2],
+     "n_layers_q": 3,
+     "use_spectral_norm": false,
+     "gin_channels": 512,
+     "slm": {
+       "model": "./slm/wavlm-base-plus",
+       "sr": 16000,
+       "hidden": 768,
+       "nlayers": 13,
+       "initial_channel": 64
+     }
+   },
+   "version": "2.0-JP-Extra",
+   "model_name": "jvnv-M1-jp"
+ }
model_assets/jvnv-M1-jp/jvnv-M1-jp_e158_s14000.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0d86765f1fe08dbba74cd06283e96b6941b3f232329fabbba9c30e6edc27887a
+ size 251150980
model_assets/jvnv-M1-jp/style_vectors.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7a925435e8c1c9efc8fc8e90e690655ab9a7bae00a790892e13e936510d04f05
+ size 7296
model_assets/jvnv-M2-jp/config.json ADDED
@@ -0,0 +1,92 @@
+ {
+   "train": {
+     "log_interval": 200,
+     "eval_interval": 1000,
+     "seed": 42,
+     "epochs": 300,
+     "learning_rate": 0.0001,
+     "betas": [0.8, 0.99],
+     "eps": 1e-9,
+     "batch_size": 4,
+     "bf16_run": true,
+     "fp16_run": false,
+     "lr_decay": 0.99996,
+     "segment_size": 16384,
+     "init_lr_ratio": 1,
+     "warmup_epochs": 0,
+     "c_mel": 45,
+     "c_kl": 1.0,
+     "c_commit": 100,
+     "skip_optimizer": true,
+     "freeze_ZH_bert": false,
+     "freeze_JP_bert": false,
+     "freeze_EN_bert": false,
+     "freeze_emo": false,
+     "freeze_style": false
+   },
+   "data": {
+     "use_jp_extra": true,
+     "training_files": "Data/jvnv-M2-jp/train.list",
+     "validation_files": "Data/jvnv-M2-jp/val.list",
+     "max_wav_value": 32768.0,
+     "sampling_rate": 44100,
+     "filter_length": 2048,
+     "hop_length": 512,
+     "win_length": 2048,
+     "n_mel_channels": 128,
+     "mel_fmin": 0.0,
+     "mel_fmax": null,
+     "add_blank": true,
+     "n_speakers": 1,
+     "cleaned_text": true,
+     "spk2id": {
+       "jvnv-M2-jp": 0
+     },
+     "num_styles": 7,
+     "style2id": {
+       "Neutral": 0,
+       "Angry": 1,
+       "Disgust": 2,
+       "Fear": 3,
+       "Happy": 4,
+       "Sad": 5,
+       "Surprise": 6
+     }
+   },
+   "model": {
+     "use_spk_conditioned_encoder": true,
+     "use_noise_scaled_mas": true,
+     "use_mel_posterior_encoder": false,
+     "use_duration_discriminator": false,
+     "use_wavlm_discriminator": true,
+     "inter_channels": 192,
+     "hidden_channels": 192,
+     "filter_channels": 768,
+     "n_heads": 2,
+     "n_layers": 6,
+     "kernel_size": 3,
+     "p_dropout": 0.1,
+     "resblock": "1",
+     "resblock_kernel_sizes": [3, 7, 11],
+     "resblock_dilation_sizes": [
+       [1, 3, 5],
+       [1, 3, 5],
+       [1, 3, 5]
+     ],
+     "upsample_rates": [8, 8, 2, 2, 2],
+     "upsample_initial_channel": 512,
+     "upsample_kernel_sizes": [16, 16, 8, 2, 2],
+     "n_layers_q": 3,
+     "use_spectral_norm": false,
+     "gin_channels": 512,
+     "slm": {
+       "model": "./slm/wavlm-base-plus",
+       "sr": 16000,
+       "hidden": 768,
+       "nlayers": 13,
+       "initial_channel": 64
+     }
+   },
+   "version": "2.0-JP-Extra",
+   "model_name": "jvnv-M2-jp"
+ }
model_assets/jvnv-M2-jp/jvnv-M2-jp_e159_s17000.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8245f39438076d36a3befd8aefb15c38830cef326c1f7c9d9c8e64b647645402
+ size 251150980
model_assets/jvnv-M2-jp/style_vectors.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c965bb63fa4a759d41a8a4a3649333125d6497ae8a705d81b7d5c5bd2854797c
+ size 7296
monotonic_align/__init__.py ADDED
@@ -0,0 +1,16 @@
+ from numpy import zeros, int32, float32
+ from torch import from_numpy
+
+ from .core import maximum_path_jit
+
+
+ def maximum_path(neg_cent, mask):
+     device = neg_cent.device
+     dtype = neg_cent.dtype
+     neg_cent = neg_cent.data.cpu().numpy().astype(float32)
+     path = zeros(neg_cent.shape, dtype=int32)
+
+     t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(int32)
+     t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(int32)
+     maximum_path_jit(path, neg_cent, t_t_max, t_s_max)
+     return from_numpy(path).to(device=device, dtype=dtype)
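
A minimal usage sketch for the wrapper above. The shapes follow how VITS-style models call it (neg_cent and mask are [batch, frames, tokens]); the dummy tensors are illustrative only:

    import torch
    from monotonic_align import maximum_path

    b, t_frames, t_tokens = 1, 6, 3
    neg_cent = torch.randn(b, t_frames, t_tokens)  # alignment log-likelihoods
    mask = torch.ones(b, t_frames, t_tokens)       # all positions valid

    path = maximum_path(neg_cent, mask)  # hard 0/1 alignment, same shape
    # Each frame row selects exactly one token, and the selected token index
    # never decreases from one frame to the next (monotonic alignment).
    print(path[0])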
monotonic_align/core.py ADDED
@@ -0,0 +1,46 @@
 
+ import numba
+
+
+ @numba.jit(
+     numba.void(
+         numba.int32[:, :, ::1],
+         numba.float32[:, :, ::1],
+         numba.int32[::1],
+         numba.int32[::1],
+     ),
+     nopython=True,
+     nogil=True,
+ )
+ def maximum_path_jit(paths, values, t_ys, t_xs):
+     b = paths.shape[0]
+     max_neg_val = -1e9
+     for i in range(int(b)):
+         path = paths[i]
+         value = values[i]
+         t_y = t_ys[i]
+         t_x = t_xs[i]
+
+         v_prev = v_cur = 0.0
+         index = t_x - 1
+
+         for y in range(t_y):
+             for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
+                 if x == y:
+                     v_cur = max_neg_val
+                 else:
+                     v_cur = value[y - 1, x]
+                 if x == 0:
+                     if y == 0:
+                         v_prev = 0.0
+                     else:
+                         v_prev = max_neg_val
+                 else:
+                     v_prev = value[y - 1, x - 1]
+                 value[y, x] += max(v_prev, v_cur)
+
+         for y in range(t_y - 1, -1, -1):
+             path[y, index] = 1
+             if index != 0 and (
+                 index == y or value[y - 1, index] < value[y - 1, index - 1]
+             ):
+                 index = index - 1
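
The kernel is a Viterbi-style dynamic program: the forward loops accumulate value[y, x] += max(value[y-1, x], value[y-1, x-1]) with the boundary cases handled explicitly, and the final loop backtracks the best monotonic path starting from the last token. A hedged sanity check calling the kernel directly (arrays must be C-contiguous float32/int32 to match the numba signature; note the forward pass mutates values in place):

    import numpy as np
    from monotonic_align.core import maximum_path_jit

    t_y, t_x = 5, 3  # frames x tokens; t_y >= t_x so every token is reachable
    values = np.random.randn(1, t_y, t_x).astype(np.float32)
    paths = np.zeros((1, t_y, t_x), dtype=np.int32)

    maximum_path_jit(
        paths,
        values,
        np.array([t_y], dtype=np.int32),
        np.array([t_x], dtype=np.int32),
    )

    # Exactly one token per frame, and the chosen column never decreases.
    assert paths[0].sum(axis=1).tolist() == [1] * t_y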