Kit-Lemonfoot
committed on
Fixed some deprecation issues.
Browse files- app.py +3 -1
- models.py +2 -1
- models_jp_extra.py +2 -1
- modules.py +5 -4
- utils.py +1 -4
app.py
CHANGED
@@ -67,8 +67,10 @@ def tts_fn(
|
|
67 |
|
68 |
if(not model_holder.current_model):
|
69 |
model_holder.load_model_gr(model_name, model_path)
|
|
|
70 |
if(model_holder.current_model.model_path != model_path):
|
71 |
model_holder.load_model_gr(model_name, model_path)
|
|
|
72 |
speaker_id = model_holder.current_model.spk2id[speaker]
|
73 |
start_time = datetime.datetime.now()
|
74 |
|
@@ -288,7 +290,7 @@ if __name__ == "__main__":
|
|
288 |
for (name, model_path, voice_name, speakerid, datasetauthor, image) in voicedata:
|
289 |
with gr.TabItem(name):
|
290 |
mn = gr.Textbox(value=model_path, visible=False, interactive=False)
|
291 |
-
mp = gr.Textbox(value=f"model_assets\{model_path}\{model_path}.safetensors", visible=False, interactive=False)
|
292 |
spk = gr.Textbox(value=speakerid, visible=False, interactive=False)
|
293 |
with gr.Row():
|
294 |
with gr.Column():
|
|
|
67 |
|
68 |
if(not model_holder.current_model):
|
69 |
model_holder.load_model_gr(model_name, model_path)
|
70 |
+
logger.info(f"Loaded model '{model_name}'")
|
71 |
if(model_holder.current_model.model_path != model_path):
|
72 |
model_holder.load_model_gr(model_name, model_path)
|
73 |
+
logger.info(f"Swapped to model '{model_name}'")
|
74 |
speaker_id = model_holder.current_model.spk2id[speaker]
|
75 |
start_time = datetime.datetime.now()
|
76 |
|
|
|
290 |
for (name, model_path, voice_name, speakerid, datasetauthor, image) in voicedata:
|
291 |
with gr.TabItem(name):
|
292 |
mn = gr.Textbox(value=model_path, visible=False, interactive=False)
|
293 |
+
mp = gr.Textbox(value=f"model_assets\\{model_path}\\{model_path}.safetensors", visible=False, interactive=False)
|
294 |
spk = gr.Textbox(value=speakerid, visible=False, interactive=False)
|
295 |
with gr.Row():
|
296 |
with gr.Column():
|
models.py
CHANGED
@@ -5,7 +5,8 @@ import torch
|
|
5 |
from torch import nn
|
6 |
from torch.nn import Conv1d, Conv2d, ConvTranspose1d
|
7 |
from torch.nn import functional as F
|
8 |
-
from torch.nn.utils import remove_weight_norm
|
|
|
9 |
|
10 |
import attentions
|
11 |
import commons
|
|
|
5 |
from torch import nn
|
6 |
from torch.nn import Conv1d, Conv2d, ConvTranspose1d
|
7 |
from torch.nn import functional as F
|
8 |
+
from torch.nn.utils import remove_weight_norm
|
9 |
+
from torch.nn.utils.parametrizations import spectral_norm, weight_norm
|
10 |
|
11 |
import attentions
|
12 |
import commons
|
models_jp_extra.py
CHANGED
@@ -9,7 +9,8 @@ import attentions
|
|
9 |
import monotonic_align
|
10 |
|
11 |
from torch.nn import Conv1d, ConvTranspose1d, Conv2d
|
12 |
-
from torch.nn.utils import remove_weight_norm, spectral_norm, weight_norm
|
|
|
13 |
|
14 |
from commons import init_weights, get_padding
|
15 |
from text import symbols, num_tones, num_languages
|
|
|
9 |
import monotonic_align
|
10 |
|
11 |
from torch.nn import Conv1d, ConvTranspose1d, Conv2d
|
12 |
+
from torch.nn.utils import remove_weight_norm
|
13 |
+
from torch.nn.utils.parametrizations import spectral_norm, weight_norm
|
14 |
|
15 |
from commons import init_weights, get_padding
|
16 |
from text import symbols, num_tones, num_languages
|
modules.py
CHANGED
@@ -5,7 +5,8 @@ import torch
|
|
5 |
from torch import nn
|
6 |
from torch.nn import Conv1d
|
7 |
from torch.nn import functional as F
|
8 |
-
from torch.nn.utils import remove_weight_norm
|
|
|
9 |
|
10 |
import commons
|
11 |
from attentions import Encoder
|
@@ -158,7 +159,7 @@ class WN(torch.nn.Module):
|
|
158 |
cond_layer = torch.nn.Conv1d(
|
159 |
gin_channels, 2 * hidden_channels * n_layers, 1
|
160 |
)
|
161 |
-
self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight")
|
162 |
|
163 |
for i in range(n_layers):
|
164 |
dilation = dilation_rate**i
|
@@ -170,7 +171,7 @@ class WN(torch.nn.Module):
|
|
170 |
dilation=dilation,
|
171 |
padding=padding,
|
172 |
)
|
173 |
-
in_layer = torch.nn.utils.weight_norm(in_layer, name="weight")
|
174 |
self.in_layers.append(in_layer)
|
175 |
|
176 |
# last one is not necessary
|
@@ -180,7 +181,7 @@ class WN(torch.nn.Module):
|
|
180 |
res_skip_channels = hidden_channels
|
181 |
|
182 |
res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
|
183 |
-
res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight")
|
184 |
self.res_skip_layers.append(res_skip_layer)
|
185 |
|
186 |
def forward(self, x, x_mask, g=None, **kwargs):
|
|
|
5 |
from torch import nn
|
6 |
from torch.nn import Conv1d
|
7 |
from torch.nn import functional as F
|
8 |
+
from torch.nn.utils import remove_weight_norm
|
9 |
+
from torch.nn.utils.parametrizations import weight_norm
|
10 |
|
11 |
import commons
|
12 |
from attentions import Encoder
|
|
|
159 |
cond_layer = torch.nn.Conv1d(
|
160 |
gin_channels, 2 * hidden_channels * n_layers, 1
|
161 |
)
|
162 |
+
self.cond_layer = torch.nn.utils.parametrizations.weight_norm(cond_layer, name="weight")
|
163 |
|
164 |
for i in range(n_layers):
|
165 |
dilation = dilation_rate**i
|
|
|
171 |
dilation=dilation,
|
172 |
padding=padding,
|
173 |
)
|
174 |
+
in_layer = torch.nn.utils.parametrizations.weight_norm(in_layer, name="weight")
|
175 |
self.in_layers.append(in_layer)
|
176 |
|
177 |
# last one is not necessary
|
|
|
181 |
res_skip_channels = hidden_channels
|
182 |
|
183 |
res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
|
184 |
+
res_skip_layer = torch.nn.utils.parametrizations.weight_norm(res_skip_layer, name="weight")
|
185 |
self.res_skip_layers.append(res_skip_layer)
|
186 |
|
187 |
def forward(self, x, x_mask, g=None, **kwargs):
|
utils.py
CHANGED
@@ -162,10 +162,7 @@ def load_safetensors(checkpoint_path, model, for_infer=False):
|
|
162 |
if key == "iteration":
|
163 |
continue
|
164 |
logger.warning(f"Unexpected key: {key}")
|
165 |
-
|
166 |
-
logger.info(f"Loaded '{checkpoint_path}'")
|
167 |
-
else:
|
168 |
-
logger.info(f"Loaded '{checkpoint_path}' (iteration {iteration})")
|
169 |
return model, iteration
|
170 |
|
171 |
|
|
|
162 |
if key == "iteration":
|
163 |
continue
|
164 |
logger.warning(f"Unexpected key: {key}")
|
165 |
+
#logger.info(f"Swapped to model '{checkpoint_path}'")
|
|
|
|
|
|
|
166 |
return model, iteration
|
167 |
|
168 |
|