Kororinpa committed on
Commit 7353fd4
1 Parent(s): 603982f

Create new file

Files changed (1)
app.py +67 -0
app.py ADDED
@@ -0,0 +1,67 @@
+ import os
+ import gradio as gr
+
+ # Switch the working directory to the project folder.
+ path = "Kororinpa/Amadeus_Project"
+ os.chdir(path)
+ print(os.getcwd())
+
+ import json
+ import math
+ import torch
+ from torch import nn
+ from torch.nn import functional as F
+ from torch.utils.data import DataLoader
+
+ import commons
+ import utils
+ from data_utils import TextAudioLoader, TextAudioCollate, TextAudioSpeakerLoader, TextAudioSpeakerCollate
+ from models import SynthesizerTrn
+ from text.symbols import symbols
+ from text import text_to_sequence
+
+ from scipy.io.wavfile import write
+
+
+ def get_text(text, hps):
+     # Convert input text to a tensor of symbol IDs, optionally interspersing blank tokens.
+     text_norm = text_to_sequence(text, hps.data.text_cleaners)
+     if hps.data.add_blank:
+         text_norm = commons.intersperse(text_norm, 0)
+     text_norm = torch.LongTensor(text_norm)
+     return text_norm
+
+
+ # Load hyperparameters and build the VITS synthesizer on the GPU.
+ hps = utils.get_hparams_from_file("Kororinpa/Amadeus_Project/configs/steins_gate_base.json")
+
+ net_g = SynthesizerTrn(
+     len(symbols),
+     hps.data.filter_length // 2 + 1,
+     hps.train.segment_size // hps.data.hop_length,
+     **hps.model).cuda()
+ _ = net_g.eval()
+
+ # Load the trained generator weights.
+ _ = utils.load_checkpoint("Kororinpa/Amadeus_Project/G_265000.pth", net_g, None)
+
+
+ def syn(content):
+     # Synthesize audio for the given text and return (sampling_rate, waveform) for gr.Audio.
+     stn_tst = get_text(content, hps)
+     with torch.no_grad():
+         x_tst = stn_tst.cuda().unsqueeze(0)
+         x_tst_lengths = torch.LongTensor([stn_tst.size(0)]).cuda()
+         audio = net_g.infer(x_tst, x_tst_lengths, noise_scale=0.667, noise_scale_w=0.8, length_scale=1)[0][0, 0].data.cpu().float().numpy()
+         return (hps.data.sampling_rate, audio)
+         # Notebook-only preview: ipd.display(ipd.Audio(audio, rate=hps.data.sampling_rate))
+
+
+ # Minimal Interface wrapper around syn (the Blocks app below is what actually gets launched).
+ demo = gr.Interface(fn=syn, inputs="text", outputs=gr.Audio())
+
+ # Gradio UI: a single "Basic" tab with a textbox, a convert button, and an audio output.
+ app = gr.Blocks()
+ with app:
+     with gr.Tabs():
+         with gr.TabItem("Basic"):
+             input1 = gr.Textbox()
+             submit = gr.Button("Convert", variant="primary")
+             output1 = gr.Audio(label="Output Audio")
+             submit.click(syn, input1, output1)
+
+ app.launch()
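
The setup above assumes a CUDA device: both the model and the input tensors are moved with .cuda(), which fails on CPU-only hardware. A minimal device-agnostic sketch of the same loading and inference calls, assuming the utils, SynthesizerTrn, hps, symbols, and get_text names defined in app.py (the function name syn_any_device is hypothetical):

import torch

# Pick GPU if available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

net_g = SynthesizerTrn(
    len(symbols),
    hps.data.filter_length // 2 + 1,
    hps.train.segment_size // hps.data.hop_length,
    **hps.model).to(device)
net_g.eval()
utils.load_checkpoint("Kororinpa/Amadeus_Project/G_265000.pth", net_g, None)

def syn_any_device(content):
    # Same inference call as syn, but tensors follow the selected device.
    stn_tst = get_text(content, hps)
    with torch.no_grad():
        x_tst = stn_tst.unsqueeze(0).to(device)
        x_tst_lengths = torch.LongTensor([stn_tst.size(0)]).to(device)
        audio = net_g.infer(x_tst, x_tst_lengths, noise_scale=0.667,
                            noise_scale_w=0.8, length_scale=1)[0][0, 0].cpu().float().numpy()
    return hps.data.sampling_rate, audio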