Commit 24b96ed (parent 076fe18): Update app.py
app.py CHANGED
@@ -92,8 +92,7 @@ def convert(model, src_mic,src_file, tgt):
         return
 
     if not tgt:
-
-        return
+        tgt="52_4703_000002.wav"
 
     with torch.no_grad():
         # tgt
@@ -140,7 +139,7 @@ def convert(model, src_mic,src_file, tgt):
 model = gr.Dropdown(choices=["FreeVC MLS","FreeVC (24kHz)"], value="FreeVC MLS",type="value", label="Model")
 audio1_mic=gr.Audio(source="microphone", type="filepath", label='record your voice', optional=True)
 audio1_file = gr.inputs.Audio(type='filepath', label='or upload an audio file', optional=True)
-audio2 = gr.inputs.Audio(label="Reference Audio", type='filepath',
+audio2 = gr.inputs.Audio(label="Reference Audio", type='filepath', optional=True)
 inputs = [model, audio1_mic, audio1_file, audio2]
 outputs = gr.outputs.Audio(label="Output Audio", type='filepath')
 
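The first hunk changes what convert() does when no reference ("target") audio is supplied: instead of returning early with no output, it falls back to the bundled sample 52_4703_000002.wav. A minimal sketch of that guard follows; only the if not tgt branch comes from the diff, the surrounding function body is assumed rather than copied from app.py:

import torch

def convert(model, src_mic, src_file, tgt):
    # Hedged sketch of the guard after this commit; the real app.py runs
    # the voice-conversion model inside the torch.no_grad() block.
    src = src_mic if src_mic else src_file    # assumed: prefer the mic recording
    if not src:
        return None                           # nothing recorded or uploaded
    if not tgt:
        # New behaviour: fall back to a bundled reference clip instead of
        # returning early, so conversion still produces output.
        tgt = "52_4703_000002.wav"
    with torch.no_grad():
        # ... load src and tgt, extract the speaker embedding, run the model ...
        pass
    return "converted.wav"                    # placeholder output path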
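The second hunk makes the Reference Audio component optional as well, so leaving it empty hands an empty tgt to convert() and the default above takes over. A sketch of how the four inputs wire into a gr.Interface; the Interface and launch lines are assumptions, while the component calls follow the older gr.inputs/gr.outputs API that this app.py uses:

import gradio as gr

model = gr.Dropdown(choices=["FreeVC MLS", "FreeVC (24kHz)"], value="FreeVC MLS",
                    type="value", label="Model")
audio1_mic = gr.Audio(source="microphone", type="filepath",
                      label="record your voice", optional=True)
audio1_file = gr.inputs.Audio(type="filepath",
                              label="or upload an audio file", optional=True)
# Now optional too: when left empty, convert() substitutes its default reference clip.
audio2 = gr.inputs.Audio(label="Reference Audio", type="filepath", optional=True)

inputs = [model, audio1_mic, audio1_file, audio2]
outputs = gr.outputs.Audio(label="Output Audio", type="filepath")

# Assumed glue: convert is the function sketched above.
demo = gr.Interface(fn=convert, inputs=inputs, outputs=outputs)
demo.launch()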