fffiloni committed on
Commit
346d904
1 Parent(s): 6624658

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -3
app.py CHANGED
@@ -1,16 +1,20 @@
import gradio as gr

# Remote Hugging Face Spaces that do the heavy lifting:
# speech -> chatGPT text, then text -> synthesized speech.
whisper_to_gpt = gr.Blocks.load(name="spaces/fffiloni/whisper-to-chatGPT")
tts = gr.Interface.load(name="spaces/StevenLimcorn/fastspeech2-TTS")


def infer(audio):
    """Turn a voice recording into a spoken chatGPT reply.

    audio is a filepath to the microphone recording (see `inputs` below).
    Returns the TTS audio produced from the chat response.
    """
    chat_result = whisper_to_gpt(audio, "translate", fn_index=0)
    print(chat_result)  # debug: inspect the remote Space's raw output
    # chat_result[1] is presumably the text reply from the chat Space
    # (index taken on faith from that Space's output ordering).
    spoken_reply = tts(chat_result[1], "Fastspeech2 + Melgan", fn_index=0)
    return spoken_reply


inputs = gr.Audio(source="microphone", type="filepath")
outputs = gr.Audio()

demo = gr.Interface(fn=infer, inputs=inputs, outputs=outputs)
demo.launch()
 
import gradio as gr
from PIL import Image

# Remote Hugging Face Spaces chained into a voice-chat pipeline:
# speech -> chatGPT text -> TTS audio -> talking-head video.
whisper_to_gpt = gr.Blocks.load(name="spaces/fffiloni/whisper-to-chatGPT")
tts = gr.Interface.load(name="spaces/StevenLimcorn/fastspeech2-TTS")
talking_face = gr.Interface.load(name="spaces/fffiloni/one-shot-talking-face")


def infer(audio):
    """Run the full pipeline on one microphone recording.

    Parameters
    ----------
    audio : str
        Filepath of the recorded audio (see `inputs` below).

    Returns
    -------
    tuple
        (TTS audio response, talking-head portrait video), matching the
        two components declared in `outputs`.
    """
    gpt_response = whisper_to_gpt(audio, "translate", fn_index=0)
    # gpt_response[1] is presumably the text reply from the chat Space —
    # index taken on faith from that Space's output ordering.
    audio_response = tts(gpt_response[1], "Fastspeech2 + Melgan", fn_index=0)
    # Use a context manager so the portrait's file handle is closed
    # deterministically instead of leaking one per request.
    with Image.open("wise_woman_portrait.png") as image:
        portrait = talking_face(image, audio_response)
    return audio_response, portrait


inputs = gr.Audio(source="microphone", type="filepath")
outputs = [gr.Audio(), gr.Video()]

demo = gr.Interface(fn=infer, inputs=inputs, outputs=outputs)
demo.launch()