fffiloni committed on
Commit cf0d196
1 Parent(s): c7d5561

Update app.py

Files changed (1)
  1. app.py +18 -10
app.py CHANGED
@@ -3,24 +3,25 @@ import gradio as gr

from PIL import Image
import os
+
+from share_btn import community_icon_html, loading_icon_html, share_js
+
token = os.environ.get('HF_TOKEN')
whisper_to_gpt = gr.Blocks.load(name="spaces/fffiloni/whisper-to-chatGPT")
tts = gr.Interface.load(name="spaces/Flux9665/IMS-Toucan")
talking_face = gr.Blocks.load(name="spaces/fffiloni/one-shot-talking-face", api_key=token)

-
-
def infer(audio):
-    gpt_response = whisper_to_gpt(audio, "translate", fn_index=0)
+    whisper_to_gpt_response = whisper_to_gpt(audio, "translate", fn_index=0)
    #print(gpt_response)
-    audio_response = tts(gpt_response[1], "English Text", "English Accent", "English Speaker's Voice", fn_index=0)
+    audio_response = tts(whisper_to_gpt_response[1], "English Text", "English Accent", "English Speaker's Voice", fn_index=0)
    #image = Image.open(r"wise_woman_portrait.png")
    portrait_link = talking_face("wise_woman_portrait.png", audio_response, fn_index=0)

-
    #portrait_response = requests.get(portrait_link, headers={'Authorization': 'Bearer ' + token})
    #print(portrait_response.text)
-    return portrait_link
+
+    return whisper_to_gpt_response[0], portrait_link, gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)

title = """
<div style="text-align: center; max-width: 500px; margin: 0 auto;">
@@ -80,15 +81,22 @@ with gr.Blocks(css=css) as demo:

        gr.HTML(title)

-        gpt_response = gr.Video(label="Talking Portrait response")
+        gpt_response = gr.Video(label="Talking Portrait response", elem_id="video_out")

        with gr.Column(elem_id="col-container-2"):

-            record_input = gr.Audio(source="microphone",type="filepath", label="Audio input", show_label=True,elem_id="record_btn")
+            record_input = gr.Audio(source="microphone",type="filepath", label="Audio input", show_label=True, elem_id="record_btn")
+            whisper_tr = gr.Textbox(label="whisper english translation", elem_id="text_inp")

            send_btn = gr.Button("Send my request !")
-
-            send_btn.click(infer, inputs=[record_input], outputs=[gpt_response])
+
+            with gr.Group(elem_id="share-btn-container"):
+                community_icon = gr.HTML(community_icon_html, visible=False)
+                loading_icon = gr.HTML(loading_icon_html, visible=False)
+                share_button = gr.Button("Share to community", elem_id="share-btn", visible=False)
+
+            send_btn.click(infer, inputs=[record_input], outputs=[whisper_tr, gpt_response, share_button, community_icon, loading_icon])
+            share_button.click(None, [], [], _js=share_js)

demo.queue(max_size=32, concurrency_count=20).launch(debug=True)
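
For context on the wiring this commit introduces: a Gradio 3.x click handler can return one value per declared output, matched positionally, and gr.update(visible=True) is the stock way to reveal components that were created with visible=False. That is why infer now returns five values for whisper_tr, gpt_response, share_button, community_icon, and loading_icon, and why the share controls only appear once a result exists. A minimal self-contained sketch of the pattern, assuming Gradio 3.x (the component names below are illustrative, not taken from the commit):

import gradio as gr

def infer(text):
    # One return value per output declared in .click();
    # gr.update(visible=True) overrides the visible=False set at creation.
    return text.upper(), gr.update(visible=True)

with gr.Blocks() as demo:
    inp = gr.Textbox(label="Input")
    out = gr.Textbox(label="Output")
    share_btn = gr.Button("Share to community", visible=False)  # hidden until first result
    send_btn = gr.Button("Send")
    # Outputs are matched positionally to infer's return tuple.
    send_btn.click(infer, inputs=[inp], outputs=[out, share_btn])

demo.launch()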