haohoo committed on
Commit
a028cde
1 Parent(s): 07a869a
Files changed (1)
  1. app.py +335 -0
app.py ADDED
@@ -0,0 +1,335 @@
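+ # Gradio demo for Azure OpenAI: a GPT (Completions) playground, a ChatGPT tab,
+ # DALL·E 2 image generation, and a voice chat built on Azure Speech STT/TTS.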
+ import os, time, wave
+ import openai
+ import gradio as gr
+ import requests
+ from pydub import AudioSegment as am
+ from xml.etree import ElementTree
+
+ # Azure OpenAI endpoint and credentials (the key is read from the environment)
+ api_base = "https://mvp-azureopenai.openai.azure.com/"
+ api_key = os.getenv("OPENAI_API_KEY")
+
+ openai.api_type = "azure"
+ openai.api_base = api_base
+ openai.api_version = "2023-03-15-preview"
+ openai.api_key = api_key
+
+ # Conversation state shared by the tabs below
+ messages_gpt = []
+ messages_chat = [
+     {"role": "system", "content": "You are an AI assistant that helps people find information."},
+ ]
+ prompts = ""
+ response_walle = []
+ messages_vchat = [
+     {"role": "system", "content": "You are an AI assistant that helps people find information and just respond with SSML."},
+ ]
+
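+ # Build the UI: a single gr.Blocks page with one tab per demo.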
+ with gr.Blocks() as page:
+     with gr.Tabs():
+         with gr.TabItem("GPT Playground"):
+             ui_chatbot_gpt = gr.Chatbot(label="GPT Playground:")
+             with gr.Row():
+                 with gr.Column(scale=0.9):
+                     ui_prompt_gpt = gr.Textbox(placeholder="Please enter your prompt here.", show_label=False).style(container=False)
+                 with gr.Column(scale=0.1, min_width=100):
+                     ui_clear_gpt = gr.Button("Clear Input")
+             with gr.Accordion("Expand to config parameters:", open=False):
+                 gr.Markdown("Adjust the Completions parameters below.")
+                 with gr.Row():
+                     ui_temp_gpt = gr.Slider(0.1, 1.0, 0.9, step=0.1, label="Temperature", interactive=True)
+                     ui_max_tokens_gpt = gr.Slider(100, 4000, 1000, step=100, label="Max Tokens", interactive=True)
+                     ui_top_p_gpt = gr.Slider(0.1, 1.0, 0.5, step=0.1, label="Top P", interactive=True)
+             with gr.Accordion("Select radio button to see detail:", open=False):
+                 ui_res_radio_gpt = gr.Radio(["Response from OpenAI Model", "Prompt messages history"], label="Show OpenAI response:", interactive=True)
+                 ui_response_gpt = gr.TextArea(show_label=False, interactive=False).style(container=False)
+
+             def get_parameters_gpt(slider_1, slider_2, slider_3):
+                 ui_temp_gpt.value = slider_1
+                 ui_max_tokens_gpt.value = slider_2
+                 ui_top_p_gpt.value = slider_3
+                 print("Log - Updated GPT parameters: Temperature=", ui_temp_gpt.value,
+                       " Max Tokens=", ui_max_tokens_gpt.value, " Top_P=", ui_top_p_gpt.value)
+
+             def select_response_gpt(radio):
+                 if radio == "Response from OpenAI Model":
+                     return gr.update(value=gpt_x)
+                 else:
+                     return gr.update(value=messages_gpt)
+
+             def user_gpt(user_message, history):
+                 global prompts
+                 prompts = user_message
+                 messages_gpt.append(prompts)
+                 return "", history + [[user_message, None]]
+
+             def bot_gpt(history):
+                 global gpt_x
+                 gpt_x = openai.Completion.create(
+                     engine="mvp-text-davinci-003",
+                     prompt=prompts,
+                     temperature=ui_temp_gpt.value,
+                     max_tokens=ui_max_tokens_gpt.value,
+                     top_p=ui_top_p_gpt.value,
+                     frequency_penalty=0,
+                     presence_penalty=0,
+                     best_of=1,
+                     stop=None
+                 )
+                 gpt_reply = gpt_x.choices[0].text
+                 messages_gpt.append(gpt_reply)
+                 history[-1][1] = gpt_reply
+                 return history
+
+             ui_temp_gpt.change(get_parameters_gpt, [ui_temp_gpt, ui_max_tokens_gpt, ui_top_p_gpt])
+             ui_max_tokens_gpt.change(get_parameters_gpt, [ui_temp_gpt, ui_max_tokens_gpt, ui_top_p_gpt])
+             ui_top_p_gpt.change(get_parameters_gpt, [ui_temp_gpt, ui_max_tokens_gpt, ui_top_p_gpt])
+
+             ui_prompt_gpt.submit(user_gpt, [ui_prompt_gpt, ui_chatbot_gpt], [ui_prompt_gpt, ui_chatbot_gpt], queue=False).then(
+                 bot_gpt, ui_chatbot_gpt, ui_chatbot_gpt
+             )
+             ui_clear_gpt.click(lambda: None, None, ui_chatbot_gpt, queue=False)
+             ui_res_radio_gpt.change(select_response_gpt, ui_res_radio_gpt, ui_response_gpt)
+
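+         # ChatGPT tab: multi-turn chat; the running history in messages_chat
+         # (including the editable system prompt) is sent to the ChatCompletion
+         # endpoint on every turn.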
+         with gr.TabItem("ChatGPT"):
+             ui_chatbot_chat = gr.Chatbot(label="ChatGPT:")
+             with gr.Row():
+                 with gr.Column(scale=0.9):
+                     ui_prompt_chat = gr.Textbox(placeholder="Please enter your prompt here.", show_label=False).style(container=False)
+                 with gr.Column(scale=0.1, min_width=100):
+                     ui_clear_chat = gr.Button("Clear Chat")
+             with gr.Blocks():
+                 with gr.Accordion("Expand to config parameters:", open=False):
+                     gr.Markdown("Here is the default system prompt; you can change it to your own.")
+                     ui_prompt_sys = gr.Textbox(value="You are an AI assistant that helps people find information.", show_label=False, interactive=True).style(container=False)
+                     with gr.Row():
+                         ui_temp_chat = gr.Slider(0.1, 1.0, 0.7, step=0.1, label="Temperature", interactive=True)
+                         ui_max_tokens_chat = gr.Slider(100, 8000, 800, step=100, label="Max Tokens", interactive=True)
+                         ui_top_p_chat = gr.Slider(0.05, 1.0, 0.9, step=0.1, label="Top P", interactive=True)
+             with gr.Accordion("Select radio button to see detail:", open=False):
+                 ui_res_radio_chat = gr.Radio(["Response from OpenAI Model", "Prompt messages history"], label="Show OpenAI response:", interactive=True)
+                 ui_response_chat = gr.TextArea(show_label=False, interactive=False).style(container=False)
+
+             def get_parameters_chat(slider_1, slider_2, slider_3):
+                 ui_temp_chat.value = slider_1
+                 ui_max_tokens_chat.value = slider_2
+                 ui_top_p_chat.value = slider_3
+                 print("Log - Updated chatGPT parameters: Temperature=", ui_temp_chat.value,
+                       " Max Tokens=", ui_max_tokens_chat.value, " Top_P=", ui_top_p_chat.value)
+
+             def select_response_chat(radio):
+                 if radio == "Response from OpenAI Model":
+                     return gr.update(value=chat_x)
+                 else:
+                     return gr.update(value=messages_chat)
+
+             def user_chat(user_message, history):
+                 messages_chat.append({"role": "user", "content": user_message})
+                 return "", history + [[user_message, None]]
+
+             def bot_chat(history):
+                 global chat_x
+                 chat_x = openai.ChatCompletion.create(
+                     engine="mvp-gpt-35-turbo", messages=messages_chat,
+                     temperature=ui_temp_chat.value,
+                     max_tokens=ui_max_tokens_chat.value,
+                     top_p=ui_top_p_chat.value,
+                     frequency_penalty=0,
+                     presence_penalty=0,
+                     stop=None
+                 )
+
+                 ui_response_chat.value = chat_x
+                 print(ui_response_chat.value)
+
+                 chat_reply = chat_x.choices[0].message.content
+                 messages_chat.append({"role": "assistant", "content": chat_reply})
+
+                 history[-1][1] = chat_reply
+                 return history
+
+             def reset_sys(sysmsg):
+                 # Restart the conversation with a fresh system prompt
+                 global messages_chat
+                 messages_chat = [
+                     {"role": "system", "content": sysmsg},
+                 ]
+
+             ui_res_radio_chat.change(select_response_chat, ui_res_radio_chat, ui_response_chat)
+             ui_temp_chat.change(get_parameters_chat, [ui_temp_chat, ui_max_tokens_chat, ui_top_p_chat])
+             ui_max_tokens_chat.change(get_parameters_chat, [ui_temp_chat, ui_max_tokens_chat, ui_top_p_chat])
+             ui_top_p_chat.change(get_parameters_chat, [ui_temp_chat, ui_max_tokens_chat, ui_top_p_chat])
+             ui_prompt_sys.submit(reset_sys, ui_prompt_sys)
+             ui_prompt_chat.submit(user_chat, [ui_prompt_chat, ui_chatbot_chat], [ui_prompt_chat, ui_chatbot_chat], queue=False).then(
+                 bot_chat, ui_chatbot_chat, ui_chatbot_chat
+             )
+             ui_clear_chat.click(lambda: None, None, ui_chatbot_chat, queue=False).then(reset_sys, ui_prompt_sys)
+
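+         # DALL·E 2 tab: posts the prompt to the Azure OpenAI text-to-image
+         # endpoint and polls until the generated image URL is available.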
+         with gr.TabItem("DALL·E 2"):
+             ui_prompt_walle = gr.Textbox(placeholder="Please enter your prompt here to generate an image.", show_label=False).style(container=False)
+             ui_image_walle = gr.Image()
+             with gr.Accordion("Expand to see detail:", open=False):
+                 ui_response_walle = gr.TextArea(show_label=False, interactive=False).style(container=False)
+
+             def get_image_walle(prompt_walle):
+                 global response_walle
+                 walle_api_version = '2022-08-03-preview'
+                 url = "{}dalle/text-to-image?api-version={}".format(api_base, walle_api_version)
+                 headers = { "api-key": api_key, "Content-Type": "application/json" }
+                 body = {
+                     "caption": prompt_walle,
+                     "resolution": "1024x1024"
+                 }
+                 # Submit the generation job, then poll the Operation-Location URL until it succeeds
+                 submission = requests.post(url, headers=headers, json=body)
+                 response_walle.append(submission.json())
+                 print("Log - DALL·E status: {}".format(submission.json()))
+                 operation_location = submission.headers['Operation-Location']
+                 retry_after = submission.headers['Retry-after']
+                 status = ""
+                 while status != "Succeeded":
+                     time.sleep(int(retry_after))
+                     response = requests.get(operation_location, headers=headers)
+                     response_walle.append(response.json())
+                     print("Log - DALL·E status: {}".format(response.json()))
+                     status = response.json()['status']
+                 image_url_walle = response.json()['result']['contentUrl']
+                 return gr.update(value=image_url_walle)
+
+             def get_response_walle():
+                 global response_walle
+                 return gr.update(value=response_walle)
+
+             ui_prompt_walle.submit(get_image_walle, ui_prompt_walle, ui_image_walle, queue=False).then(get_response_walle, None, ui_response_walle)
+
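+         # VoiceChat tab: microphone audio -> Azure Speech STT -> ChatCompletion
+         # (prompted to reply in SSML) -> Azure Speech TTS -> audio playback.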
+         with gr.TabItem("VoiceChat"):
+             with gr.Row():
+                 with gr.Column():
+                     with gr.Accordion("Expand to configure the system prompt:", open=False):
+                         ui_prompt_sys_vchat = gr.Textbox(value="You are an AI assistant that helps people find information and just respond with SSML.", show_label=False, interactive=True).style(container=False)
+                     ui_voice_inc_vchat = gr.Audio(source="microphone", type="filepath")
+                     ui_voice_out_vchat = gr.Audio(value=None, type="filepath", interactive=False).style(container=False)
+                     with gr.Accordion("Expand to config parameters:", open=False):
+                         with gr.Row():
+                             ui_temp_vchat = gr.Slider(0.1, 1.0, 0.7, step=0.1, label="Temperature", interactive=True)
+                             ui_max_tokens_vchat = gr.Slider(100, 8000, 800, step=100, label="Max Tokens", interactive=True)
+                             ui_top_p_vchat = gr.Slider(0.05, 1.0, 0.9, step=0.1, label="Top P", interactive=True)
+                 with gr.Column():
+                     ui_chatbot_vchat = gr.Chatbot(label="Voice to ChatGPT:")
+                     with gr.Accordion("Select radio button to see detail:", open=False):
+                         ui_res_radio_vchat = gr.Radio(["Response from OpenAI Model", "Prompt messages history"], label="Show OpenAI response:", interactive=True)
+                         ui_response_vchat = gr.TextArea(show_label=False, interactive=False).style(container=False)
+
+             def get_parameters_vchat(slider_1, slider_2, slider_3):
+                 ui_temp_vchat.value = slider_1
+                 ui_max_tokens_vchat.value = slider_2
+                 ui_top_p_vchat.value = slider_3
+                 print("Log - Updated VoiceChat parameters: Temperature=", ui_temp_vchat.value,
+                       " Max Tokens=", ui_max_tokens_vchat.value, " Top_P=", ui_top_p_vchat.value)
+
+             def select_response_vchat(radio):
+                 if radio == "Response from OpenAI Model":
+                     return gr.update(value=vchat_x)
+                 else:
+                     return gr.update(value=messages_vchat)
+
+             def speech_to_text(voice_message):
+                 # Downsample the recorded audio to 16 kHz for the Speech service
+                 voice_wav = am.from_file(voice_message, format='wav')
+                 voice_wav = voice_wav.set_frame_rate(16000)
+                 voice_wav.export(voice_message, format='wav')
+                 # STT via the Azure Speech REST API
+                 OASK_Speech = os.getenv("OASK_Speech")
+                 service_region = "westus"
+
+                 base_url = "https://" + service_region + ".stt.speech.microsoft.com/"
+                 path = 'speech/recognition/conversation/cognitiveservices/v1'
+                 constructed_url = base_url + path
+                 params = {
+                     'language': 'zh-CN',
+                     'format': 'detailed'
+                 }
+                 headers = {
+                     'Ocp-Apim-Subscription-Key': OASK_Speech,
+                     'Content-Type': 'audio/wav; codecs=audio/pcm; samplerate=16000',
+                     'Accept': 'application/json;text/xml'
+                 }
+                 body = open(voice_message, 'rb').read()
+                 response = requests.post(constructed_url, params=params, headers=headers, data=body)
+                 if response.status_code == 200:
+                     rs = response.json()
+                     if rs != '':
+                         print(rs)
+                 else:
+                     print("\nLog - Status code: " + str(response.status_code) + "\nSomething went wrong. Check your subscription key and headers.\n")
+                     print("Reason: " + str(response.reason) + "\n")
+
+                 stt_text = rs['DisplayText']
+                 return stt_text
+
+             def text_to_speech():
+                 OASK_Speech = os.getenv("OASK_Speech")
+                 service_region = "westus"
+
+                 base_url = "https://" + service_region + ".tts.speech.microsoft.com/"
+                 path = 'cognitiveservices/v1'
+                 constructed_url = base_url + path
+                 headers = {
+                     'Ocp-Apim-Subscription-Key': OASK_Speech,
+                     'Content-Type': 'application/ssml+xml',
+                     'X-Microsoft-OutputFormat': 'riff-24khz-16bit-mono-pcm',
+                     'User-Agent': 'Voice ChatGPT'
+                 }
+                 # Wrap the ChatGPT reply in SSML and request a zh-CN neural voice
+                 xml_body = ElementTree.Element('speak', version='1.0')
+                 xml_body.set('{http://www.w3.org/XML/1998/namespace}lang', 'zh-cn')
+                 voice = ElementTree.SubElement(xml_body, 'voice')
+                 voice.set('{http://www.w3.org/XML/1998/namespace}lang', 'zh-cn')
+                 voice.set('name', 'zh-CN-XiaoxiaoNeural')
+                 voice.text = vchat_reply
+                 body = ElementTree.tostring(xml_body)
+                 response = requests.post(constructed_url, headers=headers, data=body)
+                 if response.status_code == 200:
+                     with open('chatgpt.wav', 'wb') as audio:
+                         audio.write(response.content)
+                     print("\nStatus code: " + str(response.status_code) + "\nYour TTS is ready for playback.\n")
+                 else:
+                     print("\nStatus code: " + str(response.status_code) + "\nSomething went wrong. Check your subscription key and headers.\n")
+                     print("Reason: " + str(response.reason) + "\n")
+
+                 tts_file = "chatgpt.wav"
+                 return gr.update(value=tts_file, interactive=True)
+
+             def user_vchat(user_voice_message, history):
+                 user_message = speech_to_text(user_voice_message)
+                 messages_vchat.append({"role": "user", "content": user_message})
+                 return history + [[user_message, None]]
+
+             def bot_vchat(history):
+                 global vchat_x, vchat_reply
+                 vchat_x = openai.ChatCompletion.create(
+                     engine="mvp-gpt-35-turbo", messages=messages_vchat,
+                     temperature=ui_temp_vchat.value,
+                     max_tokens=ui_max_tokens_vchat.value,
+                     top_p=ui_top_p_vchat.value,
+                     frequency_penalty=0,
+                     presence_penalty=0,
+                     stop=None
+                 )
+                 ui_response_vchat.value = vchat_x
+                 print(ui_response_vchat.value)
+                 vchat_reply = vchat_x.choices[0].message.content
+                 messages_vchat.append({"role": "assistant", "content": vchat_reply})
+                 history[-1][1] = vchat_reply
+                 return history
+
+             ui_res_radio_vchat.change(select_response_vchat, ui_res_radio_vchat, ui_response_vchat)
+             ui_temp_vchat.change(get_parameters_vchat, [ui_temp_vchat, ui_max_tokens_vchat, ui_top_p_vchat])
+             ui_max_tokens_vchat.change(get_parameters_vchat, [ui_temp_vchat, ui_max_tokens_vchat, ui_top_p_vchat])
+             ui_top_p_vchat.change(get_parameters_vchat, [ui_temp_vchat, ui_max_tokens_vchat, ui_top_p_vchat])
+             ui_voice_inc_vchat.change(user_vchat, [ui_voice_inc_vchat, ui_chatbot_vchat], ui_chatbot_vchat, queue=False).then(
+                 bot_vchat, ui_chatbot_vchat, ui_chatbot_vchat, queue=False).then(text_to_speech, None, ui_voice_out_vchat)
+
+
+ page.launch(share=False)