sandz7 committed
Commit 8ecf076 · 1 Parent(s): 7e85e00

placed async into sync

Files changed (1):
  1. app.py +91 -30
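
The commit converts the async chat handler into a synchronous entry point. A minimal sketch of that general pattern, with illustrative names only (fetch_reply is hypothetical, not code from this repository): asyncio.run() drives a coroutine to completion from plain synchronous code, which is what lets a framework that expects ordinary functions call into async work.

import asyncio

async def fetch_reply(message: str) -> str:
    # Hypothetical stand-in for the real async work, e.g. awaiting a
    # streamed chat completion.
    await asyncio.sleep(0)
    return f"echo: {message}"

def fetch_reply_sync(message: str) -> str:
    # asyncio.run() spins up a fresh event loop, runs the coroutine to
    # completion, closes the loop, and hands back the result.
    return asyncio.run(fetch_reply(message))

print(fetch_reply_sync("hi"))  # -> echo: hi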
app.py CHANGED
@@ -9,6 +9,8 @@ from PIL import Image
 import threading
 from openai import OpenAI
 import os
+import asyncio
+from typing import Any
 
 API_KEY = os.getenv('OPEN_AI_API_KEYS')
 
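One standard-library caveat around the new asyncio import (a general property of asyncio.run(), not something this diff addresses): it raises RuntimeError when called from a thread that already has a running event loop. A purely hypothetical defensive wrapper, if that situation had to be handled:

import asyncio
from concurrent.futures import ThreadPoolExecutor

def run_coro(coro):
    # Hypothetical helper, not part of the commit: run a coroutine from
    # sync code even if this thread already hosts a running loop, by
    # handing it to a fresh loop in a worker thread.
    try:
        asyncio.get_running_loop()
    except RuntimeError:
        return asyncio.run(coro)  # no loop running here: the simple path
    with ThreadPoolExecutor(max_workers=1) as pool:
        return pool.submit(asyncio.run, coro).result()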
@@ -110,57 +112,116 @@ def check_cuda_availability():
 
 mode = ""
 
-@spaces.GPU(duration=120)
-async def bot_comms(message, history):
+# @spaces.GPU(duration=120)
+# async def bot_comms(message, history):
+#     global mode
+
+#     if message == "check cuda":
+#         result = check_cuda_availability()
+#         yield result
+#         return
+
+#     if message == "imagery":
+#         mode = message
+#         yield "Imagery On! Type your prompt to make the image 🖼️"
+#         return
+
+#     if message == "chatting":
+#         mode = message
+#         yield "Imagery Off. Ask me any questions. ☄️"
+#         return
+
+#     if mode == "imagery":
+#         print("On imagery\n\n")
+#         image = diffusing(
+#             prompt=message,
+#         )
+#         yield image
+#         return
+
+#     if mode == "chatting" or mode == "":
+#         print("On chatting or no mode.\n\n")
+#         stream = multimodal_and_generation(
+#             message=message,
+#             history=history,
+#         )
+#         gpt_outputs = []
+#         async for chunk in stream:
+#             if chunk.choices[0].delta.content is not None:
+#                 text = chunk.choices[0].delta.content
+#                 gpt_outputs.append(text)
+#             yield "".join(gpt_outputs)
+
+async def bot_comms_async(message, history):
     global mode
 
     if message == "check cuda":
         result = check_cuda_availability()
-        yield result
-        return
+        return [result]
 
     if message == "imagery":
         mode = message
-        yield "Imagery On! Type your prompt to make the image 🖼️"
-        return
+        return ["Imagery On! Type your prompt to make the image 🖼️"]
 
     if message == "chatting":
         mode = message
-        yield "Imagery Off. Ask me any questions. ☄️"
-        return
+        return ["Imagery Off. Ask me any questions. ☄️"]
 
     if mode == "imagery":
         print("On imagery\n\n")
-        image = diffusing(
-            prompt=message,
-        )
-        yield image
-        return
+        image = diffusing(prompt=message)
+        return [image]
 
     if mode == "chatting" or mode == "":
         print("On chatting or no mode.\n\n")
-        stream = multimodal_and_generation(
-            message=message,
-            history=history,
-        )
+        stream = multimodal_and_generation(message=message, history=history)
         gpt_outputs = []
         async for chunk in stream:
             if chunk.choices[0].delta.content is not None:
                 text = chunk.choices[0].delta.content
                 gpt_outputs.append(text)
-            yield "".join(gpt_outputs)
-
-chatbot = gr.Chatbot(height=600, label="Chimera AI")
-chat_input = gr.MultimodalTextbox(interactive=True, file_types=["images"], placeholder="Enter your question or upload an image.", show_label=False)
-with gr.Blocks(fill_height=True) as demo:
-    gr.Markdown(DESCRIPTION)
-    gr.ChatInterface(
-        fn=bot_comms,
-        chatbot=chatbot,
-        fill_height=True,
-        multimodal=True,
-        textbox=chat_input,
+        return ["".join(gpt_outputs)]
+
+def bot_comms(message: str, history: Any):
+    return asyncio.run(bot_comms_async(message, history))
+
+# Define your Gradio UI as usual
+import gradio as gr
+
+with gr.Blocks() as demo:
+    chatbot = gr.Chatbot()
+    msg = gr.Textbox()
+    with gr.Row():
+        submit = gr.Button("Submit")
+
+    def user(message, history):
+        return "", history + [[message, None]]
+
+    def bot_response(message, history):
+        response = bot_comms(message, history)
+        return history + [[message, response]]
+
+    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
+        bot_response, [msg, chatbot], [msg, chatbot]
+    )
+    submit.click(user, [msg, chatbot], [msg, chatbot], queue=False).then(
+        bot_response, [msg, chatbot], [msg, chatbot]
     )
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch(share=True)
+
+# chatbot = gr.Chatbot(height=600, label="Chimera AI")
+# chat_input = gr.MultimodalTextbox(interactive=True, file_types=["images"], placeholder="Enter your question or upload an image.", show_label=False)
+# with gr.Blocks(fill_height=True) as demo:
+#     gr.Markdown(DESCRIPTION)
+#     gr.ChatInterface(
+#         fn=bot_comms,
+#         chatbot=chatbot,
+#         fill_height=True,
+#         multimodal=True,
+#         textbox=chat_input,
+#     )
+
+# if __name__ == "__main__":
+#     demo.launch()
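
A behavioral trade-off worth noting: the old bot_comms was an async generator that yielded partial output as chunks arrived, while the new synchronous bot_comms blocks until the whole reply is assembled and returns it at once. If incremental streaming were still wanted behind a sync interface, one option is to drive an async generator from a private event loop inside a plain generator. A sketch, assuming bot_comms_async had been kept as an async generator (it is not one in this commit):

import asyncio

def sync_stream(agen):
    # Hypothetical bridge, not part of the commit: pull items from an async
    # generator one at a time on a dedicated event loop, yielding each item
    # to synchronous callers. Gradio can stream from a plain generator.
    loop = asyncio.new_event_loop()
    try:
        while True:
            try:
                yield loop.run_until_complete(agen.__anext__())
            except StopAsyncIteration:
                break
    finally:
        loop.run_until_complete(agen.aclose())
        loop.close()

# Usage (names from the diff, streaming variant assumed):
# for partial in sync_stream(bot_comms_async(message, history)):
#     ...update the chatbot with partial...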