sandz7 committed
Commit 0d883c8 · verified · 1 parent: 5f9798d

placed if conditions to change UI on gradio depending on mode

Files changed (1)
  1. app.py +56 -60
app.py CHANGED
@@ -137,29 +137,20 @@ def check_cuda_availability():
 
 mode = ""
 
-# # Handling Gradio UI depending on mode
-# def mode_gradio(text: str):
-#     """
-#     Handles the interface gradio will output depending what mode
-#     it's in. It stores the mode in a dictionary that was placed by
-#     a condition inside bot_comms and switches the behaviour in gradio.
-#     """
-
-#     # Mode active
-#     mode_selected = text
-#     return
+# Handling Gradio UI depending on mode
+def mode_gradio(text: str):
+    """
+    Handles the interface gradio will output depending what mode
+    it's in. It stores the mode in a dictionary that was placed by
+    a condition inside bot_comms and switches the behaviour in gradio.
+    """
+
+    # Mode active
+    mode_selected = text
+    return
 
-# # Image created from diffusing
-# image_created = {}
-
-# # Generate An Image from PIL Image grabbed from diffusing
-# def create_image(image):
-#     """
-#     Generates the image using the Image positions
-#     grabbed from PIL in the diffusing.
-#     """
-#     draw = ImageDraw.Draw(image)
-#     retu
+# Image created from diffusing
+image_created = {}
 
 @spaces.GPU(duration=120)
 def bot_comms(message, history):
@@ -172,26 +163,25 @@ def bot_comms(message, history):
 
     if message == "check cuda":
         logger.debug("Checking CUDA availability.")
-        return check_cuda_availability(), None
+        return check_cuda_availability()
 
     if message == "imagery":
         logger.debug("Switching to imagery mode.")
         mode_manager.set_mode("imagery")
-        return "Imagery On! Type your prompt to make the image 🖼️", None
+        return "Imagery On! Type your prompt to make the image 🖼️"
 
     if message == "chatting":
         logger.debug("Switching to chatting mode.")
        mode_manager.set_mode("chatting")
-        return "Imagery Off. Ask me any questions. ☄️", None
+        return "Imagery Off. Ask me any questions. ☄️"
 
     if mode == "imagery":
         logger.debug("Processing imagery prompt.")
         message = message["text"]
         image = diffusing(message)
         # mode_gradio("imagery")
-        # image_created["Image"] = image
-        # yield ("Generated Image:", image)
-        return "", image
+        image_created["Image"] = image
+        return image
 
     buffer = ""
     gpt_outputs = []
@@ -205,7 +195,7 @@ def bot_comms(message, history):
         # print(f"text recieved inside the stream:\n{text}")
         gpt_outputs.append(text)
         buffer += text
-        yield "".join(gpt_outputs), None
+        yield "".join(gpt_outputs)
 
 
 with gr.Blocks(fill_height=True) as demo:
@@ -213,38 +203,44 @@ with gr.Blocks(fill_height=True) as demo:
 
     chatbot = gr.Chatbot(height=600, label="Chimera AI")
     chat_input = gr.MultimodalTextbox(interactive=True, file_types=["images"], placeholder="Enter your question or upload an image.", show_label=False)
-    image_output = gr.Image(type="pil", label="Generated Image")
+    # image_output = gr.Image(type="pil", label="Generated Image")
 
-    def process_response(message, history):
-        response = bot_comms(message, history)
-        if isinstance(response, tuple) and len(response) == 2:
-            text, image = response
-            return text, image
-        return response, None
-
-    chatbot_output = gr.Chatbot(height=600, label="Chimera AI")
-
-    chat_input.submit(process_response, inputs=[chat_input, chatbot], outputs=[chatbot_output, image_output])
-    # if mode_gradio == "imagery":
-    #     gr.Interface()
-    # Customize chatinterface to handle tuples
-    # def custom_fn(*args, **kwargs):
-    #     result = list(bot_comms(*args, **kwargs))
-    #     output = []
-    #     for item in result:
-    #         if isinstance(item, tuple) and isinstance(item[1], Image.Image):
-    #             output.append((item[0], None))
-    #             output.append((None, item[1]))
-    #         else:
-    #             output.append(item)
-    #     return output
-    # gr.ChatInterface(
-    #     fn=bot_comms,
-    #     chatbot=chatbot,
-    #     fill_height=True,
-    #     multimodal=True,
-    #     textbox=chat_input,
-    # )
+    # def process_response(message, history):
+    #     response = bot_comms(message, history)
+    #     if isinstance(response, tuple) and len(response) == 2:
+    #         text, image = response
+    #         return text, image
+    #     return response, None
+
+    # chatbot_output = gr.Chatbot(height=600, label="Chimera AI")
+
+    # chat_input.submit(process_response, inputs=[chat_input, chatbot], outputs=[chatbot_output, image_output])
+    if image_created:
+        gr.Interface(
+            fn=bot_comms,
+            inputs='text',
+            outputs='image',
+            fill_height=True,
+        )
+    # Customize chatinterface to handle tuples
+    # def custom_fn(*args, **kwargs):
+    #     result = list(bot_comms(*args, **kwargs))
+    #     output = []
+    #     for item in result:
+    #         if isinstance(item, tuple) and isinstance(item[1], Image.Image):
+    #             output.append((item[0], None))
+    #             output.append((None, item[1]))
+    #         else:
+    #             output.append(item)
+    #     return output
+    else:
+        gr.ChatInterface(
+            fn=bot_comms,
+            chatbot=chatbot,
+            fill_height=True,
+            multimodal=True,
+            textbox=chat_input,
+        )
 
 if __name__ == "__main__":
     demo.launch()
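
For reference, the commit's if condition is evaluated at build time: the body of `with gr.Blocks(...)` runs once at startup, and `image_created` is a module-level dict that `bot_comms` only populates later, at request time, so the `else` branch (the `gr.ChatInterface`) is the interface that renders on launch. Below is a minimal, self-contained sketch of this build-time branching pattern; `imagery_mode`, `chat_fn`, and `make_image` are hypothetical stand-ins for the app's `image_created`, `bot_comms`, and `diffusing`, not the real implementations.

import gradio as gr
from PIL import Image

# Hypothetical flag standing in for the `if image_created:` truthiness check.
imagery_mode = False

def chat_fn(message, history):
    # Echo stub in place of the streaming LLM behind bot_comms().
    return f"echo: {message}"

def make_image(prompt):
    # Stub in place of diffusing(); returns a solid-colour PIL image.
    return Image.new("RGB", (256, 256), color="steelblue")

# The branch runs once, when the module builds the UI, so the chosen
# layout is fixed for the lifetime of the app; flipping the flag at
# request time has no effect on an already-launched interface.
if imagery_mode:
    demo = gr.Interface(fn=make_image, inputs="text", outputs="image")
else:
    demo = gr.ChatInterface(fn=chat_fn)

if __name__ == "__main__":
    demo.launch()

Switching the UI per message rather than per launch would need a different mechanism, for example having the callback return updates such as gr.update(visible=...) to toggle components inside a single Blocks layout.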