sandz7 commited on
Commit
a338474
Β·
verified Β·
1 Parent(s): 6eabca8

Placed the same Gradio format as Osiris

Browse files
Files changed (1) hide show
  1. app.py +123 -60
app.py CHANGED
@@ -120,25 +120,53 @@ def multimodal_and_generation(message, history):
120
  return streamer
121
 
122
 
123
- def diffusing(prompt,
124
- history):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
125
  """
126
- Generates an image using Stable Diffusion based on the input prompt.
 
127
  """
128
-
 
 
 
129
  image_base = base(
130
  prompt=prompt,
131
- num_inference_steps=40,
132
- denoising_end=0.8,
133
- output_type="latent",
134
  ).images
135
  image = refiner(
136
  prompt=prompt,
137
- num_inference_steps=40,
138
- denoising_start=0.8,
139
  image=image_base
140
  ).images[0]
141
- print(f"\n\nImage has been diffused:\n {image}\n\n")
142
  return image
143
 
144
  def check_cuda_availability():
@@ -201,57 +229,92 @@ def bot_comms(message, history):
201
  chatbot = gr.Chatbot(height=600, label="Chimera AI")
202
  chat_input = gr.MultimodalTextbox(interactive=True, file_types=["images"], placeholder="Enter your question or upload an image.", show_label=False)
203
 
204
- with gr.Blocks(fill_height=True) as demo:
205
- gr.Markdown(DESCRIPTION)
206
 
207
- # image_output = gr.Image(type="pil", label="Generated Image")
208
 
209
- # def process_response(message, history):
210
- # response = bot_comms(message, history)
211
- # if isinstance(response, tuple) and len(response) == 2:
212
- # text, image = response
213
- # return text, image
214
- # return response, None
215
-
216
- # chatbot_output = gr.Chatbot(height=600, label="Chimera AI")
217
-
218
- # chat_input.submit(process_response, inputs=[chat_input, chatbot], outputs=[chatbot_output, image_output])
219
- if mode_manager.get_mode() == "imagery":
220
- # # Ensure's a unique block ID for image output
221
- gr.Interface(
222
- fn=diffusing,
223
- inputs="text",
224
- outputs="image",
225
- fill_height=True,
226
- )
227
- # with gr.Blocks():
228
- # gr.Interface(
229
- # fn=diffusing,
230
- # inputs='text',
231
- # outputs='image',
232
- # fill_height=True,
233
- # )
234
- # # Customize chatinterface to handle tuples
235
- # # def custom_fn(*args, **kwargs):
236
- # # result = list(bot_comms(*args, **kwargs))
237
- # # output = []
238
- # # for item in result:
239
- # # if isinstance(item, tuple) and isinstance(item[1], Image.Image):
240
- # # output.append((item[0], None))
241
- # # output.append((None, item[1]))
242
- # # else:
243
- # # output.append(item)
244
- # # return output
245
- # else:
246
- # # Unique block ID for chat interface
247
- # with gr.Blocks():
248
- # gr.ChatInterface(
249
- # fn=bot_comms,
250
- # chatbot=chatbot,
251
- # fill_height=True,
252
- # multimodal=True,
253
- # textbox=chat_input,
254
- # )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
255
 
256
  if __name__ == "__main__":
257
- demo.launch()
 
120
  return streamer
121
 
122
 
123
+ # def diffusing(prompt,
124
+ # history):
125
+ # """
126
+ # Generates an image using Stable Diffusion based on the input prompt.
127
+ # """
128
+
129
+ # image_base = base(
130
+ # prompt=prompt,
131
+ # num_inference_steps=40,
132
+ # denoising_end=0.8,
133
+ # output_type="latent",
134
+ # ).images
135
+ # image = refiner(
136
+ # prompt=prompt,
137
+ # num_inference_steps=40,
138
+ # denoising_start=0.8,
139
+ # image=image_base
140
+ # ).images[0]
141
+ # print(f"\n\nImage has been diffused:\n {image}\n\n")
142
+ # return image
143
+
144
+ # function to take input and generate text tokens
145
+ @spaces.GPU(duration=120)
146
+ def diffusing(prompt: str,
147
+ n_steps: int,
148
+ high_noise_frac: float):
149
  """
150
+ Takes input, passes it into the pipeline,
151
+ get the top 5 scores, and output those scores into images
152
  """
153
+
154
+ # n_steps = int(n_steps)
155
+
156
+ # Generate image based on text
157
  image_base = base(
158
  prompt=prompt,
159
+ num_inference_steps=n_steps,
160
+ denoising_end=high_noise_frac,
161
+ output_type="latent"
162
  ).images
163
  image = refiner(
164
  prompt=prompt,
165
+ num_inference_steps=n_steps,
166
+ denoising_start=high_noise_frac,
167
  image=image_base
168
  ).images[0]
169
+
170
  return image
171
 
172
  def check_cuda_availability():
 
229
  chatbot = gr.Chatbot(height=600, label="Chimera AI")
230
  chat_input = gr.MultimodalTextbox(interactive=True, file_types=["images"], placeholder="Enter your question or upload an image.", show_label=False)
231
 
232
+ # with gr.Blocks(fill_height=True) as demo:
233
+ # gr.Markdown(DESCRIPTION)
234
 
235
+ # # image_output = gr.Image(type="pil", label="Generated Image")
236
 
237
+ # # def process_response(message, history):
238
+ # # response = bot_comms(message, history)
239
+ # # if isinstance(response, tuple) and len(response) == 2:
240
+ # # text, image = response
241
+ # # return text, image
242
+ # # return response, None
243
+
244
+ # # chatbot_output = gr.Chatbot(height=600, label="Chimera AI")
245
+
246
+ # # chat_input.submit(process_response, inputs=[chat_input, chatbot], outputs=[chatbot_output, image_output])
247
+ # if mode_manager.get_mode() == "imagery":
248
+ # # # Ensure's a unique block ID for image output
249
+ # gr.Interface(
250
+ # fn=diffusing,
251
+ # inputs="text",
252
+ # outputs="image",
253
+ # fill_height=True,
254
+ # )
255
+ # # with gr.Blocks():
256
+ # # gr.Interface(
257
+ # # fn=diffusing,
258
+ # # inputs='text',
259
+ # # outputs='image',
260
+ # # fill_height=True,
261
+ # # )
262
+ # # # Customize chatinterface to handle tuples
263
+ # # # def custom_fn(*args, **kwargs):
264
+ # # # result = list(bot_comms(*args, **kwargs))
265
+ # # # output = []
266
+ # # # for item in result:
267
+ # # # if isinstance(item, tuple) and isinstance(item[1], Image.Image):
268
+ # # # output.append((item[0], None))
269
+ # # # output.append((None, item[1]))
270
+ # # # else:
271
+ # # # output.append(item)
272
+ # # # return output
273
+ # # else:
274
+ # # # Unique block ID for chat interface
275
+ # # with gr.Blocks():
276
+ # # gr.ChatInterface(
277
+ # # fn=bot_comms,
278
+ # # chatbot=chatbot,
279
+ # # fill_height=True,
280
+ # # multimodal=True,
281
+ # # textbox=chat_input,
282
+ # # )
283
+
284
+ # if __name__ == "__main__":
285
+ # demo.launch()
286
+
287
+ with gr.Blocks(fill_height=True) as demo:
288
+ gr.Markdown(DESCRIPTION)
289
+ gr.Interface(
290
+ fn=osiris,
291
+ inputs="text",
292
+ outputs="image",
293
+ fill_height=True,
294
+ additional_inputs_accordion=gr.Accordion(label="βš™οΈ Parameters", open=False, render=False),
295
+ additional_inputs=[
296
+ gr.Slider(minimum=20,
297
+ maximum=100,
298
+ step=1,
299
+ value=40,
300
+ label="Number of Inference Steps",
301
+ render=False),
302
+ gr.Slider(minimum=0.0,
303
+ maximum=1.0,
304
+ step=0.1,
305
+ value=0.8,
306
+ label="High Noise Fraction",
307
+ render=False),
308
+ ],
309
+ examples=[
310
+ ["A sprawling cyberpunk metropolis at dusk, with towering skyscrapers adorned with neon signs, holographic billboards, and flying cars weaving through the sky. On a crowded street corner, a cybernetically enhanced street artist creates mesmerizing light sculptures with their augmented reality gloves. Rain glistens on the bustling sidewalks as pedestrians with colorful umbrellas rush past."],
311
+ ["A mystical enchanted forest bathed in twilight, where bioluminescent plants cast an ethereal glow. A crystal-clear waterfall cascades into a shimmering pool, surrounded by ancient trees with twisted roots. A lone elf archer, dressed in elegant green and gold attire, stands on a moss-covered rock, her bow drawn as she watches over the tranquil scene. Ethereal fairies with delicate wings flutter around, leaving trails of sparkling dust in the air."],
312
+ ["An elaborate steampunk dirigible floating gracefully above a cloud-covered landscape. The airship, with its brass and copper gears, massive steam-powered engines, and ornate Victorian design, cruises past a golden sunset. A distinguished gentleman in a top hat and goggles stands on the observation deck, holding a brass spyglass to his eye as he surveys the horizon. Passengers in vintage attire marvel at the view."],
313
+ ["A charming, snow-covered log cabin nestled in the heart of a tranquil mountain range during a starry winter night. Smoke gently curls from the stone chimney, and warm light spills from the windows, illuminating the cozy interior. A young woman in a thick woolen coat and a fur-lined hat stands by the front door, holding a lantern that casts a warm glow on the snow. Pine trees, heavy with snow, frame the scene, while the Northern Lights dance across the sky."],
314
+ ["A vibrant underwater kingdom where a colorful coral reef teems with marine life. Schools of iridescent fish swim through the crystal-clear waters, and a sunken pirate ship, encrusted with barnacles and treasure chests, rests on the sandy seabed. A curious mermaid with flowing turquoise hair and a shimmering silver tail explores the depths, holding a glowing pearl in her hand. Playful dolphins swim around her, and a wise old sea turtle watches from a nearby rock."]
315
+ ],
316
+ cache_examples=False
317
+ )
318
 
319
  if __name__ == "__main__":
320
+ demo.launch()