KingNish committed
Commit cac40c5 · verified · 1 Parent(s): b1301a2
Files changed (1)
  1. app.py +259 -117
app.py CHANGED
@@ -4,54 +4,173 @@ import random
  import spaces
  import torch
  import time
  from diffusers import DiffusionPipeline, AutoencoderTiny
  from diffusers.models.attention_processor import AttnProcessor2_0
  from custom_pipeline import FluxWithCFGPipeline
 
  torch.backends.cuda.matmul.allow_tf32 = True
 
- # Constants
  MAX_SEED = np.iinfo(np.int32).max
- MAX_IMAGE_SIZE = 2048
  DEFAULT_WIDTH = 1024
  DEFAULT_HEIGHT = 1024
- DEFAULT_INFERENCE_STEPS = 1
 
- # Device and model setup
  dtype = torch.float16
- pipe = FluxWithCFGPipeline.from_pretrained(
-     "black-forest-labs/FLUX.1-schnell", torch_dtype=dtype
- )
- pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype)
- pipe.to("cuda")
- pipe.load_lora_weights('hugovntr/flux-schnell-realism', weight_name='schnell-realism_v2.3.safetensors', adapter_name="better")
- pipe.set_adapters(["better"], adapter_weights=[1.0])
- pipe.fuse_lora(adapter_name=["better"], lora_scale=1.0)
- pipe.unload_lora_weights()
-
- torch.cuda.empty_cache()
-
- # Inference function
- @spaces.GPU(duration=25)
- def generate_image(prompt, seed=24, width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT, randomize_seed=False, num_inference_steps=2, progress=gr.Progress(track_tqdm=True)):
      if randomize_seed:
          seed = random.randint(0, MAX_SEED)
-     generator = torch.Generator().manual_seed(int(float(seed)))
 
-     start_time = time.time()
 
-     # Only generate the last image in the sequence
-     img = pipe.generate_images(
-         prompt=prompt,
-         width=width,
-         height=height,
-         num_inference_steps=num_inference_steps,
-         generator=generator
-     )
-     latency = f"Latency: {(time.time()-start_time):.2f} seconds"
-     return img, seed, latency
 
- # Example prompts
  examples = [
      "a tiny astronaut hatching from an egg on the moon",
      "a cute white cat holding a sign that says hello world",
@@ -60,107 +179,130 @@ examples = [
      "photo of a woman on the beach, shot from above. She is facing the sea, while wearing a white dress. She has long blonde hair",
      "Selfie photo of a wizard with long beard and purple robes, he is apparently in the middle of Tokyo. Probably taken from a phone.",
      "Photo of a young woman with long, wavy brown hair tied in a bun and glasses. She has a fair complexion and is wearing subtle makeup, emphasizing her eyes and lips. She is dressed in a black top. The background appears to be an urban setting with a building facade, and the sunlight casts a warm glow on her face.",
  ]
 
  # --- Gradio UI ---
- with gr.Blocks() as demo:
-     with gr.Column(elem_id="app-container"):
-         gr.Markdown("# 🎨 Realtime FLUX Image Generator")
-         gr.Markdown("Generate stunning images in real-time with Modified Flux.Schnell pipeline.")
-         gr.Markdown("<span style='color: red;'>Note: Sometimes it stucks or stops generating images (I don't know why). In that situation just refresh the site.</span>")
-
-         with gr.Row():
-             with gr.Column(scale=2.5):
-                 result = gr.Image(label="Generated Image", show_label=False, interactive=False)
-             with gr.Column(scale=1):
-                 prompt = gr.Text(
-                     label="Prompt",
-                     placeholder="Describe the image you want to generate...",
-                     lines=3,
-                     show_label=False,
-                     container=False,
-                 )
-                 generateBtn = gr.Button("🖼️ Generate Image")
-                 enhanceBtn = gr.Button("🚀 Enhance Image")
-
-                 with gr.Column("Advanced Options"):
-                     with gr.Row():
-                         realtime = gr.Checkbox(label="Realtime Toggler", info="If TRUE then uses more GPU but create image in realtime.", value=False)
-                         latency = gr.Text(label="Latency")
-                     with gr.Row():
-                         seed = gr.Number(label="Seed", value=42)
-                         randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
-                     with gr.Row():
-                         width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=DEFAULT_WIDTH)
-                         height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=DEFAULT_HEIGHT)
-                         num_inference_steps = gr.Slider(label="Inference Steps", minimum=1, maximum=4, step=1, value=DEFAULT_INFERENCE_STEPS)
-
-         with gr.Row():
-             gr.Markdown("### 🌟 Inspiration Gallery")
-         with gr.Row():
-             gr.Examples(
-                 examples=examples,
-                 fn=generate_image,
-                 inputs=[prompt],
-                 outputs=[result, seed, latency],
-                 cache_examples="lazy"
              )
 
-     enhanceBtn.click(
-         fn=generate_image,
-         inputs=[prompt, seed, width, height],
-         outputs=[result, seed, latency],
-         show_progress="full",
-         queue=False,
-         concurrency_limit=None
      )
 
      generateBtn.click(
          fn=generate_image,
-         inputs=[prompt, seed, width, height, randomize_seed, num_inference_steps],
-         outputs=[result, seed, latency],
-         show_progress="full",
-         api_name="RealtimeFlux",
-         queue=False
      )
 
-     def update_ui(realtime_enabled):
-         return {
-             prompt: gr.update(interactive=True),
-             generateBtn: gr.update(visible=not realtime_enabled)
-         }
-
-     realtime.change(
-         fn=update_ui,
-         inputs=[realtime],
-         outputs=[prompt, generateBtn],
-         queue=False,
-         concurrency_limit=None
      )
-
-     def realtime_generation(*args):
-         if args[0]:  # If realtime is enabled
-             return next(generate_image(*args[1:]))
-
      prompt.submit(
          fn=generate_image,
-         inputs=[prompt, seed, width, height, randomize_seed, num_inference_steps],
-         outputs=[result, seed, latency],
          show_progress="full",
-         queue=False,
-         concurrency_limit=None
      )
 
-     for component in [prompt, width, height, num_inference_steps]:
-         component.input(
-             fn=realtime_generation,
-             inputs=[realtime, prompt, seed, width, height, randomize_seed, num_inference_steps],
-             outputs=[result, seed, latency],
-             show_progress="hidden",
-             trigger_mode="always_last",
-             queue=False,
-             concurrency_limit=None
          )
 
- # Launch the app
- demo.launch()
  import spaces
  import torch
  import time
+ import logging
  from diffusers import DiffusionPipeline, AutoencoderTiny
+ # Using AttnProcessor2_0 for potential speedup with PyTorch 2.x
  from diffusers.models.attention_processor import AttnProcessor2_0
+ # Assuming custom_pipeline defines FluxWithCFGPipeline correctly
  from custom_pipeline import FluxWithCFGPipeline
 
+ # --- Setup Logging ---
+ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
+
+ # --- Torch Optimizations ---
  torch.backends.cuda.matmul.allow_tf32 = True
+ torch.backends.cudnn.benchmark = True  # Enable cuDNN benchmark for potentially faster convolutions
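+ # Note: cudnn.benchmark pays off mainly when input shapes stay fixed; with
+ # user-adjustable width/height, each new resolution triggers fresh autotuning.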
 
+ # --- Constants ---
  MAX_SEED = np.iinfo(np.int32).max
+ MAX_IMAGE_SIZE = 2048  # Keep a reasonable limit to prevent OOMs
  DEFAULT_WIDTH = 1024
  DEFAULT_HEIGHT = 1024
+ DEFAULT_INFERENCE_STEPS = 1  # FLUX Schnell is designed for few steps
+ MIN_INFERENCE_STEPS = 1
+ MAX_INFERENCE_STEPS = 8  # Allow slightly more steps for a potential quality boost
+ ENHANCE_STEPS = 4  # Fixed steps for the enhance button
 
+ # --- Device and Model Setup ---
  dtype = torch.float16
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ pipe = None  # Initialize pipe to None
+
+ try:
+     logging.info("Loading diffusion pipeline...")
+     pipe = FluxWithCFGPipeline.from_pretrained(
+         "black-forest-labs/FLUX.1-schnell", torch_dtype=dtype
+     )
+     logging.info("Loading VAE...")
+     pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype)
+
+     logging.info(f"Moving pipeline to {device}...")
+     pipe.to(device)
+
+     # Note: FLUX.1 is transformer-based (it has no `unet` attribute), and on
+     # PyTorch 2.x diffusers already defaults to scaled-dot-product attention
+     # (AttnProcessor2_0), so no explicit attention-processor override is set.
+
+     logging.info("Loading and fusing LoRA...")
+     pipe.load_lora_weights('hugovntr/flux-schnell-realism', weight_name='schnell-realism_v2.3.safetensors', adapter_name="better")
+     pipe.set_adapters(["better"], adapter_weights=[1.0])
+     pipe.fuse_lora(adapter_names=["better"], lora_scale=1.0)  # Fuse for potential speedup
+     pipe.unload_lora_weights()  # Unload after fusing
+     logging.info("LoRA fused and unloaded.")
+
+     # --- Compilation (Major Speed Optimization) ---
+     # Note: Compilation takes time on the first run.
+     # logging.info("Compiling transformer (this may take a moment)...")
+     # pipe.transformer = torch.compile(pipe.transformer, mode="reduce-overhead", fullgraph=True)
+     # logging.info("Compiling VAE Decoder...")
+     # pipe.vae.decoder = torch.compile(pipe.vae.decoder, mode="reduce-overhead", fullgraph=True)
+     # logging.info("Compiling VAE Encoder...")
+     # pipe.vae.encoder = torch.compile(pipe.vae.encoder, mode="reduce-overhead", fullgraph=True)
+     # logging.info("Model compilation finished.")
+
+     # --- Optional: Warm-up Run ---
+     # logging.info("Performing warm-up run...")
+     # with torch.inference_mode():
+     #     _ = pipe(prompt="warmup", num_inference_steps=1, generator=torch.Generator(device=device).manual_seed(0), output_type="pil", return_dict=False)[0]
+     # logging.info("Warm-up complete.")
+
+     # Clear cache after setup
+     if torch.cuda.is_available():
+         torch.cuda.empty_cache()
+         logging.info("CUDA cache cleared after setup.")
+
+ except Exception as e:
+     logging.error(f"Error during model loading or setup: {e}", exc_info=True)
+     # The Gradio UI below surfaces an error when `pipe` is None, so we just
+     # log here. If running the script directly, consider sys.exit().
+     # raise gr.Error(f"Failed to load models. Check logs for details. Error: {e}")
+
+
+ # --- Inference Function ---
+ @spaces.GPU(duration=30)  # Slightly increased duration buffer
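+ # On Hugging Face ZeroGPU Spaces, @spaces.GPU attaches a GPU for each call;
+ # `duration` is the per-call allocation budget in seconds.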
+ def generate_image(prompt: str, seed: int = 42, width: int = DEFAULT_WIDTH, height: int = DEFAULT_HEIGHT, randomize_seed: bool = False, num_inference_steps: int = DEFAULT_INFERENCE_STEPS, is_enhance: bool = False):
+     """Generates an image using the FLUX pipeline with error handling."""
+
+     if pipe is None:
+         raise gr.Error("Diffusion pipeline failed to load. Cannot generate images.")
+
+     if not prompt or prompt.strip() == "":
+         # Warn and return None for the image plus an error message.
+         gr.Warning("Prompt is empty. Please enter a description.")
+         return None, seed, "Error: Empty prompt"
+
+     start_time = time.time()
+
      if randomize_seed:
          seed = random.randint(0, MAX_SEED)
+
+     # Clamp dimensions to avoid excessive memory usage
+     width = min(width, MAX_IMAGE_SIZE)
+     height = min(height, MAX_IMAGE_SIZE)
+
+     # Use fixed steps for the enhance button, otherwise the slider value
+     steps_to_use = ENHANCE_STEPS if is_enhance else num_inference_steps
+     # Clamp steps to the supported range
+     steps_to_use = max(MIN_INFERENCE_STEPS, min(steps_to_use, MAX_INFERENCE_STEPS))
 
+     logging.info(f"Generating image with prompt: '{prompt}', seed: {seed}, size: {width}x{height}, steps: {steps_to_use}")
+
+     try:
+         # Ensure the generator lives on the same device as the pipeline
+         generator = torch.Generator(device=device).manual_seed(int(float(seed)))
+
+         # Use inference_mode for efficiency
+         with torch.inference_mode():
+             # Generate the image; adjust this call if the custom pipeline's
+             # signature differs.
+             result_img = pipe(
+                 prompt=prompt,
+                 width=width,
+                 height=height,
+                 num_inference_steps=steps_to_use,
+                 generator=generator,
+                 output_type="pil",  # Ensure PIL output for the Gradio Image component
+                 return_dict=False  # Assuming the custom pipeline supports this for direct output
+             )[0][0]  # Assuming the output structure is [[img]]
+
+         latency = time.time() - start_time
+         latency_str = f"Latency: {latency:.2f} seconds (Steps: {steps_to_use})"
+         logging.info(f"Image generated successfully. {latency_str}")
+         return result_img, seed, latency_str
+
+     except torch.cuda.OutOfMemoryError as e:
+         logging.error(f"CUDA OutOfMemoryError: {e}", exc_info=True)
+         # Clear cache and suggest reducing size/steps
+         if torch.cuda.is_available():
+             torch.cuda.empty_cache()
+         raise gr.Error("GPU ran out of memory. Try reducing the image width/height or the number of inference steps.")
+
+     except Exception as e:
+         logging.error(f"Error during image generation: {e}", exc_info=True)
+         # Clear cache just in case
+         if torch.cuda.is_available():
+             torch.cuda.empty_cache()
+         raise gr.Error(f"An error occurred during generation: {e}")
 
+ # --- Real-time Generation Wrapper ---
+ # Checks the realtime toggle before calling the main generation function.
+ # Triggered by changes to the prompt or sliders when realtime is enabled.
+ def handle_realtime_update(realtime_enabled: bool, prompt: str, seed: int, width: int, height: int, randomize_seed: bool, num_inference_steps: int):
+     if realtime_enabled and pipe is not None:
+         logging.debug("Realtime update triggered.")
+         # Call generate_image directly; errors inside it surface as gr.Error.
+         # is_enhance is never True for realtime updates.
+         return generate_image(prompt, seed, width, height, randomize_seed, num_inference_steps, is_enhance=False)
+     else:
+         # Realtime is disabled or the pipe failed: leave image, seed, and latency untouched.
+         logging.debug("Realtime update skipped (disabled or pipe error).")
+         return gr.update(), gr.update(), gr.update()
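+
+ # Note: gr.update() with no arguments leaves the bound output component
+ # unchanged, so skipped realtime ticks keep the previous image and seed.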
+
+ # --- Example Prompts ---
  examples = [
      "a tiny astronaut hatching from an egg on the moon",
      "a cute white cat holding a sign that says hello world",
      "photo of a woman on the beach, shot from above. She is facing the sea, while wearing a white dress. She has long blonde hair",
      "Selfie photo of a wizard with long beard and purple robes, he is apparently in the middle of Tokyo. Probably taken from a phone.",
      "Photo of a young woman with long, wavy brown hair tied in a bun and glasses. She has a fair complexion and is wearing subtle makeup, emphasizing her eyes and lips. She is dressed in a black top. The background appears to be an urban setting with a building facade, and the sunlight casts a warm glow on her face.",
+     "High-resolution photorealistic render of a sleek, futuristic motorcycle parked on a neon-lit street at night, rain reflecting the lights.",
+     "Watercolor painting of a cozy bookstore interior with overflowing shelves and a cat sleeping in a sunbeam.",
  ]
 
  # --- Gradio UI ---
+ with gr.Blocks(css="#app-container { max-width: 1280px; margin: auto; }") as demo:
+     gr.Markdown("# 🎨 Realtime FLUX Image Generator")
+     gr.Markdown("Generate stunning images in real-time with a modified FLUX.1-schnell pipeline. Optimized for speed.")
+     gr.Markdown("<span style='color: red;'>Note: Realtime generation requires a capable GPU. If generation stops or fails, try refreshing or reducing image size/steps.</span>")
+
+     if pipe is None:
+         gr.Markdown("<h2 style='color: red; text-align: center;'>Critical Error: Failed to load models. The application cannot function. Please check the logs.</h2>")
+
+     with gr.Row():
+         with gr.Column(scale=3):  # Give the image slightly more space
+             result = gr.Image(label="Generated Image", show_label=False, interactive=False, height=768)  # Adjust height as needed
+             latency = gr.Text(label="Generation Info", interactive=False)
+
+         with gr.Column(scale=2):
+             prompt = gr.Text(
+                 label="Prompt",
+                 placeholder="Describe the image you want to generate...",
+                 lines=3,
+                 show_label=False,
+                 container=False,
              )
+
+             with gr.Row():
+                 generateBtn = gr.Button("🖼️ Generate Image", variant="primary", interactive=pipe is not None)
+                 enhanceBtn = gr.Button(f"🚀 Enhance (Steps: {ENHANCE_STEPS})", interactive=pipe is not None)  # Enhance uses fixed steps
+
+             realtime = gr.Checkbox(label="⚡ Realtime Generation", info="Generates the image automatically as you type or adjust sliders (requires more GPU).", value=False, interactive=pipe is not None)
 
+             with gr.Accordion("Advanced Options", open=False):
+                 with gr.Row():
+                     seed = gr.Number(label="Seed", value=42, precision=0, interactive=pipe is not None)  # precision=0 keeps seeds integral
+                     randomize_seed = gr.Checkbox(label="Randomize Seed", value=True, interactive=pipe is not None)
+                 with gr.Row():
+                     width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=64, value=DEFAULT_WIDTH, interactive=pipe is not None)  # Larger step for faster adjustment
+                     height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=64, value=DEFAULT_HEIGHT, interactive=pipe is not None)
+                 num_inference_steps = gr.Slider(
+                     label="Inference Steps",
+                     minimum=MIN_INFERENCE_STEPS,
+                     maximum=MAX_INFERENCE_STEPS,
+                     step=1,
+                     value=DEFAULT_INFERENCE_STEPS,
+                     info=f"Controls quality vs. speed. Default: {DEFAULT_INFERENCE_STEPS}. Enhance uses {ENHANCE_STEPS}.",
+                     interactive=pipe is not None
+                 )
+
+     gr.Markdown("---")  # Separator
+     gr.Markdown("### 🌟 Inspiration Gallery")
+     gr.Examples(
+         examples=examples,
+         fn=generate_image,  # Examples call generate_image directly
+         inputs=[prompt],  # Only the prompt is needed; other args use defaults
+         outputs=[result, seed, latency],  # Match output components
+         cache_examples="lazy",  # Cache each example after its first run
+         run_on_click=True,  # Ensure examples run when clicked
+         label="Example Prompts"
      )
 
+     # --- Event Listeners ---
+
+     # Shared input/output lists for generate_image
+     gen_inputs = [prompt, seed, width, height, randomize_seed, num_inference_steps]
+     outputs = [result, seed, latency]
+
+     # Generate button click
      generateBtn.click(
          fn=generate_image,
+         inputs=gen_inputs,
+         outputs=outputs,
+         show_progress="full",  # Show progress for explicit clicks
+         api_name="generate_flux_image",
+         queue=True  # Queue button clicks to handle multiple requests gracefully
      )
 
+     # Enhance button click - uses fixed steps
+     enhanceBtn.click(
+         fn=generate_image,
+         # Pass is_enhance=True via a hidden component as the last input
+         inputs=[prompt, seed, width, height, randomize_seed, num_inference_steps, gr.Checkbox(value=True, visible=False)],
+         outputs=outputs,
+         show_progress="full",
+         queue=True
      )
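+     # Note: a Checkbox instantiated inside the Blocks context is added to the
+     # app (invisible here) and always supplies True for the is_enhance flag.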
+
+     # Prompt submission (Enter key)
      prompt.submit(
          fn=generate_image,
+         inputs=gen_inputs,
+         outputs=outputs,
          show_progress="full",
+         queue=True
      )
 
+     # --- Realtime Updates ---
+     # Components that trigger realtime regeneration
+     realtime_triggers = [prompt, width, height, num_inference_steps, seed, randomize_seed]
+
+     # Inputs for the realtime handler function
+     realtime_inputs = [realtime, prompt, seed, width, height, randomize_seed, num_inference_steps]
+
+     for component in realtime_triggers:
+         # .input fires while the user types or drags; .change fires when a
+         # value is committed (e.g., a checkbox toggle).
+         event_type = "input" if isinstance(component, (gr.Textbox, gr.Number)) else "change"
+
+         getattr(component, event_type)(
+             fn=handle_realtime_update,
+             inputs=realtime_inputs,
+             outputs=outputs,
+             show_progress="hidden",  # Hide progress for realtime updates
+             queue=False,  # Keep realtime updates responsive
+             trigger_mode="always_last"  # Run only for the latest value while events stream in
          )
 
+ # --- Launch the App ---
+ if __name__ == "__main__":
+     # queue() handles multiple users/requests gracefully; debug=True gives more logs
+     demo.queue().launch(debug=True)