freeb5d committed on
Commit d358a03 · verified · 1 Parent(s): 027590c

Update app.py

Files changed (1)
  1. app.py +19 -29
app.py CHANGED
@@ -2,51 +2,42 @@ import gradio as gr
 import threading
 import os
 import torch
-from diffusers import StableDiffusionPipeline
 
-# Set environment variables for performance
 os.environ["OMP_NUM_THREADS"] = str(os.cpu_count())
 torch.set_num_threads(os.cpu_count())
 
-# Load models
-model1 = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
-model2 = StableDiffusionPipeline.from_pretrained("Purz/face-projection")
+model1 = gr.load("models/prithivMLmods/SD3.5-Turbo-Realism-2.0-LoRA")
+model2 = gr.load("models/Purz/face-projection")
 
-# Event to stop image generation
 stop_event = threading.Event()
 
 def generate_images(text, selected_model):
-    """Generate images based on the input text and selected model."""
     stop_event.clear()
 
-    try:
-        if selected_model == "Model 1 (Turbo Realism)":
-            model = model1
-        elif selected_model == "Model 2 (Face Projection)":
-            model = model2
-        else:
-            return ["Invalid model selection."] * 3
+    if selected_model == "Model 1 (Turbo Realism)":
+        model = model1
+    elif selected_model == "Model 2 (Face Projection)":
+        model = model2
+    else:
+        return ["Invalid model selection."] * 3
 
-        results = []
-        for i in range(3):
-            if stop_event.is_set():
-                return ["Image generation stopped by user."] * 3
+    results = []
+    for i in range(3):
+        if stop_event.is_set():
+            return ["Image generation stopped by user."] * 3
 
-            modified_text = f"{text} variation {i+1}"
-            result = model(modified_text).images[0]  # Generate image
-            results.append(result)
+        modified_text = f"{text} variation {i+1}"
+        result = model(modified_text)
+        results.append(result)
 
-        return results
-    except Exception as e:
-        return [f"Error generating images: {str(e)}"] * 3
+    return results
 
 def stop_generation():
-    """Stop the ongoing image generation."""
+    """Stops the ongoing image generation by setting the stop_event flag."""
     stop_event.set()
     return ["Generation stopped."] * 3
 
-# Gradio interface
-with gr.Blocks() as interface:
+with gr.Blocks() as interface:#...
     gr.Markdown(
         "### ⚠ Sorry for the inconvenience. The Space is currently running on the CPU, which might affect performance. We appreciate your understanding."
     )
@@ -70,5 +61,4 @@ with gr.Blocks() as interface:
     generate_button.click(generate_images, inputs=[text_input, model_selector], outputs=[output1, output2, output3])
     stop_button.click(stop_generation, inputs=[], outputs=[output1, output2, output3])
 
-# Launch the interface with an API endpoint
-interface.launch(api_name="generate_images")
+interface.launch()
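
For reference, the key change in this commit is that the locally loaded StableDiffusionPipeline objects are replaced by gr.load("models/…") interfaces, which proxy the models through the Hugging Face Inference API and can be called like plain functions, which is how generate_images uses model1 and model2 above. A minimal, self-contained sketch of that usage (variable name and prompt are illustrative; for image models the call typically returns a file path to the generated image):

import gradio as gr

# Load a Hub-hosted text-to-image model as a callable Gradio interface,
# the same call style used for model1/model2 in the updated app.py.
turbo_realism = gr.load("models/prithivMLmods/SD3.5-Turbo-Realism-2.0-LoRA")

# Calling the loaded interface runs a prediction through the Inference API;
# for image models the return value is typically a path to the generated file.
image_path = turbo_realism("a lighthouse on a cliff at sunset, variation 1")
print(image_path)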