Update app.py
app.py CHANGED
@@ -1,6 +1,6 @@
 import gradio as gr
 import torch
-from diffusers import StableDiffusionPipeline
+from diffusers import StableDiffusionPipeline, StableDiffusionInpaintPipeline
 from PIL import Image
 import numpy as np
 import cv2
@@ -8,38 +8,40 @@ import cv2
 # Device setup
 device = "cuda" if torch.cuda.is_available() else "cpu"

-# Load
-
-
+# Load model pipelines for different styles
+models = {
+    "fooocusv2": "CompVis/stable-diffusion-v1-4",
+    "SAI Anime": "CompVis/stable-diffusion-v1-4-anime"
+    # Add more models as needed
+}
+
+def load_image_model(style):
+    model_name = models.get(style, "CompVis/stable-diffusion-v1-4")
     return StableDiffusionPipeline.from_pretrained(model_name).to(device)

-image_model = load_image_model()
+image_model = load_image_model("fooocusv2")
+inpaint_model = StableDiffusionInpaintPipeline.from_pretrained("CompVis/stable-diffusion-v1-4-inpainting").to(device)

-def generate_image(prompt):
+def generate_image(prompt, style):
     try:
+        image_model = load_image_model(style)
         with torch.no_grad():
             image = image_model(prompt).images[0]
         return image, None
     except Exception as e:
         return None, f"Error generating image: {str(e)}"

-# Face swap function placeholder (replace with actual implementation)
-def swap_faces(image1, image2):
-    # Placeholder implementation, replace this with your actual face swapping code
-    return image1
-
 def face_swap(image1, image2):
     try:
         if image1 is None or image2 is None:
             return None, "Images for face swap are required"
         image1 = cv2.cvtColor(np.array(image1), cv2.COLOR_RGB2BGR)
         image2 = cv2.cvtColor(np.array(image2), cv2.COLOR_RGB2BGR)
-        swapped_image =
+        swapped_image = image1 # Placeholder implementation
         return cv2.cvtColor(swapped_image, cv2.COLOR_BGR2RGB), None
     except Exception as e:
         return None, f"Error during face swap: {str(e)}"

-# Upscaling function
 def upscale_image(image, scale_factor=2):
     try:
         if image is None:
@@ -52,17 +54,25 @@ def upscale_image(image, scale_factor=2):
     except Exception as e:
         return None, f"Error during upscaling: {str(e)}"

-
-def process_image(prompt, image1=None, image2=None, scale_factor=2):
+def inpaint_image(image, mask):
+    try:
+        if image is None or mask is None:
+            return None, "Image and mask are required for inpainting"
+        image = Image.fromarray(np.array(image))
+        mask = Image.fromarray(np.array(mask))
+        inpainted_image = inpaint_model(prompt="inpainting", image=image, mask_image=mask).images[0]
+        return inpainted_image, None
+    except Exception as e:
+        return None, f"Error during inpainting: {str(e)}"
+
+def process_image(prompt, style, image1=None, image2=None, mask=None, scale_factor=2):
     try:
         if prompt:
-
-            generated_image, error = generate_image(prompt)
+            generated_image, error = generate_image(prompt, style)
             if error:
                 return None, error
             return generated_image, None
         elif image1 and image2:
-            # Perform faceswap if two images are provided
             swapped_image, error = face_swap(image1, image2)
             if error:
                 return None, error
@@ -70,8 +80,13 @@ def process_image(prompt, image1=None, image2=None, scale_factor=2):
             if error:
                 return None, error
             return upscaled_image, None
+        elif image1 and mask:
+            inpainted_image, error = inpaint_image(image1, mask)
+            if error:
+                return None, error
+            return inpainted_image, None
         else:
-            return None, "Either a prompt or two images must be provided"
+            return None, "Either a prompt or two images or an image with a mask must be provided"
     except Exception as e:
         return None, f"Error in process_image function: {str(e)}"

@@ -80,16 +95,18 @@ iface = gr.Interface(
     fn=process_image,
     inputs=[
         gr.Textbox(label="Enter your prompt", placeholder="Type your prompt here..."),
-        gr.
-        gr.Image(label="Image
-        gr.
+        gr.Dropdown(label="Select Style", choices=list(models.keys()), default="fooocusv2"),
+        gr.Image(label="Image 1 (for faceswap or inpainting)", type="pil", optional=True),
+        gr.Image(label="Image 2 (for faceswap)", type="pil", optional=True),
+        gr.Image(label="Mask (for inpainting)", type="pil", optional=True),
+        gr.Slider(label="Upscale Factor", minimum=1, maximum=4, step=1, value=2, optional=True)
     ],
     outputs=[
         gr.Image(label="Output Image"),
         gr.Textbox(label="Error Message", placeholder="Error details will appear here...")
     ],
     title="Fooocus Image Processing",
-    description="Generate images from prompts, swap faces, and
+    description="Generate images from prompts with style selection, swap faces, upscale images, and perform inpainting."
 )

 if __name__ == "__main__":
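
A note on the Gradio calls in the new inputs list: default= and optional= belong to the older Gradio 2.x input classes, while in Gradio 3 and later the initial value of a component is passed as value= and there is no optional argument, since inputs left empty simply reach the function as None. A minimal sketch of the same interface definition under that assumption, with a stub in place of process_image:

import gradio as gr

# Hypothetical stand-in for the models dict defined in app.py.
models = {"fooocusv2": "CompVis/stable-diffusion-v1-4"}

iface = gr.Interface(
    fn=lambda *args: (None, "stub"),  # stand-in for process_image from app.py
    inputs=[
        gr.Textbox(label="Enter your prompt", placeholder="Type your prompt here..."),
        gr.Dropdown(label="Select Style", choices=list(models.keys()), value="fooocusv2"),
        gr.Image(label="Image 1 (for faceswap or inpainting)", type="pil"),
        gr.Image(label="Image 2 (for faceswap)", type="pil"),
        gr.Image(label="Mask (for inpainting)", type="pil"),
        gr.Slider(label="Upscale Factor", minimum=1, maximum=4, step=1, value=2),
    ],
    outputs=[
        gr.Image(label="Output Image"),
        gr.Textbox(label="Error Message"),
    ],
    title="Fooocus Image Processing",
)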
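
In the new revision generate_image calls load_image_model(style) on every request, so a full StableDiffusionPipeline is re-instantiated (and on first use downloaded) for each generation; the repo ids "CompVis/stable-diffusion-v1-4-anime" and "CompVis/stable-diffusion-v1-4-inpainting" should also be checked against the Hugging Face Hub, as they may not resolve (an inpainting checkpoint such as stabilityai/stable-diffusion-2-inpainting is a common alternative). A small cache keyed by repo id avoids the repeated loads; a sketch, assuming the models dict and device from app.py:

import torch
from diffusers import StableDiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"

models = {
    "fooocusv2": "CompVis/stable-diffusion-v1-4",
    # Further style -> repo-id entries would go here (ids must exist on the Hub).
}

_pipeline_cache = {}

def load_image_model(style):
    # Reuse an already-loaded pipeline instead of reloading it on every request.
    model_name = models.get(style, "CompVis/stable-diffusion-v1-4")
    if model_name not in _pipeline_cache:
        _pipeline_cache[model_name] = StableDiffusionPipeline.from_pretrained(model_name).to(device)
    return _pipeline_cache[model_name]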
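
Both the old swap_faces placeholder and the new swapped_image = image1 line leave the face swap unimplemented, so the returned "swapped" image is simply the first input. Purely as an illustration, and not the face-swap method used by Fooocus, one minimal OpenCV approach is to detect a face in each image with a Haar cascade and blend the source face onto the target region with seamlessClone; the helper below is hypothetical and would slot into face_swap after the RGB-to-BGR conversion:

import cv2
import numpy as np

def naive_face_swap(image1_bgr, image2_bgr):
    """Copy the largest detected face from image1 onto image2 (both BGR arrays)."""
    cascade = cv2.CascadeClassifier(
        cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
    )

    def largest_face(img):
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
        if len(faces) == 0:
            return None
        return max(faces, key=lambda f: f[2] * f[3])  # largest bounding box

    src, dst = largest_face(image1_bgr), largest_face(image2_bgr)
    if src is None or dst is None:
        return image2_bgr  # nothing detected, return the target unchanged

    sx, sy, sw, sh = src
    dx, dy, dw, dh = dst
    face_patch = cv2.resize(image1_bgr[sy:sy + sh, sx:sx + sw], (dw, dh))
    mask = 255 * np.ones(face_patch.shape[:2], dtype=np.uint8)
    center = (int(dx + dw // 2), int(dy + dh // 2))
    # Poisson blending smooths lighting around the pasted face region.
    return cv2.seamlessClone(face_patch, image2_bgr, mask, center, cv2.NORMAL_CLONE)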
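
inpaint_image passes the literal string "inpainting" as the prompt, but StableDiffusionInpaintPipeline uses the prompt to decide what to draw inside the masked area, so threading the user's prompt through generally gives more useful results; the round-trip through np.array is also unnecessary when Gradio already supplies PIL images. A sketch under those assumptions, with inpaint_model passed in explicitly (a variant of the signature in app.py):

from PIL import Image

def inpaint_image(inpaint_model, image, mask, prompt):
    """Fill the white area of `mask` in `image` according to `prompt`."""
    if image is None or mask is None:
        return None, "Image and mask are required for inpainting"
    try:
        image = image.convert("RGB")
        mask = mask.convert("L")  # single-channel mask: white marks the region to repaint
        result = inpaint_model(prompt=prompt, image=image, mask_image=mask).images[0]
        return result, None
    except Exception as e:
        return None, f"Error during inpainting: {str(e)}"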