Update app.py
app.py CHANGED
@@ -1,64 +1,25 @@
 import gradio as gr
 from transformers import pipeline
 from PIL import Image, ImageFilter
 import numpy as np
-import cv2
 
 # Initialize models with fixed choices
 segmentation_model = pipeline("image-segmentation", model="nvidia/segformer-b1-finetuned-cityscapes-1024-1024")
 depth_estimator = pipeline("depth-estimation", model="Intel/zoedepth-nyu-kitti")
 
-def lens_blur(image, radius):
-    """
-    Apply a more realistic lens blur (bokeh effect) using OpenCV.
-    """
-    if radius < 1:
-        return image
-
-    # Convert PIL image to OpenCV format
-    img_np = np.array(image)
-
-    # Create a circular kernel for the bokeh effect
-    kernel_size = 2 * radius + 1
-    kernel = np.zeros((kernel_size, kernel_size), dtype=np.float32)
-    center = radius
-    for i in range(kernel_size):
-        for j in range(kernel_size):
-            # Create circular kernel
-            if np.sqrt((i - center) ** 2 + (j - center) ** 2) <= radius:
-                kernel[i, j] = 1.0
-
-    # Normalize the kernel
-    if kernel.sum() != 0:
-        kernel = kernel / kernel.sum()
-
-    # Apply the filter to each channel separately
-    channels = cv2.split(img_np)
-    blurred_channels = []
-
-    for channel in channels:
-        blurred_channel = cv2.filter2D(channel, -1, kernel)
-        blurred_channels.append(blurred_channel)
-
-    # Merge the channels back
-    blurred_img = cv2.merge(blurred_channels)
-
-    # Convert back to PIL image
-    return Image.fromarray(blurred_img)
-
-def process_image(input_image, method, blur_intensity, blur_type):
+def process_image(input_image, method, blur_intensity):
     """
     Process the input image using one of two methods:
 
-    1. Segmented Background Blur:
+    1. Segmentation Blur Model:
        - Uses segmentation to extract a foreground mask.
-       - Applies …
+       - Applies Gaussian blur to the background.
        - Composites the final image.
 
-    2. Depth…
+    2. Monocular Depth Estimation Model:
        - Uses depth estimation to generate a depth map.
        - Normalizes the depth map to be used as a blending mask.
-       - Blends a fully blurred version …
+       - Blends a fully blurred version with the original image.
 
     Returns:
       - output_image: final composited image.
@@ -67,15 +28,7 @@ def process_image(input_image, method, blur_intensity, blur_type):
     # Ensure image is in RGB mode
     input_image = input_image.convert("RGB")
 
-
-    if blur_type == "Gaussian Blur":
-        blur_fn = lambda img, rad: img.filter(ImageFilter.GaussianBlur(radius=rad))
-    elif blur_type == "Lens Blur":
-        blur_fn = lens_blur
-    else:
-        blur_fn = lambda img, rad: img.filter(ImageFilter.GaussianBlur(radius=rad))
-
-    if method == "Segmented Background Blur":
+    if method == "Segmentation Blur Model":
         # Use segmentation to obtain a foreground mask
         results = segmentation_model(input_image)
         # Assume the last result is the main foreground object
@@ -85,14 +38,14 @@ def process_image(input_image, method, blur_intensity, blur_type):
         # Threshold to create a binary mask
         binary_mask = foreground_mask.point(lambda p: 255 if p > 128 else 0)
 
-        # Blur the background using …
-        blurred_background = blur_fn(input_image, blur_intensity)
+        # Blur the background using Gaussian blur
+        blurred_background = input_image.filter(ImageFilter.GaussianBlur(radius=blur_intensity))
 
         # Composite the final image: keep foreground and use blurred background elsewhere
         output_image = Image.composite(input_image, blurred_background, binary_mask)
         mask_image = binary_mask
 
-    elif method == "Depth…":
+    elif method == "Monocular Depth Estimation Model":
         # Generate depth map
         depth_results = depth_estimator(input_image)
         depth_map = depth_results["depth"]
@@ -103,8 +56,8 @@ def process_image(input_image, method, blur_intensity, blur_type):
         normalized_depth = (norm * 255).astype(np.uint8)
         mask_image = Image.fromarray(normalized_depth)
 
-        # Create fully blurred version using …
-        blurred_image = blur_fn(input_image, blur_intensity)
+        # Create fully blurred version using Gaussian blur
+        blurred_image = input_image.filter(ImageFilter.GaussianBlur(radius=blur_intensity))
 
         # Convert images to arrays for blending
         orig_np = np.array(input_image).astype(np.float32)
@@ -131,13 +84,10 @@ with gr.Blocks() as demo:
         with gr.Column():
             input_image = gr.Image(label="Input Image", type="pil")
             method = gr.Radio(label="Processing Method",
-                              choices=["Segmented Background Blur", "Depth…"],
-                              value="…")
-            blur_intensity = gr.Slider(label="Blur Intensity (…)",
+                              choices=["Segmentation Blur Model", "Monocular Depth Estimation Model"],
+                              value="Segmentation Blur Model")
+            blur_intensity = gr.Slider(label="Blur Intensity (sigma)",
                               minimum=1, maximum=30, step=1, value=15)
-            blur_type = gr.Dropdown(label="Blur Type",
-                                    choices=["Gaussian Blur", "Lens Blur"],
-                                    value="Gaussian Blur")
             run_button = gr.Button("Process Image")
         with gr.Column():
             output_image = gr.Image(label="Output Image")
@@ -146,7 +96,7 @@ with gr.Blocks() as demo:
     # Set up event handler
     run_button.click(
         fn=process_image,
-        inputs=[input_image, method, blur_intensity, blur_type],
+        inputs=[input_image, method, blur_intensity],
        outputs=[output_image, mask_output]
     )
 
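Two steps of the updated app.py fall outside the diff context shown above; hedged sketches of both follow.

First, the diff skips how the foreground mask is pulled out of the segmentation results (new lines 35-37 are not shown). Assuming the standard output of a transformers image-segmentation pipeline (a list of dicts, each holding a "label" and a PIL "mask"), and following the "assume the last result is the main foreground object" comment, the elided step presumably looks like this sketch (not verbatim from app.py):

    from PIL import Image
    from transformers import pipeline

    segmentation_model = pipeline("image-segmentation",
                                  model="nvidia/segformer-b1-finetuned-cityscapes-1024-1024")

    input_image = Image.open("photo.jpg").convert("RGB")  # any test image
    results = segmentation_model(input_image)
    # Each result is a dict with a "label" and a PIL "mask";
    # the code treats the last entry as the main foreground object.
    foreground_mask = results[-1]["mask"]
    binary_mask = foreground_mask.point(lambda p: 255 if p > 128 else 0)

Second, the depth branch's final blend (after new line 63) is also cut off. The visible fragments (a depth map normalized to [0, 1], a fully blurred copy, and the original converted to float32 "for blending") suggest a per-pixel alpha blend that uses the normalized depth as the weight. A minimal sketch under that assumption; depth_weighted_blend is a hypothetical name, and the orientation of the depth map (larger values mean farther away) is assumed:

    import numpy as np
    from PIL import Image, ImageFilter

    def depth_weighted_blend(input_image, depth_map, blur_intensity):
        # Normalize depth to [0, 1]; assumes larger values mark farther
        # pixels, which should receive more blur.
        depth = np.array(depth_map).astype(np.float32)
        norm = (depth - depth.min()) / (depth.max() - depth.min() + 1e-8)

        blurred = input_image.filter(ImageFilter.GaussianBlur(radius=blur_intensity))
        orig_np = np.array(input_image).astype(np.float32)
        blur_np = np.array(blurred).astype(np.float32)

        # Keep the original where depth is small, fade to the blurred
        # copy where depth is large.
        alpha = norm[..., None]  # shape (H, W, 1), broadcast over RGB
        blended = (1.0 - alpha) * orig_np + alpha * blur_np
        return Image.fromarray(blended.astype(np.uint8))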