Spaces:
Running
Running
ammariii08
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -1,98 +1,98 @@
|
|
1 |
-
import cv2
|
2 |
-
import numpy as np
|
3 |
-
import gradio as gr
|
4 |
-
from PIL import Image
|
5 |
-
import tempfile
|
6 |
-
|
7 |
-
def equalize_exposure(images):
    """Equalize the exposure of a list of BGR images using CLAHE.

    Each image is converted to LAB colour space, CLAHE (Contrast Limited
    Adaptive Histogram Equalization) is applied to the lightness channel
    only, and the result is converted back to BGR.

    Args:
        images: list of BGR images as numpy uint8 arrays.

    Returns:
        A new list of exposure-equalized BGR images; inputs are not mutated.
    """
    # The CLAHE operator holds only configuration, so build it once instead
    # of re-creating it on every loop iteration as the original code did.
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    equalized_images = []
    for img in images:
        img_lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
        l, a, b = cv2.split(img_lab)
        # Equalize only the L (lightness) channel so colours are preserved.
        l_eq = clahe.apply(l)
        img_eq = cv2.merge((l_eq, a, b))
        equalized_images.append(cv2.cvtColor(img_eq, cv2.COLOR_LAB2BGR))
    return equalized_images
|
19 |
-
|
20 |
-
def stitch_images(image_files):
    """Stitch two or more overlapping photos into a single panorama.

    Pipeline: load files as BGR arrays, equalize exposure, run OpenCV's
    panorama stitcher, crop the result to its non-black bounding box via a
    perspective warp, and save a PNG copy for download.

    Args:
        image_files: iterable of file paths (or file-like objects accepted
            by ``PIL.Image.open``).

    Returns:
        ``(PIL.Image, path_to_png)`` on success, or ``(None, None)`` when
        fewer than two images are supplied or stitching fails.
    """
    # Load images and convert to the BGR channel order OpenCV expects.
    images = []
    for file in image_files:
        img_pil = Image.open(file).convert('RGB')
        images.append(cv2.cvtColor(np.array(img_pil), cv2.COLOR_RGB2BGR))

    if len(images) < 2:
        print("Need at least two images to stitch.")
        return None, None

    # Exposure equalization reduces visible seams between frames.
    images_eq = equalize_exposure(images)

    stitcher = cv2.Stitcher_create(cv2.Stitcher_PANORAMA)
    stitcher.setPanoConfidenceThresh(0.8)
    stitcher.setWaveCorrection(False)

    status, stitched = stitcher.stitch(images_eq)
    if status != cv2.Stitcher_OK:
        print(f"Image stitching failed ({status})")
        return None, None

    # Crop away the black border the stitcher leaves around the panorama:
    # find the bounding box of all non-black pixels...
    gray = cv2.cvtColor(stitched, cv2.COLOR_BGR2GRAY)
    coords = cv2.findNonZero(gray)
    if coords is None:
        # findNonZero returns None for an all-black image; the original
        # would have crashed in boundingRect here.
        print("Image stitching produced an empty result.")
        return None, None
    x, y, w, h = cv2.boundingRect(coords)

    # ...and warp that axis-aligned box to the output origin. (With an
    # axis-aligned source quad this is equivalent to a plain crop.)
    src_pts = np.float32([[x, y], [x + w, y], [x + w, y + h], [x, y + h]])
    dst_pts = np.float32([[0, 0], [w, 0], [w, h], [0, h]])
    M = cv2.getPerspectiveTransform(src_pts, dst_pts)
    warped = cv2.warpPerspective(stitched, M, (w, h))

    stitched_image = Image.fromarray(cv2.cvtColor(warped, cv2.COLOR_BGR2RGB))

    # Save to a named temp file for the download widget. Close the handle
    # before writing: the original left the descriptor open (fd leak, and
    # writing to a still-open NamedTemporaryFile fails on Windows).
    with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as tmp:
        output_path = tmp.name
    stitched_image.save(output_path)

    return stitched_image, output_path
|
85 |
-
|
86 |
-
# Gradio Interface
# Header markup is hoisted to module constants so the layout code below
# reads as pure structure.
_TITLE_HTML = "<h1 style='color: #2196F3; text-align: center;'>Image Stitcher 🧵</h1>"
_SUBTITLE_HTML = "<h3 style='color: #2196F3; text-align: center;'>Upload the images you want to stitch</h3>"

with gr.Blocks() as demo:
    gr.Markdown(_TITLE_HTML)
    gr.Markdown(_SUBTITLE_HTML)

    # Widgets, top to bottom: upload, action button, preview, download link.
    uploads = gr.Files(type="filepath", label="Upload Images")
    run_btn = gr.Button("Stitch", variant="primary")
    preview = gr.Image(type="pil", label="Stitched Image")
    download = gr.File(label="Download Stitched Image")

    # Wire the button to the stitching pipeline.
    run_btn.click(stitch_images, inputs=uploads, outputs=[preview, download])

demo.launch()
|
|
|
1 |
+
import cv2
|
2 |
+
import numpy as np
|
3 |
+
import gradio as gr
|
4 |
+
from PIL import Image
|
5 |
+
import tempfile
|
6 |
+
|
7 |
+
def equalize_exposure(images):
    """Equalize the exposure of a list of BGR images using CLAHE.

    Each image is converted to LAB colour space, CLAHE (Contrast Limited
    Adaptive Histogram Equalization) is applied to the lightness channel
    only, and the result is converted back to BGR.

    Args:
        images: list of BGR images as numpy uint8 arrays.

    Returns:
        A new list of exposure-equalized BGR images; inputs are not mutated.
    """
    # The CLAHE operator holds only configuration, so build it once instead
    # of re-creating it on every loop iteration as the original code did.
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    equalized_images = []
    for img in images:
        img_lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
        l, a, b = cv2.split(img_lab)
        # Equalize only the L (lightness) channel so colours are preserved.
        l_eq = clahe.apply(l)
        img_eq = cv2.merge((l_eq, a, b))
        equalized_images.append(cv2.cvtColor(img_eq, cv2.COLOR_LAB2BGR))
    return equalized_images
|
19 |
+
|
20 |
+
def stitch_images(image_files):
    """Stitch two or more overlapping photos into a single panorama.

    Pipeline: load files as BGR arrays, equalize exposure, run OpenCV's
    panorama stitcher, crop the result to its non-black bounding box via a
    perspective warp, and save a PNG copy for download.

    Args:
        image_files: iterable of file paths (or file-like objects accepted
            by ``PIL.Image.open``).

    Returns:
        ``(PIL.Image, path_to_png)`` on success, or ``(None, None)`` when
        fewer than two images are supplied or stitching fails.
    """
    # Load images and convert to the BGR channel order OpenCV expects.
    images = []
    for file in image_files:
        img_pil = Image.open(file).convert('RGB')
        images.append(cv2.cvtColor(np.array(img_pil), cv2.COLOR_RGB2BGR))

    if len(images) < 2:
        print("Need at least two images to stitch.")
        return None, None

    # Exposure equalization reduces visible seams between frames.
    images_eq = equalize_exposure(images)

    stitcher = cv2.Stitcher_create(cv2.Stitcher_PANORAMA)
    stitcher.setPanoConfidenceThresh(0.8)
    stitcher.setWaveCorrection(False)

    status, stitched = stitcher.stitch(images_eq)
    if status != cv2.Stitcher_OK:
        print(f"Image stitching failed ({status})")
        return None, None

    # Crop away the black border the stitcher leaves around the panorama:
    # find the bounding box of all non-black pixels...
    gray = cv2.cvtColor(stitched, cv2.COLOR_BGR2GRAY)
    coords = cv2.findNonZero(gray)
    if coords is None:
        # findNonZero returns None for an all-black image; the original
        # would have crashed in boundingRect here.
        print("Image stitching produced an empty result.")
        return None, None
    x, y, w, h = cv2.boundingRect(coords)

    # ...and warp that axis-aligned box to the output origin. (With an
    # axis-aligned source quad this is equivalent to a plain crop.)
    src_pts = np.float32([[x, y], [x + w, y], [x + w, y + h], [x, y + h]])
    dst_pts = np.float32([[0, 0], [w, 0], [w, h], [0, h]])
    M = cv2.getPerspectiveTransform(src_pts, dst_pts)
    warped = cv2.warpPerspective(stitched, M, (w, h))

    stitched_image = Image.fromarray(cv2.cvtColor(warped, cv2.COLOR_BGR2RGB))

    # Save to a named temp file for the download widget. Close the handle
    # before writing: the original left the descriptor open (fd leak, and
    # writing to a still-open NamedTemporaryFile fails on Windows).
    with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as tmp:
        output_path = tmp.name
    stitched_image.save(output_path)

    return stitched_image, output_path
|
85 |
+
|
86 |
+
# Gradio Interface
# Header markup is hoisted to module constants so the layout code below
# reads as pure structure.
_TITLE_HTML = "<h1 style='color: #2196F3; text-align: center;'>Image Stitcher 🧵</h1>"
_SUBTITLE_HTML = "<h3 style='color: #2196F3; text-align: center;'>Upload the images you want to stitch</h3>"

with gr.Blocks() as demo:
    gr.Markdown(_TITLE_HTML)
    gr.Markdown(_SUBTITLE_HTML)

    # Widgets, top to bottom: upload, action button, preview, download link.
    uploads = gr.Files(type="filepath", label="Upload Images")
    run_btn = gr.Button("Stitch", variant="primary")
    preview = gr.Image(type="pil", label="Stitched Image")
    download = gr.File(label="Download Stitched Image")

    # Wire the button to the stitching pipeline.
    run_btn.click(stitch_images, inputs=uploads, outputs=[preview, download])

demo.launch()
|