ariankhalfani committed 3c66f88 (parent: e04afcf): Create app0.py
app0.py
ADDED
@@ -0,0 +1,265 @@
import gradio as gr
from ultralytics import YOLO
import cv2
import numpy as np
from PIL import Image, ImageDraw, ImageFont
import base64
from io import BytesIO
import tempfile
import os
from pathlib import Path
import shutil

# Load YOLOv8 model
model = YOLO("best.pt")
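# Note (editorial): "best.pt" is assumed to be a fine-tuned YOLOv8 weights file
# shipped alongside app0.py in this Space; it is not created by this script.
# The hard-coded class indices used further down (0 = Immature, 1 = Mature)
# should match the training label order, which can be cross-checked with:
#
#     print(model.names)  # e.g. {0: 'Immature', 1: 'Mature', ...}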

# Create directories if not present
uploaded_folder = Path('Uploaded_Picture')
predicted_folder = Path('Predicted_Picture')
uploaded_folder.mkdir(parents=True, exist_ok=True)
predicted_folder.mkdir(parents=True, exist_ok=True)

# Path for HTML database file
html_db_file = Path('patient_predictions.html')

# Initialize HTML database file if not present
if not html_db_file.exists():
    with open(html_db_file, 'w') as f:
        f.write("""
<html>
<head><title>Patient Prediction Database</title></head>
<body>
<h1>Patient Prediction Database</h1>
<table border="1" style="width:100%; border-collapse: collapse; text-align: center;">
    <thead>
        <tr>
            <th>Name</th>
            <th>Age</th>
            <th>Medical Record</th>
            <th>Sex</th>
            <th>Result</th>
            <th>Predicted Image</th>
        </tr>
    </thead>
    <tbody>
""")

def predict_image(input_image, name, age, medical_record, sex):
    if input_image is None:
        return None, "Please upload an image."

    # Convert Gradio input image (PIL Image) to numpy array
    image_np = np.array(input_image)

    # Ensure the image is in the correct format
    if len(image_np.shape) == 2:  # grayscale to RGB
        image_np = cv2.cvtColor(image_np, cv2.COLOR_GRAY2RGB)
    elif image_np.shape[2] == 4:  # RGBA to RGB
        image_np = cv2.cvtColor(image_np, cv2.COLOR_RGBA2RGB)

    # Perform prediction
    results = model(image_np)

    # Draw bounding boxes on the image
    image_with_boxes = image_np.copy()
    raw_predictions = []

    # Default label when the model returns no detections
    label = "Normal"

    if results[0].boxes:
        # Keep only the detection with the highest confidence
        highest_confidence_result = max(results[0].boxes, key=lambda x: x.conf.item())

        # Determine the label based on the class index
        class_index = int(highest_confidence_result.cls.item())
        if class_index == 0:
            label = "Immature"
            color = (255, 255, 0)  # Yellow (RGB order) for Immature
        elif class_index == 1:
            label = "Mature"
            color = (255, 0, 0)  # Red (RGB order) for Mature
        else:
            label = "Normal"
            color = (0, 255, 0)  # Green (RGB order) for Normal

        confidence = highest_confidence_result.conf.item()
        xmin, ymin, xmax, ymax = map(int, highest_confidence_result.xyxy[0])

        # Draw the bounding box
        cv2.rectangle(image_with_boxes, (xmin, ymin), (xmax, ymax), color, 2)

        # Enlarge font scale and thickness
        font_scale = 1.0
        thickness = 2

        # Calculate label background size
        (text_width, text_height), baseline = cv2.getTextSize(f'{label} {confidence:.2f}', cv2.FONT_HERSHEY_SIMPLEX, font_scale, thickness)
        cv2.rectangle(image_with_boxes, (xmin, ymin - text_height - baseline), (xmin + text_width, ymin), (0, 0, 0), cv2.FILLED)

        # Put the label text on the black background
        cv2.putText(image_with_boxes, f'{label} {confidence:.2f}', (xmin, ymin - 10), cv2.FONT_HERSHEY_SIMPLEX, font_scale, (255, 255, 255), thickness)

        raw_predictions.append(f"Label: {label}, Confidence: {confidence:.2f}, Box: [{xmin}, {ymin}, {xmax}, {ymax}]")

    raw_predictions_str = "\n".join(raw_predictions)

    # Convert to PIL image for further processing
    pil_image_with_boxes = Image.fromarray(image_with_boxes)

    # Add text and watermark
    pil_image_with_boxes = add_text_and_watermark(pil_image_with_boxes, name, age, medical_record, sex, label)

    # Save images to directories
    image_name = f"{name}-{age}-{sex}-{medical_record}.png"
    input_image.save(uploaded_folder / image_name)
    pil_image_with_boxes.save(predicted_folder / image_name)

    # Convert the predicted image to base64 for embedding in HTML
    buffered = BytesIO()
    pil_image_with_boxes.save(buffered, format="PNG")
    predicted_image_base64 = base64.b64encode(buffered.getvalue()).decode()

    # Append the prediction to the HTML database
    append_patient_info_to_html(name, age, medical_record, sex, label, predicted_image_base64)

    return pil_image_with_boxes, raw_predictions_str
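# Illustrative usage sketch (editorial; not part of the Gradio flow, and
# "sample_eye.png" is a hypothetical local test file):
#
#     img = Image.open("sample_eye.png")
#     annotated, raw = predict_image(img, "Jane Doe", 54, "MR-001", "Female")
#     print(raw)
#     annotated.save("annotated_sample.png")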

# Function to add watermark
def add_watermark(image):
    try:
        logo = Image.open('image-logo.png').convert("RGBA")
        image = image.convert("RGBA")

        # Resize logo
        basewidth = 100
        wpercent = (basewidth / float(logo.size[0]))
        hsize = int((float(wpercent) * logo.size[1]))
        logo = logo.resize((basewidth, hsize), Image.LANCZOS)

        # Position logo
        position = (image.width - logo.width - 10, image.height - logo.height - 10)

        # Composite image
        transparent = Image.new('RGBA', (image.width, image.height), (0, 0, 0, 0))
        transparent.paste(image, (0, 0))
        transparent.paste(logo, position, mask=logo)

        return transparent.convert("RGB")
    except Exception as e:
        print(f"Error adding watermark: {e}")
        return image

# Function to add text and watermark
def add_text_and_watermark(image, name, age, medical_record, sex, label):
    draw = ImageDraw.Draw(image)

    # Load a larger font (adjust the size as needed)
    font_size = 24  # Example font size
    try:
        font = ImageFont.truetype("font.ttf", size=font_size)
    except IOError:
        font = ImageFont.load_default()
        print("Error: cannot open resource, using default font.")

    text = f"Name: {name}, Age: {age}, Medical Record: {medical_record}, Sex: {sex}, Result: {label}"

    # Calculate text bounding box
    text_bbox = draw.textbbox((0, 0), text, font=font)
    text_width, text_height = text_bbox[2] - text_bbox[0], text_bbox[3] - text_bbox[1]
    text_x = 20
    text_y = 40
    padding = 10

    # Draw a filled rectangle for the background
    draw.rectangle(
        [text_x - padding, text_y - padding, text_x + text_width + padding, text_y + text_height + padding],
        fill="black"
    )

    # Draw text on top of the rectangle
    draw.text((text_x, text_y), text, fill=(255, 255, 255, 255), font=font)

    # Add watermark to the image
    image_with_watermark = add_watermark(image)

    return image_with_watermark

def append_patient_info_to_html(name, age, medical_record, sex, result, predicted_image_base64):
    # Add the patient info to the HTML table
    html_entry = f"""
        <tr>
            <td>{name}</td>
            <td>{age}</td>
            <td>{medical_record}</td>
            <td>{sex}</td>
            <td>{result}</td>
            <td><img src="data:image/png;base64,{predicted_image_base64}" alt="Predicted Image" width="150"></td>
        </tr>
    """

    with open(html_db_file, 'a') as f:
        f.write(html_entry)

    # Ensure we only add the closing tags once
    # (check for the closing </html> tag, which is written on its own line)
    if "</html>" not in open(html_db_file).read():
        with open(html_db_file, 'a') as f:
            f.write("""
    </tbody>
</table>
</body>
</html>
""")

    return str(html_db_file)  # Return the HTML file path for download

# Function to download the folders
def download_folder(folder):
    zip_path = os.path.join(tempfile.gettempdir(), f"{folder}.zip")

    # Zip the folder
    shutil.make_archive(zip_path.replace('.zip', ''), 'zip', folder)

    return zip_path
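# Illustrative direct call (editorial sketch; in the app this is invoked via
# the download buttons below):
#
#     zip_file = download_folder('Uploaded_Picture')
#     # -> e.g. '/tmp/Uploaded_Picture.zip', via tempfile.gettempdir()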

# Gradio Blocks Interface
def demo():
    with gr.Blocks() as demo:
        with gr.Column():
            gr.Markdown("# Cataract Detection System")
            gr.Markdown("Upload an image to detect cataract and add patient details.")
            gr.Markdown("This application uses YOLOv8 with mAP=0.981")

        with gr.Column():
            name = gr.Textbox(label="Name")
            age = gr.Number(label="Age")
            medical_record = gr.Textbox(label="Medical Record")
            sex = gr.Radio(["Male", "Female"], label="Sex")
            input_image = gr.Image(type="pil", label="Upload an Image", image_mode="RGB")

        with gr.Column():
            submit_btn = gr.Button("Submit")
            output_image = gr.Image(type="pil", label="Predicted Image")

        with gr.Row():
            raw_result = gr.Textbox(label="Prediction Result")

        with gr.Row():
            download_html_btn = gr.Button("Download Patient Information (HTML)")
            download_uploaded_btn = gr.Button("Download Uploaded Images")
            download_predicted_btn = gr.Button("Download Predicted Images")

        # Add file download output components for the uploaded and predicted images
        patient_info_file = gr.File(label="Patient Information HTML File")
        uploaded_folder_file = gr.File(label="Uploaded Images Zip File")
        predicted_folder_file = gr.File(label="Predicted Images Zip File")

        # Connect functions with components
        # (the prediction handler is predict_image; inputs must match its signature)
        submit_btn.click(fn=predict_image,
                         inputs=[input_image, name, age, medical_record, sex],
                         outputs=[output_image, raw_result])
        # The HTML download simply returns the database file path
        download_html_btn.click(fn=lambda: str(html_db_file), outputs=patient_info_file)
        download_uploaded_btn.click(fn=lambda: download_folder('Uploaded_Picture'), outputs=uploaded_folder_file)
        download_predicted_btn.click(fn=lambda: download_folder('Predicted_Picture'), outputs=predicted_folder_file)

    # Launch Gradio app
    demo.launch()

# Calling the demo function to initiate the interface
demo()
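# Local run sketch (editorial; the dependency list is an assumption, this
# commit pins nothing):
#
#     pip install gradio ultralytics opencv-python-headless pillow numpy
#     python app0.py
#
# "best.pt" must sit next to app0.py; "image-logo.png" and "font.ttf" are
# optional, since the watermark and custom font fall back gracefully when missing.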