- README.md +27 -1
- utils/yolo_utils.py +57 -0
README.md
CHANGED
@@ -1,4 +1,15 @@
|
|
1 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2 |
Code for the article "From Density to Geometry: YOLOv8 Instance Segmentation for Reverse Engineering of Optimized Structures"
|
3 |
|
4 |
## Table of Contents
|
@@ -8,10 +19,15 @@ Code for the article "From Density to Geometry: YOLOv8 Instance Segmentation for
|
|
8 |
- [Prerequisites](#prerequisites)
|
9 |
- [Installing](#installing)
|
10 |
- [Datasets](#datasets)
|
|
|
|
|
11 |
|
12 |
## Overview
|
13 |
Brief description of what the project does and the problem it solves. Include a link or reference to the original article that inspired or is associated with this implementation.
|
14 |
|
|
|
|
|
|
|
15 |
## Reference
|
16 |
This code aims to reproduce the results presented in the research article:
|
17 |
|
@@ -54,3 +70,13 @@ If you want to use one of the linked datasets, please unzip it inside of the dat
|
|
54 |
path: # dataset root dir
|
55 |
```
|
56 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
title: Demo Space
|
3 |
+
emoji: 🤗
|
4 |
+
colorFrom: yellow
|
5 |
+
colorTo: orange
|
6 |
+
sdk: gradio
|
7 |
+
app_file: app.py
|
8 |
+
pinned: false
|
9 |
+
---
|
10 |
+
|
11 |
+
|
12 |
+
# YOLOv8-TO
|
13 |
Code for the article "From Density to Geometry: YOLOv8 Instance Segmentation for Reverse Engineering of Optimized Structures"
|
14 |
|
15 |
## Table of Contents
|
|
|
19 |
- [Prerequisites](#prerequisites)
|
20 |
- [Installing](#installing)
|
21 |
- [Datasets](#datasets)
|
22 |
+
- [Training](#training)
|
23 |
+
- [Inference](#inference)
|
24 |
|
25 |
## Overview
|
26 |
YOLOv8-TO uses YOLOv8 instance segmentation to reverse engineer optimized structures: it converts density-based topology optimization results into explicit geometric descriptions. See the Reference section below for the associated research article.
|
27 |
|
28 |
+
## Demo
|
29 |
+
Try the live demo on this repository's Hugging Face Space:
|
30 |
+
|
31 |
## Reference
|
32 |
This code aims to reproduce the results presented in the research article:
|
33 |
|
|
|
70 |
path: # dataset root dir
|
71 |
```
|
72 |
|
73 |
+
|
74 |
+
## Training
|
75 |
+
|
76 |
+
To train the model, make sure the train dataset is set up according to the above section and according to the documentation from ultralytics:
|
77 |
+
https://docs.ultralytics.com/datasets/
|
78 |
+
|
79 |
+
Refer to the notebook `YOLOv8_TO.ipynb` for an example of how to train the model.
|
80 |
+
|
81 |
+
## Inference
|
82 |
+
Refer to the notebook `YOLOv8_TO.ipynb` for an example of how to perform inference with the trained model.
|
utils/yolo_utils.py
CHANGED
@@ -130,6 +130,25 @@ from PIL import Image
|
|
130 |
import matplotlib.pyplot as plt
|
131 |
from matplotlib.colors import TwoSlopeNorm
|
132 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
133 |
def preprocess_image(image_path, threshold_value=0.9, upscale=False, upscale_factor=2.0):
|
134 |
image = Image.open(image_path).convert('L')
|
135 |
image = image.point(lambda x: 255 if x > threshold_value * 255 else 0, '1')
|
@@ -237,3 +256,41 @@ def plot_results(input_image_array_tensor, seg_result, pred_Phi, sum_pred_H, fin
|
|
237 |
plt.figtext(0.5, 0.05, f'Dice Loss: {dice_loss.item():.4f}', ha='center', fontsize=16)
|
238 |
|
239 |
fig.savefig(filename, dpi=600)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
130 |
import matplotlib.pyplot as plt
|
131 |
from matplotlib.colors import TwoSlopeNorm
|
132 |
|
133 |
+
from PIL import Image
|
134 |
+
|
135 |
+
def preprocess_image_pil(image, threshold_value=0.9, upscale=False, upscale_factor=2.0):
    """Binarize a PIL image and optionally upscale the result.

    The image is converted to grayscale if it is not already, then hard
    thresholded so pixels brighter than ``threshold_value * 255`` become
    white and everything else black (mode '1'). When ``upscale`` is set,
    the binarized image is resized by ``upscale_factor``.

    Args:
        image: PIL.Image.Image to preprocess.
        threshold_value: Fraction of 255 used as the binarization cutoff.
        upscale: Whether to enlarge the binarized image.
        upscale_factor: Width/height multiplier applied when upscaling.

    Returns:
        The binarized (and possibly resized) PIL image.
    """
    # Work in single-channel grayscale before thresholding.
    grayscale = image if image.mode == 'L' else image.convert('L')

    # Hard threshold: anything brighter than the cutoff maps to white.
    cutoff = threshold_value * 255
    binary = grayscale.point(lambda px: 255 if px > cutoff else 0, '1')

    if not upscale:
        return binary

    # Enlarge by the requested factor using bicubic resampling.
    new_size = (int(binary.width * upscale_factor), int(binary.height * upscale_factor))
    return binary.resize(new_size, resample=Image.BICUBIC)
|
151 |
+
|
152 |
def preprocess_image(image_path, threshold_value=0.9, upscale=False, upscale_factor=2.0):
|
153 |
image = Image.open(image_path).convert('L')
|
154 |
image = image.point(lambda x: 255 if x > threshold_value * 255 else 0, '1')
|
|
|
256 |
plt.figtext(0.5, 0.05, f'Dice Loss: {dice_loss.item():.4f}', ha='center', fontsize=16)
|
257 |
|
258 |
fig.savefig(filename, dpi=600)
|
259 |
+
|
260 |
+
|
261 |
+
import numpy as np
|
262 |
+
from PIL import Image
|
263 |
+
import io
|
264 |
+
|
265 |
+
def plot_results_gradio(input_image_array_tensor, seg_result, pred_Phi, sum_pred_H, final_H, dice_loss, tversky_loss):
    """Render a 2x2 summary figure and return it as a PIL image for Gradio.

    Panels: the input image, the segmentation result, per-component
    prediction contours, and the summed prediction projection. The dice
    loss is printed beneath the panels. ``final_H`` and ``tversky_loss``
    are accepted for interface parity but are not drawn.
    """
    # NOTE(review): assumes pred_Phi columns and sum_pred_H are flattened
    # column-major fields over a (nely+1, nelx+1) grid — matches the
    # reshape(order='F') calls below; confirm against the caller.
    n_x = input_image_array_tensor.shape[1] - 1
    n_y = input_image_array_tensor.shape[0] - 1
    grid_shape = (n_y + 1, n_x + 1)

    fig, ax = plt.subplots(2, 2, figsize=(8, 8))

    # Top-left: the raw input image.
    ax[0, 0].imshow(input_image_array_tensor.squeeze(), origin='lower', cmap='gray_r')
    ax[0, 0].set_title('Input Image')
    ax[0, 0].axis('on')

    # Top-right: the segmentation overlay.
    ax[0, 1].imshow(seg_result)
    ax[0, 1].set_title('Segmentation Result')
    ax[0, 1].axis('off')

    # Bottom-right: one filled contour per predicted component, cycling
    # through a fixed palette (repeated so zip never runs short).
    palette = ['yellow', 'g', 'r', 'c', 'm', 'y', 'black', 'orange', 'pink', 'cyan', 'slategrey', 'wheat', 'purple', 'mediumturquoise', 'darkviolet', 'orangered']
    for component, fill_color in zip(range(0, pred_Phi.shape[1]), palette * 100):
        field = np.flipud(pred_Phi[:, component].numpy().reshape(grid_shape, order='F'))
        ax[1, 1].contourf(field, [0, 1], colors=fill_color)
    ax[1, 1].set_title('Prediction contours')
    ax[1, 1].set_aspect('equal')

    # Bottom-left: projection of the summed predicted field.
    projection = np.flipud(sum_pred_H.detach().numpy().reshape(grid_shape, order='F'))
    ax[1, 0].imshow(projection, origin='lower', cmap='gray_r')
    ax[1, 0].set_title('Prediction Projection')

    plt.subplots_adjust(hspace=0.3, wspace=0.01)
    plt.figtext(0.5, 0.05, f'Dice Loss: {dice_loss.item():.4f}', ha='center', fontsize=16)

    # Rasterize the figure to an in-memory PNG and hand it back as a PIL image.
    png_buffer = io.BytesIO()
    plt.savefig(png_buffer, format='png')
    plt.close(fig)
    png_buffer.seek(0)
    return Image.open(png_buffer)
|