Commit
·
477c54f
0
Parent(s):
Initial commit: Bambu A1 Mini analysis system
Browse files- .DS_Store +0 -0
- .env.example +18 -0
- .gitignore +41 -0
- app.py +376 -0
- core/.DS_Store +0 -0
- core/analysis/__init__.py +17 -0
- core/analysis/defect_detector.py +500 -0
- core/analysis/image_processor.py +43 -0
- core/analysis/segmentation_model.py +1129 -0
- mo_optimizer.py +232 -0
- requirements.txt +26 -0
.DS_Store
ADDED
Binary file (6.15 kB). View file
|
|
.env.example
ADDED
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# MQTT Configuration
|
2 |
+
host=mqtt.bambulab.com
|
3 |
+
port=8883
|
4 |
+
username=bblp
|
5 |
+
password=bblp
|
6 |
+
|
7 |
+
# Printer Information
|
8 |
+
PRINTER_SERIAL=0309CA471800852
|
9 |
+
|
10 |
+
# S3 Configuration
|
11 |
+
AWS_ACCESS_KEY_ID=your_access_key
|
12 |
+
AWS_SECRET_ACCESS_KEY=your_secret_key
|
13 |
+
AWS_REGION=us-east-1
|
14 |
+
S3_BUCKET=bambu-prints
|
15 |
+
|
16 |
+
# Logging
|
17 |
+
LOG_LEVEL=DEBUG
|
18 |
+
LOG_FORMAT=%(asctime)s - %(levelname)s - %(message)s
|
.gitignore
ADDED
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Python
|
2 |
+
__pycache__/
|
3 |
+
*.py[cod]
|
4 |
+
*$py.class
|
5 |
+
*.so
|
6 |
+
.Python
|
7 |
+
env/
|
8 |
+
build/
|
9 |
+
develop-eggs/
|
10 |
+
dist/
|
11 |
+
downloads/
|
12 |
+
eggs/
|
13 |
+
.eggs/
|
14 |
+
lib/
|
15 |
+
lib64/
|
16 |
+
parts/
|
17 |
+
sdist/
|
18 |
+
var/
|
19 |
+
wheels/
|
20 |
+
*.egg-info/
|
21 |
+
.installed.cfg
|
22 |
+
*.egg
|
23 |
+
|
24 |
+
# Environment
|
25 |
+
.env
|
26 |
+
.venv
|
27 |
+
venv/
|
28 |
+
ENV/
|
29 |
+
|
30 |
+
# IDE
|
31 |
+
.idea/
|
32 |
+
.vscode/
|
33 |
+
*.swp
|
34 |
+
*.swo
|
35 |
+
|
36 |
+
# Logs
|
37 |
+
*.log
|
38 |
+
|
39 |
+
# Local files
|
40 |
+
temp/
|
41 |
+
uploads/
|
app.py
ADDED
@@ -0,0 +1,376 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
import numpy as np
|
3 |
+
import cv2
|
4 |
+
import paho.mqtt.client as mqtt
|
5 |
+
import json
|
6 |
+
import time
|
7 |
+
import threading
|
8 |
+
import os
|
9 |
+
from io import BytesIO
|
10 |
+
import zipfile
|
11 |
+
import logging
|
12 |
+
from dotenv import load_dotenv
|
13 |
+
from mo_optimizer import MOPrintOptimizer
|
14 |
+
from core.analysis import DefectDetector, ImageProcessor
|
15 |
+
from core.database import PrintJob
|
16 |
+
from datetime import datetime
|
17 |
+
import requests
|
18 |
+
from PIL import Image
|
19 |
+
|
20 |
+
# Load environment variables
|
21 |
+
load_dotenv()
|
22 |
+
|
23 |
+
# MQTT Configuration
|
24 |
+
HOST = os.getenv("host")
|
25 |
+
PORT = int(os.getenv("port", 8883))
|
26 |
+
USERNAME = os.getenv("username")
|
27 |
+
PASSWORD = os.getenv("password")
|
28 |
+
PRINTER_SERIAL = os.getenv("PRINTER_SERIAL", "0309CA471800852")
|
29 |
+
|
30 |
+
# Setup logging
|
31 |
+
logging.basicConfig(
|
32 |
+
level=logging.INFO,
|
33 |
+
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
|
34 |
+
)
|
35 |
+
logger = logging.getLogger(__name__)
|
36 |
+
|
37 |
+
# Global variables to store received data
|
38 |
+
latest_data = {
|
39 |
+
"bed_temperature": "N/A",
|
40 |
+
"nozzle_temperature": "N/A",
|
41 |
+
"status": "N/A",
|
42 |
+
"update_time": "Waiting for data...",
|
43 |
+
"current_image_url": None,
|
44 |
+
"print_progress": 0
|
45 |
+
}
|
46 |
+
|
47 |
+
# Initialize analysis components
|
48 |
+
optimizer = MOPrintOptimizer()
|
49 |
+
detector = DefectDetector()
|
50 |
+
|
51 |
+
def create_3mf_package(gcode_content: str, gcode_filename: str = "Metadata/plate_1.gcode") -> BytesIO:
    """Wrap G-code text in a minimal in-memory 3MF (zip) archive.

    Args:
        gcode_content: Raw G-code text to embed.
        gcode_filename: Archive-internal path for the G-code entry.

    Returns:
        BytesIO positioned at offset 0, containing the deflated archive.
    """
    buffer = BytesIO()
    archive = zipfile.ZipFile(buffer, 'w', zipfile.ZIP_DEFLATED)
    with archive:
        archive.writestr(gcode_filename, gcode_content)
    # Rewind so callers can stream the archive from the start.
    buffer.seek(0)
    return buffer
|
58 |
+
|
59 |
+
def create_client(host, port, username, password):
    """Create, configure and start an MQTT client.

    Connects over TLS, registers the module-level on_connect/on_message
    callbacks, and starts the paho network loop on a background thread.

    Args:
        host: Broker hostname.
        port: Broker port (TLS, typically 8883).
        username: Broker username.
        password: Broker password.

    Returns:
        The connected mqtt.Client, or None if the connection attempt failed
        (callers such as send_print_command must handle the None case).
    """
    client = mqtt.Client()
    client.username_pw_set(username, password)
    # NOTE(review): no CA certificate is pinned; tls_set() with defaults
    # relies on the system trust store — confirm this is intended.
    client.tls_set(tls_version=mqtt.ssl.PROTOCOL_TLS)
    client.on_connect = on_connect
    client.on_message = on_message
    try:
        client.connect(host, port)
        # Non-blocking: network I/O runs on a paho-managed thread.
        client.loop_start()
        return client
    except Exception as e:
        logger.error(f"MQTT connection failed: {e}")
        return None
|
73 |
+
|
74 |
+
def on_connect(client, userdata, flags, rc):
    """MQTT connect callback: subscribe to all printer topics on success.

    Args:
        client: The paho client instance.
        userdata: Unused user data slot.
        flags: Broker connack flags (unused).
        rc: Connection result code; 0 means success.
    """
    logger.info(f"Connected with result code {rc}")
    if rc != 0:
        return
    # Response, status and camera-image channels for this printer serial,
    # subscribed in the same order as before.
    for channel in ("response", "status", "image"):
        topic = f"bambu_a1_mini/{channel}/{PRINTER_SERIAL}"
        client.subscribe(topic)
        logger.info(f"Subscribed to {topic}")
|
92 |
+
|
93 |
+
def on_message(client, userdata, message):
    """MQTT message callback: route image/status payloads into shared state.

    Image messages update latest_data and (when a db_manager is attached to
    the demo app) match the image to the active PrintJob. Status messages
    only refresh latest_data; the UI reads it via update_printer_status.
    Runs on the paho network thread, not the Gradio thread.
    """
    try:
        data = json.loads(message.payload)

        if "image" in message.topic:
            # Handle a new image URL from the camera pipeline.
            image_url = data.get("image_url")
            if image_url:
                latest_data.update({
                    "current_image_url": image_url,
                    "image_timestamp": data.get("timestamp"),
                    "square_id": data.get("square_id")
                })
                # Update interface display.
                # NOTE(review): calling .update() on a component from this
                # MQTT thread does not push a change to connected browsers
                # in Gradio — the UI is actually refreshed via refresh_btn.
                # Confirm whether this call is intentional.
                s3_image.update(value=image_url)

                # Record to database.
                # NOTE(review): demo.db_manager is never assigned in this
                # file, so this branch only runs if it is attached elsewhere.
                if hasattr(demo, 'db_manager'):
                    job = demo.db_manager.session.query(PrintJob).filter_by(
                        square_id=data.get("square_id"),
                        status="printing"
                    ).first()
                    if job:
                        # Timestamp format must match the publisher's;
                        # strptime raises on mismatch (caught below).
                        job.match_image(
                            image_url,
                            datetime.strptime(data.get("timestamp"), "%Y-%m-%d %H:%M:%S")
                        )
                        demo.db_manager.session.commit()

        elif "status" in message.topic:
            # Update printer status snapshot used by the status panes.
            latest_data.update({
                "status": data.get("status"),
                "print_progress": data.get("progress", 0),
                "current_square_id": data.get("square_id"),
                "print_timestamp": data.get("timestamp"),
                "update_time": time.strftime("%Y-%m-%d %H:%M:%S")
            })
    except Exception as e:
        # Broad catch keeps the MQTT loop alive on malformed payloads.
        logger.error(f"Error processing message: {e}")
|
134 |
+
|
135 |
+
def send_print_command(params):
    """Send print parameters to the Pi Zero via MQTT.

    Args:
        params: dict carrying an optional "square_id" plus the numeric print
            parameters: bed_temp, nozzle_temp, print_speed, layer_height,
            flow_rate, retraction_distance, fan_speed.

    Returns:
        A human-readable status string for display in the UI.
    """
    # create_client() returns None when the broker is unreachable; publishing
    # on None would raise AttributeError, so fail gracefully instead.
    if mqtt_client is None:
        logger.error("Cannot send print command: MQTT client not connected")
        return "Print command failed: MQTT not connected"

    command = {
        "action": "print",
        "timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
        "square_id": params.get("square_id"),  # get from position_manager
        "parameters": {
            "bed_temp": float(params["bed_temp"]),
            "nozzle_temp": float(params["nozzle_temp"]),
            "print_speed": float(params["print_speed"]),
            "layer_height": float(params["layer_height"]),
            "flow_rate": float(params["flow_rate"]),
            "retraction_distance": float(params["retraction_distance"]),
            "fan_speed": float(params["fan_speed"])
        }
    }

    command_topic = f"bambu_a1_mini/command/{PRINTER_SERIAL}"
    mqtt_client.publish(command_topic, json.dumps(command))
    return "Print command sent successfully"
|
155 |
+
|
156 |
+
# Initialize MQTT client
|
157 |
+
mqtt_client = create_client(HOST, PORT, USERNAME, PASSWORD)
|
158 |
+
|
159 |
+
def update_printer_status():
    """Build the two status strings shown in the UI.

    Returns:
        Tuple of (printer status text, current job text). When the MQTT
        client never connected, both are short error strings.
    """
    if mqtt_client is None:
        return "MQTT not connected", "No connection"

    # Overall printer state snapshot, one field per line.
    status_lines = [
        f"Status: {latest_data['status']}",
        f"Progress: {latest_data['print_progress']}%",
        f"Bed Temp: {latest_data['bed_temperature']}°C",
        f"Nozzle Temp: {latest_data['nozzle_temperature']}°C",
        f"Last Update: {latest_data['update_time']}",
    ]

    # Current job details; .get() because these keys only appear after the
    # first status/image message arrives.
    job_lines = [
        f"Square ID: {latest_data.get('current_square_id', 'N/A')}",
        f"Print Time: {latest_data.get('print_time', 'N/A')}",
        f"Image Status: {'Captured' if latest_data.get('current_image_url') else 'Waiting'}",
    ]

    return "\n".join(status_lines), "\n".join(job_lines)
|
179 |
+
|
180 |
+
def analyze_print(image,
                  nozzle_temp, print_speed, layer_height,
                  flow_rate, retraction_distance, fan_speed):
    """Analyze print quality and return evaluation results.

    Args:
        image: Captured print image (numpy array) or None.
        nozzle_temp, print_speed, layer_height, flow_rate,
        retraction_distance, fan_speed: Current slider values.

    Returns:
        9-tuple: (visualization mask, missing rate, excess rate,
        stringing rate, uniformity score, quality score, speed score,
        material score, total score); all-zero/None placeholder when no
        image was supplied.
    """
    if image is None:
        # Placeholder matching the shape of the success return.
        return None, 0, 0, 0, 0, 0, 0, 0, 0

    # Package current parameters for the optimizer.
    current_params = {
        'nozzle_temp': float(nozzle_temp),
        'print_speed': float(print_speed),
        'layer_height': float(layer_height),
        'flow_rate': float(flow_rate),
        'retraction_distance': float(retraction_distance),
        'fan_speed': float(fan_speed)
    }

    # Analyze print quality.
    # NOTE(review): DefectDetector.analyze_print is not among the detector
    # methods visible in this commit — confirm it exists and that its result
    # carries 'binary_mask' and 'quality_score'.
    analysis_results = detector.analyze_print(image)

    # Multi-objective optimization scores (speed/material/total).
    results = optimizer.evaluate_objectives(image, current_params)

    # Pixel-level quality metrics from the detector.
    metrics = detector.calculate_quality_metrics(image)

    return (
        analysis_results['binary_mask'],  # Visualization
        float(metrics['missing_rate']),
        float(metrics['excess_rate']),
        float(metrics['stringing_rate']),
        float(metrics['uniformity_score']),
        float(analysis_results['quality_score']),
        float(results['objectives']['speed']),
        float(results['objectives']['material']),
        float(results['objectives']['total'])
    )
|
217 |
+
|
218 |
+
# Gradio interface
|
219 |
+
# Gradio application: status panes, image panes, parameter sliders and the
# analyze/capture/refresh wiring. Built at import time; launched under the
# __main__ guard at the bottom.
with gr.Blocks(title="Bambu A1 Mini Print Analysis") as demo:
    gr.Markdown("# Bambu A1 Mini Print Quality Analysis")

    with gr.Row():
        # Printer status panes, refreshed manually via refresh_btn.
        printer_status = gr.Textbox(
            label="Printer Status",
            value="Initializing...",
            interactive=False
        )
        current_job_status = gr.Textbox(
            label="Current Print Job",
            value="No active print job",
            interactive=False
        )
        refresh_btn = gr.Button("Refresh Status")

    with gr.Row():
        # Latest camera frame from S3 (fed a URL; type="filepath" lets
        # Gradio fetch/display it) and the locally captured copy.
        s3_image = gr.Image(
            label="Latest S3 Image",
            type="filepath",  # use URL
            interactive=False
        )
        captured_image = gr.Image(
            label="Current Print Image",
            type="numpy"
        )

    with gr.Row():
        with gr.Column():
            # Print parameter inputs (bed temperature has no slider; a
            # default of 60 is injected in the analyze handler below).
            nozzle_temp = gr.Slider(minimum=180, maximum=250, step=1,
                                    value=200,
                                    label="Nozzle Temperature (°C)")
            print_speed = gr.Slider(minimum=20, maximum=150, step=1,
                                    value=60,
                                    label="Print Speed (mm/s)")
            layer_height = gr.Slider(minimum=0.1, maximum=0.4, step=0.01,
                                     value=0.2,
                                     label="Layer Height (mm)")
            flow_rate = gr.Slider(minimum=90, maximum=110, step=1,
                                  value=100,
                                  label="Flow Rate (%)")
            retraction_distance = gr.Slider(minimum=0, maximum=10, step=0.1,
                                            value=5,
                                            label="Retraction Distance (mm)")
            fan_speed = gr.Slider(minimum=0, maximum=100, step=1,
                                  value=100,
                                  label="Fan Speed (%)")

            capture_btn = gr.Button("Capture Image")
            analyze_btn = gr.Button("Analyze Print")

        with gr.Column():
            # Results visualization
            result_image = gr.Image(label="Analysis Result")

            # Quality metrics
            with gr.Row():
                missing_rate = gr.Number(label="Missing Rate", value=0.0)
                excess_rate = gr.Number(label="Excess Rate", value=0.0)
            with gr.Row():
                stringing_rate = gr.Number(label="Stringing Rate", value=0.0)
                uniformity_score = gr.Number(label="Uniformity Score", value=0.0)

            # Overall scores
            with gr.Row():
                quality_output = gr.Number(label="Print Quality Score")
                speed_output = gr.Number(label="Print Speed Score")
                material_output = gr.Number(label="Material Efficiency Score")
                total_output = gr.Number(label="Total Performance Score")

    # Connect inputs to outputs
    def capture_frame(img):
        """Capture the current image for analysis, downloading URLs to numpy."""
        if img is None:
            return None
        # Convert URL image to numpy array if needed.
        if isinstance(img, str):
            response = requests.get(img)
            img = Image.open(BytesIO(response.content))
            img = np.array(img)
        return img

    capture_btn.click(
        fn=capture_frame,
        inputs=[s3_image],
        outputs=[captured_image]
    )

    # NOTE(review): this lambda returns a 2-tuple (the 9-tuple from
    # analyze_print, and the string from send_print_command) while the
    # outputs list names 11 components — Gradio will not unpack these as
    # intended. Confirm the expected mapping before shipping.
    analyze_btn.click(
        fn=lambda img, *params: (
            analyze_print(img, *params),
            send_print_command(dict(
                nozzle_temp=params[0],  # nozzle_temp slider
                print_speed=params[1],  # print_speed slider
                layer_height=params[2],  # layer_height slider
                flow_rate=params[3],  # flow_rate slider
                retraction_distance=params[4],  # retraction_distance slider
                fan_speed=params[5],  # fan_speed slider
                bed_temp=60  # use default value, because there is no slider for bed_temp
            ))
        ),
        inputs=[
            captured_image,
            nozzle_temp, print_speed, layer_height,
            flow_rate, retraction_distance, fan_speed
        ],
        outputs=[
            result_image,
            missing_rate, excess_rate,
            stringing_rate, uniformity_score,
            quality_output, speed_output,
            material_output, total_output,
            printer_status, current_job_status
        ]
    )

    # Image refresh helper: hand the latest S3 URL back to the image pane.
    def refresh_images():
        """Return the most recent image URL, or None if nothing arrived yet."""
        image_url = latest_data.get("current_image_url")
        if image_url:
            return image_url
        return None

    # Connect refresh button: status panes + image in one click.
    # NOTE(review): update_printer_status() is called twice per click here;
    # harmless (it only reads globals) but one call would do.
    refresh_btn.click(
        fn=lambda: (
            update_printer_status()[0],
            update_printer_status()[1],
            refresh_images()
        ),
        outputs=[
            printer_status,
            current_job_status,
            s3_image
        ]
    )

    # Auto-refresh printer status.
    # NOTE(review): calling component.update(...) from this background thread
    # does not propagate to connected browsers in Gradio — only the manual
    # refresh button actually updates the UI. Confirm whether this loop is
    # meant to do anything beyond keeping latest_data warm.
    def auto_refresh():
        while True:
            time.sleep(5)  # Update every 5 seconds
            status, job_status = update_printer_status()
            printer_status.update(status)
            current_job_status.update(job_status)
            # Update image pane from the latest URL.
            image_url = refresh_images()
            if image_url:
                s3_image.update(image_url)

    threading.Thread(target=auto_refresh, daemon=True).start()

# Launch the app only when run as a script (not when imported).
if __name__ == "__main__":
    demo.launch()
|
core/.DS_Store
ADDED
Binary file (6.15 kB). View file
|
|
core/analysis/__init__.py
ADDED
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Bambu A1 Mini Print Analysis Core
|
3 |
+
|
4 |
+
This package provides image analysis and defect detection functionality
|
5 |
+
for the Bambu A1 Mini 3D printer.
|
6 |
+
|
7 |
+
Modules:
|
8 |
+
- image_processor: Basic image processing utilities
|
9 |
+
- defect_detector: Defect detection and analysis
|
10 |
+
- segmentation_model: Print area segmentation
|
11 |
+
"""
|
12 |
+
|
13 |
+
from .image_processor import ImageProcessor
|
14 |
+
from .segmentation_model import PrintQualitySegmentation
|
15 |
+
from .defect_detector import DefectDetector
|
16 |
+
|
17 |
+
__all__ = ['ImageProcessor', 'DefectDetector', 'PrintQualitySegmentation']
|
core/analysis/defect_detector.py
ADDED
@@ -0,0 +1,500 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
import cv2
|
3 |
+
from PIL import Image
|
4 |
+
from skimage import feature, filters, morphology
|
5 |
+
from sklearn.cluster import HDBSCAN
|
6 |
+
from skimage import measure, registration
|
7 |
+
from typing import Dict, List, Tuple
|
8 |
+
from .image_processor import ImageProcessor
|
9 |
+
from .segmentation_model import PrintQualitySegmentation
|
10 |
+
|
11 |
+
class DefectDetector:
|
12 |
+
"""Specialized detector for well plate / array printing quality analysis"""
|
13 |
+
|
14 |
+
    def __init__(self):
        """Initialise detector state, the ideal-print template and the segmentation model."""
        # Physical calibration values; remain None until calibration is done
        # elsewhere (not set anywhere in this file — TODO confirm).
        self.reference_square_size = None  # mm
        self.pixels_per_mm = None
        # Clustering parameters consumed by _detect_gaps when grouping edge
        # pixels into gap candidates.
        self.hdbscan_params = {
            'min_cluster_size': 5,
            'min_samples': 3,
            'cluster_selection_epsilon': 0.1
        }

        # Ideal 4-square template used as groundtruth for comparisons.
        self.groundtruth = self._create_groundtruth_template()

        # Learned segmentation model (defined in segmentation_model.py).
        self.segmentation_model = PrintQualitySegmentation()
|
27 |
+
|
28 |
+
    def _create_groundtruth_template(self) -> np.ndarray:
        """Create the ideal print template: four filled white squares on black.

        Returns:
            500x500 uint8 image; background 0, squares 255.
        """
        # 500x500 black background (zeros); squares are drawn white below.
        template = np.zeros((500, 500), dtype=np.uint8)

        # Draw 4 perfect squares at fixed positions
        square_size = 100
        positions = [
            (50, 50),
            (350, 50),
            (50, 350),
            (350, 350)
        ]

        for x, y in positions:
            # cv2.rectangle includes both corner points, so each filled
            # square actually covers 101x101 pixels.
            cv2.rectangle(template,
                          (x, y),
                          (x + square_size, y + square_size),
                          255,
                          -1)  # -1 means fill

        return template
|
50 |
+
|
51 |
+
    def detect_squares(self, image):
        """Detect and analyze printed squares in the array.

        Args:
            image: PIL.Image or numpy array of the print.
                   NOTE(review): assumed to be 3-channel RGB — cvtColor
                   raises on grayscale input; confirm callers.

        Returns:
            dict: Analysis results for each square (see _analyze_squares).
        """
        # Convert to numpy array if needed
        if isinstance(image, Image.Image):
            image = np.array(image)

        # Convert to grayscale
        gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

        # Adaptive thresholding copes with uneven lighting; INV makes the
        # darker print pixels white for contour finding.
        thresh = cv2.adaptiveThreshold(
            gray, 255,
            cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
            cv2.THRESH_BINARY_INV,
            21, 10
        )

        # Outer contours only; holes inside a square are ignored here.
        contours, _ = cv2.findContours(
            thresh,
            cv2.RETR_EXTERNAL,
            cv2.CHAIN_APPROX_SIMPLE
        )

        squares = []
        for contour in contours:
            # Filter by area and shape
            area = cv2.contourArea(contour)
            if area < 100:  # Minimum area threshold
                continue

            # A polygon approximation of a square contour should have
            # exactly 4 vertices.
            peri = cv2.arcLength(contour, True)
            approx = cv2.approxPolyDP(contour, 0.04 * peri, True)

            if len(approx) == 4:  # Square has 4 corners
                squares.append(contour)

        return self._analyze_squares(gray, squares)
|
97 |
+
|
98 |
+
    def _analyze_squares(self, gray: np.ndarray, squares: List) -> Dict:
        """Analyze detected squares for defects.

        Args:
            gray: Grayscale image the contours were found in.
            squares: OpenCV contours accepted as squares by detect_squares.

        Returns:
            dict with 'square_count' and a 'squares' list of per-square
            position, uniformity, gaps and coverage values.
        """
        results = []

        for square in squares:
            # Axis-aligned bounding box of the contour.
            x, y, w, h = cv2.boundingRect(square)

            # Extract square region
            roi = gray[y:y+h, x:x+w]

            # Analyze uniformity
            uniformity = self._analyze_uniformity(roi)

            # Detect gaps
            gaps = self._detect_gaps(roi)

            # Contour area relative to its bounding box: 1.0 for a perfectly
            # filled axis-aligned square, lower for ragged/rotated shapes.
            coverage = cv2.contourArea(square) / (w * h)

            results.append({
                'position': (x, y, w, h),
                'uniformity': uniformity,
                'gaps': gaps,
                'coverage': coverage
            })

        return {
            'square_count': len(squares),
            'squares': results
        }
|
129 |
+
|
130 |
+
    def _analyze_uniformity(self, roi: np.ndarray) -> float:
        """Analyze print uniformity in a region.

        Uses the mean local standard deviation (5x5 window) as a roughness
        measure; smoother regions score closer to 1.

        Args:
            roi: Grayscale region of interest (uint8 — required by
                filters.rank.std; TODO confirm callers never pass float).

        Returns:
            Uniformity score in [0, 1]; higher is more uniform.
        """
        # Per-pixel standard deviation over a 5x5 neighborhood.
        local_std = filters.rank.std(roi, morphology.square(5))

        # Normalize to [0, 1] and invert (higher is better).
        uniformity = 1 - (np.mean(local_std) / 255)
        return float(uniformity)
|
138 |
+
|
139 |
+
def _detect_gaps(self, roi: np.ndarray) -> List[Dict]:
|
140 |
+
"""Detect gaps in printed region"""
|
141 |
+
# Edge detection
|
142 |
+
edges = feature.canny(roi, sigma=2)
|
143 |
+
|
144 |
+
# Find connected components
|
145 |
+
labels = measure.label(edges)
|
146 |
+
|
147 |
+
# Get coordinates of edge pixels
|
148 |
+
coords = np.column_stack(np.where(edges > 0))
|
149 |
+
|
150 |
+
if len(coords) < self.hdbscan_params['min_cluster_size']:
|
151 |
+
return []
|
152 |
+
|
153 |
+
# HDBSCAN clustering
|
154 |
+
clusterer = HDBSCAN(
|
155 |
+
**self.hdbscan_params
|
156 |
+
)
|
157 |
+
cluster_labels = clusterer.fit_predict(coords)
|
158 |
+
|
159 |
+
# Analyze gap clusters
|
160 |
+
gap_clusters = []
|
161 |
+
for label in set(cluster_labels):
|
162 |
+
if label == -1:
|
163 |
+
continue
|
164 |
+
|
165 |
+
cluster_points = coords[cluster_labels == label]
|
166 |
+
|
167 |
+
# Calculate cluster properties
|
168 |
+
min_x, min_y = np.min(cluster_points, axis=0)
|
169 |
+
max_x, max_y = np.max(cluster_points, axis=0)
|
170 |
+
area = len(cluster_points)
|
171 |
+
|
172 |
+
if area > 10: # Minimum gap size
|
173 |
+
gap_clusters.append({
|
174 |
+
"location": [int(min_x), int(min_y),
|
175 |
+
int(max_x - min_x), int(max_y - min_y)],
|
176 |
+
"area": int(area)
|
177 |
+
})
|
178 |
+
|
179 |
+
return gap_clusters
|
180 |
+
|
181 |
+
def _calculate_presence_score(self, region_props: List) -> float:
|
182 |
+
"""Calculate filament presence score based on region properties"""
|
183 |
+
if not region_props:
|
184 |
+
return 0.0
|
185 |
+
|
186 |
+
# Consider region sizes and shapes
|
187 |
+
scores = []
|
188 |
+
for prop in region_props:
|
189 |
+
area = prop.area
|
190 |
+
perimeter = prop.perimeter
|
191 |
+
circularity = 4 * np.pi * area / (perimeter * perimeter) if perimeter > 0 else 0
|
192 |
+
|
193 |
+
# Higher score for more circular regions
|
194 |
+
score = circularity * np.sqrt(area)
|
195 |
+
scores.append(score)
|
196 |
+
|
197 |
+
return min(1.0, np.mean(scores) / 100)
|
198 |
+
|
199 |
+
def _calculate_uniformity(self, img_gray: np.ndarray, binary: np.ndarray) -> float:
|
200 |
+
"""Calculate filament uniformity score"""
|
201 |
+
# Only consider regions with filament
|
202 |
+
filament_intensities = img_gray[binary]
|
203 |
+
if len(filament_intensities) == 0:
|
204 |
+
return 0.0
|
205 |
+
|
206 |
+
# Calculate intensity statistics
|
207 |
+
std_dev = np.std(filament_intensities)
|
208 |
+
mean_intensity = np.mean(filament_intensities)
|
209 |
+
|
210 |
+
# Calculate uniformity score (inverse of coefficient of variation)
|
211 |
+
uniformity = 1.0 - (std_dev / mean_intensity if mean_intensity > 0 else 1.0)
|
212 |
+
return max(0.0, uniformity)
|
213 |
+
|
214 |
+
def calculate_quality_metrics(self, image, expected_mask=None) -> Dict:
|
215 |
+
"""Calculate quantitative metrics for print quality
|
216 |
+
|
217 |
+
Args:
|
218 |
+
image: Print image
|
219 |
+
expected_mask: Expected print area mask (optional)
|
220 |
+
|
221 |
+
Returns:
|
222 |
+
dict: Dictionary containing quality metrics
|
223 |
+
"""
|
224 |
+
# Convert image format
|
225 |
+
if isinstance(image, Image.Image):
|
226 |
+
image = np.array(image)
|
227 |
+
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
|
228 |
+
|
229 |
+
# If no expected mask provided, generate using square detection
|
230 |
+
if expected_mask is None:
|
231 |
+
squares = self.detect_squares(image)
|
232 |
+
expected_mask = np.zeros_like(gray, dtype=np.uint8)
|
233 |
+
for square in squares['squares']:
|
234 |
+
x, y, w, h = square['position']
|
235 |
+
expected_mask[y:y+h, x:x+w] = 255
|
236 |
+
|
237 |
+
# 1. Calculate missing rate (Missing Rate)
|
238 |
+
_, binary = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
|
239 |
+
missing_area = np.logical_and(binary == 0, expected_mask == 255)
|
240 |
+
missing_rate = np.sum(missing_area) / np.sum(expected_mask == 255)
|
241 |
+
|
242 |
+
# 2. Calculate excess rate (Excess Rate)
|
243 |
+
excess_area = np.logical_and(binary == 255, expected_mask == 0)
|
244 |
+
excess_rate = np.sum(excess_area) / np.sum(expected_mask == 255)
|
245 |
+
|
246 |
+
# 3. Detect stringing (Stringing)
|
247 |
+
stringing_score = self._detect_stringing_rate(gray, expected_mask)
|
248 |
+
|
249 |
+
# 4. Calculate uniformity (Uniformity)
|
250 |
+
uniformity_score = self._calculate_uniformity_score(gray, binary)
|
251 |
+
|
252 |
+
return {
|
253 |
+
"missing_rate": float(missing_rate),
|
254 |
+
"excess_rate": float(excess_rate),
|
255 |
+
"stringing_rate": float(stringing_score),
|
256 |
+
"uniformity_score": float(uniformity_score)
|
257 |
+
}
|
258 |
+
|
259 |
+
def _detect_stringing_rate(self, gray: np.ndarray, mask: np.ndarray) -> float:
|
260 |
+
"""Detect stringing and calculate rate"""
|
261 |
+
# Use morphological operations to detect long structures
|
262 |
+
kernel = np.ones((3,3), np.uint8)
|
263 |
+
dilated = cv2.dilate(gray, kernel, iterations=1)
|
264 |
+
eroded = cv2.erode(gray, kernel, iterations=1)
|
265 |
+
strings = cv2.subtract(dilated, eroded)
|
266 |
+
|
267 |
+
# Only consider strings outside non-print area
|
268 |
+
strings_outside = np.logical_and(strings > 50, mask == 0)
|
269 |
+
|
270 |
+
# Calculate stringing rate
|
271 |
+
string_rate = np.sum(strings_outside) / np.sum(mask == 255)
|
272 |
+
return min(1.0, string_rate)
|
273 |
+
|
274 |
+
    def _calculate_uniformity_score(self, gray: np.ndarray, binary: np.ndarray) -> float:
        """Score uniformity of the printed area as 1 - mean local std-dev.

        Args:
            gray: Grayscale image (uint8 — required by filters.rank.std).
            binary: Binary mask where printed pixels equal 255.

        Returns:
            Uniformity in [0, 1]; 0.0 when nothing is marked printed.
        """
        # Only analyze the printed area; bail out early if it is empty.
        print_area = gray[binary == 255]
        if len(print_area) == 0:
            return 0.0

        # Per-pixel standard deviation over a 5x5 window (whole image),
        # then restricted to printed pixels and normalised to [0, 1].
        local_std = filters.rank.std(gray, morphology.square(5))
        local_std_normalized = local_std[binary == 255] / 255.0

        # Convert to uniformity score (1 - mean of standard deviation),
        # clamped to be non-negative.
        uniformity = 1.0 - np.mean(local_std_normalized)
        return max(0.0, uniformity)
|
288 |
+
|
289 |
+
    def _calculate_pixel_comparison(self, image: np.ndarray, groundtruth: np.ndarray) -> Dict:
        """Calculate pixel-level comparison metrics against the template.

        Args:
            image: Actual print image (any size; resized to 500x500).
            groundtruth: Ideal template image (500x500, values 0/255).

        Returns:
            dict: accuracy, precision, recall, F1 and the raw confusion
            counts, treating pixel value 255 as "printed".
        """
        # Ensure the image matches the template's size.
        image = cv2.resize(image, (500, 500))
        if len(image.shape) == 3:
            image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

        # Binarize so comparison is strictly 0-vs-255.
        _, binary = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY)

        # Fraction of pixels where the print matches the template.
        matching_pixels = np.sum(binary == groundtruth)
        total_pixels = binary.size
        pixel_accuracy = matching_pixels / total_pixels

        # Confusion-matrix counts (positive = printed, i.e. 255).
        true_positives = np.sum(np.logical_and(binary == 255, groundtruth == 255))
        false_positives = np.sum(np.logical_and(binary == 255, groundtruth == 0))
        false_negatives = np.sum(np.logical_and(binary == 0, groundtruth == 255))
        true_negatives = np.sum(np.logical_and(binary == 0, groundtruth == 0))

        # Precision and recall, guarded against empty denominators.
        precision = true_positives / (true_positives + false_positives) if (true_positives + false_positives) > 0 else 0
        recall = true_positives / (true_positives + false_negatives) if (true_positives + false_negatives) > 0 else 0

        # F1: harmonic mean of precision and recall.
        f1_score = 2 * (precision * recall) / (precision + recall) if (precision + recall) > 0 else 0

        return {
            "pixel_accuracy": float(pixel_accuracy),
            "precision": float(precision),
            "recall": float(recall),
            "f1_score": float(f1_score),
            "true_positives": int(true_positives),
            "false_positives": int(false_positives),
            "false_negatives": int(false_negatives),
            "true_negatives": int(true_negatives)
        }
|
335 |
+
|
336 |
+
def segment_print_area(self, image: np.ndarray) -> np.ndarray:
    """Segment the printed region from the background.

    Args:
        image: Input image (RGB or already grayscale).

    Returns:
        np.ndarray: Binary mask with the print area as foreground (255).
    """
    # Collapse to a single channel when an RGB image is supplied.
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) if len(image.shape) == 3 else image

    # Adaptive (Gaussian-weighted) thresholding copes with uneven lighting
    # across the print bed better than a single global cutoff.
    mask = cv2.adaptiveThreshold(
        gray,
        255,
        cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
        cv2.THRESH_BINARY_INV,
        21,  # neighborhood size
        10,  # constant subtracted from the weighted mean
    )

    # Open then close with a small kernel: drop speckle noise, seal pinholes.
    struct_elem = np.ones((3, 3), np.uint8)
    for morph_op in (cv2.MORPH_OPEN, cv2.MORPH_CLOSE):
        mask = cv2.morphologyEx(mask, morph_op, struct_elem)

    return mask
|
359 |
+
|
360 |
+
def analyze_print_quality(self, image: np.ndarray) -> Tuple[np.ndarray, np.ndarray, Dict]:
    """Analyze print quality and return visualization results.

    Args:
        image: Print image.

    Returns:
        Tuple[np.ndarray, np.ndarray, Dict]:
            - groundtruth image converted to RGB for display
            - defect analysis visualization image
            - quality metrics dictionary (region + pixel-level)
    """
    # Normalize to the fixed working resolution the template uses.
    frame = cv2.resize(image, (500, 500))

    # Isolate the printed region as a binary mask.
    print_mask = self.segment_print_area(frame)

    # Region-level quality metrics against the ideal template.
    metrics = self.calculate_quality_metrics(print_mask, self.groundtruth)

    # Augment with pixel-level comparison metrics.
    metrics["pixel_comparison"] = self._calculate_pixel_comparison(print_mask, self.groundtruth)

    # Color-coded defect overlay for display.
    defect_vis = self._generate_defect_visualization(print_mask, self.groundtruth, metrics)

    # The template is stored grayscale; expand it to RGB for display.
    groundtruth_rgb = cv2.cvtColor(self.groundtruth, cv2.COLOR_GRAY2RGB)
    return groundtruth_rgb, defect_vis, metrics
|
392 |
+
|
393 |
+
def _generate_defect_visualization(self, binary: np.ndarray,
                                   groundtruth: np.ndarray,
                                   metrics: Dict) -> np.ndarray:
    """Render a color-coded defect map of the segmentation vs. the template.

    Green = correctly printed, red = missing material, yellow = excess
    material. Detected defect clusters get cyan bounding boxes with labels,
    and summary metrics are written in the top-left corner.
    """
    canvas = np.zeros((500, 500, 3), dtype=np.uint8)

    # Paint the three pixel categories: (predicted test, template test, color).
    overlays = [
        (binary == 255, groundtruth == 255, [0, 255, 0]),    # correct (green)
        (binary == 0,   groundtruth == 255, [255, 0, 0]),    # missing (red)
        (binary == 255, groundtruth == 0,   [255, 255, 0]),  # excess (yellow)
    ]
    regions = [np.logical_and(pred, truth) for pred, truth, _ in overlays]
    for region, (_, _, color) in zip(regions, overlays):
        canvas[region] = color

    # Cluster the union of missing + excess pixels into discrete defects.
    clusters = self._detect_defect_clusters(np.logical_or(regions[1], regions[2]))

    for idx, cluster in enumerate(clusters):
        # Cyan bounding box around each defect cluster.
        box = cluster['bbox']
        cv2.rectangle(
            canvas,
            (int(box['min_x']), int(box['min_y'])),
            (int(box['max_x']), int(box['max_y'])),
            (0, 255, 255),  # cyan border
            2
        )

        # Label with defect index and pixel count at the centroid.
        # centroid is stored (row, col); swap for cv2's (x, y) convention.
        cv2.putText(canvas, f"D{idx+1}: {cluster['size']}px",
                    (int(cluster['centroid'][1]), int(cluster['centroid'][0])),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)

    # Summary metrics, one text line per metric at y = 30, 60, 90, ...
    summary_lines = [
        f"Missing: {metrics['missing_rate']:.2f}",
        f"Excess: {metrics['excess_rate']:.2f}",
        f"String: {metrics['stringing_rate']:.2f}",
        f"Uniformity: {metrics['uniformity_score']:.2f}",
        f"Defect Clusters: {len(clusters)}",
    ]
    for row, text in enumerate(summary_lines):
        cv2.putText(canvas, text, (10, 30 * (row + 1)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)

    return canvas
|
443 |
+
|
444 |
+
def _detect_defect_clusters(self, binary: np.ndarray) -> List[Dict]:
    """Use HDBSCAN to detect defect clusters.

    Args:
        binary: Binary defect image (nonzero pixels are defect candidates).

    Returns:
        List[Dict]: One entry per cluster with 'centroid' (row, col),
        'size' (pixel count), 'bbox' (min/max x and y in pixel units),
        and 'points' (raw (row, col) coordinates of the cluster members).
    """
    # Get defect pixel coordinates as (row, col) pairs.
    defect_coords = np.column_stack(np.where(binary > 0))

    # Fewer candidate pixels than the minimum cluster size can never form
    # a cluster — bail out early without running the clusterer.
    if len(defect_coords) < self.hdbscan_params['min_cluster_size']:
        return []

    # Use HDBSCAN for clustering.
    # NOTE(review): HDBSCAN and self.hdbscan_params are defined outside this
    # chunk — confirm the parameter keys match where they are initialized.
    clusterer = HDBSCAN(
        min_cluster_size=self.hdbscan_params['min_cluster_size'],
        min_samples=self.hdbscan_params['min_samples'],
        cluster_selection_epsilon=self.hdbscan_params['cluster_selection_epsilon']
    )

    cluster_labels = clusterer.fit_predict(defect_coords)

    # Analyze each cluster.
    defect_clusters = []
    for label in set(cluster_labels):
        if label == -1:  # Noise points: HDBSCAN marks outliers with -1.
            continue

        # Get all points in this cluster.
        cluster_points = defect_coords[cluster_labels == label]

        # Calculate cluster statistics. Coordinates are (row, col), so
        # x values come from column 1 and y values from column 0.
        centroid = np.mean(cluster_points, axis=0)
        size = len(cluster_points)
        bbox = {
            'min_x': np.min(cluster_points[:, 1]),
            'max_x': np.max(cluster_points[:, 1]),
            'min_y': np.min(cluster_points[:, 0]),
            'max_y': np.max(cluster_points[:, 0])
        }

        defect_clusters.append({
            'centroid': centroid,
            'size': size,
            'bbox': bbox,
            'points': cluster_points
        })

    return defect_clusters
|
495 |
+
|
496 |
+
def analyze_print(self, image):
    """Analyze a print image and return quality metrics.

    Args:
        image: Print image (any format accepted by the segmentation model).

    Returns:
        dict: Quality analysis results from the segmentation model
        (quality score, defect list, binary mask, etc.).
    """
    # analyze_quality() performs its own segmentation internally, so the
    # previous separate segment_print_area() call here produced a result
    # that was never used — it has been removed as redundant work.
    return self.segmentation_model.analyze_quality(image)
|
core/analysis/image_processor.py
ADDED
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import cv2
|
2 |
+
import numpy as np
|
3 |
+
from typing import Tuple, Dict, Optional
|
4 |
+
from PIL import Image
|
5 |
+
|
6 |
+
class ImageProcessor:
    """Basic image processing utilities for print analysis."""

    @staticmethod
    def preprocess_image(image) -> np.ndarray:
        """Normalize input to a grayscale numpy array.

        Args:
            image: PIL.Image or numpy array.

        Returns:
            np.ndarray: Single-channel image ready for analysis.
        """
        arr = np.array(image) if isinstance(image, Image.Image) else image
        # Multi-channel input is treated as RGB and collapsed to gray.
        if len(arr.shape) == 3:
            arr = cv2.cvtColor(arr, cv2.COLOR_RGB2GRAY)
        return arr

    @staticmethod
    def enhance_contrast(image: np.ndarray) -> np.ndarray:
        """Boost local contrast via CLAHE (adaptive histogram equalization)."""
        equalizer = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
        return equalizer.apply(image)

    @staticmethod
    def remove_noise(image: np.ndarray, kernel_size: int = 3) -> np.ndarray:
        """Suppress salt-and-pepper noise with a median filter."""
        return cv2.medianBlur(image, kernel_size)

    @staticmethod
    def detect_edges(image: np.ndarray, sigma: float = 2.0) -> np.ndarray:
        """Detect edges with the Canny detector.

        NOTE(review): ``sigma`` is currently unused — cv2.Canny runs with
        fixed hysteresis thresholds (100, 200) here. Confirm whether the
        parameter was meant to drive a Gaussian pre-blur before removing
        or wiring it up.
        """
        return cv2.Canny(image, 100, 200)
|
core/analysis/segmentation_model.py
ADDED
@@ -0,0 +1,1129 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
import cv2
|
3 |
+
from PIL import Image
|
4 |
+
from skimage import feature, filters, morphology
|
5 |
+
from datetime import datetime
|
6 |
+
from typing import Tuple, Dict
|
7 |
+
|
8 |
+
def initialize_segmentation_model():
    """Initialize a SegFormer semantic-segmentation model.

    Returns:
        tuple: (model, device) with the model already moved to GPU when one
        is available, otherwise CPU.

    NOTE(review): SegformerForSemanticSegmentation (transformers) and torch
    are not imported in this module's visible import block, so this function
    raises NameError as written — the missing imports need to be added.
    """
    model = SegformerForSemanticSegmentation.from_pretrained(
        "nvidia/mit-b0",
        num_labels=3,  # defect, normal, background
        ignore_mismatched_sizes=True
    )
    # Prefer GPU when present; fall back to CPU.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    return model, device
|
18 |
+
|
19 |
+
def process_image(image: Image.Image, model, device):
    """Run one image through the segmentation model.

    Args:
        image: Input PIL image (RGB).
        model: Model returned by initialize_segmentation_model().
        device: torch.device the model lives on.

    Returns:
        np.ndarray: Per-pixel class-index map of shape (512, 512).

    NOTE(review): `transforms` (torchvision) and `torch` are not imported in
    this module's visible import block — confirm and add the missing imports.
    """
    # Transform image: resize to the model input size, then normalize with
    # ImageNet channel statistics (standard for pretrained backbones).
    transform = transforms.Compose([
        transforms.Resize((512, 512)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])

    input_tensor = transform(image).unsqueeze(0).to(device)

    # Get predictions without tracking gradients (inference only).
    with torch.no_grad():
        outputs = model(input_tensor)
        predictions = outputs.logits.argmax(dim=1)

    return predictions.cpu().numpy()[0]
|
37 |
+
|
38 |
+
def analyze_print_quality(predictions: np.ndarray) -> dict:
    """Analyze print quality from segmentation predictions.

    Args:
        predictions: Per-pixel class map where 0 = defect, 1 = normal
            (any other value counts as background).

    Returns:
        dict: 'quality_score', 'defect_ratio', 'normal_ratio', each in [0, 1].
    """
    total_pixels = predictions.size
    # Guard against an empty prediction map: the original divided by
    # total_pixels unconditionally and raised ZeroDivisionError here.
    if total_pixels == 0:
        return {'quality_score': 0.0, 'defect_ratio': 0.0, 'normal_ratio': 0.0}

    defect_pixels = (predictions == 0).sum().item()
    normal_pixels = (predictions == 1).sum().item()

    return {
        'quality_score': normal_pixels / total_pixels,
        'defect_ratio': defect_pixels / total_pixels,
        'normal_ratio': normal_pixels / total_pixels
    }
|
49 |
+
|
50 |
+
class PrintQualitySegmentation:
|
51 |
+
"""Independent segmentation model for print quality analysis"""
|
52 |
+
|
53 |
+
def __init__(self):
    # Tunable knobs for the classical segmentation pipeline:
    #   threshold   — global binarization cutoff (0-255)
    #   kernel_size — side length of the morphological cleanup kernel
    #   min_area    — contours smaller than this are discarded as noise
    self.model_params = dict(threshold=127, kernel_size=3, min_area=100)
|
59 |
+
|
60 |
+
def segment_print_area(self, image):
    """Separate the printed region (foreground) from the bed background.

    Args:
        image: PIL.Image or numpy array, RGB or grayscale.

    Returns:
        np.ndarray: Binary mask (255 = print area) after noise cleanup.
    """
    frame = np.array(image) if isinstance(image, Image.Image) else image
    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY) if len(frame.shape) == 3 else frame

    # Global fixed-threshold binarization using the configured cutoff.
    _, mask = cv2.threshold(
        gray,
        self.model_params['threshold'],
        255,
        cv2.THRESH_BINARY
    )

    # Morphological opening removes isolated speckles.
    k = self.model_params['kernel_size']
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, np.ones((k, k), np.uint8))

    return mask
|
89 |
+
|
90 |
+
def detect_defects(self, image, binary_mask):
    """Detect defect regions inside the segmented print area.

    Args:
        image: Source image (kept in the signature for API compatibility;
            not needed by the contour-based detection below).
        binary_mask: Binary segmentation mask from segment_print_area().

    Returns:
        list[dict]: One entry per defect with 'position' (x, y, w, h)
        bounding box and 'area' in pixels.
    """
    # The previous feature.canny(image, sigma=2) call computed an edge map
    # that was never used — and would raise on RGB input (canny needs a
    # 2-D image). It has been removed; detection relies on contours of the
    # binary mask alone.
    contours, _ = cv2.findContours(
        binary_mask,
        cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE
    )

    defects = []
    for contour in contours:
        area = cv2.contourArea(contour)
        # Skip tiny contours below the configured noise floor.
        if area < self.model_params['min_area']:
            continue

        x, y, w, h = cv2.boundingRect(contour)
        defects.append({
            'position': (x, y, w, h),
            'area': area
        })

    return defects
|
115 |
+
|
116 |
+
def analyze_quality(self, image) -> Dict:
    """Compute overall quality metrics for a print image.

    Returns:
        Dict: quality score (1 - defective fraction of printed area),
        defect count, total/defect areas, the binary mask, and the raw
        defect list.
    """
    mask = self.segment_print_area(image)
    found = self.detect_defects(image, mask)

    printed_area = np.sum(mask > 0)
    flawed_area = sum(item['area'] for item in found)

    # Score is the non-defective fraction of the printed area; an empty
    # mask scores 1.0 (nothing printed, nothing measurably wrong).
    if printed_area > 0:
        score = 1.0 - flawed_area / printed_area
    else:
        score = 1.0

    return {
        'quality_score': score,
        'defect_count': len(found),
        'total_area': printed_area,
        'defect_area': flawed_area,
        'binary_mask': mask,
        'defects': found
    }
|
133 |
+
|
134 |
+
def calibrate_coordinates(self, gcode_file: str, image: np.ndarray):
    """Calibrate coordinate system using G-code and actual image.

    Establishes self.coordinate_mapping (printer coords -> pixel coords).
    Silently leaves any existing mapping unchanged when fewer than two
    point pairs are available on either side.

    Args:
        gcode_file: G-code file path.
        image: Print plate image.
    """
    # Get printer coordinates from G-code.
    # NOTE(review): self.gcode_analyzer and self._detect_print_positions are
    # not defined in the visible portion of this class — confirm they are
    # initialized elsewhere before this method is called.
    printer_coords = self.gcode_analyzer.get_print_coordinates(gcode_file)

    # Detect actual print positions in image.
    detected_positions = self._detect_print_positions(image)

    # Calculate coordinate mapping.
    if len(printer_coords) >= 2 and len(detected_positions) >= 2:
        # Use at least two points to establish mapping.
        self.coordinate_mapping = self._calculate_mapping(
            printer_coords[:2],  # Printer coordinates
            detected_positions[:2]  # Image pixel coordinates
        )
|
154 |
+
|
155 |
+
def _calculate_mapping(self, printer_coords, pixel_coords):
    """Calculate coordinate mapping.

    Use affine transformation to calculate mapping matrix:
    [x_pixel] = [a b c] [x_printer]
    [y_pixel] = [d e f] [y_printer]
    [1      ]   [0 0 1] [1        ]

    NOTE(review): cv2.findHomography requires at least 4 point pairs and
    expects 2-D (x, y) points; the caller passes only 2 pairs and this
    builds 3-component homogeneous points. As written this call is likely
    to fail or misbehave — verify against the OpenCV documentation
    (cv2.getAffineTransform with 3 pairs, or cv2.estimateAffinePartial2D,
    may be the intended API).
    """
    # Build transformation matrix
    src_pts = np.float32([[x, y, 1] for x, y in printer_coords])
    dst_pts = np.float32([[x, y, 1] for x, y in pixel_coords])

    # Calculate transformation matrix
    transform_matrix, _ = cv2.findHomography(src_pts, dst_pts)
    return transform_matrix
|
170 |
+
|
171 |
+
def printer_to_pixel(self, printer_coord: tuple) -> tuple:
    """Convert printer (bed) coordinates to image pixel coordinates.

    Args:
        printer_coord: (x, y) position in printer coordinates.

    Returns:
        tuple: (x, y) integer pixel position.

    Raises:
        ValueError: If calibrate_coordinates() has not been run yet.
    """
    if self.coordinate_mapping is None:
        raise ValueError("Coordinate mapping not calibrated")

    x, y = printer_coord
    point = np.array([x, y, 1])
    pixel = np.dot(self.coordinate_mapping, point)
    # The mapping is produced by cv2.findHomography, so the result is
    # homogeneous: divide by w. (For a purely affine mapping w == 1 and
    # this is a no-op; the previous code skipped the division, which is
    # wrong for general homographies.)
    w = pixel[2] if pixel[2] != 0 else 1.0
    return (int(pixel[0] / w), int(pixel[1] / w))
|
180 |
+
|
181 |
+
def analyze_print_quality(self, image: np.ndarray, print_position: tuple = None) -> Tuple[np.ndarray, np.ndarray, Dict]:
    """Analyze print quality, incorporating temporal information.

    Args:
        image: Current complete print bed image.
        print_position: Current print position (x, y) in printer
            coordinates, or None when unknown.

    Returns:
        groundtruth: Ideal template image.
        defect_vis: Defect visualization image.
        metrics: Quality metrics.
    """
    # 1. Temporal segmentation: mask of the region currently being printed.
    current_mask = self._get_current_print_mask(image, print_position)

    # 2. Use SegFormer for semantic segmentation.
    # NOTE(review): current_mask holds 0/255 values, so `image * current_mask`
    # scales (and likely overflows) pixel intensities rather than gating
    # them — confirm whether a 0/1 mask was intended here.
    segmentation_result = self._segment_with_model(image * current_mask)

    # 3. Combine temporal information and semantic segmentation results.
    combined_mask = self._combine_masks(current_mask, segmentation_result)

    # 4. Calculate quality metrics.
    # NOTE(review): _segment_with_model, _calculate_metrics,
    # _generate_visualization and self.groundtruth are not defined in the
    # visible part of this module — verify they exist elsewhere.
    metrics = self._calculate_metrics(combined_mask)

    # 5. Generate visualization results.
    defect_vis = self._generate_visualization(image, combined_mask, metrics)

    # 6. Update history.
    self._update_history(print_position, combined_mask)

    return self.groundtruth, defect_vis, metrics
|
212 |
+
|
213 |
+
def _get_current_print_mask(self, image: np.ndarray, position: tuple) -> np.ndarray:
    """Build a binary mask covering the region currently being printed.

    Args:
        image: Current full print-bed image (H, W, C).
        position: Current print head position in printer coordinates,
            or None when the position is unknown.

    Returns:
        np.ndarray: uint8 mask, 255 inside the active region, 0 elsewhere.
        When position is None the mask stays empty.
    """
    mask = np.zeros_like(image[:,:,0], dtype=np.uint8)

    if position is not None:
        # Convert printer coordinates to pixel coordinates and mark a
        # fixed 100x100 px block as the active print region.
        # TODO: derive the block size from printer calibration instead.
        x, y = self.printer_to_pixel(position)
        cv2.rectangle(mask,
                      (x, y),
                      (x + 100, y + 100),
                      255, -1)
    # else: no position available — leave the mask empty. (The previous
    # code had an `if position is not None` branch inside this `else`,
    # which could never execute; that dead code has been removed.)

    self.previous_image = image.copy()
    return mask
|
234 |
+
|
235 |
+
def _create_position_mask(self, position: tuple, shape: tuple) -> np.ndarray:
    """Build a mask with a filled block at the given print position.

    Assumes each print block covers 100x100 pixels.
    TODO: adjust the block size to real printer parameters.
    """
    blank = np.zeros(shape, dtype=np.uint8)
    x, y = position
    top_left = (int(x), int(y))
    bottom_right = (int(x + 100), int(y + 100))
    cv2.rectangle(blank, top_left, bottom_right, 255, -1)
    return blank
|
246 |
+
|
247 |
+
def _verify_position(self, contour: np.ndarray, expected_position: tuple) -> bool:
    """Check that a detected contour sits near the expected position.

    Returns True when the contour's centroid lies within 50 px
    (Euclidean distance) of expected_position.
    """
    exp_x, exp_y = expected_position
    centroid = np.mean(contour.reshape(-1, 2), axis=0)
    dx = centroid[0] - exp_x
    dy = centroid[1] - exp_y
    offset = np.sqrt(dx**2 + dy**2)
    return offset < 50  # tolerance: 50 pixels
|
253 |
+
|
254 |
+
def _combine_masks(self, time_mask: np.ndarray, seg_mask: np.ndarray) -> np.ndarray:
    """Restrict the semantic segmentation to the temporally-active region.

    Pixels where time_mask is zero are suppressed; everywhere else the
    seg_mask values pass through unchanged.
    """
    active = time_mask > 0
    return seg_mask * active
|
259 |
+
|
260 |
+
def _update_history(self, position: tuple, mask: np.ndarray):
    """Record the current print step (position plus a mask snapshot).

    No-op when the position is unknown (None).
    """
    if position is None:
        return
    entry = {
        'position': position,
        'mask': mask.copy(),  # snapshot — the caller may keep mutating mask
        'timestamp': datetime.now()
    }
    self.print_history.append(entry)
|
268 |
+
|
269 |
+
# Extended printing parameter recommendation rules
|
270 |
+
self.parameter_rules = {
|
271 |
+
'over_extrusion': {
|
272 |
+
'severe': {
|
273 |
+
'extrusion_multi': (-0.1, "Reduce extrusion multiplier by 10%"),
|
274 |
+
'nozzle_temp': (-10, "Lower nozzle temperature by 10°C to reduce material flow"),
|
275 |
+
'print_speed': (+5, "Slightly increase speed to reduce material per unit length"),
|
276 |
+
'fan_speed': (+10, "Increase fan speed for better cooling"),
|
277 |
+
'acceleration': (-500, "Reduce acceleration to minimize extrusion fluctuation"),
|
278 |
+
'jerk': (-2, "Lower jerk to reduce extrusion instability"),
|
279 |
+
'priority': ['extrusion_multi', 'nozzle_temp', 'fan_speed', 'print_speed', 'acceleration', 'jerk'],
|
280 |
+
'reason': "Severe over-extrusion, comprehensive adjustment of extrusion and motion parameters"
|
281 |
+
},
|
282 |
+
'moderate': {
|
283 |
+
'extrusion_multi': (-0.05, "Reduce extrusion multiplier by 5%"),
|
284 |
+
'nozzle_temp': (-5, "Slightly lower nozzle temperature by 5°C"),
|
285 |
+
'fan_speed': (+5, "Slightly increase fan speed"),
|
286 |
+
'priority': ['extrusion_multi', 'nozzle_temp', 'fan_speed'],
|
287 |
+
'reason': "Minor over-extrusion, fine-tune basic parameters"
|
288 |
+
}
|
289 |
+
},
|
290 |
+
'under_extrusion': {
|
291 |
+
'severe': {
|
292 |
+
'extrusion_multi': (+0.1, "Increase extrusion multiplier by 10%"),
|
293 |
+
'nozzle_temp': (+15, "Raise nozzle temperature by 15°C for better flow"),
|
294 |
+
'print_speed': (-10, "Reduce print speed for proper extrusion"),
|
295 |
+
'retraction_speed': (-5, "Lower retraction speed to reduce clogging risk"),
|
296 |
+
'retraction_distance': (-0.5, "Reduce retraction distance to prevent feed issues"),
|
297 |
+
'min_layer_time': (+5, "Increase minimum layer time for cooling"),
|
298 |
+
'priority': ['nozzle_temp', 'extrusion_multi', 'print_speed', 'retraction_speed', 'retraction_distance'],
|
299 |
+
'reason': "Severe under-extrusion, comprehensive extrusion parameter adjustment needed"
|
300 |
+
},
|
301 |
+
'moderate': {
|
302 |
+
'extrusion_multi': (+0.05, "Increase extrusion multiplier by 5%"),
|
303 |
+
'nozzle_temp': (+10, "Raise nozzle temperature by 10°C"),
|
304 |
+
'priority': ['nozzle_temp', 'extrusion_multi'],
|
305 |
+
'reason': "Minor under-extrusion, mainly adjust temperature"
|
306 |
+
}
|
307 |
+
},
|
308 |
+
'layer_shift': {
|
309 |
+
'severe': {
|
310 |
+
'print_speed': (-20, "Significantly reduce print speed"),
|
311 |
+
'acceleration': (-1000, "Significantly reduce acceleration"),
|
312 |
+
'jerk': (-4, "Lower jerk to reduce speed"),
|
313 |
+
'outer_wall_speed': (-10, "Lower outer wall speed for better precision"),
|
314 |
+
'travel_speed': (-20, "Lower travel speed to reduce vibration"),
|
315 |
+
'priority': ['acceleration', 'print_speed', 'jerk', 'outer_wall_speed', 'travel_speed'],
|
316 |
+
'reason': "Severe layer shift, comprehensive reduction of motion parameters"
|
317 |
+
},
|
318 |
+
'moderate': {
|
319 |
+
'print_speed': (-10, "Slightly reduce print speed"),
|
320 |
+
'priority': ['print_speed'],
|
321 |
+
'reason': "Minor layer shift, reduce speed is enough"
|
322 |
+
}
|
323 |
+
},
|
324 |
+
'stringing': {
|
325 |
+
'severe': {
|
326 |
+
'retraction_distance': (+1, "Increase retraction distance by 1mm"),
|
327 |
+
'retraction_speed': (+10, "Increase retraction speed by 10mm/s"),
|
328 |
+
'nozzle_temp': (-5, "Lower temperature to reduce oozing"),
|
329 |
+
'travel_speed': (+20, "Increase travel speed to reduce oozing time"),
|
330 |
+
'wipe_distance': (+2, "Increase wipe distance"),
|
331 |
+
'coasting_volume': (+0.03, "Increase coasting volume to reduce stringing"),
|
332 |
+
'priority': ['retraction_distance', 'retraction_speed', 'nozzle_temp', 'travel_speed', 'wipe_distance'],
|
333 |
+
'reason': "Severe stringing, optimize retraction and motion parameters"
|
334 |
+
},
|
335 |
+
'moderate': {
|
336 |
+
'retraction_distance': (+0.5, "Slightly increase retraction distance by 0.5mm"),
|
337 |
+
'nozzle_temp': (-3, "Slightly lower temperature by 3°C"),
|
338 |
+
'priority': ['retraction_distance', 'nozzle_temp'],
|
339 |
+
'reason': "Minor stringing, fine-tune retraction parameters"
|
340 |
+
}
|
341 |
+
},
|
342 |
+
'warping': {
|
343 |
+
'severe': {
|
344 |
+
'bed_temp': (+10, "Increase bed temperature for better adhesion"),
|
345 |
+
'first_layer_temp': (+5, "Increase first layer temperature"),
|
346 |
+
'first_layer_speed': (-10, "Reduce first layer speed"),
|
347 |
+
'fan_speed_initial_layer': (-50, "Reduce initial layer fan speed"),
|
348 |
+
'skirt_distance': (-0.5, "Reduce skirt distance"),
|
349 |
+
'brim_width': (+5, "Increase brim width"),
|
350 |
+
'priority': ['bed_temp', 'first_layer_temp', 'fan_speed_initial_layer', 'brim_width'],
|
351 |
+
'reason': "Severe warping, optimize first layer and temperature parameters"
|
352 |
+
}
|
353 |
+
},
|
354 |
+
'gaps': {
|
355 |
+
'severe': {
|
356 |
+
'line_width': (+0.05, "Increase line width"),
|
357 |
+
'infill_overlap': (+10, "Increase infill overlap"),
|
358 |
+
'skin_overlap': (+25, "Increase skin overlap"),
|
359 |
+
'infill_density': (+5, "Increase infill density"),
|
360 |
+
'wall_thickness': (+0.4, "Increase wall thickness"),
|
361 |
+
'priority': ['line_width', 'infill_overlap', 'skin_overlap', 'wall_thickness'],
|
362 |
+
'reason': "Severe gaps, optimize infill and overlap parameters"
|
363 |
+
}
|
364 |
+
}
|
365 |
+
}
|
366 |
+
|
367 |
+
# Add common parameter preset combinations
|
368 |
+
self.parameter_presets = {
|
369 |
+
'quality_first': {
|
370 |
+
'name': "Quality-First Mode",
|
371 |
+
'description': "Focus on print quality, suitable for small precision models",
|
372 |
+
'params': {
|
373 |
+
'layer_height': 0.12,
|
374 |
+
'print_speed': 40,
|
375 |
+
'acceleration': 500,
|
376 |
+
'jerk': 8,
|
377 |
+
'outer_wall_speed': 20,
|
378 |
+
'initial_layer_speed': 15,
|
379 |
+
'fan_speed': 100,
|
380 |
+
'infill_density': 25,
|
381 |
+
'wall_thickness': 1.2
|
382 |
+
},
|
383 |
+
'suitable_for': ["Small models", "Precision parts", "Display items"],
|
384 |
+
'trade_offs': "Longer print time but best quality"
|
385 |
+
},
|
386 |
+
'speed_first': {
|
387 |
+
'name': "Speed-First Mode",
|
388 |
+
'description': "Focus on print speed, suitable for prototypes",
|
389 |
+
'params': {
|
390 |
+
'layer_height': 0.28,
|
391 |
+
'print_speed': 100,
|
392 |
+
'acceleration': 2000,
|
393 |
+
'jerk': 15,
|
394 |
+
'outer_wall_speed': 50,
|
395 |
+
'initial_layer_speed': 25,
|
396 |
+
'fan_speed': 100,
|
397 |
+
'infill_density': 15,
|
398 |
+
'wall_thickness': 0.8
|
399 |
+
},
|
400 |
+
'suitable_for': ["Quick prototypes", "Concept validation", "Large simple models"],
|
401 |
+
'trade_offs': "Rougher quality but fast printing"
|
402 |
+
},
|
403 |
+
'balanced': {
|
404 |
+
'name': "Balanced Mode",
|
405 |
+
'description': "Balance between quality and speed, suitable for daily printing",
|
406 |
+
'params': {
|
407 |
+
'layer_height': 0.2,
|
408 |
+
'print_speed': 60,
|
409 |
+
'acceleration': 1000,
|
410 |
+
'jerk': 10,
|
411 |
+
'outer_wall_speed': 30,
|
412 |
+
'initial_layer_speed': 20,
|
413 |
+
'fan_speed': 100,
|
414 |
+
'infill_density': 20,
|
415 |
+
'wall_thickness': 1.0
|
416 |
+
},
|
417 |
+
'suitable_for': ["Daily items", "Medium-sized models", "General parts"],
|
418 |
+
'trade_offs': "Balanced compromise between quality and speed"
|
419 |
+
},
|
420 |
+
'strong_mechanical': {
|
421 |
+
'name': "Strength-First Mode",
|
422 |
+
'description': "Focus on mechanical strength, suitable for functional parts",
|
423 |
+
'params': {
|
424 |
+
'layer_height': 0.2,
|
425 |
+
'print_speed': 50,
|
426 |
+
'wall_thickness': 1.6,
|
427 |
+
'infill_density': 40,
|
428 |
+
'infill_pattern': 'gyroid',
|
429 |
+
'top_bottom_thickness': 1.2,
|
430 |
+
'fan_speed': 80,
|
431 |
+
'temperature': 215 # Slightly higher temperature for PLA
|
432 |
+
},
|
433 |
+
'suitable_for': ["Mechanical parts", "Load-bearing components", "Tools"],
|
434 |
+
'trade_offs': "Higher material usage and longer print time"
|
435 |
+
}
|
436 |
+
}
|
437 |
+
|
438 |
+
# Add specific quick solutions for problems
|
439 |
+
self.quick_fixes = {
|
440 |
+
'stringing': {
|
441 |
+
'name': "Quick solution for stringing",
|
442 |
+
'description': "Quick solution for stringing problems",
|
443 |
+
'adjustments': {
|
444 |
+
'retraction_distance': (+1, "Increase retraction distance"),
|
445 |
+
'retraction_speed': (+10, "Increase retraction speed"),
|
446 |
+
'temperature': (-5, "Slightly lower temperature"),
|
447 |
+
'travel_speed': (+20, "Increase travel speed")
|
448 |
+
},
|
449 |
+
'explanation': "This set of parameters mainly increases retraction effect and cooling to reduce stringing"
|
450 |
+
},
|
451 |
+
'layer_adhesion': {
|
452 |
+
'name': "Increase layer adhesion",
|
453 |
+
'description': "Solve layer adhesion problems",
|
454 |
+
'adjustments': {
|
455 |
+
'temperature': (+10, "Increase temperature"),
|
456 |
+
'print_speed': (-5, "Slightly lower speed"),
|
457 |
+
'fan_speed': (-20, "Lower fan speed"),
|
458 |
+
'layer_height': (-0.04, "Reduce layer height")
|
459 |
+
},
|
460 |
+
'explanation': "Increase temperature and reduce cooling to increase layer adhesion"
|
461 |
+
}
|
462 |
+
}
|
463 |
+
|
464 |
+
def preprocess_image(self, image):
    """Preprocess a camera frame for segmentation-model input.

    Converts to grayscale, boosts local contrast with CLAHE, and packs the
    result into a normalized tensor.

    Args:
        image (PIL.Image): Input image. Any PIL mode (RGB/RGBA/L) is accepted.

    Returns:
        torch.Tensor: Preprocessed image tensor of shape (1, 3, 512, 512).
    """
    # Force 3-channel RGB first: cv2.COLOR_RGB2GRAY raises on RGBA or
    # single-channel PIL images, which webcams/screenshots can produce.
    rgb = np.array(image.convert('RGB'))

    # Grayscale + CLAHE to enhance local contrast for feature extraction.
    gray = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    enhanced = clahe.apply(gray)

    # Replicate grayscale into 3 channels — the backbone expects RGB input.
    enhanced_pil = Image.fromarray(cv2.cvtColor(enhanced, cv2.COLOR_GRAY2RGB))

    # ImageNet normalization, matching the pretrained backbone statistics.
    transform = transforms.Compose([
        transforms.Resize((512, 512)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])

    return transform(enhanced_pil).unsqueeze(0)
def _detect_defects(self, pred_mask):
    """Enhanced defect detection using HDBSCAN and traditional image processing.

    Args:
        pred_mask (np.ndarray): Prediction mask from the segmentation model.

    Returns:
        dict: defect type -> list of cluster descriptors, each with center,
        size, density, edge/texture statistics, member points and confidence.
    """
    defects = {}

    # 1. Traditional image processing over the whole mask.
    # Canny edges highlight defect boundaries.
    edges = feature.canny(
        pred_mask,
        sigma=2,
        low_threshold=0.1,
        high_threshold=0.3
    )

    # Local Binary Patterns describe local texture.
    lbp = feature.local_binary_pattern(
        pred_mask,
        P=8,
        R=1,
        method='uniform'
    )

    # Blob detection for finding continuous defect regions.
    blobs = feature.blob_log(
        pred_mask,
        max_sigma=30,
        num_sigma=10,
        threshold=.1
    )

    # 2. HDBSCAN clustering over the coordinates of non-background pixels.
    defect_coords = np.column_stack(np.where(pred_mask > 0))

    if len(defect_coords) > 0:
        clusterer = HDBSCAN(
            min_cluster_size=5,
            min_samples=3,
            metric='euclidean',
            cluster_selection_epsilon=0.5
        )
        cluster_labels = clusterer.fit_predict(defect_coords)

        # Analyze each cluster.
        for label in set(cluster_labels):
            if label == -1:  # noise points
                continue

            cluster_points = defect_coords[cluster_labels == label]

            center = np.mean(cluster_points, axis=0)
            size = len(cluster_points)

            # FIX: cv2.convexHull returns a point array, not an object with
            # an `.area` attribute — the previous `cv2.convexHull(...).area`
            # raised AttributeError on the first cluster. Use contourArea on
            # the hull, guarding degenerate (collinear / <3 point) clusters
            # whose hull area is zero.
            if len(cluster_points) >= 3:
                hull = cv2.convexHull(cluster_points.astype(np.float32))
                hull_area = cv2.contourArea(hull)
            else:
                hull_area = 0.0
            density = size / hull_area if hull_area > 0 else 1.0

            # Local texture: LBP histogram over the cluster's bounding box.
            x_min, y_min = np.min(cluster_points, axis=0)
            x_max, y_max = np.max(cluster_points, axis=0)
            local_lbp = lbp[x_min:x_max+1, y_min:y_max+1]
            texture_score = np.histogram(local_lbp, bins=10)[0]

            # Edge density inside the same bounding box.
            local_edges = edges[x_min:x_max+1, y_min:y_max+1]
            edge_density = np.sum(local_edges) / local_edges.size

            # Determine the defect type from the cluster characteristics.
            defect_type = self._classify_defect(
                density=density,
                edge_density=edge_density,
                texture_score=texture_score,
                size=size
            )

            defects.setdefault(defect_type, []).append({
                'center': center,
                'size': size,
                'density': density,
                'edge_density': edge_density,
                'texture_score': texture_score,
                'points': cluster_points,
                'confidence': self._calculate_confidence(
                    density, edge_density, size
                )
            })

    return defects
def _classify_defect(self, density, edge_density, texture_score, size):
|
592 |
+
"""Classify defect type based on characteristics
|
593 |
+
|
594 |
+
Args:
|
595 |
+
density (float): Point density in cluster
|
596 |
+
edge_density (float): Edge density in cluster region
|
597 |
+
texture_score (np.array): LBP histogram
|
598 |
+
size (int): Number of points in cluster
|
599 |
+
|
600 |
+
Returns:
|
601 |
+
str: Defect type classification
|
602 |
+
"""
|
603 |
+
# High density + High edge density -> Over extrusion
|
604 |
+
if density > 0.8 and edge_density > 0.6:
|
605 |
+
return 'over_extrusion'
|
606 |
+
|
607 |
+
# Low density + High edge density -> Under extrusion
|
608 |
+
if density < 0.4 and edge_density > 0.7:
|
609 |
+
return 'under_extrusion'
|
610 |
+
|
611 |
+
# High density + Linear arrangement -> Layer shift
|
612 |
+
if density > 0.6 and self._is_linear_arrangement(texture_score):
|
613 |
+
return 'layer_shift'
|
614 |
+
|
615 |
+
# Low density + Scattered pattern -> Stringing
|
616 |
+
if density < 0.3 and self._is_scattered_pattern(texture_score):
|
617 |
+
return 'stringing'
|
618 |
+
|
619 |
+
# Default case
|
620 |
+
return 'unknown_defect'
|
621 |
+
|
622 |
+
def _calculate_confidence(self, density, edge_density, size):
|
623 |
+
"""Calculate confidence score for defect detection
|
624 |
+
|
625 |
+
Args:
|
626 |
+
density (float): Point density
|
627 |
+
edge_density (float): Edge density
|
628 |
+
size (int): Cluster size
|
629 |
+
|
630 |
+
Returns:
|
631 |
+
float: Confidence score between 0 and 1
|
632 |
+
"""
|
633 |
+
# Normalize size
|
634 |
+
size_score = min(size / 1000, 1.0)
|
635 |
+
|
636 |
+
# Combine metrics with weights
|
637 |
+
confidence = (
|
638 |
+
0.4 * density +
|
639 |
+
0.3 * edge_density +
|
640 |
+
0.3 * size_score
|
641 |
+
)
|
642 |
+
|
643 |
+
return min(confidence, 1.0)
|
644 |
+
|
645 |
+
def _is_linear_arrangement(self, texture_score):
|
646 |
+
"""Check if texture suggests linear arrangement"""
|
647 |
+
# Analyze LBP histogram for linear patterns
|
648 |
+
peak_ratio = np.max(texture_score) / np.mean(texture_score)
|
649 |
+
return peak_ratio > 3.0
|
650 |
+
|
651 |
+
def _is_scattered_pattern(self, texture_score):
|
652 |
+
"""Check if texture suggests scattered pattern"""
|
653 |
+
# Analyze LBP histogram for scattered patterns
|
654 |
+
entropy = -np.sum(texture_score * np.log2(texture_score + 1e-10))
|
655 |
+
return entropy > 3.0
|
656 |
+
|
657 |
+
def detect_defects(self, image):
    """Detect printing defects in a single camera frame.

    Runs the segmentation model, then scans the predicted class mask for
    over-extrusion, under-extrusion and layer-shift regions, attaching a
    fixed parameter-adjustment suggestion to each finding.

    Args:
        image (PIL.Image): Input image from camera.

    Returns:
        dict: {
            'mask': per-pixel class-id mask (np.ndarray),
            'defects': list of dicts with 'type', 'region', 'confidence'
                and a 'suggestion' of parameter tweaks,
            'quality_score': overall quality score in [0, 100],
            'visualization': output of _generate_visualization — currently
                None, since that helper is an unimplemented stub
        }
    """
    # Preprocess the frame into the model's normalized tensor format.
    img_tensor = self.preprocess_image(image)

    # Inference only — no gradients; argmax over class logits yields the
    # per-pixel class-id mask.
    with torch.no_grad():
        outputs = self.model(img_tensor.to(self.device))
        logits = outputs.logits
        pred_mask = torch.argmax(logits, dim=1)[0].cpu().numpy()

    # Analyze different types of defects.
    defects = []

    # 1. Over-extrusion regions -> suggest reducing flow and temperature.
    over_regions = self._detect_over_extrusion(pred_mask)
    defects.extend([{
        'type': 'over_extrusion',
        'region': region,
        'confidence': conf,
        'suggestion': {
            'extrusion_multi': '-0.05',
            'nozzle_temp': '-5°C',
            'reason': 'Reduce material flow and temperature'
        }
    } for region, conf in over_regions])

    # 2. Under-extrusion regions -> suggest increasing flow and temperature.
    under_regions = self._detect_under_extrusion(pred_mask)
    defects.extend([{
        'type': 'under_extrusion',
        'region': region,
        'confidence': conf,
        'suggestion': {
            'extrusion_multi': '+0.05',
            'nozzle_temp': '+10°C',
            'reason': 'Increase material flow and temperature'
        }
    } for region, conf in under_regions])

    # 3. Layer shifts -> suggest slowing the print down.
    shifts = self._detect_layer_shifts(pred_mask)
    defects.extend([{
        'type': 'layer_shift',
        'region': region,
        'confidence': conf,
        'suggestion': {
            'print_speed': '-10mm/s',
            'reason': 'Reduce speed to prevent shifts'
        }
    } for region, conf in shifts])

    # Aggregate all findings into a single 0-100 score.
    quality_score = self._calculate_quality_score(pred_mask, defects)

    return {
        'mask': pred_mask,
        'defects': defects,
        'quality_score': quality_score,
        'visualization': self._generate_visualization(image, defects)
    }
def _detect_over_extrusion(self, mask):
    """Detect over-extrusion regions via connected-component analysis.

    Args:
        mask (np.ndarray): Per-pixel class-id mask (2 == over-extrusion).

    Returns:
        list: ((x, y, w, h), confidence) tuples, confidence in [0, 1].
    """
    over_mask = mask == 2  # over-extrusion class

    # Find connected components of the over-extrusion pixels.
    num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(
        over_mask.astype(np.uint8), connectivity=8
    )

    regions = []
    for i in range(1, num_labels):  # label 0 is the background
        if stats[i, cv2.CC_STAT_AREA] > 50:  # ignore tiny speckles
            x = stats[i, cv2.CC_STAT_LEFT]
            y = stats[i, cv2.CC_STAT_TOP]
            w = stats[i, cv2.CC_STAT_WIDTH]
            h = stats[i, cv2.CC_STAT_HEIGHT]

            # Confidence blends region area with prediction intensity.
            area_score = min(stats[i, cv2.CC_STAT_AREA] / 500, 1.0)
            # FIX: the mask stores the class id (2) in this region, so the
            # raw mean is 2.0 and the old (area_score + intensity) / 2 pushed
            # confidence above 1. Divide by the class id to map intensity
            # into [0, 1] and clamp the result.
            intensity = np.mean(mask[labels == i]) / 2.0
            conf = min((area_score + intensity) / 2, 1.0)

            regions.append(((x, y, w, h), conf))

    return regions
def _detect_under_extrusion(self, mask):
    """Locate under-extrusion gaps using morphological operations.

    Args:
        mask (np.ndarray): Per-pixel class-id mask (3 == under-extrusion).

    Returns:
        list: ((x, y, w, h), confidence) tuples for each gap region.
    """
    under = (mask == 3).astype(np.uint8)  # under-extrusion class

    # Dilation minus the original isolates the thin gap band surrounding
    # each under-extruded region.
    structuring = np.ones((3, 3), np.uint8)
    grown = cv2.dilate(under, structuring, iterations=1)
    gap_band = cv2.subtract(grown, under)

    # Outline each gap and keep only those above the minimum area.
    outlines, _ = cv2.findContours(gap_band, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    found = []
    for outline in outlines:
        gap_area = cv2.contourArea(outline)
        if gap_area > 30:  # min area threshold
            x, y, w, h = cv2.boundingRect(outline)
            # Confidence grows with gap size, saturating at 300 px².
            found.append(((x, y, w, h), min(gap_area / 300, 1.0)))

    return found
def _detect_layer_shifts(self, mask):
    """Detect layer shifts using edge detection and Hough line analysis.

    Args:
        mask (np.ndarray): Per-pixel class-id mask.

    Returns:
        list: ((x1, y1, dx, 10), confidence) tuples for suspect lines.
    """
    # FIX: Canny thresholds of 100/200 are meaningless on a {0..3}
    # class-id mask (all gradients < 4), so no edges were ever detected.
    # Binarize print-vs-background to 0/255 first.
    binary = (mask > 0).astype(np.uint8) * 255
    edges = cv2.Canny(binary, 100, 200)

    # Probabilistic Hough transform to find straight edge segments.
    lines = cv2.HoughLinesP(edges, 1, np.pi/180, 50,
                            minLineLength=40, maxLineGap=10)

    regions = []
    if lines is not None:
        for line in lines:
            x1, y1, x2, y2 = line[0]
            angle = np.abs(np.arctan2(y2 - y1, x2 - x1) * 180 / np.pi)
            # 80°–100° selects near-VERTICAL segments (the original comment
            # said "horizontal"). NOTE(review): confirm which orientation a
            # shift produces in this camera setup before tuning the band.
            if 80 < angle < 100:
                conf = min((100 - np.abs(90 - angle)) / 10, 1.0)
                regions.append(((x1, y1, x2 - x1, 10), conf))

    return regions
def _calculate_quality_score(self, mask, defects):
|
801 |
+
"""Calculate overall quality score based on defects"""
|
802 |
+
# Base score from normal print area ratio
|
803 |
+
normal_ratio = np.sum(mask == 1) / mask.size
|
804 |
+
base_score = normal_ratio * 100
|
805 |
+
|
806 |
+
# Deduct points for defects based on confidence
|
807 |
+
for defect in defects:
|
808 |
+
penalty = defect['confidence'] * 10
|
809 |
+
if defect['type'] == 'layer_shift':
|
810 |
+
penalty *= 2 # Layer shifts are more serious
|
811 |
+
base_score -= penalty
|
812 |
+
|
813 |
+
return max(0, min(100, base_score))
|
814 |
+
|
815 |
+
def _analyze_pixel_defects(self, pred_mask, reference_gcode=None):
|
816 |
+
"""Analyze pixel-level defects
|
817 |
+
|
818 |
+
Args:
|
819 |
+
pred_mask (np.array): Prediction mask
|
820 |
+
reference_gcode: Optional G-code reference
|
821 |
+
|
822 |
+
Returns:
|
823 |
+
dict: Detailed defect metrics
|
824 |
+
"""
|
825 |
+
total_pixels = pred_mask.size
|
826 |
+
normal_print = np.sum(pred_mask == 1)
|
827 |
+
over_extrusion = np.sum(pred_mask == 2)
|
828 |
+
under_extrusion = np.sum(pred_mask == 3)
|
829 |
+
|
830 |
+
# Compare with reference if available
|
831 |
+
if reference_gcode is not None:
|
832 |
+
expected_mask = self._generate_expected_mask(reference_gcode)
|
833 |
+
unexpected_print = np.logical_and(pred_mask > 0, expected_mask == 0)
|
834 |
+
missing_print = np.logical_and(pred_mask == 0, expected_mask > 0)
|
835 |
+
else:
|
836 |
+
unexpected_print = np.zeros_like(pred_mask, dtype=bool)
|
837 |
+
missing_print = np.zeros_like(pred_mask, dtype=bool)
|
838 |
+
|
839 |
+
return {
|
840 |
+
'normal_ratio': normal_print / total_pixels,
|
841 |
+
'over_extrusion_ratio': over_extrusion / total_pixels,
|
842 |
+
'under_extrusion_ratio': under_extrusion / total_pixels,
|
843 |
+
'unexpected_print_pixels': np.sum(unexpected_print),
|
844 |
+
'missing_print_pixels': np.sum(missing_print),
|
845 |
+
'defect_locations': self._get_defect_locations(pred_mask)
|
846 |
+
}
|
847 |
+
|
848 |
+
def _get_defect_locations(self, pred_mask):
|
849 |
+
"""Get coordinates of defect regions
|
850 |
+
|
851 |
+
Args:
|
852 |
+
pred_mask (np.array): Prediction mask
|
853 |
+
|
854 |
+
Returns:
|
855 |
+
dict: Coordinates of different defect types
|
856 |
+
"""
|
857 |
+
defect_coords = {
|
858 |
+
'over_extrusion': np.where(pred_mask == 2),
|
859 |
+
'under_extrusion': np.where(pred_mask == 3),
|
860 |
+
}
|
861 |
+
|
862 |
+
# Group nearby defects into regions
|
863 |
+
defect_regions = {}
|
864 |
+
for defect_type, coords in defect_coords.items():
|
865 |
+
regions = self._cluster_defect_points(coords)
|
866 |
+
defect_regions[defect_type] = regions
|
867 |
+
|
868 |
+
return defect_regions
|
869 |
+
|
870 |
+
def _cluster_defect_points(self, coords):
|
871 |
+
"""Cluster nearby defect points into regions using DBSCAN
|
872 |
+
|
873 |
+
Args:
|
874 |
+
coords: Tuple of x,y coordinates
|
875 |
+
|
876 |
+
Returns:
|
877 |
+
list: List of defect regions with their coordinates
|
878 |
+
"""
|
879 |
+
if len(coords[0]) == 0:
|
880 |
+
return []
|
881 |
+
|
882 |
+
points = np.column_stack(coords)
|
883 |
+
clustering = DBSCAN(eps=5, min_samples=4).fit(points)
|
884 |
+
|
885 |
+
regions = []
|
886 |
+
for label in set(clustering.labels_):
|
887 |
+
if label == -1: # Noise points
|
888 |
+
continue
|
889 |
+
mask = clustering.labels_ == label
|
890 |
+
region_points = points[mask]
|
891 |
+
regions.append({
|
892 |
+
'center': region_points.mean(axis=0),
|
893 |
+
'size': len(region_points),
|
894 |
+
'points': region_points
|
895 |
+
})
|
896 |
+
|
897 |
+
return regions
|
898 |
+
|
899 |
+
def _generate_expected_mask(self, reference_gcode):
    """Build the expected-material mask for a G-code reference.

    NOTE(review): unimplemented stub — currently returns None, so callers
    that pass a reference (e.g. _analyze_pixel_defects) will break on the
    logical_and comparisons until this is implemented.
    """
    # Implementation of _generate_expected_mask method
    pass
def _generate_visualization(self, image, defects):
    """Render detected defects onto the frame for display.

    NOTE(review): unimplemented stub — currently returns None, which is
    what detect_defects() exposes under its 'visualization' key.
    """
    # Implementation of _generate_visualization method
    pass
def generate_parameter_suggestions(self, defects):
    """Generate parameter adjustment suggestions from detected defects.

    Defects are handled in descending confidence order so the most
    confident ones claim a parameter first; each parameter is adjusted
    at most once per call.

    Args:
        defects (dict): defect type -> info dict; each info dict must
            contain a 'confidence' float in [0, 1].

    Returns:
        dict: parameter name -> {'adjustment', 'reason', 'confidence'}.
    """
    suggestions = {}
    current_adjustments = {}  # Record already suggested adjustments

    # Sort defects by confidence (most confident first).
    sorted_defects = sorted(
        defects.items(),
        key=lambda x: x[1]['confidence'],
        reverse=True
    )

    for defect_type, defect_info in sorted_defects:
        # Map confidence to a severity bucket; <= 0.5 is ignored entirely.
        if defect_info['confidence'] > 0.8:
            severity = 'severe'
        elif defect_info['confidence'] > 0.5:
            severity = 'moderate'
        else:
            continue

        if defect_type in self.parameter_rules:
            rule = self.parameter_rules[defect_type][severity]

            # Apply adjustment suggestions in the rule's priority order.
            for param in rule['priority']:
                if param not in current_adjustments:
                    adjustment, reason = rule[param]
                    current_adjustments[param] = adjustment

                    if param not in suggestions:
                        suggestions[param] = {
                            'adjustment': adjustment,
                            'reason': f"{reason} (Due to {defect_type})",
                            'confidence': defect_info['confidence']
                        }
                    else:
                        # If there's already a suggestion, take the larger adjustment.
                        # NOTE(review): current_adjustments and suggestions are always
                        # updated together, so this branch looks unreachable — confirm
                        # the intended merge semantics before relying on it.
                        if abs(adjustment) > abs(suggestions[param]['adjustment']):
                            suggestions[param]['adjustment'] = adjustment
                            suggestions[param]['reason'] += f"\n{reason} (Due to {defect_type})"

    return suggestions
def _analyze_defect_severity(self, defect_regions):
|
958 |
+
"""Analyze the severity of defects
|
959 |
+
|
960 |
+
Args:
|
961 |
+
defect_regions (dict): Defect region information
|
962 |
+
|
963 |
+
Returns:
|
964 |
+
dict: Severity assessment of different defect types
|
965 |
+
"""
|
966 |
+
severity = {}
|
967 |
+
for defect_type, regions in defect_regions.items():
|
968 |
+
if not regions:
|
969 |
+
continue
|
970 |
+
|
971 |
+
# Calculate total area of defect regions
|
972 |
+
total_area = sum(region['size'] for region in regions)
|
973 |
+
# Calculate maximum continuous defect region
|
974 |
+
max_area = max(region['size'] for region in regions)
|
975 |
+
# Calculate number of defect regions
|
976 |
+
num_regions = len(regions)
|
977 |
+
|
978 |
+
# Comprehensive severity assessment
|
979 |
+
severity[defect_type] = {
|
980 |
+
'confidence': min(1.0, (total_area / 1000 + max_area / 500 + num_regions / 5) / 3),
|
981 |
+
'total_area': total_area,
|
982 |
+
'max_area': max_area,
|
983 |
+
'num_regions': num_regions
|
984 |
+
}
|
985 |
+
|
986 |
+
def suggest_preset(self, model_size, quality_requirement, time_constraint):
    """Recommend a preset parameter combination for the given constraints.

    Args:
        model_size: "small"/"medium"/"large" (currently unused by the
            selection logic).
        quality_requirement: "high"/"medium"/"low".
        time_constraint: "tight"/"normal"/"relaxed".

    Returns:
        dict: One of the configured parameter presets.
    """
    # Only the two extreme combinations get a dedicated preset;
    # everything else falls back to the balanced profile.
    if quality_requirement == "high" and time_constraint == "relaxed":
        preset_key = 'quality_first'
    elif time_constraint == "tight" and quality_requirement == "low":
        preset_key = 'speed_first'
    else:
        preset_key = 'balanced'
    return self.parameter_presets[preset_key]
def analyze_print_quality(self, image, gcode_layer):
    """Compare the current layer's camera image with its expected G-code path.

    Args:
        image (PIL.Image): Current layer image from the camera.
        gcode_layer (dict): Current layer G-code information.

    Returns:
        dict: Coverage metrics plus a comparison visualization image.
    """
    # Expected filament path for this layer from the G-code.
    planned_path = self.gcode_analyzer.get_layer_path(gcode_layer)

    # Bring the camera frame into G-code coordinates, then segment what
    # was actually printed.
    registered = self.align_image_to_gcode(image, planned_path)
    printed_mask = self.segment_material(registered)

    # Rasterize the planned path into the same mask space and compare.
    planned_mask = self.generate_path_mask(planned_path)
    coverage = self.calculate_coverage_metrics(printed_mask, planned_mask)

    return {
        'metrics': coverage,
        'visualization': self.visualize_comparison(
            registered, printed_mask, planned_mask)
    }
def calculate_coverage_metrics(self, actual, expected):
    """Calculate coverage metrics between the actual print and expected path.

    Args:
        actual (np.ndarray): Binary mask of actually printed material.
        expected (np.ndarray): Binary mask of expected material from G-code.

    Returns:
        dict: positive/negative coverage, missing/excess ratios and an
        overall score, all in [0, 1]. Degenerate masks (expected all
        material or all empty) yield 1.0 coverage / 0.0 error for the
        vacuous side instead of dividing by zero.
    """
    # Where material should be (positive coverage).
    should_have = expected == 1
    correct_material = np.logical_and(actual == 1, should_have)
    missing_material = np.logical_and(actual == 0, should_have)

    # Where material shouldn't be (negative coverage).
    should_not_have = expected == 0
    excess_material = np.logical_and(actual == 1, should_not_have)
    correct_empty = np.logical_and(actual == 0, should_not_have)

    fill_total = np.sum(should_have)
    empty_total = np.sum(should_not_have)

    # FIX: guard empty denominators — an expected mask that is all material
    # (or all empty) previously raised/NaN'd on division by zero.
    positive_coverage = np.sum(correct_material) / fill_total if fill_total else 1.0
    missing_ratio = np.sum(missing_material) / fill_total if fill_total else 0.0
    negative_coverage = np.sum(correct_empty) / empty_total if empty_total else 1.0
    excess_ratio = np.sum(excess_material) / empty_total if empty_total else 0.0

    return {
        'positive_coverage': positive_coverage,  # Material where it should be
        'negative_coverage': negative_coverage,  # No material where it shouldn't be
        'missing_ratio': missing_ratio,          # Missing material ratio
        'excess_ratio': excess_ratio,            # Excess material ratio
        'overall_score': (positive_coverage + negative_coverage) / 2
    }
def align_image_to_gcode(self, image, gcode_path):
    """Warp the camera image into the G-code coordinate frame.

    Args:
        image (PIL.Image): Camera frame.
        gcode_path (dict): G-code path information.

    Returns:
        np.ndarray: Perspective-corrected image at the original frame size.
    """
    # Matching reference points: detected in the frame (e.g. bed corners,
    # calibration marks) vs. their known G-code coordinates.
    detected_pts = self.detect_reference_points(image)
    known_pts = self.gcode_analyzer.get_reference_points()

    # Homography that maps camera pixels onto the printer coordinate system.
    homography = cv2.getPerspectiveTransform(
        detected_pts.astype(np.float32),
        known_pts.astype(np.float32),
    )

    # Warp the frame; output keeps the source dimensions.
    return cv2.warpPerspective(
        np.array(image),
        homography,
        (image.width, image.height),
    )
def visualize_comparison(self, image, actual, expected):
    """Overlay a color-coded print-vs-plan comparison on the aligned image.

    Color key: green = material where planned, red = missing material,
    yellow = excess material.

    Args:
        image (np.ndarray): Original aligned image.
        actual (np.ndarray): Binary mask of actual material.
        expected (np.ndarray): Binary mask of expected material.

    Returns:
        PIL.Image: Blended visualization image.
    """
    overlay = np.zeros((*image.shape[:2], 3), dtype=np.uint8)

    overlay[np.logical_and(actual == 1, expected == 1)] = [0, 255, 0]    # correct placement
    overlay[np.logical_and(actual == 0, expected == 1)] = [255, 0, 0]    # missing material
    overlay[np.logical_and(actual == 1, expected == 0)] = [255, 255, 0]  # excess material

    # Blend the overlay onto the original frame.
    overlay_weight = 0.6
    composite = cv2.addWeighted(image, 1 - overlay_weight, overlay, overlay_weight, 0)
    return Image.fromarray(composite)
mo_optimizer.py
ADDED
@@ -0,0 +1,232 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
import hdbscan
|
3 |
+
from skimage import feature, filters
|
4 |
+
import cv2
|
5 |
+
from typing import Dict, Any
|
6 |
+
|
7 |
+
class MOPrintOptimizer:
|
8 |
+
"""Multi-objective optimizer for print parameters"""
|
9 |
+
|
10 |
+
def __init__(self):
    """Initialize objective weights, quality thresholds and material params."""
    # Relative importance of each optimization objective (sums to 1.0).
    self.weights = {
        'quality': 0.4,
        'speed': 0.3,
        'material': 0.3
    }

    # Defect-rate levels treated as "fully bad" when scoring quality
    # (a rate at or above its threshold maps to a sub-score of 0).
    self.quality_thresholds = {
        'missing_rate': 0.1,  # 10% missing is bad
        'excess_rate': 0.1,  # 10% excess is bad
        'stringing_rate': 0.05,  # 5% stringing is bad
        'uniformity': 0.8  # At least 80% uniformity is good
    }

    # Reference values for material-efficiency scoring.
    self.material_params = {
        'optimal_flow_rate': 100,  # 100% flow rate
        'flow_tolerance': 10,  # ±10% tolerance
        'optimal_layer_height': 0.2  # 0.2mm layer height
    }
def evaluate_quality(self, metrics: Dict[str, float]) -> float:
    """Score overall print quality on a 0-1 scale.

    Each defect rate is mapped to a 0-1 sub-score (1 = defect-free,
    falling linearly to 0 at its configured threshold) and averaged with
    the uniformity score using equal weights.

    Args:
        metrics: Dictionary with 'missing_rate', 'excess_rate',
            'stringing_rate' and 'uniformity_score'.

    Returns:
        float: Quality score in [0, 1].
    """
    limits = self.quality_thresholds

    def deficit_score(rate: float, limit: float) -> float:
        # 1.0 when defect-free, linearly down to 0.0 at the threshold.
        return 1.0 - min(1.0, rate / limit)

    sub_scores = [
        deficit_score(metrics['missing_rate'], limits['missing_rate']),
        deficit_score(metrics['excess_rate'], limits['excess_rate']),
        deficit_score(metrics['stringing_rate'], limits['stringing_rate']),
        metrics['uniformity_score'],
    ]
    return float(np.mean(sub_scores))
def evaluate_material_efficiency(self, params: Dict[str, float]) -> float:
    """Evaluate material efficiency of the current parameters.

    Args:
        params: Current print parameters; must contain 'flow_rate',
            'layer_height' and 'retraction_distance'.

    Returns:
        float: Material efficiency score in [0, 1].
    """
    # Flow rate: penalize deviation from the optimal rate, saturating at
    # the configured tolerance.
    flow_deviation = abs(params['flow_rate'] - self.material_params['optimal_flow_rate'])
    flow_score = 1.0 - min(1.0, flow_deviation / self.material_params['flow_tolerance'])

    # Layer height: thicker layers use less material for the same volume;
    # capped at 1.0 once the optimal height is reached.
    layer_score = min(1.0, params['layer_height'] / self.material_params['optimal_layer_height'])

    # Retraction: less retraction is better. FIX: clamp at 0 — distances
    # above the assumed 10 mm maximum previously drove this negative.
    retraction_score = max(0.0, 1.0 - params['retraction_distance'] / 10.0)

    # FIX: combine as a true weighted sum (weights total 1.0). The old
    # np.mean over already-weighted terms divided by 3, capping a perfect
    # print at ~0.33 and contradicting the documented 0-1 range.
    material_score = (
        flow_score * 0.4 +        # flow rate is most important
        layer_score * 0.4 +       # layer height equally important
        retraction_score * 0.2    # retraction less important
    )

    return float(material_score)
def evaluate_objectives(self, image: np.ndarray, params: Dict[str, float]) -> Dict[str, Any]:
    """Evaluate all objectives and combine them into a weighted total.

    Args:
        image: Print image for quality analysis (currently unused — see note
            on the placeholder metrics below).
        params: Current print parameters; must contain 'print_speed' plus
            whatever evaluate_material_efficiency requires.

    Returns:
        dict: {'objectives': {'quality', 'speed', 'material', 'total'},
        'metrics': the quality metrics used}. All scores are in [0, 1].
    """
    # NOTE(review): placeholder metrics — in the real implementation these
    # should come from DefectDetector analysis of `image`.
    quality_metrics = {
        'missing_rate': 0.05,
        'excess_rate': 0.03,
        'stringing_rate': 0.02,
        'uniformity_score': 0.95
    }

    # Individual objective scores.
    quality_score = self.evaluate_quality(quality_metrics)
    # FIX: cap at 1.0 — speeds above the 150 mm/s reference previously
    # produced a score > 1, breaking the weighted 0-1 total.
    speed_score = min(1.0, params['print_speed'] / 150.0)
    material_score = self.evaluate_material_efficiency(params)

    # Weighted combination of the three objectives.
    total_score = (
        quality_score * self.weights['quality'] +
        speed_score * self.weights['speed'] +
        material_score * self.weights['material']
    )

    return {
        'objectives': {
            'quality': float(quality_score),
            'speed': float(speed_score),
            'material': float(material_score),
            'total': float(total_score)
        },
        'metrics': quality_metrics
    }
def evaluate_print_quality(self, image, expected_pattern=None):
    """Evaluate print quality using the hybrid analysis pipeline.

    Args:
        image: Current print image.
        expected_pattern: Expected print pattern (optional).

    Returns:
        dict: Edge, surface, defect-cluster and (when a pattern is given)
        pattern-matching metrics.
    """
    # Pattern matching only runs when a reference pattern is supplied.
    pattern_report = (
        self._analyze_pattern(image, expected_pattern)
        if expected_pattern else {}
    )

    return {
        'edge_quality': self._analyze_edges(image),          # traditional edge analysis
        'surface_quality': self._analyze_surface(image),     # texture analysis
        'defect_analysis': self._cluster_defects(image),     # HDBSCAN defect clustering
        'pattern_accuracy': pattern_report,
    }
def _analyze_edges(self, image):
    """Analyze edge quality via multi-scale Canny edge detection."""
    # One binary edge map per scale: sigma 1 (fine) -> 3 (coarse).
    edge_maps = [feature.canny(image, sigma=s) for s in (1, 2, 3)]
    fine, medium, coarse = edge_maps

    # The mean of a boolean edge map is its edge-pixel density.
    return {
        'fine_edge_score': np.mean(fine),
        'medium_edge_score': np.mean(medium),
        'coarse_edge_score': np.mean(coarse),
        'edge_consistency': self._calculate_edge_consistency(edge_maps),
    }
|
173 |
+
|
174 |
+
def _analyze_surface(self, image):
    """Analyze surface quality using texture descriptors (LBP + GLCM)."""
    # Uniform Local Binary Patterns capture micro-texture variation.
    lbp_map = feature.local_binary_pattern(image, P=8, R=1, method='uniform')

    # Gray-level co-occurrence matrix at distance 1 over four directions.
    directions = [0, np.pi / 4, np.pi / 2, 3 * np.pi / 4]
    glcm = feature.graycomatrix(image, [1], directions)
    contrast_vals = feature.graycoprops(glcm, 'contrast')
    homogeneity_vals = feature.graycoprops(glcm, 'homogeneity')

    return {
        'texture_uniformity': np.std(lbp_map),
        'surface_contrast': np.mean(contrast_vals),
        'surface_homogeneity': np.mean(homogeneity_vals),
    }
|
189 |
+
|
190 |
+
def _cluster_defects(self, image):
    """Group candidate defect points into defect regions with HDBSCAN."""
    candidate_points = self._extract_defect_points(image)

    # Guard clause: no candidates means an empty defect summary.
    if len(candidate_points) == 0:
        return {'defect_count': 0, 'cluster_sizes': [], 'defect_density': 0}

    labels = hdbscan.HDBSCAN(
        min_cluster_size=3,
        min_samples=2,
        metric='euclidean',
        cluster_selection_epsilon=0.5,
    ).fit_predict(candidate_points)

    return self._analyze_defect_clusters(candidate_points, labels)
|
209 |
+
|
210 |
+
def _calculate_edge_consistency(self, edges):
    """Return the mean of per-map edge densities as a consistency proxy."""
    densities = np.array([np.mean(edge_map) for edge_map in edges])
    return densities.mean()
|
213 |
+
|
214 |
+
def _analyze_pattern(self, image, expected_pattern):
    """Score how closely the print matches the expected pattern.

    NOTE(review): placeholder — both arguments are ignored and a fixed
    accuracy score is returned until real pattern matching is
    implemented.
    """
    fixed_accuracy = 0.8
    return fixed_accuracy
|
218 |
+
|
219 |
+
def _extract_defect_points(self, image):
    """Collect coordinates of candidate defect pixels from the image.

    NOTE(review): placeholder — the image is ignored and three fixed
    points are returned until real extraction is implemented.
    """
    placeholder_points = [[0, 0], [1, 1], [2, 2]]
    return np.array(placeholder_points)
|
223 |
+
|
224 |
+
def _analyze_defect_clusters(self, defect_points, cluster_labels):
    """Summarize HDBSCAN clustering output into defect metrics.

    Fixes the original placeholder, which counted HDBSCAN's noise
    label (-1) as a defect cluster and always reported empty
    cluster sizes and zero density.

    Args:
        defect_points: (N, 2) array of candidate defect coordinates
            (currently unused beyond the label summary).
        cluster_labels: Per-point labels from HDBSCAN; -1 marks noise.

    Returns:
        dict with:
            'defect_count': number of real clusters (noise excluded).
            'cluster_sizes': points per cluster, in label order.
            'defect_density': fraction of points assigned to a cluster.
    """
    labels = np.asarray(cluster_labels)
    # HDBSCAN labels noise points as -1; they are not defect clusters.
    clustered = labels[labels != -1]
    cluster_ids, sizes = np.unique(clustered, return_counts=True)

    total_points = labels.size
    density = float(clustered.size) / total_points if total_points else 0

    return {
        'defect_count': int(cluster_ids.size),
        'cluster_sizes': sizes.tolist(),
        'defect_density': density,
    }
|
228 |
+
|
229 |
+
def _apply_parameter_adjustments(self, current_params, adjustments):
    """Merge suggested adjustments into the current parameter set.

    NOTE(review): placeholder — `adjustments` is ignored and the
    parameters are returned untouched until the adjustment logic is
    implemented.
    """
    updated_params = current_params
    return updated_params
|
requirements.txt
ADDED
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# UI and Web
gradio>=4.0.0

# Core Data Processing
numpy>=1.20.0
pandas>=1.3.0
scipy>=1.7.0

# Image Processing
opencv-python-headless>=4.5.0
scikit-image>=0.19.0
Pillow>=8.0.0

# Machine Learning
scikit-learn>=0.24.0
hdbscan>=0.8.29

# Communication
paho-mqtt>=1.6.1

# Configuration
python-dotenv>=0.19.0

# Utilities
tqdm>=4.62.0
matplotlib>=3.4.0