# File size: 8,991 Bytes
# Revision: 477c54f
import numpy as np
import hdbscan
from skimage import feature, filters
import cv2
from typing import Dict, Any
class MOPrintOptimizer:
    """Multi-objective optimizer for 3D-print parameters.

    Combines three objectives -- print quality, print speed and material
    efficiency -- into a single weighted score (weights sum to 1.0), and
    provides image-based quality-analysis helpers (edge detection, surface
    texture, HDBSCAN defect clustering). Several analysis helpers are
    placeholders pending integration with a real DefectDetector.
    """

    def __init__(self):
        # Relative importance of each objective; values sum to 1.0 so the
        # combined total score stays in the 0-1 range.
        self.weights = {
            'quality': 0.4,
            'speed': 0.3,
            'material': 0.3
        }
        # Defect rate at (or above) which the corresponding quality
        # sub-score bottoms out at 0.
        self.quality_thresholds = {
            'missing_rate': 0.1,     # 10% missing material is fully bad
            'excess_rate': 0.1,      # 10% excess material is fully bad
            'stringing_rate': 0.05,  # 5% stringing is fully bad
            'uniformity': 0.8        # at least 80% uniformity is good
        }
        # Reference values used by the material-efficiency score.
        self.material_params = {
            'optimal_flow_rate': 100,    # 100% flow rate
            'flow_tolerance': 10,        # +/-10% tolerance
            'optimal_layer_height': 0.2  # 0.2 mm layer height
        }

    def evaluate_quality(self, metrics: Dict[str, float]) -> float:
        """Evaluate print quality score.

        Args:
            metrics: Dictionary containing quality metrics
                - missing_rate: Fraction of missing material (0-1)
                - excess_rate: Fraction of excess material (0-1)
                - stringing_rate: Fraction of stringing (0-1)
                - uniformity_score: Score for print uniformity (0-1)

        Returns:
            float: Quality score (0-1), the unweighted mean of the four
            sub-scores.
        """
        # Each defect rate is normalized against its threshold and inverted,
        # so 0 defects -> 1.0 and rates at/above the threshold -> 0.0.
        missing_score = 1.0 - min(1.0, metrics['missing_rate'] / self.quality_thresholds['missing_rate'])
        excess_score = 1.0 - min(1.0, metrics['excess_rate'] / self.quality_thresholds['excess_rate'])
        stringing_score = 1.0 - min(1.0, metrics['stringing_rate'] / self.quality_thresholds['stringing_rate'])
        uniformity_score = metrics['uniformity_score']

        # Combine scores with equal weights.
        quality_score = np.mean([
            missing_score,
            excess_score,
            stringing_score,
            uniformity_score
        ])
        return float(quality_score)

    def evaluate_material_efficiency(self, params: Dict[str, float]) -> float:
        """Evaluate material efficiency.

        Args:
            params: Current print parameters; reads 'flow_rate' (percent),
                'layer_height' (mm) and 'retraction_distance' (mm).

        Returns:
            float: Material efficiency score (0-1).
        """
        # Flow-rate deviation from optimal, normalized by the tolerance band.
        flow_deviation = abs(params['flow_rate'] - self.material_params['optimal_flow_rate'])
        flow_score = 1.0 - min(1.0, flow_deviation / self.material_params['flow_tolerance'])

        # Thicker layers use less material for the same volume; cap at 1.0.
        layer_score = min(1.0, params['layer_height'] / self.material_params['optimal_layer_height'])

        # Less retraction is better for material efficiency. Clamped at 0 so
        # distances beyond the assumed 10 mm maximum cannot go negative.
        retraction_score = max(0.0, 1.0 - params['retraction_distance'] / 10.0)

        # BUG FIX: the pre-weighted sub-scores were previously averaged with
        # np.mean, which divided the intended weighted sum by 3 and capped the
        # result at ~0.33. The weights (0.4 + 0.4 + 0.2) already sum to 1.0,
        # so a plain weighted sum yields the intended 0-1 score.
        material_score = (
            flow_score * 0.4 +       # flow rate is most important
            layer_score * 0.4 +      # layer height equally important
            retraction_score * 0.2   # retraction less important
        )
        return float(material_score)

    def evaluate_objectives(self, image: np.ndarray, params: Dict[str, float]) -> Dict[str, Any]:
        """Evaluate all objectives and combine them into a total score.

        Args:
            image: Print image for quality analysis. NOTE(review): currently
                unused -- quality metrics below are hard-coded placeholders
                until the DefectDetector integration lands.
            params: Current print parameters; reads 'print_speed' plus the
                keys used by evaluate_material_efficiency().

        Returns:
            dict: {'objectives': {'quality', 'speed', 'material', 'total'},
                   'metrics': the quality metrics used}.
        """
        # Placeholder quality metrics; these should come from DefectDetector
        # in the real implementation.
        quality_metrics = {
            'missing_rate': 0.05,
            'excess_rate': 0.03,
            'stringing_rate': 0.02,
            'uniformity_score': 0.95
        }

        # Individual objective scores, each in 0-1.
        quality_score = self.evaluate_quality(quality_metrics)
        # Normalize to the assumed 150 mm/s maximum speed; clamped so speeds
        # above the maximum cannot push the score past 1.0.
        speed_score = min(1.0, params['print_speed'] / 150.0)
        material_score = self.evaluate_material_efficiency(params)

        # Weighted combination of the objectives.
        total_score = (
            quality_score * self.weights['quality'] +
            speed_score * self.weights['speed'] +
            material_score * self.weights['material']
        )

        return {
            'objectives': {
                'quality': float(quality_score),
                'speed': float(speed_score),
                'material': float(material_score),
                'total': float(total_score)
            },
            'metrics': quality_metrics
        }

    def evaluate_print_quality(self, image, expected_pattern=None):
        """Evaluate print quality using a hybrid approach.

        Args:
            image: Current print image (grayscale array expected by the
                skimage helpers below).
            expected_pattern: Expected print pattern (optional).

        Returns:
            dict: Quality metrics from edge, surface, defect-cluster and
            (optionally) pattern analysis.
        """
        # 1. Traditional image processing.
        edge_metrics = self._analyze_edges(image)
        surface_metrics = self._analyze_surface(image)
        # 2. HDBSCAN-based defect clustering.
        defect_metrics = self._cluster_defects(image)
        # 3. Pattern matching, only when an expected pattern is provided.
        pattern_metrics = self._analyze_pattern(image, expected_pattern) if expected_pattern else {}
        return {
            'edge_quality': edge_metrics,
            'surface_quality': surface_metrics,
            'defect_analysis': defect_metrics,
            'pattern_accuracy': pattern_metrics
        }

    def _analyze_edges(self, image):
        """Analyze edge quality via multi-scale Canny edge detection."""
        # Increasing sigma blurs more, keeping only coarser edges.
        edges_fine = feature.canny(image, sigma=1)
        edges_medium = feature.canny(image, sigma=2)
        edges_coarse = feature.canny(image, sigma=3)
        return {
            'fine_edge_score': np.mean(edges_fine),
            'medium_edge_score': np.mean(edges_medium),
            'coarse_edge_score': np.mean(edges_coarse),
            'edge_consistency': self._calculate_edge_consistency(
                [edges_fine, edges_medium, edges_coarse]
            )
        }

    def _analyze_surface(self, image):
        """Analyze surface quality using texture analysis (LBP + GLCM)."""
        # Local Binary Patterns capture local texture variation.
        lbp = feature.local_binary_pattern(image, P=8, R=1, method='uniform')
        # GLCM features at four orientations. NOTE(review): graycomatrix
        # expects an integer-typed image -- confirm callers pass uint8.
        glcm = feature.graycomatrix(image, [1], [0, np.pi/4, np.pi/2, 3*np.pi/4])
        contrast = feature.graycoprops(glcm, 'contrast')
        homogeneity = feature.graycoprops(glcm, 'homogeneity')
        return {
            'texture_uniformity': np.std(lbp),
            'surface_contrast': np.mean(contrast),
            'surface_homogeneity': np.mean(homogeneity)
        }

    def _cluster_defects(self, image):
        """Cluster potential defect points with HDBSCAN."""
        defect_points = self._extract_defect_points(image)
        if len(defect_points) > 0:
            clusterer = hdbscan.HDBSCAN(
                min_cluster_size=3,
                min_samples=2,
                metric='euclidean',
                cluster_selection_epsilon=0.5
            )
            cluster_labels = clusterer.fit_predict(defect_points)
            return self._analyze_defect_clusters(defect_points, cluster_labels)
        # No candidate points: report an empty defect summary.
        return {'defect_count': 0, 'cluster_sizes': [], 'defect_density': 0}

    def _calculate_edge_consistency(self, edges):
        """Return the mean edge density across the given edge maps."""
        return np.mean([np.mean(edge) for edge in edges])

    def _analyze_pattern(self, image, expected_pattern):
        """Analyze pattern accuracy.

        Placeholder: returns a fixed 0.8 accuracy. NOTE(review): callers in
        evaluate_print_quality() use {} when no pattern is given, so the
        real implementation should probably return a dict for consistency.
        """
        return 0.8  # Assuming 80% accuracy

    def _extract_defect_points(self, image):
        """Extract potential defect points (placeholder implementation)."""
        return np.array([[0, 0], [1, 1], [2, 2]])  # Placeholder points

    def _analyze_defect_clusters(self, defect_points, cluster_labels):
        """Summarize defect clusters (placeholder implementation)."""
        # NOTE(review): the unique-label count includes HDBSCAN's noise
        # label (-1) when present -- verify before relying on defect_count.
        return {'defect_count': len(np.unique(cluster_labels)), 'cluster_sizes': [], 'defect_density': 0}

    def _apply_parameter_adjustments(self, current_params, adjustments):
        """Apply parameter adjustments (placeholder implementation)."""
        return current_params  # Placeholder return
|