import os
import sys
import torch
import numpy as np
import gradio as gr
import torchaudio
import torchvision
import spaces

# Add parent directory to path to import preprocess functions
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

# Import functions from preprocess and model definitions
from preprocess import process_image_data
from evaluate_backbones import WatermelonModelModular, IMAGE_BACKBONES, AUDIO_BACKBONES

# Define the top-performing models based on evaluation
TOP_MODELS = [
    {"image_backbone": "efficientnet_b3", "audio_backbone": "transformer"},
    {"image_backbone": "efficientnet_b0", "audio_backbone": "transformer"},
    {"image_backbone": "resnet50", "audio_backbone": "transformer"}
]
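# Checkpoint files are resolved as f"{image_backbone}_{audio_backbone}_model.pt"
# inside model_dir, so the first entry above maps to
# "models/efficientnet_b3_transformer_model.pt".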

# Define the MoE Model
class WatermelonMoEModel(torch.nn.Module):
    def __init__(self, model_configs, model_dir="models", weights=None):
        """
        Mixture of Experts model that combines multiple backbone models.
        
        Args:
            model_configs: List of dictionaries with 'image_backbone' and 'audio_backbone' keys
            model_dir: Directory where model checkpoints are stored
            weights: Optional list of weights for each model (None for equal weighting)
        """
        super().__init__()
        self.models = torch.nn.ModuleList()  # Use ModuleList instead of regular list
        self.model_configs = model_configs
        
        # Load each model
        loaded_count = 0
        for config in model_configs:
            img_backbone = config["image_backbone"]
            audio_backbone = config["audio_backbone"]
            
            # Initialize model
            model = WatermelonModelModular(img_backbone, audio_backbone)
            
            # Load weights
            model_path = os.path.join(model_dir, f"{img_backbone}_{audio_backbone}_model.pt")
            if os.path.exists(model_path):
                print(f"\033[92mINFO\033[0m: Loading model {img_backbone}_{audio_backbone} from {model_path}")
                try:
                    model.load_state_dict(torch.load(model_path, map_location='cpu'))
                    model.eval()  # Set to evaluation mode
                    self.models.append(model)
                    loaded_count += 1
                except Exception as e:
                    print(f"\033[91mERR!\033[0m: Failed to load model from {model_path}: {e}")
                    continue
            else:
                print(f"\033[91mERR!\033[0m: Model checkpoint not found at {model_path}")
                continue
        
        # Add a dummy parameter if no models were loaded to prevent StopIteration
        if loaded_count == 0:
            print(f"\033[91mERR!\033[0m: No models were successfully loaded!")
            self.dummy_param = torch.nn.Parameter(torch.zeros(1))
            
        # Set model weights (uniform by default)
        if weights and loaded_count > 0:
            assert len(weights) == len(self.models), "Number of weights must match number of models"
            self.weights = weights
        else:
            self.weights = [1.0 / loaded_count] * loaded_count if loaded_count > 0 else []
            
        print(f"\033[92mINFO\033[0m: Loaded {loaded_count} models for MoE ensemble")
        print(f"\033[92mINFO\033[0m: Model weights: {self.weights}")

    def to(self, device):
        """
        Override to() method to ensure all sub-models are moved to the same device
        """
        for model in self.models:
            model.to(device)
        return super().to(device)
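    # Note: the experts are registered in an nn.ModuleList, so the default
    # nn.Module.to() would already move them recursively; the explicit loop
    # above simply documents that every expert follows the ensemble onto
    # the target device.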

    def forward(self, mfcc, image):
        """
        Forward pass through the MoE model.
        Returns the weighted average of all model outputs.
        """
        # Check if we have models loaded
        if not self.models:
            print(f"\033[91mERR!\033[0m: No models available for inference!")
            return torch.tensor([0.0], device=mfcc.device)  # Return a default value
            
        outputs = []
        
        # Get outputs from each model
        with torch.no_grad():
            for i, model in enumerate(self.models):
                output = model(mfcc, image)
                outputs.append(output * self.weights[i])
        
        # Return weighted average
        return torch.sum(torch.stack(outputs), dim=0)

# Modified version of process_audio_data specifically for the app to handle various tensor shapes
def app_process_audio_data(waveform, sample_rate):
    """Modified version of process_audio_data for the app that handles different tensor dimensions"""
    try:
        print(f"\033[92mDEBUG\033[0m: Processing audio - Initial shape: {waveform.shape}, Sample rate: {sample_rate}")
        
        # Handle different tensor dimensions
        if waveform.dim() == 3:
            print(f"\033[92mDEBUG\033[0m: Found 3D tensor, converting to 2D")
            # For 3D tensor, take the first item (batch dimension)
            waveform = waveform[0]
            
        if waveform.dim() == 2:
            # Use the first channel for stereo audio
            waveform = waveform[0]
            print(f"\033[92mDEBUG\033[0m: Using first channel, new shape: {waveform.shape}")
        
        # Resample to 16kHz if needed
        resample_rate = 16000
        if sample_rate != resample_rate:
            print(f"\033[92mDEBUG\033[0m: Resampling from {sample_rate}Hz to {resample_rate}Hz")
            waveform = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=resample_rate)(waveform)
        
        # Ensure 3 seconds of audio
        if waveform.size(0) < 3 * resample_rate:
            print(f"\033[92mDEBUG\033[0m: Padding audio from {waveform.size(0)} to {3 * resample_rate} samples")
            waveform = torch.nn.functional.pad(waveform, (0, 3 * resample_rate - waveform.size(0)))
        else:
            print(f"\033[92mDEBUG\033[0m: Trimming audio from {waveform.size(0)} to {3 * resample_rate} samples")
            waveform = waveform[: 3 * resample_rate]
        
        # Apply MFCC transformation
        print(f"\033[92mDEBUG\033[0m: Applying MFCC transformation")
        mfcc_transform = torchaudio.transforms.MFCC(
            sample_rate=resample_rate,
            n_mfcc=13,
            melkwargs={
                "n_fft": 256,
                "win_length": 256,
                "hop_length": 128,
                "n_mels": 40,
            }
        )
        
        mfcc = mfcc_transform(waveform)
        print(f"\033[92mDEBUG\033[0m: MFCC output shape: {mfcc.shape}")
        
        return mfcc
    except Exception as e:
        import traceback
        print(f"\033[91mERR!\033[0m: Error in audio processing: {e}")
        print(traceback.format_exc())
        return None
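# Rough output-shape check (assuming torchaudio's default centered STFT): 3 s at
# 16 kHz is 48000 samples; with hop_length=128 that gives 48000 // 128 + 1 = 376
# frames, so the returned MFCC should be approximately (13, 376).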

# Using the decorator for GPU acceleration
@spaces.GPU
def predict_sugar_content(audio, image, model_dir="models", weights=None):
    """Function with GPU acceleration to predict watermelon sugar content in Brix using MoE model"""
    try:
        # Check CUDA availability inside the GPU-decorated function
        if torch.cuda.is_available():
            device = torch.device("cuda")
            print(f"\033[92mINFO\033[0m: CUDA is available. Using device: {device}")
        else:
            device = torch.device("cpu")
            print(f"\033[92mINFO\033[0m: CUDA is not available. Using device: {device}")
        
        # Load MoE model
        moe_model = WatermelonMoEModel(TOP_MODELS, model_dir, weights)
        # Explicitly move the entire model to device
        moe_model = moe_model.to(device)
        moe_model.eval()
        print(f"\033[92mINFO\033[0m: Loaded MoE model with {len(moe_model.models)} backbone models")
        
        # Debug information about input types
        print(f"\033[92mDEBUG\033[0m: Audio input type: {type(audio)}")
        print(f"\033[92mDEBUG\033[0m: Audio input shape/length: {len(audio)}")
        print(f"\033[92mDEBUG\033[0m: Image input type: {type(image)}")
        if isinstance(image, np.ndarray):
            print(f"\033[92mDEBUG\033[0m: Image input shape: {image.shape}")
        
        # Handle different audio input formats
        if isinstance(audio, tuple) and len(audio) == 2:
            # Standard Gradio format: (sample_rate, audio_data)
            sample_rate, audio_data = audio
            print(f"\033[92mDEBUG\033[0m: Audio sample rate: {sample_rate}")
            print(f"\033[92mDEBUG\033[0m: Audio data shape: {audio_data.shape}")
        elif isinstance(audio, tuple) and len(audio) > 2:
            # Sometimes Gradio returns (sample_rate, audio_data, other_info...)
            sample_rate, audio_data = audio[0], audio[-1]
            print(f"\033[92mDEBUG\033[0m: Audio sample rate: {sample_rate}")
            print(f"\033[92mDEBUG\033[0m: Audio data shape: {audio_data.shape}")
        elif isinstance(audio, str):
            # Direct path to audio file
            audio_data, sample_rate = torchaudio.load(audio)
            print(f"\033[92mDEBUG\033[0m: Loaded audio from path with shape: {audio_data.shape}")
        else:
            return f"Error: Unsupported audio format. Got {type(audio)}"
        
        # Create a temporary path for the image (audio is processed in memory below)
        temp_dir = "temp"
        os.makedirs(temp_dir, exist_ok=True)
        
        temp_image_path = os.path.join(temp_dir, "temp_image.jpg")
        
        # Import necessary libraries
        from PIL import Image
        
        # Audio handling - direct processing from the data in memory
        if isinstance(audio_data, np.ndarray):
            # Convert numpy array to tensor
            print(f"\033[92mDEBUG\033[0m: Converting numpy audio with shape {audio_data.shape} to tensor")
            audio_tensor = torch.tensor(audio_data).float()
            
            # Handle different audio dimensions
            if audio_data.ndim == 1:
                # Single channel audio
                audio_tensor = audio_tensor.unsqueeze(0)
            elif audio_data.ndim == 2:
                # Ensure channels are first dimension
                if audio_data.shape[0] > audio_data.shape[1]:
                    # More rows than columns, probably (samples, channels)
                    audio_tensor = audio_tensor.t()  # Transpose to (channels, samples)
        else:
            # Already a tensor
            audio_tensor = audio_data.float()
        
        print(f"\033[92mDEBUG\033[0m: Audio tensor shape before processing: {audio_tensor.shape}")
        
        # Skip saving/loading and process directly
        mfcc = app_process_audio_data(audio_tensor, sample_rate)
        print(f"\033[92mDEBUG\033[0m: MFCC tensor shape after processing: {mfcc.shape if mfcc is not None else None}")
        
        # Image handling
        if isinstance(image, np.ndarray):
            print(f"\033[92mDEBUG\033[0m: Converting numpy image with shape {image.shape} to PIL")
            pil_image = Image.fromarray(image)
            pil_image.save(temp_image_path)
            print(f"\033[92mDEBUG\033[0m: Saved image to {temp_image_path}")
        elif isinstance(image, str):
            # If image is already a path
            temp_image_path = image
            print(f"\033[92mDEBUG\033[0m: Using provided image path: {temp_image_path}")
        else:
            return f"Error: Unsupported image format. Got {type(image)}"
        
        # Process image
        print(f"\033[92mDEBUG\033[0m: Loading and preprocessing image from {temp_image_path}")
        image_tensor = torchvision.io.read_image(temp_image_path)
        print(f"\033[92mDEBUG\033[0m: Loaded image shape: {image_tensor.shape}")
        image_tensor = image_tensor.float()
        processed_image = process_image_data(image_tensor)
        print(f"\033[92mDEBUG\033[0m: Processed image shape: {processed_image.shape if processed_image is not None else None}")
        
        # Add batch dimension for inference and move to device
        if mfcc is not None:
            # Ensure mfcc is on the same device as the model
            mfcc = mfcc.unsqueeze(0).to(device)
            print(f"\033[92mDEBUG\033[0m: Final MFCC shape with batch dimension: {mfcc.shape}, device: {mfcc.device}")
        
        if processed_image is not None:
            # Ensure processed_image is on the same device as the model
            processed_image = processed_image.unsqueeze(0).to(device)
            print(f"\033[92mDEBUG\033[0m: Final image shape with batch dimension: {processed_image.shape}, device: {processed_image.device}")
        
        # Double-check model is on the correct device
        print(f"\033[92mDEBUG\033[0m: MoE model device: {next(moe_model.parameters()).device}")
        for i, model in enumerate(moe_model.models):
            print(f"\033[92mDEBUG\033[0m: Model {i} device: {next(model.parameters()).device}")
        
        # Run inference with MoE model
        print(f"\033[92mDEBUG\033[0m: Running inference with MoE model on device: {device}")
        if mfcc is not None and processed_image is not None:
            with torch.no_grad():
                brix_value = moe_model(mfcc, processed_image)
                print(f"\033[92mDEBUG\033[0m: Prediction successful: {brix_value.item()}")
        else:
            return "Error: Failed to process inputs. Please check the debug logs."
        
        # Format the result with a range display
        if brix_value is not None:
            brix_score = brix_value.item()
            
            # Create a header with the numerical result
            result = f"πŸ‰ Predicted Sugar Content: {brix_score:.1f}Β° Brix πŸ‰\n\n"
            
            # Add extra info about the MoE model
            result += "Using Ensemble of Top-3 Models:\n"
            result += "- EfficientNet-B3 + Transformer\n"
            result += "- EfficientNet-B0 + Transformer\n"
            result += "- ResNet-50 + Transformer\n\n"
            
            # Add Brix scale visualization
            result += "Sugar Content Scale (in Β°Brix):\n"
            result += "──────────────────────────────────\n"
            
            # Create the scale display with Brix ranges
            scale_ranges = [
                (0, 8, "Low Sugar (< 8Β° Brix)"),
                (8, 9, "Mild Sweetness (8-9Β° Brix)"),
                (9, 10, "Medium Sweetness (9-10Β° Brix)"),
                (10, 11, "Sweet (10-11Β° Brix)"),
                (11, 13, "Very Sweet (11-13Β° Brix)")
            ]
            
            # Find which category the prediction falls into
            user_category = None
            for min_val, max_val, category_name in scale_ranges:
                if min_val <= brix_score < max_val:
                    user_category = category_name
                    break
            if user_category is None and brix_score >= scale_ranges[-1][1]:
                # Handle scores at or above the top of the scale
                user_category = scale_ranges[-1][2]
            
            # Display the scale with the user's result highlighted
            for min_val, max_val, category_name in scale_ranges:
                if category_name == user_category:
                    result += f"β–Ά {min_val}-{max_val}: {category_name} β—€ (YOUR WATERMELON)\n"
                else:
                    result += f"  {min_val}-{max_val}: {category_name}\n"
            
            result += "──────────────────────────────────\n\n"
            
            # Add assessment of the watermelon's sugar content
            if brix_score < 8:
                result += "Assessment: This watermelon has low sugar content. It may taste bland or slightly bitter."
            elif brix_score < 9:
                result += "Assessment: This watermelon has mild sweetness. Acceptable flavor but not very sweet."
            elif brix_score < 10:
                result += "Assessment: This watermelon has moderate sugar content. It should have pleasant sweetness."
            elif brix_score < 11:
                result += "Assessment: This watermelon has good sugar content! It should be sweet and juicy."
            else:
                result += "Assessment: This watermelon has excellent sugar content! Perfect choice for maximum sweetness and flavor."
                
            return result
        else:
            return "Error: Could not predict sugar content. Please try again with different inputs."
    
    except Exception as e:
        import traceback
        error_msg = f"Error: {str(e)}\n\n"
        error_msg += traceback.format_exc()
        print(f"\033[91mERR!\033[0m: {error_msg}")
        return error_msg
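# Minimal local smoke test (illustrative only; the arrays below are hypothetical
# placeholders, and real checkpoints must exist under model_dir):
#   sr = 16000
#   wav = np.zeros(3 * sr, dtype=np.float32)       # 3 s of silence
#   img = np.zeros((256, 256, 3), dtype=np.uint8)  # blank RGB image
#   print(predict_sugar_content((sr, wav), img))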

def create_app(model_dir="models", weights=None):
    """Create and launch the Gradio interface"""
    # Define the prediction function with model path
    def predict_fn(audio, image):
        return predict_sugar_content(audio, image, model_dir, weights)
    
    # Create Gradio interface
    with gr.Blocks(title="Watermelon Sugar Content Predictor (MoE)", theme=gr.themes.Soft()) as interface:
        gr.Markdown("# πŸ‰ Watermelon Sugar Content Predictor (Ensemble Model)")
        gr.Markdown("""
        This app predicts the sugar content (in Β°Brix) of a watermelon based on its sound and appearance.
        
        ## What's New
        This version uses a Mixture of Experts (MoE) ensemble model that combines the three best-performing models:
        - EfficientNet-B3 + Transformer
        - EfficientNet-B0 + Transformer
        - ResNet-50 + Transformer
        
        The ensemble approach provides more accurate predictions than any single model!
        
        ## Instructions:
        1. Upload or record an audio clip of tapping the watermelon
        2. Upload or capture an image of the watermelon
        3. Click 'Predict' to get the sugar content estimation
        """)
        
        with gr.Row():
            with gr.Column():
                audio_input = gr.Audio(label="Upload or Record Audio", type="numpy")
                image_input = gr.Image(label="Upload or Capture Image")
                submit_btn = gr.Button("Predict Sugar Content", variant="primary")
            
            with gr.Column():
                output = gr.Textbox(label="Prediction Results", lines=15)
                
        submit_btn.click(
            fn=predict_fn,
            inputs=[audio_input, image_input],
            outputs=output
        )
        
        gr.Markdown("""
        ## Tips for best results
        - For audio: Tap the watermelon with your knuckle and record the sound
        - For image: Take a clear photo of the whole watermelon in good lighting
        
        ## About Brix Measurement
        Brix (Β°Bx) is a measurement of sugar content in a solution. For watermelons, higher Brix values indicate sweeter fruit.
        A typical ripe watermelon measures between 9° and 11° Brix.
        
        ## About the Mixture of Experts Model
        This app uses a Mixture of Experts (MoE) model that combines predictions from multiple neural networks.
        Our testing shows the ensemble approach achieves a Mean Absolute Error (MAE) of ~0.22, which is significantly 
        better than any individual model (best individual model: ~0.36 MAE).
        """)
    
    return interface

if __name__ == "__main__":
    import argparse
    
    parser = argparse.ArgumentParser(description="Watermelon Sugar Content Prediction App (MoE)")
    parser.add_argument(
        "--model_dir", 
        type=str, 
        default="models", 
        help="Directory containing the model checkpoints"
    )
    parser.add_argument(
        "--share", 
        action="store_true", 
        help="Create a shareable link for the app"
    )
    parser.add_argument(
        "--debug", 
        action="store_true", 
        help="Enable verbose debug output"
    )
    parser.add_argument(
        "--weighting", 
        type=str, 
        choices=["uniform", "performance"], 
        default="uniform", 
        help="How to weight the models (uniform or based on performance)"
    )
    
    args = parser.parse_args()
    
    if args.debug:
        print(f"\033[92mINFO\033[0m: Debug mode enabled")
    
    # Check if model directory exists
    if not os.path.exists(args.model_dir):
        print(f"\033[91mERR!\033[0m: Model directory not found at {args.model_dir}")
        sys.exit(1)
    
    # Determine weights based on argument
    weights = None
    if args.weighting == "performance":
        # Weights inversely proportional to the MAE (better models get higher weights)
        # These are the MAE values from the evaluation results
        mae_values = [0.3635, 0.3765, 0.3959]  # efficientnet_b3+transformer, efficientnet_b0+transformer, resnet50+transformer
        
        # Convert to weights (inverse of MAE, normalized)
        inverse_mae = [1/mae for mae in mae_values]
        total = sum(inverse_mae)
        weights = [val/total for val in inverse_mae]
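        # Worked example with the MAE values above: the inverse MAEs are roughly
        # [2.751, 2.656, 2.526], summing to ~7.933, so the normalized weights
        # come out near [0.347, 0.335, 0.318] (the most accurate model gets the
        # largest weight).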
        
        print(f"\033[92mINFO\033[0m: Using performance-based weights: {weights}")
    else:
        print(f"\033[92mINFO\033[0m: Using uniform weights")
    
    # Create and launch the app
    app = create_app(args.model_dir, weights)
    app.launch(share=args.share)