yinboliao committed
Commit 9fe3c2e · Parent: 15b8bd8

update model
20_feb_best_model_deployment.pth ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2a150be068d26551c5215c32f6a06ad15e539123be3e6099b30207777445ab2a
size 106778322
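
The pointer above records only the checkpoint's sha256 oid and size; the weights file itself is fetched through Git LFS. A minimal sketch (not part of this commit; the local file path is assumed) for checking that a downloaded copy matches the recorded oid:

import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file in 1 MiB chunks so the checkpoint never has to fit in memory
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "2a150be068d26551c5215c32f6a06ad15e539123be3e6099b30207777445ab2a"
print(sha256_of("20_feb_best_model_deployment.pth") == expected)  # True if the download is intact
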
image_app.py ADDED
@@ -0,0 +1,121 @@
import os
from torchvision import transforms
from PIL import Image
import cv2
import torch
import numpy as np

# Use a non-interactive backend so figures can be saved without a display
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt


# The RetinaNet class must be defined (or imported) before loading the checkpoint
class RetinaNet(torch.nn.Module):
    def __init__(self):
        super(RetinaNet, self).__init__()
        # Add your model architecture here

    def forward(self, x):
        # Add forward pass
        return x


# Allowlist RetinaNet for deserialization (only relevant when weights_only=True)
torch.serialization.add_safe_globals([RetinaNet])

PATH = './20_feb_best_model_deployment.pth'

# Load the fully pickled model; fail early with a clear message if it cannot be read
try:
    model = torch.load(PATH,
                       map_location=torch.device("cpu"),
                       weights_only=False)
except Exception as e:
    raise SystemExit(f"Error loading model: {e}")

# Run on GPU when available; keep model and inputs on the same device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
model.eval()


# Preprocessing helper for OpenCV frames (not used by the image loop below)
def preprocess_frame(frame):
    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    resized_frame = cv2.resize(rgb_frame, (224, 224))
    normalized_frame = resized_frame / 255.0
    return np.expand_dims(normalized_frame, axis=0)


# Transformation: resize and convert to a CHW float tensor in [0, 1]
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor()
])

# Make sure the output folder exists before saving figures into it
os.makedirs('./output_images', exist_ok=True)

# Loop through images in the folder
for img_name in os.listdir('./images'):
    print(f"Processing image: {img_name}")

    # Load the image and force three RGB channels
    img_path = os.path.join('./images', img_name)
    img = Image.open(img_path).convert('RGB')

    # Apply the transformation and add a batch dimension
    new_image = transform(img)
    new_image_batch = new_image.unsqueeze(0)
    print(new_image_batch.shape)  # torch.Size([1, 3, 224, 224])

    # Predict using the best model; ToTensor already yields NCHW, so no permute is needed
    with torch.no_grad():
        prediction = model(new_image_batch.to(device).float())

    # Interpret the prediction
    if prediction.shape[1] == 2:  # Binary classification output
        # Convert logits to probabilities with softmax
        probabilities = torch.nn.functional.softmax(prediction, dim=1)
        p_accept = probabilities[0][0].item() * 100  # As a percentage
        p_reject = probabilities[0][1].item() * 100
    else:  # Regression/other output
        # Take the mean magnitude of the output channels
        p_accept = abs(prediction[0][0]).mean().item() * 100
        p_reject = abs(prediction[0][1]).mean().item() * 100

    print(f"Accept probability: {p_accept:.2f}%")
    print(f"Reject probability: {p_reject:.2f}%")

    # Classification threshold in percent; may need adjustment based on model training,
    # so consider making it a configurable parameter
    if p_accept > 35:
        result = "Acceptable"
    else:
        result = "Rejectable"
    print(f"Predicted: {result}")

    # Save an annotated copy of the image instead of showing it
    fig = plt.figure()
    plt.imshow(new_image.permute(1, 2, 0))
    plt.title(f"Prediction: {result} ({p_accept:.1f}% vs {p_reject:.1f}%)")
    plt.axis('off')
    plt.savefig(f'./output_images/output_{img_name}')
    plt.close(fig)  # Free the figure to avoid memory growth across iterations
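
image_app.py loads the checkpoint as a fully pickled model with weights_only=False, which is why the load is wrapped in try/except. A minimal alternative sketch, under two assumptions that do not hold in this commit as-is: the checkpoint is re-saved as a plain state_dict, and the real RetinaNet architecture (rather than the stub above) is available:

import torch

# Assumption: RetinaNet here is the real architecture used in training;
# the placeholder class in image_app.py would not match the saved weights.
model = RetinaNet()

# weights_only=True refuses arbitrary pickled objects, so it only accepts
# checkpoints made of tensors and plain containers (e.g. a state_dict).
state_dict = torch.load("./20_feb_best_model_deployment.pth",
                        map_location="cpu",
                        weights_only=True)
model.load_state_dict(state_dict)
model.eval()
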
images/pipe1.jpg ADDED
images/pipe2.jpg ADDED
images/pipe3.jpg ADDED
images/pipe4.jpg ADDED
output_images/output_pipe4.jpg ADDED
output_pipe4.jpg ADDED
requirements.txt ADDED
@@ -0,0 +1,7 @@
numpy
opencv-python
torch
colorama
matplotlib
pillow
torchvision
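
The list above is unpinned, so recent versions of these packages are assumed to work. A small, hypothetical pre-flight check that every requirement is importable (the pip-name to import-name mapping is the only extra assumption):

import importlib

packages = {
    "numpy": "numpy",
    "opencv-python": "cv2",
    "torch": "torch",
    "colorama": "colorama",
    "matplotlib": "matplotlib",
    "pillow": "PIL",
    "torchvision": "torchvision",
}

for pip_name, module in packages.items():
    try:
        importlib.import_module(module)
        print(f"{pip_name}: OK")
    except ImportError:
        print(f"{pip_name}: MISSING (install with: pip install {pip_name})")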