AIQuest committed on
Commit 3c639b6 · verified · 1 Parent(s): 3a4c1a3

Update main.py

Files changed (1)
  1. main.py +153 -0
main.py CHANGED
@@ -0,0 +1,153 @@
+ from ultralytics import YOLO
+ import numpy as np
+ import matplotlib.pyplot as plt
+ import cv2
+ import gradio as gr
+ import pickle
+
+ # Function that returns the counts of detected damage objects in an image
+ def number_object_detected(image):
+     custom_model = YOLO('best.pt')  # custom YOLO model trained for damage detection
+     results = custom_model(image, verbose=False)
+
+     dic = results[0].names                        # class-id -> class-name mapping
+     classes = results[0].boxes.cls.cpu().numpy()  # detected class ids
+     probability = results[0].boxes.conf           # detection confidences (unused)
+
+     # count how many times each class was detected
+     class_count = {}
+     unique_elements, counts = np.unique(classes, return_counts=True)
+     for e, count in zip(unique_elements, counts):
+         class_count[dic[int(e)]] = int(count)
+
+     return (class_count, results)
+
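+ # Illustrative return value (class names assumed from the custom model's labels):
+ # number_object_detected(img) might return ({'scratch': 2, 'dent': 1}, results).
+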
+ def car_detection_and_Cropping(image_path):
+     # run a generic YOLOv8 model first to locate the car in the photo
+     simple_yolo = YOLO('yolov8m.pt')
+     r = simple_yolo(image_path, verbose=False)
+
+     names = r[0].names
+     boxes = r[0].boxes.xyxy.cpu().numpy().astype(int)
+     classes = set(r[0].boxes.cls.cpu().numpy())
+     classes2 = [names[int(i)] for i in classes]
+
+     # If a car was detected, crop the largest car box and run damage detection
+     # on the crop; otherwise run damage detection on the full image.
+     if boxes.size != 0 and 'car' in classes2:
+         # pick the box with the largest area
+         area = []
+         for x1, y1, x2, y2 in boxes:
+             area.append((x2 - x1) * (y2 - y1))
+         max_index, max_a = max(enumerate(area), key=lambda x: x[1])
+
+         # Load the image with OpenCV and crop it to the selected box
+         image = cv2.imread(image_path)
+         crop_image = image[boxes[max_index][1]:boxes[max_index][3], boxes[max_index][0]:boxes[max_index][2]]
+
+         # pass the cropped image to the damage-detection model
+         class_c, result = number_object_detected(crop_image)
+     else:
+         class_c, result = number_object_detected(image_path)
+     return class_c, result
+
+ # severity weight for each damage class detected by the custom model
+ severity_points = {
+     'scratch': 1,
+     'dent': 2,
+     'rust': 2,
+     'paint-damage': 2,
+     'crack': 2
+ }
+
+ # Weighted damage score: severity weight * number of detections, summed over classes
+ def calculate_condition_score(detections):
+     total_score = 0
+     for detection, count in detections.items():
+         if detection in severity_points:
+             total_score += severity_points[detection] * count
+     return total_score
+
+ # Scale the raw score to a 0-10 range relative to max_score
+ def normalize_score(score, max_score):
+     return (score / max_score) * 10
+
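+ # Illustrative example of the scoring pipeline (detection counts assumed, not
+ # from real images): detections = {'scratch': 2, 'dent': 1} gives
+ # calculate_condition_score -> 2*1 + 1*2 = 4, and with max_score = 9
+ # (sum of severity_points) normalize_score -> 4/9 * 10 ≈ 4.4, i.e. "Good".
+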
+ def estimate_condition(detections):
+     print("Detection list", detections)
+     max_possible_score = sum(severity_points.values())  # assumes every damage type detected once
+     score = calculate_condition_score(detections)
+     normalized_score = normalize_score(score, max_possible_score)
+     print("Normalized score", normalized_score)
+     # Assign condition rating from the normalized score
+     if normalized_score <= 4:                  # low score, condition is Excellent
+         return "Excellent"
+     elif 4 < normalized_score <= 7:            # moderately low, condition is Good
+         return "Good"
+     elif 7 < normalized_score <= 15:           # moderate, condition is Fair
+         return "Fair"
+     elif 15 < normalized_score <= 20:          # moderately high, condition is Poor
+         return "Poor"
+     else:                                      # high, condition is Very Poor
+         return "Very Poor"
+
+ ## loading the price-prediction model
+ with open('Price_prediction_decision_tree.pkl', 'rb') as file:
+     loaded_pipe_lr = pickle.load(file)
+
+
+ def process_data(files, car_brand, car_name, model_year, mileage, city_registered, color, engine_c, trans, fuel_type, Cate):
+     file_names = [f[0] for f in files]
+     image_r = []
+
+     # accumulate damage counts across all uploaded images
+     damage_dic = {}
+     for f in file_names:
+         damage, result = car_detection_and_Cropping(f)
+         for r in result:
+             im_array = r.plot()          # BGR numpy array with predictions drawn on it
+             array = im_array[..., ::-1]  # convert BGR to RGB for display
+             image_r.append(array)
+         for key in damage.keys():
+             if key in damage_dic:
+                 damage_dic[key] += damage[key]
+             else:
+                 damage_dic[key] = damage[key]
+     condition = estimate_condition(damage_dic)
+
+     price = loaded_pipe_lr.predict([[model_year, mileage, city_registered, color, engine_c, car_brand, car_name, trans, fuel_type, condition, Cate]])
+     print(price)
+     if price[0] >= 100:
+         price[0] = price[0] / 100
+
+     return (condition, str(price[0]) + ' lacs', image_r)
+
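+ # Note (assumption, not verified here): the pickled pipeline is expected to handle
+ # its own categorical encoding, and the 11 input columns above must match the
+ # column order used when the model was trained.
+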
+ # Build and launch the Gradio UI
+ years_list = list(range(2024, 1899, -1))
+ gr.Interface(fn=process_data,
+              inputs=[gr.Gallery(label="Upload Files", type="filepath"),
+                      gr.Dropdown(['suzuki', 'toyota', 'honda', 'kia', 'changan'], label='Brand'),
+                      gr.Textbox(lines=1, label="Car Name"),
+                      gr.Dropdown(choices=years_list, label='Model Year'),
+                      gr.Number(label="Mileage Km"),
+                      gr.Textbox(lines=1, label="City Register"),
+                      gr.Textbox(lines=1, label="Color"),
+                      gr.Number(label="Engine Capacity in CC"),
+                      gr.Radio(["automatic", "manual"], label="Transmission Type"),
+                      # gr.Radio(["imported", "local"], label="Assembly Type"),
+                      gr.Radio(["hybrid", "petrol", 'diesel'], label="Fuel Type"),
+                      gr.Radio(["hatchback", "sedan", 'suv', 'croosover', 'van'], label="Category")],
+              outputs=[gr.Textbox(label="Condition"), gr.Textbox(label="Predicted Price"), gr.Gallery(label='output', type='pil')]).launch()