# Car Scratch and Dent Detection — Gradio app (Hugging Face Space).
# Uploads a gallery of car photos, detects damage with a custom YOLOv8 model,
# and reports an overall condition rating plus annotated images.
import cv2
from ultralytics import YOLO ## for Yolov8
import matplotlib.pyplot as plt
import gradio as gr
import numpy as np
import pickle
def number_object_detected(image):
    """Run the custom damage-detection YOLO model and count detections.

    Parameters
    ----------
    image : str | numpy.ndarray
        Image path or BGR image array (both accepted by ultralytics YOLO).

    Returns
    -------
    tuple[dict, list]
        ``{class_name: count}`` for every detected damage class, and the raw
        ultralytics results list (used by the caller to plot annotations).
    """
    # Cache the model on the function object so repeated calls do not
    # re-read the weights from disk (the original reloaded it every call).
    model = getattr(number_object_detected, "_model", None)
    if model is None:
        model = YOLO('best2.pt')  # custom YOLO weights for damage classes
        number_object_detected._model = model

    results = model(image, verbose=False)
    id_to_name = results[0].names                   # {class_id: class_name}
    class_ids = results[0].boxes.cls.cpu().numpy()  # one id per detection

    # Count detections per class, converting numpy scalars to native types.
    class_count = {}
    unique_ids, counts = np.unique(class_ids, return_counts=True)
    for class_id, count in zip(unique_ids, counts):
        class_count[id_to_name[int(class_id)]] = int(count)

    print(class_count)
    return (class_count, results)
def car_detection_and_Cropping(image_path):
    """Locate the car in *image_path*, crop to it, then run damage detection.

    A stock COCO-trained YOLOv8 model finds objects; if a car is present the
    image is cropped to the largest *car* bounding box so the damage model
    focuses on the vehicle. Otherwise the full image is analysed as-is.

    Returns the ``(class_count, results)`` pair from
    :func:`number_object_detected`.
    """
    # Cache the COCO model on the function object to avoid reloading weights.
    model = getattr(car_detection_and_Cropping, "_model", None)
    if model is None:
        model = YOLO('yolov8m.pt')
        car_detection_and_Cropping._model = model

    r = model(image_path, verbose=False)
    names = r[0].names
    boxes = r[0].boxes.xyxy.cpu().numpy().astype(int)
    class_ids = r[0].boxes.cls.cpu().numpy()

    # Keep only boxes whose predicted class is 'car'.  The original picked
    # the largest box of ANY class once a car was merely present, which could
    # crop a non-car object (e.g. a truck or person with a bigger box).
    car_boxes = [
        box for box, cid in zip(boxes, class_ids) if names[int(cid)] == 'car'
    ]
    if car_boxes:
        # Crop to the largest detected car (by bounding-box area).
        x1, y1, x2, y2 = max(
            car_boxes, key=lambda b: (b[2] - b[0]) * (b[3] - b[1])
        )
        image = cv2.imread(image_path)
        crop_image = image[y1:y2, x1:x2]
        class_c, result = number_object_detected(crop_image)
    else:
        class_c, result = number_object_detected(image_path)
    return class_c, result
# Relative severity weight of each damage class the model can report.
severity_points = {
    'scratch': 1,
    'dent': 2,
    'rust': 2,
    'paint-damage': 2,
    'crack': 2
}


def calculate_condition_score(detections):
    """Return the severity-weighted sum of counts for known damage types.

    Unrecognised keys in *detections* are ignored.
    """
    return sum(
        severity_points[damage] * count
        for damage, count in detections.items()
        if damage in severity_points
    )
def normalize_score(score, max_score):
    """Map a raw severity score onto a 0-10 scale relative to *max_score*."""
    fraction = score / max_score
    return fraction * 10
## maps accumulated damage detections onto a qualitative condition rating
def estimate_condition(detections):
    """Convert a ``{damage_type: count}`` dict into a condition label.

    The severity score is normalised against the sum of all severity weights
    (i.e. one instance of every damage type) and bucketed into
    Excellent / Good / Fair / Poor / Very Poor.

    NOTE(review): with many detections the normalised score can exceed 10,
    which is why buckets above 10 exist.
    """
    print("Detection list", detections)  # fixed typo: was "Detedtion list"
    max_possible_score = sum(severity_points.values())
    score = calculate_condition_score(detections)
    normalized_score = normalize_score(score, max_possible_score)

    if normalized_score <= 2:
        condition = "Excellent"
    elif normalized_score <= 7:
        condition = "Good"
    elif normalized_score <= 15:  # was "< 15": exactly 15 fell to "Very Poor"
        condition = "Fair"
    elif normalized_score <= 20:
        condition = "Poor"
    else:
        condition = "Very Poor"
    print("Condition", condition)
    return condition
## Gradio callback: run detection over every uploaded image
def process_data(files):
    """Process gallery uploads; return (condition label, annotated images).

    *files* is a list of gallery entries whose first element is a file path.
    Damage counts from all images are merged before scoring.
    """
    print(files)
    file_names = [entry[0] for entry in files]  # gallery item -> file path
    annotated = []
    print('fileName', file_names)
    damage_dic = {}
    for path in file_names:
        print('image is ', path)
        damage, result = car_detection_and_Cropping(path)
        for res in result:
            bgr_array = res.plot(pil=True)          # annotated predictions (BGR)
            annotated.append(bgr_array[..., ::-1])  # convert BGR -> RGB
        # Merge this image's damage counts into the running totals.
        for damage_type, count in damage.items():
            damage_dic[damage_type] = damage_dic.get(damage_type, 0) + count
    condition = estimate_condition(damage_dic)
    return (condition, annotated)
# Gradio UI wiring: gallery of car photos in, condition label plus annotated
# detection images out.  (Also removes a stray "|" scrape artifact that made
# the original launch line invalid Python.)
interface = gr.Interface(
    fn=process_data,
    inputs=gr.Gallery(label='Upload Image of Car', type='filepath'),
    outputs=[
        gr.Textbox(label="Number of Objects detected "),
        gr.Gallery(label='output', type='pil'),
    ],
    title=" 🚘Car Scratch and Dent Detection",
)
interface.launch()