Rehman1603 committed on
Commit 52d2429 · verified · 1 Parent(s): 8eb4c7b

Update app.py

Files changed (1)
  app.py  +16 -85
app.py CHANGED
@@ -5,15 +5,10 @@ import cv2
 import gradio as gr
 import yolov9
 
-# Load the first YOLOv9 model
-model1 = yolov9.load('best (1).pt', device="cpu")
-model1.conf = 0.40
-model1.iou = 0.45
-
-# Load the second YOLO model (assuming you have a second YOLOv9 model or another YOLO model)
-model2 = yolov9.load('best (1).pt', device="cpu")
-model2.conf = 0.40
-model2.iou = 0.45
+# Load the YOLOv9 model
+model = yolov9.load('best (1).pt', device="cpu")
+model.conf = 0.40
+model.iou = 0.45
 
 def remove_lines(img):
     # Convert the image to grayscale
@@ -33,91 +28,27 @@ def remove_lines(img):
     return img
 
 def Predict(img):
-    objects_name = []
-    cropped_images = []
-    img_name_list = []
-
-    # Make a copy of the image for cropping
-    img_for_cropping = img.copy()
-
-    # Run inference using the first model
-    results1 = model1(img, size=224)
-    annotator1 = Annotator(img, line_width=2, example=str('Organ'))
-
-    detections1 = {}
-    for result in results1.xyxy[0]:
-        xmin, ymin, xmax, ymax, confidence, class_id = result
-        label = results1.names[int(class_id)]
-        confidence = float(confidence)
-
-        if label not in detections1 or detections1[label]['confidence'] < confidence:
-            detections1[label] = {
-                'box': [xmin, ymin, xmax, ymax],
-                'confidence': confidence
-            }
-
-    # Run inference using the second model
-    results2 = model2(img, size=224)
-    annotator2 = Annotator(img, line_width=2, example=str('Organ'))
+    # Run inference using the model
+    results = model(img, size=224)
+    annotator = Annotator(img, line_width=2, example=str('Organ'))
 
-    detections2 = {}
-    for result in results2.xyxy[0]:
+    for result in results.xyxy[0]:
         xmin, ymin, xmax, ymax, confidence, class_id = result
-        label = results2.names[int(class_id)]
+        label = results.names[int(class_id)]
         confidence = float(confidence)
 
-        if label not in detections2 or detections2[label]['confidence'] < confidence:
-            detections2[label] = {
-                'box': [xmin, ymin, xmax, ymax],
-                'confidence': confidence
-            }
-
-    # Combine detections from both models
-    combined_detections = {**detections1, **detections2}
-
-    for label, data in combined_detections.items():
-        xmin, ymin, xmax, ymax = data['box']
-        confidence = data['confidence']
+        # Annotate the image
+        annotator.box_label([xmin, ymin, xmax, ymax], f"{label} {confidence:.2f}", color=(255, 0, 0))
 
-        # Cropping the detected object from the original image
-        cropped_img = img_for_cropping[int(ymin):int(ymax), int(xmin):int(xmax)]
-
-        # Remove lines from the cropped image
-        cropped_img_cleaned = remove_lines(cropped_img)
-
-        cropped_images.append((label, confidence, cropped_img_cleaned))
-
-        # Convert the cropped image from BGR to RGB before saving
-        cropped_img_rgb = cv2.cvtColor(cropped_img_cleaned, cv2.COLOR_BGR2RGB)
-
-        # Save the cropped image
-        crop_filename = f"{label}.jpg"
-        img_name_list.append(crop_filename)
-        cv2.imwrite(crop_filename, cropped_img_rgb)
-
-        # Annotating the image (after cropping to ensure the line is not in the cropped images)
-        annotator1.box_label([xmin, ymin, xmax, ymax], f"{label} {confidence:.2f}", color=(255, 0, 0))
-
-    annotated_img = annotator1.result()
-    objects_name = [(label, data['confidence']) for label, data in combined_detections.items()]
-    labels = [{"label": label, "confidence": confidence} for label, confidence in objects_name]
-
-    return annotated_img, cropped_images, objects_name
+    annotated_img = annotator.result()
+    return annotated_img
 
 def output_display(img):
-    annotated_img, cropped_images, objects_name = Predict(img)
-
-    # Extract cropped images and labels separately
-    crops = [crop for _, _, crop in cropped_images]
-    labels = [{"label": label, "confidence": confidence} for label, confidence in objects_name]
-
-    return annotated_img, crops, labels
+    annotated_img = Predict(img)
+    return annotated_img
 
 interface = gr.Interface(fn=output_display,
                          inputs=["image"],
-                         outputs=[gr.Image(label="Annotated Image"),
-                                  gr.Gallery(label="Cropped Images"),
-                                  gr.JSON(label="Labels and Confidence")])
+                         outputs=gr.Image(label="Annotated Image"))
 
 interface.launch(debug=True)
-