yadonglu committed
Commit b20c0ea · 1 Parent(s): b48570a
app.py ADDED
@@ -0,0 +1,111 @@
+ from typing import Tuple
+
+ import gradio as gr
+ import numpy as np
+ import torch
+ from PIL import Image
+ import io
+ import base64, os
+
+ from util.utils import check_ocr_box, get_yolo_model, get_caption_model_processor, get_som_labeled_img
+
+ from huggingface_hub import snapshot_download
+
+ # Define repository and local directory
+ repo_id = "microsoft/OmniParser-v2.0"  # HF repo
+ local_dir = "weights"  # Target local directory
+
+ # Download the entire repository
+ snapshot_download(repo_id=repo_id, local_dir=local_dir)
+
+ print(f"Repository downloaded to: {local_dir}")
+
+
+ yolo_model = get_yolo_model(model_path='weights/icon_detect/model.pt')
+ caption_model_processor = get_caption_model_processor(model_name="florence2", model_name_or_path="weights/icon_caption")
+ # caption_model_processor = get_caption_model_processor(model_name="blip2", model_name_or_path="weights/icon_caption_blip2")
+
+ MARKDOWN = """
+ # OmniParser for Pure Vision Based General GUI Agent 🔥
+ <div>
+     <a href="https://arxiv.org/pdf/2408.00203">
+         <img src="https://img.shields.io/badge/arXiv-2408.00203-b31b1b.svg" alt="Arxiv" style="display:inline-block;">
+     </a>
+ </div>
+
+ OmniParser is a screen-parsing tool that converts a general GUI screen into structured elements.
+ """
+
+ DEVICE = torch.device('cuda')
+
+ # @spaces.GPU
+ # @torch.inference_mode()
+ # @torch.autocast(device_type="cuda", dtype=torch.bfloat16)
+ def process(
+     image_input,
+     box_threshold,
+     iou_threshold,
+     use_paddleocr,
+     imgsz
+ ) -> Tuple[Image.Image, str]:
+
+     # image_save_path = 'imgs/saved_image_demo.png'
+     # image_input.save(image_save_path)
+     # image = Image.open(image_save_path)
+     box_overlay_ratio = image_input.size[0] / 3200
+     draw_bbox_config = {
+         'text_scale': 0.8 * box_overlay_ratio,
+         'text_thickness': max(int(2 * box_overlay_ratio), 1),
+         'text_padding': max(int(3 * box_overlay_ratio), 1),
+         'thickness': max(int(3 * box_overlay_ratio), 1),
+     }
+     # import pdb; pdb.set_trace()
+
+     ocr_bbox_rslt, is_goal_filtered = check_ocr_box(image_input, display_img=False, output_bb_format='xyxy', goal_filtering=None, easyocr_args={'paragraph': False, 'text_threshold': 0.9}, use_paddleocr=use_paddleocr)
+     text, ocr_bbox = ocr_bbox_rslt
+     dino_labeled_img, label_coordinates, parsed_content_list = get_som_labeled_img(image_input, yolo_model, BOX_TRESHOLD=box_threshold, output_coord_in_ratio=True, ocr_bbox=ocr_bbox, draw_bbox_config=draw_bbox_config, caption_model_processor=caption_model_processor, ocr_text=text, iou_threshold=iou_threshold, imgsz=imgsz)
+     image = Image.open(io.BytesIO(base64.b64decode(dino_labeled_img)))
+     print('finish processing')
+     parsed_content_list = '\n'.join([f'icon {i}: ' + str(v) for i, v in enumerate(parsed_content_list)])
+     return image, parsed_content_list
+
+ with gr.Blocks() as demo:
+     gr.Markdown(MARKDOWN)
+     with gr.Row():
+         with gr.Column():
+             image_input_component = gr.Image(
+                 type='pil', label='Upload image')
+             # set the threshold for removing the bounding boxes with low confidence, default is 0.05
+             box_threshold_component = gr.Slider(
+                 label='Box Threshold', minimum=0.01, maximum=1.0, step=0.01, value=0.05)
+             # set the threshold for removing the bounding boxes with large overlap, default is 0.1
+             iou_threshold_component = gr.Slider(
+                 label='IOU Threshold', minimum=0.01, maximum=1.0, step=0.01, value=0.1)
+             use_paddleocr_component = gr.Checkbox(
+                 label='Use PaddleOCR', value=True)
+             imgsz_component = gr.Slider(
+                 label='Icon Detect Image Size', minimum=640, maximum=1920, step=32, value=640)
+             submit_button_component = gr.Button(
+                 value='Submit', variant='primary')
+         with gr.Column():
+             image_output_component = gr.Image(type='pil', label='Image Output')
+             text_output_component = gr.Textbox(label='Parsed screen elements', placeholder='Text Output')
+
+     submit_button_component.click(
+         fn=process,
+         inputs=[
+             image_input_component,
+             box_threshold_component,
+             iou_threshold_component,
+             use_paddleocr_component,
+             imgsz_component
+         ],
+         outputs=[image_output_component, text_output_component]
+     )
+
+ # demo.launch(debug=False, show_error=True, share=True)
+ demo.launch(share=True, server_port=7861, server_name='0.0.0.0')
requirements.txt ADDED
@@ -0,0 +1,33 @@
+ torch
+ easyocr
+ torchvision
+ supervision==0.18.0
+ openai==1.3.5
+ transformers
+ ultralytics==8.3.70
+ azure-identity
+ numpy==1.26.4
+ opencv-python
+ opencv-python-headless
+ gradio
+ dill
+ accelerate
+ timm
+ einops==0.8.0
+ paddlepaddle
+ paddleocr
+ ruff==0.6.7
+ pre-commit==3.8.0
+ pytest==8.3.3
+ pytest-asyncio==0.23.6
+ pyautogui==0.9.54
+ streamlit>=1.38.0
+ anthropic[bedrock,vertex]>=0.37.1
+ jsonschema==4.22.0
+ boto3>=1.28.57
+ google-auth<3,>=2
+ screeninfo
+ uiautomation
+ dashscope
+ groq
+ huggingface_hub
util/__init__.py ADDED
File without changes
util/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (162 Bytes)
util/__pycache__/box_annotator.cpython-312.pyc ADDED
Binary file (9.83 kB)
util/__pycache__/omniparser.cpython-312.pyc ADDED
Binary file (2.91 kB)
util/__pycache__/utils.cpython-312.pyc ADDED
Binary file (29.6 kB)
util/box_annotator.py ADDED
@@ -0,0 +1,262 @@
+ from typing import List, Optional, Union, Tuple
+
+ import cv2
+ import numpy as np
+
+ from supervision.detection.core import Detections
+ from supervision.draw.color import Color, ColorPalette
+
+
+ class BoxAnnotator:
+     """
+     A class for drawing bounding boxes on an image using detections provided.
+
+     Attributes:
+         color (Union[Color, ColorPalette]): The color to draw the bounding box,
+             can be a single color or a color palette
+         thickness (int): The thickness of the bounding box lines, default is 2
+         text_color (Color): The color of the text on the bounding box, default is white
+         text_scale (float): The scale of the text on the bounding box, default is 0.5
+         text_thickness (int): The thickness of the text on the bounding box,
+             default is 1
+         text_padding (int): The padding around the text on the bounding box,
+             default is 5
+
+     """
+
+     def __init__(
+         self,
+         color: Union[Color, ColorPalette] = ColorPalette.DEFAULT,
+         thickness: int = 3,  # 1 for seeclick, 2 for mind2web, 3 for demo
+         text_color: Color = Color.BLACK,
+         text_scale: float = 0.5,  # 0.8 for mobile/web, 0.3 for desktop, 0.4 for mind2web
+         text_thickness: int = 2,  # 1, # 2 for demo
+         text_padding: int = 10,
+         avoid_overlap: bool = True,
+     ):
+         self.color: Union[Color, ColorPalette] = color
+         self.thickness: int = thickness
+         self.text_color: Color = text_color
+         self.text_scale: float = text_scale
+         self.text_thickness: int = text_thickness
+         self.text_padding: int = text_padding
+         self.avoid_overlap: bool = avoid_overlap
+
+     def annotate(
+         self,
+         scene: np.ndarray,
+         detections: Detections,
+         labels: Optional[List[str]] = None,
+         skip_label: bool = False,
+         image_size: Optional[Tuple[int, int]] = None,
+     ) -> np.ndarray:
+         """
+         Draws bounding boxes on the frame using the detections provided.
+
+         Args:
+             scene (np.ndarray): The image on which the bounding boxes will be drawn
+             detections (Detections): The detections for which the
+                 bounding boxes will be drawn
+             labels (Optional[List[str]]): An optional list of labels
+                 corresponding to each detection. If `labels` are not provided,
+                 the corresponding `class_id` will be used as the label.
+             skip_label (bool): If set to `True`, skips bounding box label annotation.
+         Returns:
+             np.ndarray: The image with the bounding boxes drawn on it
+
+         Example:
+             ```python
+             import supervision as sv
+
+             classes = ['person', ...]
+             image = ...
+             detections = sv.Detections(...)
+
+             box_annotator = sv.BoxAnnotator()
+             labels = [
+                 f"{classes[class_id]} {confidence:0.2f}"
+                 for _, _, confidence, class_id, _ in detections
+             ]
+             annotated_frame = box_annotator.annotate(
+                 scene=image.copy(),
+                 detections=detections,
+                 labels=labels
+             )
+             ```
+         """
+         font = cv2.FONT_HERSHEY_SIMPLEX
+         for i in range(len(detections)):
+             x1, y1, x2, y2 = detections.xyxy[i].astype(int)
+             class_id = (
+                 detections.class_id[i] if detections.class_id is not None else None
+             )
+             idx = class_id if class_id is not None else i
+             color = (
+                 self.color.by_idx(idx)
+                 if isinstance(self.color, ColorPalette)
+                 else self.color
+             )
+             cv2.rectangle(
+                 img=scene,
+                 pt1=(x1, y1),
+                 pt2=(x2, y2),
+                 color=color.as_bgr(),
+                 thickness=self.thickness,
+             )
+             if skip_label:
+                 continue
+
+             text = (
+                 f"{class_id}"
+                 if (labels is None or len(detections) != len(labels))
+                 else labels[i]
+             )
+
+             text_width, text_height = cv2.getTextSize(
+                 text=text,
+                 fontFace=font,
+                 fontScale=self.text_scale,
+                 thickness=self.text_thickness,
+             )[0]
+
+             if not self.avoid_overlap:
+                 text_x = x1 + self.text_padding
+                 text_y = y1 - self.text_padding
+
+                 text_background_x1 = x1
+                 text_background_y1 = y1 - 2 * self.text_padding - text_height
+
+                 text_background_x2 = x1 + 2 * self.text_padding + text_width
+                 text_background_y2 = y1
+                 # text_x = x1 - self.text_padding - text_width
+                 # text_y = y1 + self.text_padding + text_height
+                 # text_background_x1 = x1 - 2 * self.text_padding - text_width
+                 # text_background_y1 = y1
+                 # text_background_x2 = x1
+                 # text_background_y2 = y1 + 2 * self.text_padding + text_height
+             else:
+                 text_x, text_y, text_background_x1, text_background_y1, text_background_x2, text_background_y2 = get_optimal_label_pos(self.text_padding, text_width, text_height, x1, y1, x2, y2, detections, image_size)
+
+             cv2.rectangle(
+                 img=scene,
+                 pt1=(text_background_x1, text_background_y1),
+                 pt2=(text_background_x2, text_background_y2),
+                 color=color.as_bgr(),
+                 thickness=cv2.FILLED,
+             )
+             # import pdb; pdb.set_trace()
+             box_color = color.as_rgb()
+             luminance = 0.299 * box_color[0] + 0.587 * box_color[1] + 0.114 * box_color[2]
+             text_color = (0, 0, 0) if luminance > 160 else (255, 255, 255)
+             cv2.putText(
+                 img=scene,
+                 text=text,
+                 org=(text_x, text_y),
+                 fontFace=font,
+                 fontScale=self.text_scale,
+                 # color=self.text_color.as_rgb(),
+                 color=text_color,
+                 thickness=self.text_thickness,
+                 lineType=cv2.LINE_AA,
+             )
+         return scene
+
+
+ def box_area(box):
+     return (box[2] - box[0]) * (box[3] - box[1])
+
+
+ def intersection_area(box1, box2):
+     x1 = max(box1[0], box2[0])
+     y1 = max(box1[1], box2[1])
+     x2 = min(box1[2], box2[2])
+     y2 = min(box1[3], box2[3])
+     return max(0, x2 - x1) * max(0, y2 - y1)
+
+
+ def IoU(box1, box2, return_max=True):
+     intersection = intersection_area(box1, box2)
+     union = box_area(box1) + box_area(box2) - intersection
+     if box_area(box1) > 0 and box_area(box2) > 0:
+         ratio1 = intersection / box_area(box1)
+         ratio2 = intersection / box_area(box2)
+     else:
+         ratio1, ratio2 = 0, 0
+     if return_max:
+         return max(intersection / union, ratio1, ratio2)
+     else:
+         return intersection / union
+
+
+ def get_optimal_label_pos(text_padding, text_width, text_height, x1, y1, x2, y2, detections, image_size):
+     """ Check overlap of the label text with the detection boxes and pick a label position.
+     pos: str, position of the text, one of 'top left', 'top right', 'outer left', 'outer right'. TODO: if all positions overlap, return the last one, i.e. outer right
+     Threshold: default to 0.3
+     """
+
+     def get_is_overlap(detections, text_background_x1, text_background_y1, text_background_x2, text_background_y2, image_size):
+         is_overlap = False
+         for i in range(len(detections)):
+             detection = detections.xyxy[i].astype(int)
+             if IoU([text_background_x1, text_background_y1, text_background_x2, text_background_y2], detection) > 0.3:
+                 is_overlap = True
+                 break
+         # check if the text is out of the image
+         if text_background_x1 < 0 or text_background_x2 > image_size[0] or text_background_y1 < 0 or text_background_y2 > image_size[1]:
+             is_overlap = True
+         return is_overlap
+
+     # if pos == 'top left':
+     text_x = x1 + text_padding
+     text_y = y1 - text_padding
+
+     text_background_x1 = x1
+     text_background_y1 = y1 - 2 * text_padding - text_height
+
+     text_background_x2 = x1 + 2 * text_padding + text_width
+     text_background_y2 = y1
+     is_overlap = get_is_overlap(detections, text_background_x1, text_background_y1, text_background_x2, text_background_y2, image_size)
+     if not is_overlap:
+         return text_x, text_y, text_background_x1, text_background_y1, text_background_x2, text_background_y2
+
+     # elif pos == 'outer left':
+     text_x = x1 - text_padding - text_width
+     text_y = y1 + text_padding + text_height
+
+     text_background_x1 = x1 - 2 * text_padding - text_width
+     text_background_y1 = y1
+
+     text_background_x2 = x1
+     text_background_y2 = y1 + 2 * text_padding + text_height
+     is_overlap = get_is_overlap(detections, text_background_x1, text_background_y1, text_background_x2, text_background_y2, image_size)
+     if not is_overlap:
+         return text_x, text_y, text_background_x1, text_background_y1, text_background_x2, text_background_y2
+
+     # elif pos == 'outer right':
+     text_x = x2 + text_padding
+     text_y = y1 + text_padding + text_height
+
+     text_background_x1 = x2
+     text_background_y1 = y1
+
+     text_background_x2 = x2 + 2 * text_padding + text_width
+     text_background_y2 = y1 + 2 * text_padding + text_height
+
+     is_overlap = get_is_overlap(detections, text_background_x1, text_background_y1, text_background_x2, text_background_y2, image_size)
+     if not is_overlap:
+         return text_x, text_y, text_background_x1, text_background_y1, text_background_x2, text_background_y2
+
+     # elif pos == 'top right':
+     text_x = x2 - text_padding - text_width
+     text_y = y1 - text_padding
+
+     text_background_x1 = x2 - 2 * text_padding - text_width
+     text_background_y1 = y1 - 2 * text_padding - text_height
+
+     text_background_x2 = x2
+     text_background_y2 = y1
+
+     is_overlap = get_is_overlap(detections, text_background_x1, text_background_y1, text_background_x2, text_background_y2, image_size)
+     if not is_overlap:
+         return text_x, text_y, text_background_x1, text_background_y1, text_background_x2, text_background_y2
+
+     return text_x, text_y, text_background_x1, text_background_y1, text_background_x2, text_background_y2
util/omniparser.py ADDED
@@ -0,0 +1,32 @@
+ from util.utils import get_som_labeled_img, get_caption_model_processor, get_yolo_model, check_ocr_box
+ import torch
+ from PIL import Image
+ import io
+ import base64
+ from typing import Dict
+
+
+ class Omniparser(object):
+     def __init__(self, config: Dict):
+         self.config = config
+         device = 'cuda' if torch.cuda.is_available() else 'cpu'
+
+         self.som_model = get_yolo_model(model_path=config['som_model_path'])
+         self.caption_model_processor = get_caption_model_processor(model_name=config['caption_model_name'], model_name_or_path=config['caption_model_path'], device=device)
+         print('Omniparser initialized!!!')
+
+     def parse(self, image_base64: str):
+         image_bytes = base64.b64decode(image_base64)
+         image = Image.open(io.BytesIO(image_bytes))
+         print('image size:', image.size)
+
+         box_overlay_ratio = max(image.size) / 3200
+         draw_bbox_config = {
+             'text_scale': 0.8 * box_overlay_ratio,
+             'text_thickness': max(int(2 * box_overlay_ratio), 1),
+             'text_padding': max(int(3 * box_overlay_ratio), 1),
+             'thickness': max(int(3 * box_overlay_ratio), 1),
+         }
+
+         (text, ocr_bbox), _ = check_ocr_box(image, display_img=False, output_bb_format='xyxy', easyocr_args={'text_threshold': 0.8}, use_paddleocr=False)
+         dino_labeled_img, label_coordinates, parsed_content_list = get_som_labeled_img(image, self.som_model, BOX_TRESHOLD=self.config['BOX_TRESHOLD'], output_coord_in_ratio=True, ocr_bbox=ocr_bbox, draw_bbox_config=draw_bbox_config, caption_model_processor=self.caption_model_processor, ocr_text=text, use_local_semantics=True, iou_threshold=0.7, scale_img=False, batch_size=128)
+
+         return dino_labeled_img, parsed_content_list
util/utils.py ADDED
@@ -0,0 +1,543 @@
+ # from ultralytics import YOLO
+ import os
+ import io
+ import base64
+ import time
+ from PIL import Image, ImageDraw, ImageFont
+ import json
+ import requests
+ # utility function
+ from openai import AzureOpenAI
+
+ import sys
+ import cv2
+ import numpy as np
+ # %matplotlib inline
+ from matplotlib import pyplot as plt
+ import easyocr
+ from paddleocr import PaddleOCR
+ reader = easyocr.Reader(['en'])
+ paddle_ocr = PaddleOCR(
+     lang='en',  # other languages also available
+     use_angle_cls=False,
+     use_gpu=False,  # using cuda will conflict with pytorch in the same process
+     show_log=False,
+     max_batch_size=1024,
+     use_dilation=True,  # improves accuracy
+     det_db_score_mode='slow',  # improves accuracy
+     rec_batch_num=1024)
+
+ import ast
+ import torch
+ from typing import Tuple, List, Union
+ from torchvision.ops import box_convert
+ import re
+ from torchvision.transforms import ToPILImage
+ import supervision as sv
+ import torchvision.transforms as T
+ from util.box_annotator import BoxAnnotator
+
+
+ def get_caption_model_processor(model_name, model_name_or_path="Salesforce/blip2-opt-2.7b", device=None):
+     if not device:
+         device = "cuda" if torch.cuda.is_available() else "cpu"
+     if model_name == "blip2":
+         from transformers import Blip2Processor, Blip2ForConditionalGeneration
+         processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
+         if device == 'cpu':
+             model = Blip2ForConditionalGeneration.from_pretrained(
+                 model_name_or_path, device_map=None, torch_dtype=torch.float32
+             )
+         else:
+             model = Blip2ForConditionalGeneration.from_pretrained(
+                 model_name_or_path, device_map=None, torch_dtype=torch.float16
+             ).to(device)
+     elif model_name == "florence2":
+         from transformers import AutoProcessor, AutoModelForCausalLM
+         processor = AutoProcessor.from_pretrained("microsoft/Florence-2-base", trust_remote_code=True)
+         if device == 'cpu':
+             model = AutoModelForCausalLM.from_pretrained(model_name_or_path, torch_dtype=torch.float32, trust_remote_code=True)
+         else:
+             model = AutoModelForCausalLM.from_pretrained(model_name_or_path, torch_dtype=torch.float16, trust_remote_code=True).to(device)
+     return {'model': model.to(device), 'processor': processor}
+
+
+ def get_yolo_model(model_path):
+     from ultralytics import YOLO
+     # Load the model.
+     model = YOLO(model_path)
+     return model
+
+
+ @torch.inference_mode()
+ def get_parsed_content_icon(filtered_boxes, starting_idx, image_source, caption_model_processor, prompt=None, batch_size=None):
+     # Number of samples per batch --> 256 roughly takes 23 GB of GPU memory for the florence model
+     to_pil = ToPILImage()
+     if starting_idx:
+         non_ocr_boxes = filtered_boxes[starting_idx:]
+     else:
+         non_ocr_boxes = filtered_boxes
+     cropped_pil_image = []
+     for i, coord in enumerate(non_ocr_boxes):
+         try:
+             xmin, xmax = int(coord[0]*image_source.shape[1]), int(coord[2]*image_source.shape[1])
+             ymin, ymax = int(coord[1]*image_source.shape[0]), int(coord[3]*image_source.shape[0])
+             cropped_image = image_source[ymin:ymax, xmin:xmax, :]
+             cropped_image = cv2.resize(cropped_image, (64, 64))
+             cropped_pil_image.append(to_pil(cropped_image))
+         except:
+             continue
+
+     model, processor = caption_model_processor['model'], caption_model_processor['processor']
+     if not prompt:
+         if 'florence' in model.config.name_or_path:
+             prompt = "<CAPTION>"
+         else:
+             prompt = "The image shows"
+
+     generated_texts = []
+     device = model.device
+     # batch_size = 64
+     for i in range(0, len(cropped_pil_image), batch_size):
+         start = time.time()
+         batch = cropped_pil_image[i:i+batch_size]
+         t1 = time.time()
+         if model.device.type == 'cuda':
+             inputs = processor(images=batch, text=[prompt]*len(batch), return_tensors="pt", do_resize=False).to(device=device, dtype=torch.float16)
+         else:
+             inputs = processor(images=batch, text=[prompt]*len(batch), return_tensors="pt").to(device=device)
+         if 'florence' in model.config.name_or_path:
+             generated_ids = model.generate(input_ids=inputs["input_ids"], pixel_values=inputs["pixel_values"], max_new_tokens=20, num_beams=1, do_sample=False)
+         else:
+             generated_ids = model.generate(**inputs, max_length=100, num_beams=5, no_repeat_ngram_size=2, early_stopping=True, num_return_sequences=1)  # temperature=0.01, do_sample=True,
+         generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)
+         generated_text = [gen.strip() for gen in generated_text]
+         generated_texts.extend(generated_text)
+
+     return generated_texts
+
+
+ def get_parsed_content_icon_phi3v(filtered_boxes, ocr_bbox, image_source, caption_model_processor):
+     to_pil = ToPILImage()
+     if ocr_bbox:
+         non_ocr_boxes = filtered_boxes[len(ocr_bbox):]
+     else:
+         non_ocr_boxes = filtered_boxes
+     cropped_pil_image = []
+     for i, coord in enumerate(non_ocr_boxes):
+         xmin, xmax = int(coord[0]*image_source.shape[1]), int(coord[2]*image_source.shape[1])
+         ymin, ymax = int(coord[1]*image_source.shape[0]), int(coord[3]*image_source.shape[0])
+         cropped_image = image_source[ymin:ymax, xmin:xmax, :]
+         cropped_pil_image.append(to_pil(cropped_image))
+
+     model, processor = caption_model_processor['model'], caption_model_processor['processor']
+     device = model.device
+     messages = [{"role": "user", "content": "<|image_1|>\ndescribe the icon in one sentence"}]
+     prompt = processor.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+
+     batch_size = 5  # Number of samples per batch
+     generated_texts = []
+
+     for i in range(0, len(cropped_pil_image), batch_size):
+         images = cropped_pil_image[i:i+batch_size]
+         image_inputs = [processor.image_processor(x, return_tensors="pt") for x in images]
+         inputs = {'input_ids': [], 'attention_mask': [], 'pixel_values': [], 'image_sizes': []}
+         texts = [prompt] * len(images)
+         for i, txt in enumerate(texts):
+             input = processor._convert_images_texts_to_inputs(image_inputs[i], txt, return_tensors="pt")
+             inputs['input_ids'].append(input['input_ids'])
+             inputs['attention_mask'].append(input['attention_mask'])
+             inputs['pixel_values'].append(input['pixel_values'])
+             inputs['image_sizes'].append(input['image_sizes'])
+         max_len = max([x.shape[1] for x in inputs['input_ids']])
+         for i, v in enumerate(inputs['input_ids']):
+             inputs['input_ids'][i] = torch.cat([processor.tokenizer.pad_token_id * torch.ones(1, max_len - v.shape[1], dtype=torch.long), v], dim=1)
+             inputs['attention_mask'][i] = torch.cat([torch.zeros(1, max_len - v.shape[1], dtype=torch.long), inputs['attention_mask'][i]], dim=1)
+         inputs_cat = {k: torch.concatenate(v).to(device) for k, v in inputs.items()}
+
+         generation_args = {
+             "max_new_tokens": 25,
+             "temperature": 0.01,
+             "do_sample": False,
+         }
+         generate_ids = model.generate(**inputs_cat, eos_token_id=processor.tokenizer.eos_token_id, **generation_args)
+         # remove input tokens
+         generate_ids = generate_ids[:, inputs_cat['input_ids'].shape[1]:]
+         response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
+         response = [res.strip('\n').strip() for res in response]
+         generated_texts.extend(response)
+
+     return generated_texts
+
+
+ def remove_overlap(boxes, iou_threshold, ocr_bbox=None):
+     assert ocr_bbox is None or isinstance(ocr_bbox, List)
+
+     def box_area(box):
+         return (box[2] - box[0]) * (box[3] - box[1])
+
+     def intersection_area(box1, box2):
+         x1 = max(box1[0], box2[0])
+         y1 = max(box1[1], box2[1])
+         x2 = min(box1[2], box2[2])
+         y2 = min(box1[3], box2[3])
+         return max(0, x2 - x1) * max(0, y2 - y1)
+
+     def IoU(box1, box2):
+         intersection = intersection_area(box1, box2)
+         union = box_area(box1) + box_area(box2) - intersection + 1e-6
+         if box_area(box1) > 0 and box_area(box2) > 0:
+             ratio1 = intersection / box_area(box1)
+             ratio2 = intersection / box_area(box2)
+         else:
+             ratio1, ratio2 = 0, 0
+         return max(intersection / union, ratio1, ratio2)
+
+     def is_inside(box1, box2):
+         # return box1[0] >= box2[0] and box1[1] >= box2[1] and box1[2] <= box2[2] and box1[3] <= box2[3]
+         intersection = intersection_area(box1, box2)
+         ratio1 = intersection / box_area(box1)
+         return ratio1 > 0.95
+
+     boxes = boxes.tolist()
+     filtered_boxes = []
+     if ocr_bbox:
+         filtered_boxes.extend(ocr_bbox)
+     # print('ocr_bbox!!!', ocr_bbox)
+     for i, box1 in enumerate(boxes):
+         # if not any(IoU(box1, box2) > iou_threshold and box_area(box1) > box_area(box2) for j, box2 in enumerate(boxes) if i != j):
+         is_valid_box = True
+         for j, box2 in enumerate(boxes):
+             # keep the smaller box
+             if i != j and IoU(box1, box2) > iou_threshold and box_area(box1) > box_area(box2):
+                 is_valid_box = False
+                 break
+         if is_valid_box:
+             # add the following 2 lines to include ocr bbox
+             if ocr_bbox:
+                 # only add the box if it does not overlap with any ocr bbox
+                 if not any(IoU(box1, box3) > iou_threshold and not is_inside(box1, box3) for k, box3 in enumerate(ocr_bbox)):
+                     filtered_boxes.append(box1)
+             else:
+                 filtered_boxes.append(box1)
+     return torch.tensor(filtered_boxes)
+
+
+ def remove_overlap_new(boxes, iou_threshold, ocr_bbox=None):
+     '''
+     ocr_bbox format: [{'type': 'text', 'bbox':[x,y], 'interactivity':False, 'content':str }, ...]
+     boxes format: [{'type': 'icon', 'bbox':[x,y], 'interactivity':True, 'content':None }, ...]
+     '''
+     assert ocr_bbox is None or isinstance(ocr_bbox, List)
+
+     def box_area(box):
+         return (box[2] - box[0]) * (box[3] - box[1])
+
+     def intersection_area(box1, box2):
+         x1 = max(box1[0], box2[0])
+         y1 = max(box1[1], box2[1])
+         x2 = min(box1[2], box2[2])
+         y2 = min(box1[3], box2[3])
+         return max(0, x2 - x1) * max(0, y2 - y1)
+
+     def IoU(box1, box2):
+         intersection = intersection_area(box1, box2)
+         union = box_area(box1) + box_area(box2) - intersection + 1e-6
+         if box_area(box1) > 0 and box_area(box2) > 0:
+             ratio1 = intersection / box_area(box1)
+             ratio2 = intersection / box_area(box2)
+         else:
+             ratio1, ratio2 = 0, 0
+         return max(intersection / union, ratio1, ratio2)
+
+     def is_inside(box1, box2):
+         # return box1[0] >= box2[0] and box1[1] >= box2[1] and box1[2] <= box2[2] and box1[3] <= box2[3]
+         intersection = intersection_area(box1, box2)
+         ratio1 = intersection / box_area(box1)
+         return ratio1 > 0.80
+
+     # boxes = boxes.tolist()
+     filtered_boxes = []
+     if ocr_bbox:
+         filtered_boxes.extend(ocr_bbox)
+     # print('ocr_bbox!!!', ocr_bbox)
+     for i, box1_elem in enumerate(boxes):
+         box1 = box1_elem['bbox']
+         is_valid_box = True
+         for j, box2_elem in enumerate(boxes):
+             # keep the smaller box
+             box2 = box2_elem['bbox']
+             if i != j and IoU(box1, box2) > iou_threshold and box_area(box1) > box_area(box2):
+                 is_valid_box = False
+                 break
+         if is_valid_box:
+             if ocr_bbox:
+                 # keep yolo boxes + prioritize ocr label
+                 box_added = False
+                 ocr_labels = ''
+                 for box3_elem in ocr_bbox:
+                     if not box_added:
+                         box3 = box3_elem['bbox']
+                         if is_inside(box3, box1):  # ocr inside icon
+                             # box_added = True
+                             # delete the box3_elem from ocr_bbox
+                             try:
+                                 # gather all ocr labels
+                                 ocr_labels += box3_elem['content'] + ' '
+                                 filtered_boxes.remove(box3_elem)
+                             except:
+                                 continue
+                             # break
+                         elif is_inside(box1, box3):  # icon inside ocr: do not add this icon box; no need to check the other ocr bboxes because ocr bboxes do not overlap, so the icon can only be inside one ocr box
+                             box_added = True
+                             break
+                         else:
+                             continue
+                 if not box_added:
+                     if ocr_labels:
+                         filtered_boxes.append({'type': 'icon', 'bbox': box1_elem['bbox'], 'interactivity': True, 'content': ocr_labels,})
+                     else:
+                         filtered_boxes.append({'type': 'icon', 'bbox': box1_elem['bbox'], 'interactivity': True, 'content': None, })
+             else:
+                 filtered_boxes.append(box1)
+     return filtered_boxes  # torch.tensor(filtered_boxes)
+
+
+ def load_image(image_path: str) -> Tuple[np.array, torch.Tensor]:
+     transform = T.Compose(
+         [
+             T.RandomResize([800], max_size=1333),
+             T.ToTensor(),
+             T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
+         ]
+     )
+     image_source = Image.open(image_path).convert("RGB")
+     image = np.asarray(image_source)
+     image_transformed, _ = transform(image_source, None)
+     return image, image_transformed
+
+
+ def annotate(image_source: np.ndarray, boxes: torch.Tensor, logits: torch.Tensor, phrases: List[str], text_scale: float,
+              text_padding=5, text_thickness=2, thickness=3) -> np.ndarray:
+     """
+     This function annotates an image with bounding boxes and labels.
+
+     Parameters:
+     image_source (np.ndarray): The source image to be annotated.
+     boxes (torch.Tensor): A tensor containing bounding box coordinates, in cxcywh format, pixel scale
+     logits (torch.Tensor): A tensor containing confidence scores for each bounding box.
+     phrases (List[str]): A list of labels for each bounding box.
+     text_scale (float): The scale of the text to be displayed. 0.8 for mobile/web, 0.3 for desktop, 0.4 for mind2web
+
+     Returns:
+     np.ndarray: The annotated image.
+     """
+     h, w, _ = image_source.shape
+     boxes = boxes * torch.Tensor([w, h, w, h])
+     xyxy = box_convert(boxes=boxes, in_fmt="cxcywh", out_fmt="xyxy").numpy()
+     xywh = box_convert(boxes=boxes, in_fmt="cxcywh", out_fmt="xywh").numpy()
+     detections = sv.Detections(xyxy=xyxy)
+
+     labels = [f"{phrase}" for phrase in range(boxes.shape[0])]
+
+     box_annotator = BoxAnnotator(text_scale=text_scale, text_padding=text_padding, text_thickness=text_thickness, thickness=thickness)  # 0.8 for mobile/web, 0.3 for desktop, 0.4 for mind2web
+     annotated_frame = image_source.copy()
+     annotated_frame = box_annotator.annotate(scene=annotated_frame, detections=detections, labels=labels, image_size=(w, h))
+
+     label_coordinates = {f"{phrase}": v for phrase, v in zip(phrases, xywh)}
+     return annotated_frame, label_coordinates
+
+
+ def predict(model, image, caption, box_threshold, text_threshold):
+     """ Use huggingface model to replace the original model
+     """
+     model, processor = model['model'], model['processor']
+     device = model.device
+
+     inputs = processor(images=image, text=caption, return_tensors="pt").to(device)
+     with torch.no_grad():
+         outputs = model(**inputs)
+
+     results = processor.post_process_grounded_object_detection(
+         outputs,
+         inputs.input_ids,
+         box_threshold=box_threshold,  # 0.4,
+         text_threshold=text_threshold,  # 0.3,
+         target_sizes=[image.size[::-1]]
+     )[0]
+     boxes, logits, phrases = results["boxes"], results["scores"], results["labels"]
+     return boxes, logits, phrases
+
+
+ def predict_yolo(model, image, box_threshold, imgsz, scale_img, iou_threshold=0.7):
+     """ Use huggingface model to replace the original model
+     """
+     # model = model['model']
+     if scale_img:
+         result = model.predict(
+             source=image,
+             conf=box_threshold,
+             imgsz=imgsz,
+             iou=iou_threshold,  # default 0.7
+         )
+     else:
+         result = model.predict(
+             source=image,
+             conf=box_threshold,
+             iou=iou_threshold,  # default 0.7
+         )
+     boxes = result[0].boxes.xyxy  # .tolist() # in pixel space
+     conf = result[0].boxes.conf
+     phrases = [str(i) for i in range(len(boxes))]
+
+     return boxes, conf, phrases
+
+
+ def int_box_area(box, w, h):
+     x1, y1, x2, y2 = box
+     int_box = [int(x1*w), int(y1*h), int(x2*w), int(y2*h)]
+     area = (int_box[2] - int_box[0]) * (int_box[3] - int_box[1])
+     return area
+
+
+ def get_som_labeled_img(image_source: Union[str, Image.Image], model=None, BOX_TRESHOLD=0.01, output_coord_in_ratio=False, ocr_bbox=None, text_scale=0.4, text_padding=5, draw_bbox_config=None, caption_model_processor=None, ocr_text=[], use_local_semantics=True, iou_threshold=0.9, prompt=None, scale_img=False, imgsz=None, batch_size=64):
+     """Process either an image path or Image object
+
+     Args:
+         image_source: Either a file path (str) or PIL Image object
+         ...
+     """
+     if isinstance(image_source, str):
+         image_source = Image.open(image_source).convert("RGB")
+
+     w, h = image_source.size
+     if not imgsz:
+         imgsz = (h, w)
+     # print('image size:', w, h)
+     xyxy, logits, phrases = predict_yolo(model=model, image=image_source, box_threshold=BOX_TRESHOLD, imgsz=imgsz, scale_img=scale_img, iou_threshold=0.1)
+     xyxy = xyxy / torch.Tensor([w, h, w, h]).to(xyxy.device)
+     image_source = np.asarray(image_source)
+     phrases = [str(i) for i in range(len(phrases))]
+
+     # annotate the image with labels
+     if ocr_bbox:
+         ocr_bbox = torch.tensor(ocr_bbox) / torch.Tensor([w, h, w, h])
+         ocr_bbox = ocr_bbox.tolist()
+     else:
+         print('no ocr bbox!!!')
+         ocr_bbox = None
+
+     ocr_bbox_elem = [{'type': 'text', 'bbox': box, 'interactivity': False, 'content': txt,} for box, txt in zip(ocr_bbox, ocr_text) if int_box_area(box, w, h) > 0]
+     xyxy_elem = [{'type': 'icon', 'bbox': box, 'interactivity': True, 'content': None} for box in xyxy.tolist() if int_box_area(box, w, h) > 0]
+     filtered_boxes = remove_overlap_new(boxes=xyxy_elem, iou_threshold=iou_threshold, ocr_bbox=ocr_bbox_elem)
+
+     # sort the filtered_boxes so that the ones with 'content': None are at the end, and get the index of the first 'content': None
+     filtered_boxes_elem = sorted(filtered_boxes, key=lambda x: x['content'] is None)
+     # get the index of the first 'content': None
+     starting_idx = next((i for i, box in enumerate(filtered_boxes_elem) if box['content'] is None), -1)
+     filtered_boxes = torch.tensor([box['bbox'] for box in filtered_boxes_elem])
+     print('len(filtered_boxes):', len(filtered_boxes), starting_idx)
+
+     # get parsed icon local semantics
+     time1 = time.time()
+     if use_local_semantics:
+         caption_model = caption_model_processor['model']
+         if 'phi3_v' in caption_model.config.model_type:
+             parsed_content_icon = get_parsed_content_icon_phi3v(filtered_boxes, ocr_bbox, image_source, caption_model_processor)
+         else:
+             parsed_content_icon = get_parsed_content_icon(filtered_boxes, starting_idx, image_source, caption_model_processor, prompt=prompt, batch_size=batch_size)
+         ocr_text = [f"Text Box ID {i}: {txt}" for i, txt in enumerate(ocr_text)]
+         icon_start = len(ocr_text)
+         parsed_content_icon_ls = []
+         # fill the filtered_boxes_elem None content with parsed_content_icon in order
+         for i, box in enumerate(filtered_boxes_elem):
+             if box['content'] is None:
+                 box['content'] = parsed_content_icon.pop(0)
+         for i, txt in enumerate(parsed_content_icon):
+             parsed_content_icon_ls.append(f"Icon Box ID {str(i+icon_start)}: {txt}")
+         parsed_content_merged = ocr_text + parsed_content_icon_ls
+     else:
+         ocr_text = [f"Text Box ID {i}: {txt}" for i, txt in enumerate(ocr_text)]
+         parsed_content_merged = ocr_text
+     print('time to get parsed content:', time.time()-time1)
+
+     filtered_boxes = box_convert(boxes=filtered_boxes, in_fmt="xyxy", out_fmt="cxcywh")
+
+     phrases = [i for i in range(len(filtered_boxes))]
+
+     # draw boxes
+     if draw_bbox_config:
+         annotated_frame, label_coordinates = annotate(image_source=image_source, boxes=filtered_boxes, logits=logits, phrases=phrases, **draw_bbox_config)
+     else:
+         annotated_frame, label_coordinates = annotate(image_source=image_source, boxes=filtered_boxes, logits=logits, phrases=phrases, text_scale=text_scale, text_padding=text_padding)
+
+     pil_img = Image.fromarray(annotated_frame)
+     buffered = io.BytesIO()
+     pil_img.save(buffered, format="PNG")
+     encoded_image = base64.b64encode(buffered.getvalue()).decode('ascii')
+     if output_coord_in_ratio:
+         label_coordinates = {k: [v[0]/w, v[1]/h, v[2]/w, v[3]/h] for k, v in label_coordinates.items()}
+         assert w == annotated_frame.shape[1] and h == annotated_frame.shape[0]
+
+     return encoded_image, label_coordinates, filtered_boxes_elem
+
+
+ def get_xywh(input):
+     x, y, w, h = input[0][0], input[0][1], input[2][0] - input[0][0], input[2][1] - input[0][1]
+     x, y, w, h = int(x), int(y), int(w), int(h)
+     return x, y, w, h
+
+
+ def get_xyxy(input):
+     x, y, xp, yp = input[0][0], input[0][1], input[2][0], input[2][1]
+     x, y, xp, yp = int(x), int(y), int(xp), int(yp)
+     return x, y, xp, yp
+
+
+ def get_xywh_yolo(input):
+     x, y, w, h = input[0], input[1], input[2] - input[0], input[3] - input[1]
+     x, y, w, h = int(x), int(y), int(w), int(h)
+     return x, y, w, h
+
+
+ def check_ocr_box(image_source: Union[str, Image.Image], display_img=True, output_bb_format='xywh', goal_filtering=None, easyocr_args=None, use_paddleocr=False):
+     if isinstance(image_source, str):
+         image_source = Image.open(image_source)
+     if image_source.mode == 'RGBA':
+         # Convert RGBA to RGB to avoid alpha channel issues
+         image_source = image_source.convert('RGB')
+     image_np = np.array(image_source)
+     w, h = image_source.size
+     if use_paddleocr:
+         if easyocr_args is None:
+             text_threshold = 0.5
+         else:
+             text_threshold = easyocr_args['text_threshold']
+         result = paddle_ocr.ocr(image_np, cls=False)[0]
+         coord = [item[0] for item in result if item[1][1] > text_threshold]
+         text = [item[1][0] for item in result if item[1][1] > text_threshold]
+     else:  # EasyOCR
+         if easyocr_args is None:
+             easyocr_args = {}
+         result = reader.readtext(image_np, **easyocr_args)
+         coord = [item[0] for item in result]
+         text = [item[1] for item in result]
+     if display_img:
+         opencv_img = cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR)
+         bb = []
+         for item in coord:
+             x, y, a, b = get_xywh(item)
+             bb.append((x, y, a, b))
+             cv2.rectangle(opencv_img, (x, y), (x+a, y+b), (0, 255, 0), 2)
+         # matplotlib expects RGB
+         plt.imshow(cv2.cvtColor(opencv_img, cv2.COLOR_BGR2RGB))
+     else:
+         if output_bb_format == 'xywh':
+             bb = [get_xywh(item) for item in coord]
+         elif output_bb_format == 'xyxy':
+             bb = [get_xyxy(item) for item in coord]
+     return (text, bb), goal_filtering