SakshiRathi77 committed
Commit 6ef9ea4
1 Parent(s): 88e409c

Upload 8 files

.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+master_tmp.png filter=lfs diff=lfs merge=lfs -text
+to_score_planogram_tmp.png filter=lfs diff=lfs merge=lfs -text
app (1).py ADDED
@@ -0,0 +1,296 @@
+# https://planogram-compliance.herokuapp.com/
+# https://dashboard.heroku.com/apps/planogram-compliance/deploy/heroku-git
+
+# https://medium.com/@mohcufe/how-to-deploy-your-trained-pytorch-model-on-heroku-ff4b73085ddd
+# https://stackoverflow.com/questions/51730880/where-do-i-get-a-cpu-only-version-of-pytorch
+# https://blog.jcharistech.com/2020/02/26/how-to-deploy-a-face-detection-streamlit-app-on-heroku/
+# https://towardsdatascience.com/a-quick-tutorial-on-how-to-deploy-your-streamlit-app-to-heroku-
+# https://www.analyticsvidhya.com/blog/2021/06/deploy-your-ml-dl-streamlit-application-on-heroku/
+# https://gist.github.com/jeremyjordan/6b506257509e8ba673f145baa568a1ea
+
+import json
+
+# https://www.r-bloggers.com/2020/12/creating-a-streamlit-web-app-building-with-docker-github-actions-and-hosting-on-heroku/
+# https://devcenter.heroku.com/articles/container-registry-and-runtime
+# from yolo_inference_util import run_yolo_v5
+import os
+from tempfile import NamedTemporaryFile
+
+import cv2
+import numpy as np
+import pandas as pd
+import streamlit as st
+
+# import matplotlib.pyplot as plt
+from app_utils import annotate_planogram_compliance, bucket_sort, do_sorting, xml_to_csv
+from inference import run
+
+# from utils.plots import Annotator, colors
+# from utils.general import scale_coords
+
+app_formal_name = "Planogram Compliance"
+
+FILE_UPLOAD_DIR = "tmp"
+
+os.makedirs(FILE_UPLOAD_DIR, exist_ok=True)
+# Start the app in wide-mode
+st.set_page_config(
+    layout="wide",
+    page_title=app_formal_name,
+)
+# https://github.com/streamlit/streamlit/issues/1361
+uploaded_file = st.file_uploader(
+    "Choose a planogram image to score",
+    type=["jpg", "JPEG", "PNG", "JPG", "jpeg"],
+)
+uploaded_master_planogram_file = st.file_uploader(
+    "Upload a master planogram", type=["jpg", "JPEG", "PNG", "JPG", "jpeg"]
+)
+annotation_file = st.file_uploader("Upload master planogram annotation", type=["xml"])
+temp_file = NamedTemporaryFile(delete=False)
+
+target_names = [
+    "Bottle,100PLUS ACTIVE 1.5L",
+    "Bottle,100PLUS ACTIVE 500ML",
+    "Bottle,100PLUS LEMON LIME 1.5L",
+    "Bottle,100PLUS ORANGE 500ML",
+    "Bottle,100PLUS ORIGINAL 1.5L",
+    "Bottle,100PLUS TANGY ORANGE 1.5L",
+    "Bottle,100PLUS ZERO 1.5L",
+    "Bottle,100PLUS ZERO 500ML",
+    "Packet,F:M MAGNOLIA CHOC 1L",
+    "Bottle,F&N GINGER ADE 1.5L",
+    "Bottle,F&N GRAPE 1.5L",
+    "Bottle,F&N ICE CREAM SODA 1.5L",
+    "Bottle,F&N LYCHEE PEAR 1.5L",
+    "Bottle,F&N ORANGE 1.5L",
+    "Bottle,F&N PINEAPPLE PET 1.5L",
+    "Bottle,F&N SARSI 1.5L",
+    "Bottle,F&N SS ICE LEM TEA RS 500ML",
+    "Bottle,F&N SS ICE LEMON TEA RS 1.5L",
+    "Bottle,F&N SS ICE LEMON TEA 1.5L",
+    "Bottle,F&N SS ICE LEMON TEA 500ML",
+    "Bottle,F&N SS ICE PEACH TEA 1.5L",
+    "Bottle,SS ICE LEMON GT 1.48L",
+    "Bottle,SS WHITE CHRYS TEA 1.48L",
+    "Packet,FARMHOUSE FRESH MILK 1L FNDM",
+    "Packet,FARMHOUSE PLAIN LF 1L",
+    "Packet,PURA FRESH MILK 1L FS",
+    "Packet,NUTRISOY REG NO SUGAR ADDED 1L",
+    "Packet,NUTRISOY PLAIN 475ML",
+    "Packet,NUTRISOY PLAIN 1L",
+    "Packet,NUTRISOY OMEGA RD SUGAR 1L",
+    "Packet,NUTRISOY OMEGA NSA 1L",
+    "Packet,NUTRISOY ALMOND 1L",
+    "Packet,MAGNOLIA FRESH MILK 1L FNDM",
+    "Packet,FM MAG FC PLAIN 200ML",
+    "Packet,MAG OMEGA PLUS PLAIN 200ML",
+    "Packet,MAG KURMA MILK 500ML",
+    "Packet,MAG KURMA MILK 1L",
+    "Packet,MAG CHOCOLATE FC 500ML",
+    "Packet,MAG BROWN SUGAR SS MILK 1L",
+    "Packet,FM MAG LFHC PLN 500ML",
+    "Packet,FM MAG LFHC OAT 500ML",
+    "Packet,FM MAG LFHC OAT 1L",
+    "Packet,FM MAG FC PLAIN 500ML",
+    "Void,PARTIAL VOID",
+    "Void,FULL VOID",
+    "Bottle,F&N SS ICE LEM TEA 500ML",
+]
+
+run_app = st.button("Run the compliance check")
+if run_app and uploaded_file is not None:
+    # Convert the file to an opencv image.
+    file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8)
+    temp_file.write(uploaded_file.getvalue())
+    uploaded_img = cv2.imdecode(file_bytes, 1)
+    cv2.imwrite("tmp/to_score_planogram_tmp.png", uploaded_img)
+
+    # if uploaded_master_planogram_file is None:
+    #     master = cv2.imread('./sample_master_planogram.jpeg')
+
+    names_dict = {name: id for id, name in enumerate(target_names)}
+
+    sorted_xml_df = None
+    # https://discuss.streamlit.io/t/unable-to-read-files-using-standard-file-uploader/2258/2
+    if uploaded_master_planogram_file and annotation_file:
+        file_bytes = np.asarray(
+            bytearray(uploaded_master_planogram_file.read()), dtype=np.uint8
+        )
+        master = cv2.imdecode(file_bytes, 1)
+        cv2.imwrite("tmp/master_tmp.png", master)
+        # cv2.imwrite("tmp_uploaded_master_planogram_img.png", master)
+        # xml = annotation_file.read()
+        # tmp_xml ="tmp_xml_annotation.xml"
+        # with open(tmp_xml ,'w',encoding='utf-8') as f:
+        #     xml = f.write(xml)
+        xml_df = xml_to_csv(annotation_file)
+        xml_df["cls"] = xml_df["cls"].map(names_dict)
+        sorted_xml_df = do_sorting(xml_df)
+        # (no-op expression removed; line_data below holds the counts)
+
+        line_data = sorted_xml_df.line_number.value_counts()
+        n_rows = int(len(line_data))
+        n_cols = int(max(line_data))
+        master_table = np.zeros((n_rows, n_cols)) + 101
+        master_annotations = []
+        for i, row in sorted_xml_df.groupby("line_number"):
+            # print(f"Adding products in the row {i} to the detected planogram", row.cls.tolist())
+            products = row.cls.tolist()
+            master_table[int(i - 1), 0 : len(products)] = products
+            annotations = [
+                (int(k), int(v))
+                for k, v in list(
+                    zip(row.cls.unique(), row.cls.value_counts().tolist())
+                )
+            ]
+            master_annotations.append(annotations)
+        # master_table.shape  # no-op expression
+        # print("Annotated planogram")
+        # print(np.matrix(master_table))
+
+    elif uploaded_master_planogram_file:
+        print(
+            "Finding the master annotations with the YOLOv5 model predictions"
+        )
+        file_bytes = np.asarray(
+            bytearray(uploaded_master_planogram_file.read()), dtype=np.uint8
+        )
+        master = cv2.imdecode(file_bytes, 1)
+        cv2.imwrite("tmp/master_tmp.png", master)
+        master_results = run(
+            weights="base_line_best_model_exp5.pt",
+            source="tmp/master_tmp.png",
+            imgsz=[640, 640],
+            conf_thres=0.6,
+            iou_thres=0.6,
+        )
+
+        bb_df = pd.DataFrame(
+            master_results[0][1].tolist(),
+            columns=["xmin", "ymin", "xmax", "ymax", "conf", "cls"],
+        )
+        sorted_df = do_sorting(bb_df)
+
+        n_rows = int(sorted_df.line_number.max())
+        n_cols = int(
+            sorted_df.groupby("line_number")
+            .size()
+            .reset_index(name="counts")["counts"]
+            .max()
+        )
+        non_null_product = 101
+        print("master size", n_rows, n_cols)
+        master_annotations = []
+        master_table = np.zeros((int(n_rows), int(n_cols))) + non_null_product
+        for i, row in sorted_df.groupby("line_number"):
+            # print(f"Adding products in the row {i} to the detected planogram", row.cls.tolist())
+            products = row.cls.tolist()
+            col_len = min(len(products), n_cols)
+            print("col size: ", col_len)
+            print("row size: ", i - 1)
+            if n_rows <= (i - 1):
+                print("more rows than expected in the predictions")
+                break
+            master_table[int(i - 1), 0:col_len] = products[:col_len]
+            annotations = [
+                (int(k), int(v))
+                for k, v in list(
+                    zip(row.cls.unique(), row.cls.value_counts().tolist())
+                )
+            ]
+            master_annotations.append(annotations)
+    else:
+        master = cv2.imread("./sample_master_planogram.jpeg")
+        n_rows = 3
+        n_cols = 16
+        master_table = np.zeros((n_rows, n_cols)) + 101
+        master_annotations = [
+            [(32, 12), (8, 4)],
+            [(36, 1), (41, 6), (50, 4), (51, 3), (52, 2)],
+            [(23, 5), (24, 6), (54, 5)],
+        ]
+
+        for i, row in enumerate(master_annotations):
+            idx = 0
+            for product, count in row:
+                master_table[i, idx : idx + count] = product
+                idx = idx + count
+    # Now do something with the image! For example, let's display it:
+    # st.image(opencv_image, channels="BGR")
+
+    # uploaded_img = '/content/drive/My Drive/0.CV/0.Planogram_Compliance/planogram_data/images/test/IMG_5718.jpg'
+    result_list = run(
+        weights="base_line_best_model_exp5.pt",
+        source="tmp/to_score_planogram_tmp.png",
+        imgsz=[640, 640],
+        conf_thres=0.6,
+        iou_thres=0.6,
+    )
+
+    bb_df = pd.DataFrame(
+        result_list[0][1].tolist(),
+        columns=["xmin", "ymin", "xmax", "ymax", "conf", "cls"],
+    )
+    sorted_df = do_sorting(bb_df)
+
+    non_null_product = 101
+    print("master size", n_rows, n_cols)
+    detected_table = np.zeros((n_rows, n_cols)) + non_null_product
+    for i, row in sorted_df.groupby("line_number"):
+        # print(f"Adding products in the row {i} to the detected planogram", row.cls.tolist())
+        products = row.cls.tolist()
+        col_len = min(len(products), n_cols)
+        print("col size: ", col_len)
+        print("row size: ", i - 1)
+        if n_rows <= (i - 1):
+            print("more rows than expected in the predictions")
+            break
+        detected_table[int(i - 1), 0:col_len] = products[:col_len]
+
+    # score = (master_table == detected_table).sum() / (master_table != non_null_product).sum()
+    correct_matches = (
+        np.ma.masked_equal(master_table, non_null_product) == detected_table
+    ).sum()
+    total_products = (master_table != non_null_product).sum()
+    score = correct_matches / total_products
+    # if sorted_xml_df is not None:
+    #     annotate_df = sorted_xml_df[["xmin","ymin", "xmax", "ymax", "line_number","cls"]].astype(int)
+    # else:
+    annotate_df = sorted_df[
+        ["xmin", "ymin", "xmax", "ymax", "line_number", "cls"]
+    ].astype(int)
+
+    mask = master_table != non_null_product
+    m_detected_table = np.ma.masked_array(master_table, mask=mask)
+    m_annotated_table = np.ma.masked_array(detected_table, mask=mask)
+
+    # wrong_indexes = np.ravel_multi_index(master_table*mask != detected_table*mask, master_table.shape)
+    wrong_indexes = np.where(master_table != detected_table)
+    correct_indexes = np.where(master_table == detected_table)
+    annotated_planogram = annotate_planogram_compliance(
+        uploaded_img, annotate_df, correct_indexes, wrong_indexes, target_names
+    )
+    st.title("Target Products")
+    st.write(json.dumps(target_names))
+    st.title("The master planogram annotation")
+    st.write(
+        "The annotations are based on the index of products from the Target Products list"
+    )
+    st.write(json.dumps(master_annotations))
+
+    # https://github.com/streamlit/streamlit/issues/888
+    st.image(
+        [master, annotated_planogram, result_list[0][0]],
+        width=512,
+        caption=[
+            "Master planogram",
+            "Planogram Compliance",
+            "Planogram Predictions",
+        ],
+        channels="BGR",
+    )
+    # st.image([master, annotated_planogram], width=512, caption=["Master planogram", "Planogram Compliance"], channels="BGR")
+    st.title("Planogram Compliance score")
+    # st.write(f"{correct_matches} / {total_products}")
+    st.write(score)
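
A minimal sketch of how the masked-array scoring above behaves, on toy 2x4 tables with made-up product ids (not part of the committed file): np.ma.masked_equal drops the empty-cell sentinel (101) from the comparison, so the score counts correct matches over occupied master cells only.

    import numpy as np

    non_null_product = 101  # sentinel for an empty shelf cell
    master_table = np.array([[3, 3, 7, 101], [5, 5, 5, 101]])
    detected_table = np.array([[3, 2, 7, 101], [5, 5, 101, 101]])
    correct_matches = (
        np.ma.masked_equal(master_table, non_null_product) == detected_table
    ).sum()  # masked (empty) cells never count as matches
    total_products = (master_table != non_null_product).sum()
    print(correct_matches / total_products)  # 4 / 6 ≈ 0.67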
app_utils.py ADDED
@@ -0,0 +1,196 @@
+import glob
+import json
+import os
+import xml.etree.ElementTree as ET
+
+import cv2
+
+# from sklearn.externals import joblib
+import joblib
+import numpy as np
+import pandas as pd
+
+# from .variables import old_ocr_req_cols
+# from .skew_correction import PageSkewWraper
+
+const_HW = 1.294117647
+const_W = 600
+# https://www.forbes.com/sites/forbestechcouncil/2020/06/02/leveraging-technologies-to-align-realograms-and-planograms-for-grocery/?sh=506b8b78e86c
+
+
+# https://stackoverflow.com/questions/39403183/python-opencv-sorting-contours
+# http://devdoc.net/linux/OpenCV-3.2.0/da/d0c/tutorial_bounding_rects_circles.html
+# https://stackoverflow.com/questions/10297713/find-contour-of-the-set-of-points-in-opencv
+# https://stackoverflow.com/questions/16538774/dealing-with-contours-and-bounding-rectangle-in-opencv-2-4-python-2-7
+# https://stackoverflow.com/questions/50308055/creating-bounding-boxes-for-contours
+# https://stackoverflow.com/questions/57296398/how-can-i-get-better-results-of-bounding-box-using-find-contours-of-opencv
+# http://amroamroamro.github.io/mexopencv/opencv/generalContours_demo1.html
+# https://gist.github.com/bigsnarfdude/d811e31ee17495f82f10db12651ae82d
+# http://man.hubwiz.com/docset/OpenCV.docset/Contents/Resources/Documents/da/d0c/tutorial_bounding_rects_circles.html
+# https://www.analyticsvidhya.com/blog/2021/05/document-layout-detection-and-ocr-with-detectron2/
+# https://colab.research.google.com/drive/1m6gaQF6Q4M0IaSjoo_4jWllKJjK-i6fw?usp=sharing#scrollTo=lEyl3wYKHAe1
+# https://stackoverflow.com/questions/39403183/python-opencv-sorting-contours
+# https://docs.opencv.org/2.4/doc/tutorials/imgproc/shapedescriptors/bounding_rects_circles/bounding_rects_circles.html
+# https://www.pyimagesearch.com/2016/03/21/ordering-coordinates-clockwise-with-python-and-opencv/
+
+
+def bucket_sort(df, colmn, ymax_col="ymax", ymin_col="ymin"):
+    df["line_number"] = 0
+    colmn.append("line_number")
+    array_value = df[colmn].values
+    start_index = Line_counter = counter = 0
+    ymax, ymin, line_no = (
+        colmn.index(ymax_col),
+        colmn.index(ymin_col),
+        colmn.index("line_number"),
+    )
+    while counter < len(array_value):
+        current_ymax = array_value[start_index][ymax]
+        for next_index in range(start_index, len(array_value)):
+            counter += 1
+
+            next_ymin = array_value[next_index][ymin]
+            next_ymax = array_value[next_index][ymax]
+            if current_ymax > next_ymin:
+                array_value[next_index][line_no] = Line_counter + 1
+                # if current_ymax < next_ymax:
+                #     current_ymax = next_ymax
+            else:
+                counter -= 1
+                break
+        # print(counter, len(array_value), start_index)
+        start_index = counter
+        Line_counter += 1
+    return pd.DataFrame(array_value, columns=colmn)
+
+
+def do_sorting(df):
+    df.sort_values(["ymin", "xmin"], ascending=True, inplace=True)
+    df["idx"] = df.index
+    if "line_number" in df.columns:
+        print("line number removed")
+        df.drop("line_number", axis=1, inplace=True)
+    req_colns = ["xmin", "ymin", "xmax", "ymax", "idx"]
+    temp_df = df.copy()
+    temp = bucket_sort(temp_df.copy(), req_colns)
+    df = df.merge(temp[["idx", "line_number"]], on="idx")
+    df.sort_values(["line_number", "xmin"], ascending=True, inplace=True)
+    df = df.reset_index(drop=True)
+    # df = df.reset_index(drop=True)  # redundant duplicate of the line above
+    return df
+
+
+def xml_to_csv(xml_file):
+    # https://gist.github.com/rotemtam/88d9a4efae243fc77ed4a0f9917c8f6c
+    xml_list = []
+    # for xml_file in glob.glob(path + '/*.xml'):
+    # https://discuss.streamlit.io/t/unable-to-read-files-using-standard-file-uploader/2258/2
+    tree = ET.parse(xml_file)
+    root = tree.getroot()
+    for member in root.findall("object"):
+        bbx = member.find("bndbox")
+        xmin = int(bbx.find("xmin").text)
+        ymin = int(bbx.find("ymin").text)
+        xmax = int(bbx.find("xmax").text)
+        ymax = int(bbx.find("ymax").text)
+        label = member.find("name").text
+
+        value = (
+            root.find("filename").text,
+            int(root.find("size")[0].text),
+            int(root.find("size")[1].text),
+            label,
+            xmin,
+            ymin,
+            xmax,
+            ymax,
+        )
+        xml_list.append(value)
+    column_name = [
+        "filename",
+        "width",
+        "height",
+        "cls",
+        "xmin",
+        "ymin",
+        "xmax",
+        "ymax",
+    ]
+    xml_df = pd.DataFrame(xml_list, columns=column_name)
+    return xml_df
+
+
+# def annotate_planogram_compliance(img0, sorted_xml_df, wrong_indexes, target_names):
+#     # annotator = Annotator(img0, line_width=3, pil=True)
+#     det = sorted_xml_df[['xmin', 'ymin', 'xmax', 'ymax','cls']].values
+#     # det[:, :4] = scale_coords((640, 640), det[:, :4], img0.shape).round()
+#     for i, (*xyxy, cls) in enumerate(det):
+#         c = int(cls)  # integer class
+#         if i in wrong_indexes:
+#             # print(xyxy, "Wrong detection", (255, 0, 0))
+#             label = "Wrong detection"
+#             color = (0, 0, 255)
+#         else:
+#             # print(xyxy, label, (0, 255, 0))
+#             label = f'{target_names[c]}'
+#             color = (0, 255, 0)
+#         org = (int(xyxy[0]), int(xyxy[1]))
+#         top_left = org
+#         bottom_right = (int(xyxy[2]), int(xyxy[3]))
+#         # print("#"*50)
+#         # print(f"Annotating cv2 rectangle with shape: {img0.shape}, top left: {top_left}, bottom right: {bottom_right}, color: {color}, thickness: {3}, cv2.LINE_8")
+#         # print("#"*50)
+#         cv2.rectangle(img0, top_left, bottom_right, color, 3, cv2.LINE_8)
+#         cv2.putText(img0, label, tuple(org), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color)
+#     return img0
+
+
+def annotate_planogram_compliance(
+    img0, sorted_df, correct_indexes, wrong_indexes, target_names
+):
+    # annotator = Annotator(img0, line_width=3, pil=True)
+    det = sorted_df[["xmin", "ymin", "xmax", "ymax", "cls"]].values
+    # det[:, :4] = scale_coords((640, 640), det[:, :4], img0.shape).round()
+    for x, y in zip(*correct_indexes):
+        try:
+            row = sorted_df[sorted_df["line_number"] == x + 1].iloc[y]
+            xyxy = row[["xmin", "ymin", "xmax", "ymax"]].values
+            label = f'{target_names[row["cls"]]}'
+            color = (0, 255, 0)
+            # org = (int(xyxy[0]), int(xyxy[1]))
+            top_left = (int(row["xmin"]), int(row["ymin"]))
+            bottom_right = (int(row["xmax"]), int(row["ymax"]))
+            cv2.rectangle(img0, top_left, bottom_right, color, 3, cv2.LINE_8)
+
+            cv2.putText(
+                img0, label, top_left, cv2.FONT_HERSHEY_SIMPLEX, 0.5, color
+            )
+        except Exception as e:
+            print("Error: " + str(e))
+            continue
+
+    for x, y in zip(*wrong_indexes):
+        try:
+            row = sorted_df[sorted_df["line_number"] == x + 1].iloc[y]
+            xyxy = row[["xmin", "ymin", "xmax", "ymax"]].values
+            label = f'{target_names[row["cls"]]}'
+            color = (0, 0, 255)
+            # org = (int(xyxy[0]), int(xyxy[1]))
+            top_left = (int(row["xmin"]), int(row["ymin"]))  # int() casts for cv2
+            bottom_right = (int(row["xmax"]), int(row["ymax"]))
+            cv2.rectangle(img0, top_left, bottom_right, color, 3, cv2.LINE_8)
+
+            cv2.putText(
+                img0, label, top_left, cv2.FONT_HERSHEY_SIMPLEX, 0.5, color
+            )
+        except Exception as e:
+            print("Error: " + str(e))
+            continue
+
+    return img0
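
A minimal usage sketch for do_sorting above, on four hypothetical boxes spread over two shelf rows (not part of the committed file): bucket_sort assigns a line_number by y-overlap, then each row is ordered left-to-right by xmin.

    import pandas as pd
    from app_utils import do_sorting

    # Two boxes on a top shelf (ymin near 0), two on a lower shelf (ymin near 100).
    boxes = pd.DataFrame(
        {
            "xmin": [50, 10, 60, 5],
            "ymin": [0, 2, 100, 103],
            "xmax": [90, 45, 95, 55],
            "ymax": [40, 42, 140, 142],
        }
    )
    sorted_boxes = do_sorting(boxes)
    # Expect line_number 1 for the top two boxes (xmin order 10, 50)
    # and line_number 2 for the bottom two (xmin order 5, 60).
    print(sorted_boxes[["xmin", "ymin", "line_number"]])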
base_line_best_model_exp5.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c259d5e97010ee1c9775d6d8c3bc8bb73f52a5ad871ca920902f35563f2acb42
+size 14621601
best_sku_model.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:46627e4923a4cbb695e2f1da5944ec7e2930acb640b822227aab334bddf1548b
+size 14355573
inference.py ADDED
@@ -0,0 +1,226 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+"""
+Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc.
+
+Usage - sources:
+    $ python detect.py --weights yolov5s.pt --source 0                               # webcam
+                                                     img.jpg                         # image
+                                                     vid.mp4                         # video
+                                                     screen                          # screenshot
+                                                     path/                           # directory
+                                                     list.txt                        # list of images
+                                                     list.streams                    # list of streams
+                                                     'path/*.jpg'                    # glob
+                                                     'https://youtu.be/Zgi9g1ksQHc'  # YouTube
+                                                     'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream
+
+Usage - formats:
+    $ python detect.py --weights yolov5s.pt                 # PyTorch
+                                 yolov5s.torchscript        # TorchScript
+                                 yolov5s.onnx               # ONNX Runtime or OpenCV DNN with --dnn
+                                 yolov5s_openvino_model     # OpenVINO
+                                 yolov5s.engine             # TensorRT
+                                 yolov5s.mlmodel            # CoreML (macOS-only)
+                                 yolov5s_saved_model        # TensorFlow SavedModel
+                                 yolov5s.pb                 # TensorFlow GraphDef
+                                 yolov5s.tflite             # TensorFlow Lite
+                                 yolov5s_edgetpu.tflite     # TensorFlow Edge TPU
+                                 yolov5s_paddle_model       # PaddlePaddle
+"""
+
+import argparse
+import os
+import platform
+import sys
+from pathlib import Path
+
+import torch
+
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[0]  # YOLOv5 root directory
+if str(ROOT) not in sys.path:
+    sys.path.append(str(ROOT))  # add ROOT to PATH
+ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
+
+from models.common import DetectMultiBackend
+from utils.dataloaders import (
+    IMG_FORMATS,
+    VID_FORMATS,
+    LoadImages,
+    LoadScreenshots,
+    LoadStreams,
+)
+from utils.general import (
+    LOGGER,
+    Profile,
+    check_file,
+    check_img_size,
+    check_imshow,
+    check_requirements,
+    colorstr,
+    cv2,
+    increment_path,
+    non_max_suppression,
+    print_args,
+    scale_boxes,
+    strip_optimizer,
+    xyxy2xywh,
+)
+from utils.plots import Annotator, colors, save_one_box
+from utils.torch_utils import select_device, smart_inference_mode
+
+
+@smart_inference_mode()
+def run(
+    weights=ROOT / "yolov5s.pt",  # model path or triton URL
+    source=ROOT / "data/images",  # file/dir/URL/glob/screen/0(webcam)
+    data=ROOT / "data/coco128.yaml",  # dataset.yaml path
+    imgsz=(640, 640),  # inference size (height, width)
+    conf_thres=0.25,  # confidence threshold
+    iou_thres=0.45,  # NMS IOU threshold
+    max_det=1000,  # maximum detections per image
+    device="",  # cuda device, i.e. 0 or 0,1,2,3 or cpu
+    view_img=False,  # show results
+    save_txt=False,  # save results to *.txt
+    save_conf=False,  # save confidences in --save-txt labels
+    save_crop=False,  # save cropped prediction boxes
+    nosave=False,  # do not save images/videos
+    classes=None,  # filter by class: --class 0, or --class 0 2 3
+    agnostic_nms=False,  # class-agnostic NMS
+    augment=False,  # augmented inference
+    visualize=False,  # visualize features
+    update=False,  # update all models
+    project=ROOT / "runs/detect",  # save results to project/name
+    name="exp",  # save results to project/name
+    exist_ok=False,  # existing project/name ok, do not increment
+    line_thickness=3,  # bounding box thickness (pixels)
+    hide_labels=False,  # hide labels
+    hide_conf=False,  # hide confidences
+    half=False,  # use FP16 half-precision inference
+    dnn=False,  # use OpenCV DNN for ONNX inference
+    vid_stride=1,  # video frame-rate stride
+):
+    source = str(source)
+    save_img = not nosave and not source.endswith(
+        ".txt"
+    )  # save inference images
+    is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
+    is_url = source.lower().startswith(
+        ("rtsp://", "rtmp://", "http://", "https://")
+    )
+    webcam = (
+        source.isnumeric()
+        or source.endswith(".streams")
+        or (is_url and not is_file)
+    )
+    screenshot = source.lower().startswith("screen")
+    if is_url and is_file:
+        source = check_file(source)  # download
+
+    # Directories
+    save_dir = increment_path(
+        Path(project) / name, exist_ok=exist_ok
+    )  # increment run
+    (save_dir / "labels" if save_txt else save_dir).mkdir(
+        parents=True, exist_ok=True
+    )  # make dir
+
+    # Load model
+    device = select_device(device)
+    model = DetectMultiBackend(
+        weights, device=device, dnn=dnn, data=data, fp16=half
+    )
+    stride, names, pt = model.stride, model.names, model.pt
+    imgsz = check_img_size(imgsz, s=stride)  # check image size
+
+    # Dataloader
+    bs = 1  # batch_size
+    if webcam:
+        view_img = check_imshow(warn=True)
+        dataset = LoadStreams(
+            source,
+            img_size=imgsz,
+            stride=stride,
+            auto=pt,
+            vid_stride=vid_stride,
+        )
+        bs = len(dataset)
+    elif screenshot:
+        dataset = LoadScreenshots(
+            source, img_size=imgsz, stride=stride, auto=pt
+        )
+    else:
+        dataset = LoadImages(
+            source,
+            img_size=imgsz,
+            stride=stride,
+            auto=pt,
+            vid_stride=vid_stride,
+        )
+    vid_path, vid_writer = [None] * bs, [None] * bs
+
+    # Run inference
+    model.warmup(imgsz=(1 if pt or model.triton else bs, 3, *imgsz))  # warmup
+    seen, windows, dt = 0, [], (Profile(), Profile(), Profile())
+    for path, im, im0s, vid_cap, s in dataset:
+        with dt[0]:
+            im = torch.from_numpy(im).to(model.device)
+            im = im.half() if model.fp16 else im.float()  # uint8 to fp16/32
+            im /= 255  # 0 - 255 to 0.0 - 1.0
+            if len(im.shape) == 3:
+                im = im[None]  # expand for batch dim
+
+        # Inference
+        with dt[1]:
+            visualize = (
+                increment_path(save_dir / Path(path).stem, mkdir=True)
+                if visualize
+                else False
+            )
+            pred = model(im, augment=augment, visualize=visualize)
+
+        # NMS
+        with dt[2]:
+            pred = non_max_suppression(
+                pred,
+                conf_thres,
+                iou_thres,
+                classes,
+                agnostic_nms,
+                max_det=max_det,
+            )
+
+        # Second-stage classifier (optional)
+        # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)
+
+        # Process predictions
+        for i, det in enumerate(pred):  # per image
+            seen += 1
+            if webcam:  # batch_size >= 1
+                p, im0, frame = path[i], im0s[i].copy(), dataset.count
+                s += f"{i}: "
+            else:
+                p, im0, frame = path, im0s.copy(), getattr(dataset, "frame", 0)
+
+            p = Path(p)  # to Path
+            save_path = str(save_dir / p.name)  # im.jpg
+            txt_path = str(save_dir / "labels" / p.stem) + (
+                "" if dataset.mode == "image" else f"_{frame}"
+            )  # im.txt
+            s += "%gx%g " % im.shape[2:]  # print string
+            gn = torch.tensor(im0.shape)[
+                [1, 0, 1, 0]
+            ]  # normalization gain whwh
+            imc = im0.copy() if save_crop else im0  # for save_crop
+            annotator = Annotator(
+                im0, line_width=line_thickness, example=str(names)
+            )
+            results = []
+            if len(det):
+                # Rescale boxes from img_size to im0 size
+                det[:, :4] = scale_boxes(
+                    im.shape[2:], det[:, :4], im0.shape
+                ).round()
+                results.append((path, det))
+
+    return results  # (path, det) pairs for the last processed image
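
A minimal usage sketch for the trimmed run() above, mirroring the call in app (1).py (not part of the committed file; it assumes the LFS weights and a local test image exist): each returned item is a (path, det) pair whose det rows are [xmin, ymin, xmax, ymax, conf, cls] in original-image coordinates.

    import pandas as pd
    from inference import run

    results = run(
        weights="base_line_best_model_exp5.pt",
        source="tmp/to_score_planogram_tmp.png",
        imgsz=[640, 640],
        conf_thres=0.6,
        iou_thres=0.6,
    )
    path, det = results[0]  # detections for the single input image
    bb_df = pd.DataFrame(
        det.tolist(), columns=["xmin", "ymin", "xmax", "ymax", "conf", "cls"]
    )
    print(path, len(bb_df), "detections")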
master_tmp.png ADDED

Git LFS Details

  • SHA256: 11ee014f839a2869040253223d5073a403a63c906ac91ed129ed0c7d3d57701b
  • Pointer size: 133 Bytes
  • Size of remote file: 18.8 MB
requirements (1).txt ADDED
@@ -0,0 +1,40 @@
+
+# pip install -r requirements.txt
+# Base ----------------------------------------
+seaborn
+ipython
+psutil
+matplotlib>=3.2.2
+numpy>=1.18.5
+opencv-python==4.7.0.68
+Pillow>=7.1.2
+PyYAML>=5.3.1
+requests>=2.23.0
+scipy>=1.4.1
+torch>=1.7.0
+torchvision>=0.8.1
+tqdm>=4.41.0
+joblib
+# Logging -------------------------------------
+# tensorboard>=2.4.1
+# wandb
+
+# Plotting ------------------------------------
+pandas>=1.1.4
+# seaborn>=0.11.0
+
+# Export --------------------------------------
+# coremltools>=4.1  # CoreML export
+# onnx>=1.9.0  # ONNX export
+# onnx-simplifier>=0.3.6  # ONNX simplifier
+# scikit-learn==0.19.2  # CoreML quantization
+# tensorflow>=2.4.1  # TFLite export
+# tensorflowjs>=3.9.0  # TF.js export
+# openvino-dev  # OpenVINO export
+
+# Extras --------------------------------------
+# albumentations>=1.0.3
+# Cython  # for pycocotools https://github.com/cocodataset/cocoapi/issues/172
+# pycocotools>=2.0  # COCO mAP
+# roboflow
+# thop  # FLOPs computation
to_score_planogram_tmp.png ADDED

Git LFS Details

  • SHA256: 4eba500ab69ae7b537a6cf8a8bd854a7e30dbb27a40d9ab75c0e9047e45ae5df
  • Pointer size: 132 Bytes
  • Size of remote file: 2.18 MB