# Reference links used while building and deploying this app:
# https://planogram-compliance.herokuapp.com/
# https://dashboard.heroku.com/apps/planogram-compliance/deploy/heroku-git
# https://medium.com/@mohcufe/how-to-deploy-your-trained-pytorch-model-on-heroku-ff4b73085ddd
# https://stackoverflow.com/questions/51730880/where-do-i-get-a-cpu-only-version-of-pytorch
# https://blog.jcharistech.com/2020/02/26/how-to-deploy-a-face-detection-streamlit-app-on-heroku/
# https://towardsdatascience.com/a-quick-tutorial-on-how-to-deploy-your-streamlit-app-to-heroku-
# https://www.analyticsvidhya.com/blog/2021/06/deploy-your-ml-dl-streamlit-application-on-heroku/
# https://gist.github.com/jeremyjordan/6b506257509e8ba673f145baa568a1ea
# https://www.r-bloggers.com/2020/12/creating-a-streamlit-web-app-building-with-docker-github-actions-and-hosting-on-heroku/
# https://devcenter.heroku.com/articles/container-registry-and-runtime

import json
import os
from tempfile import NamedTemporaryFile

import cv2
import numpy as np
import pandas as pd
import streamlit as st

# import matplotlib.pyplot as plt
# from yolo_inference_util import run_yolo_v5
from app_utils import (
    annotate_planogram_compliance,
    bucket_sort,
    do_sorting,
    xml_to_csv,
)
from inference import run

# from utils.plots import Annotator, colors
# from utils.general import scale_coords

app_formal_name = "Planogram Compliance"
FILE_UPLOAD_DIR = "tmp"
os.makedirs(FILE_UPLOAD_DIR, exist_ok=True)

# Start the app in wide mode.
st.set_page_config(
    layout="wide",
    page_title=app_formal_name,
)

# https://github.com/streamlit/streamlit/issues/1361
uploaded_file = st.file_uploader(
    "Choose a planogram image to score",
    type=["jpg", "JPEG", "PNG", "JPG", "jpeg"],
)
uploaded_master_planogram_file = st.file_uploader(
    "Upload a master planogram", type=["jpg", "JPEG", "PNG", "JPG", "jpeg"]
)
annotation_file = st.file_uploader(
    "Upload the master planogram annotation (XML)", type=["xml"]
)
temp_file = NamedTemporaryFile(delete=False)

# Class labels in model-id order; the index of each label is its class id.
target_names = [
    "Bottle,100PLUS ACTIVE 1.5L",
    "Bottle,100PLUS ACTIVE 500ML",
    "Bottle,100PLUS LEMON LIME 1.5L",
    "Bottle,100PLUS ORANGE 500ML",
    "Bottle,100PLUS ORIGINAL 1.5L",
    "Bottle,100PLUS TANGY ORANGE 1.5L",
    "Bottle,100PLUS ZERO 1.5L",
    "Bottle,100PLUS ZERO 500ML",
    "Packet,F:M MAGNOLIA CHOC 1L",
    "Bottle,F&N GINGER ADE 1.5L",
    "Bottle,F&N GRAPE 1.5L",
    "Bottle,F&N ICE CREAM SODA 1.5L",
    "Bottle,F&N LYCHEE PEAR 1.5L",
    "Bottle,F&N ORANGE 1.5L",
    "Bottle,F&N PINEAPPLE PET 1.5L",
    "Bottle,F&N SARSI 1.5L",
    "Bottle,F&N SS ICE LEM TEA RS 500ML",
    "Bottle,F&N SS ICE LEMON TEA RS 1.5L",
    "Bottle,F&N SS ICE LEMON TEA 1.5L",
    "Bottle,F&N SS ICE LEMON TEA 500ML",
    "Bottle,F&N SS ICE PEACH TEA 1.5L",
    "Bottle,SS ICE LEMON GT 1.48L",
    "Bottle,SS WHITE CHRYS TEA 1.48L",
    "Packet,FARMHOUSE FRESH MILK 1L FNDM",
    "Packet,FARMHOUSE PLAIN LF 1L",
    "Packet,PURA FRESH MILK 1L FS",
    "Packet,NUTRISOY REG NO SUGAR ADDED 1L",
    "Packet,NUTRISOY PLAIN 475ML",
    "Packet,NUTRISOY PLAIN 1L",
    "Packet,NUTRISOY OMEGA RD SUGAR 1L",
    "Packet,NUTRISOY OMEGA NSA 1L",
    "Packet,NUTRISOY ALMOND 1L",
    "Packet,MAGNOLIA FRESH MILK 1L FNDM",
    "Packet,FM MAG FC PLAIN 200ML",
    "Packet,MAG OMEGA PLUS PLAIN 200ML",
    "Packet,MAG KURMA MILK 500ML",
    "Packet,MAG KURMA MILK 1L",
    "Packet,MAG CHOCOLATE FC 500ML",
    "Packet,MAG BROWN SUGAR SS MILK 1L",
    "Packet,FM MAG LFHC PLN 500ML",
    "Packet,FM MAG LFHC OAT 500ML",
    "Packet,FM MAG LFHC OAT 1L",
    "Packet,FM MAG FC PLAIN 500ML",
    "Void,PARTIAL VOID",
    "Void,FULL VOID",
    "Bottle,F&N SS ICE LEM TEA 500ML",
]

run_app = st.button("Run the compliance check")
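# Note added for clarity (not in the original script): the index of each label in
# `target_names` is used as its class id; the same ordering is assumed to match the
# YOLOv5 model's predictions and the `cls` values parsed from the XML annotation.
# Everything below runs only after the button is pressed and a planogram image to
# score has been uploaded.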
if run_app and uploaded_file is not None:
    # Convert the uploaded file to an OpenCV image.
    file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8)
    temp_file.write(uploaded_file.getvalue())
    uploaded_img = cv2.imdecode(file_bytes, 1)
    cv2.imwrite("tmp/to_score_planogram_tmp.png", uploaded_img)

    # if uploaded_master_planogram_file is None:
    #     master = cv2.imread('./sample_master_planogram.jpeg')

    names_dict = {name: idx for idx, name in enumerate(target_names)}

    sorted_xml_df = None
    # https://discuss.streamlit.io/t/unable-to-read-files-using-standard-file-uploader/2258/2
    if uploaded_master_planogram_file and annotation_file:
        # Build the master planogram table from the uploaded image and its XML annotation.
        file_bytes = np.asarray(
            bytearray(uploaded_master_planogram_file.read()), dtype=np.uint8
        )
        master = cv2.imdecode(file_bytes, 1)
        cv2.imwrite("tmp/master_tmp.png", master)
        # cv2.imwrite("tmp_uploaded_master_planogram_img.png", master)
        # xml = annotation_file.read()
        # tmp_xml = "tmp_xml_annotation.xml"
        # with open(tmp_xml, 'w', encoding='utf-8') as f:
        #     xml = f.write(xml)
        xml_df = xml_to_csv(annotation_file)
        xml_df["cls"] = xml_df["cls"].map(names_dict)
        sorted_xml_df = do_sorting(xml_df)

        line_data = sorted_xml_df.line_number.value_counts()
        n_rows = int(len(line_data))
        n_cols = int(max(line_data))
        master_table = np.zeros((n_rows, n_cols)) + 101
        master_annotations = []
        for i, row in sorted_xml_df.groupby("line_number"):
            # print(f"Adding products in the row {i} to the detected planogram", row.cls.tolist())
            products = row.cls.tolist()
            master_table[int(i - 1), 0 : len(products)] = products
            # (class id, count) pairs for this shelf row, in order of appearance.
            annotations = [
                (int(k), int((row.cls == k).sum())) for k in row.cls.unique()
            ]
            master_annotations.append(annotations)
        # print("Annotated planogram")
        # print(np.matrix(master_table))
    elif uploaded_master_planogram_file:
        print("Finding the master annotations with the YOLOv5 model predictions")
        file_bytes = np.asarray(
            bytearray(uploaded_master_planogram_file.read()), dtype=np.uint8
        )
        master = cv2.imdecode(file_bytes, 1)
        cv2.imwrite("tmp/master_tmp.png", master)
        master_results = run(
            weights="base_line_best_model_exp5.pt",
            source="tmp/master_tmp.png",
            imgsz=[640, 640],
            conf_thres=0.6,
            iou_thres=0.6,
        )

        bb_df = pd.DataFrame(
            master_results[0][1].tolist(),
            columns=["xmin", "ymin", "xmax", "ymax", "conf", "cls"],
        )
        sorted_df = do_sorting(bb_df)

        n_rows = int(sorted_df.line_number.max())
        n_cols = int(
            sorted_df.groupby("line_number")
            .size()
            .reset_index(name="counts")["counts"]
            .max()
        )
        non_null_product = 101
        print("master size", n_rows, n_cols)
        master_annotations = []
        master_table = np.zeros((int(n_rows), int(n_cols))) + non_null_product
        for i, row in sorted_df.groupby("line_number"):
            # print(f"Adding products in the row {i} to the detected planogram", row.cls.tolist())
            products = row.cls.tolist()
            col_len = min(len(products), n_cols)
            print("col size: ", col_len)
            print("row size: ", i - 1)
            if n_rows <= (i - 1):
                print("more rows than expected in the predictions")
                break
            master_table[int(i - 1), 0:col_len] = products[:col_len]
            # (class id, count) pairs for this shelf row, in order of appearance.
            annotations = [
                (int(k), int((row.cls == k).sum())) for k in row.cls.unique()
            ]
            master_annotations.append(annotations)
    else:
        # Fall back to the bundled sample master planogram and its hard-coded layout.
        master = cv2.imread("./sample_master_planogram.jpeg")
        n_rows = 3
        n_cols = 16
        master_table = np.zeros((n_rows, n_cols)) + 101
        master_annotations = [
            [(32, 12), (8, 4)],
            [(36, 1), (41, 6), (50, 4), (51, 3), (52, 2)],
            [(23, 5), (24, 6), (54, 5)],
        ]

        for i, row in enumerate(master_annotations):
            idx = 0
            for product, count in row:
                master_table[i, idx : idx + count] = product
                idx = idx + count
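    # Illustrative example (assumed values, not produced by the code above) of the
    # structures built so far, for a hypothetical 2 x 4 master planogram containing
    # class ids 32 and 8, where 101 marks an empty slot:
    #   master_table       -> [[ 32.,  32.,   8., 101.],
    #                          [  8.,   8.,   8., 101.]]
    #   master_annotations -> [[(32, 2), (8, 1)], [(8, 3)]]
    # The same row-major layout is reused below for `detected_table`, so the two
    # tables can be compared cell by cell.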
    # Now do something with the image! For example, let's display it:
    # st.image(opencv_image, channels="BGR")

    # uploaded_img = '/content/drive/My Drive/0.CV/0.Planogram_Compliance/planogram_data/images/test/IMG_5718.jpg'
    # Run the YOLOv5 model on the planogram image that needs to be scored.
    result_list = run(
        weights="base_line_best_model_exp5.pt",
        source="tmp/to_score_planogram_tmp.png",
        imgsz=[640, 640],
        conf_thres=0.6,
        iou_thres=0.6,
    )
    bb_df = pd.DataFrame(
        result_list[0][1].tolist(),
        columns=["xmin", "ymin", "xmax", "ymax", "conf", "cls"],
    )
    sorted_df = do_sorting(bb_df)

    non_null_product = 101
    print("master size", n_rows, n_cols)
    # Fill the detected planogram table row by row, padding empty slots with 101.
    detected_table = np.zeros((n_rows, n_cols)) + non_null_product
    for i, row in sorted_df.groupby("line_number"):
        # print(f"Adding products in the row {i} to the detected planogram", row.cls.tolist())
        products = row.cls.tolist()
        col_len = min(len(products), n_cols)
        print("col size: ", col_len)
        print("row size: ", i - 1)
        if n_rows <= (i - 1):
            print("more rows than expected in the predictions")
            break
        detected_table[int(i - 1), 0:col_len] = products[:col_len]

    # Compliance score: matched cells divided by the number of products the master
    # planogram expects (empty slots marked 101 are ignored).
    # score = (master_table == detected_table).sum() / (master_table != non_null_product).sum()
    correct_matches = (
        np.ma.masked_equal(master_table, non_null_product) == detected_table
    ).sum()
    total_products = (master_table != non_null_product).sum()
    score = correct_matches / total_products

    # if sorted_xml_df is not None:
    #     annotate_df = sorted_xml_df[["xmin", "ymin", "xmax", "ymax", "line_number", "cls"]].astype(int)
    # else:
    annotate_df = sorted_df[
        ["xmin", "ymin", "xmax", "ymax", "line_number", "cls"]
    ].astype(int)

    mask = master_table != non_null_product
    m_detected_table = np.ma.masked_array(master_table, mask=mask)
    m_annotated_table = np.ma.masked_array(detected_table, mask=mask)

    # wrong_indexes = np.ravel_multi_index(master_table*mask != detected_table*mask, master_table.shape)
    wrong_indexes = np.where(master_table != detected_table)
    correct_indexes = np.where(master_table == detected_table)
    annotated_planogram = annotate_planogram_compliance(
        uploaded_img, annotate_df, correct_indexes, wrong_indexes, target_names
    )

    st.title("Target Products")
    st.write(json.dumps(target_names))

    st.title("The master planogram annotation")
    st.write(
        "The annotations are based on the index of products from the Target Products list."
    )
    st.write(json.dumps(master_annotations))

    # https://github.com/streamlit/streamlit/issues/888
    st.image(
        [master, annotated_planogram, result_list[0][0]],
        width=512,
        caption=[
            "Master planogram",
            "Planogram Compliance",
            "Planogram Predictions",
        ],
        channels="BGR",
    )
    # st.image([master, annotated_planogram], width=512, caption=["Master planogram", "Planogram Compliance"], channels="BGR")

    st.title("Planogram Compliance score")
    # st.write(f"{correct_matches} / {total_products}")
    st.write(score)
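# Optional fallback (a minimal sketch, not part of the original app): give the user
# feedback when the compliance check is triggered without an image to score.
# The warning text below is illustrative.
elif run_app:
    st.warning(
        "Please upload a planogram image to score before running the compliance check."
    )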