import os import cv2 import numpy as np import tensorflow.compat.v1 as tf from numpy.linalg import norm from local_utils import detect_lp from os.path import splitext from tensorflow.python.keras.backend import set_session from tensorflow.keras.models import model_from_json from tensorflow.compat.v1 import ConfigProto #from tensorflow.compat.v1 import InteractiveSession from vars import models_path class DetectLicensePlate: def __init__(self): tf.compat.v1.disable_eager_execution() #config = ConfigProto() #config.gpu_options.allow_growth = False gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.433) self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) #self.sess = tf.Session(config=config) self.graph = tf.get_default_graph() set_session(self.sess) self.wpod_net_path = models_path+"wpod-net.json" # model path self.wpod_net = self.load_model(self.wpod_net_path) def load_model(self, path): try: path = splitext(path)[0] with open('%s.json' % path, 'r') as json_file: model_json = json_file.read() model = model_from_json(model_json, custom_objects={}) model.load_weights('%s.h5' % path) print("Loading model successfully...") self.graph = tf.get_default_graph() return model except Exception as e: print(e) def preprocess_image(self, image_path, resize=False): img = image_path img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) img = img / 255 if resize: img = cv2.resize(img, (224, 224)) return img def get_plate(self, image_path, Dmax=608, Dmin=608): vehicle = self.preprocess_image(image_path) ratio = float(max(vehicle.shape[:2])) / min(vehicle.shape[:2]) side = int(ratio * Dmin) bound_dim = min(side, Dmax) _, plates, _, cor = detect_lp(self.graph,self.sess,self.wpod_net, vehicle, bound_dim, lp_threshold=0.5) return vehicle, plates, cor class Segmentation: # to grab the contour of each digit from left to right def sort_contours(self, cnts, reverse=False): i = 0 boundingBoxes = [cv2.boundingRect(c) for c in cnts] (cnts, boundingBoxes) = zip(*sorted(zip(cnts, boundingBoxes), key=lambda b: b[1][i], reverse=False)) return cnts def find_characters(self, thresh, plate_image): # 2li 3lü gruplama için tmp_space = None short_space = None letter_num = 0 state_ = 0 cont, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) # creat a copy version "test_roi" of plat_image to draw bounding box test_roi = thresh.copy() # Initialize a list which will be used to append charater image crop_characters = [] # define standard width and height of character digit_w, digit_h = 32, 32 try: conturs = self.sort_contours(cont) except: return for c in conturs: (x, y, w, h) = cv2.boundingRect(c) ratio = h / w if 1 <= ratio <= 5.7: # Only select contour with defined ratio if h / plate_image.shape[0] >= 0.3: # Select contour which has the height larger than 50% of the plate if letter_num == 1: short_space = x - tmp_space if letter_num == 3: space = x - tmp_space if space > short_space + 2: # print("4.karakter sayı") state_ = 2 letter_num += 2 elif letter_num == 4: space = x - tmp_space if space > short_space + 2: state_ = 3 # print("5.karakter sayı") else: state_ = 4 # print("5.karakter harf") tmp_space = x + w # Draw bounding box arroung digit number cv2.rectangle(test_roi, (x, y), (x + w, y + h), (0, 255, 0), 2) try: # Sperate number and gibe prediction curr_num = thresh[y - 1:y + h + 2, x - 1:x + w + 2] curr_num = cv2.resize(curr_num, dsize=(digit_w, digit_h)) crop_characters.append(curr_num) letter_num += 1 except Exception as e: print(str(e)) return crop_characters, state_ class SVM(object): def __init__(self, C=1.0, gamma=0.5): # def C = 1.0, gamma = 0.5 self.model = cv2.ml.SVM_create() self.model.setGamma(gamma) self.model.setC(C) self.model.setKernel(cv2.ml.SVM_RBF) self.model.setType(cv2.ml.SVM_C_SVC) def train(self, samples, responses): self.model.train(samples, cv2.ml.ROW_SAMPLE, responses) def predict(self, samples): return self.model.predict(samples)[1].ravel() def load(self, fn): self.model = cv2.ml.SVM_load(fn) def save(self, fn): self.model.save(fn) class Ocr: def __init__(self): self.letter_dict = dict() self.letter_dict["0"] = "O" self.letter_dict["1"] = "I" self.letter_dict["2"] = "Z" self.letter_dict["3"] = "3" self.letter_dict["4"] = "L" self.letter_dict["5"] = "S" self.letter_dict["6"] = "G" self.letter_dict["7"] = "7" self.letter_dict["8"] = "B" self.letter_dict["9"] = "9" self.letter_dict["10"] = "A" self.letter_dict["11"] = "B" self.letter_dict["12"] = "C" self.letter_dict["13"] = "D" self.letter_dict["14"] = "E" self.letter_dict["15"] = "F" self.letter_dict["16"] = "G" self.letter_dict["17"] = "H" self.letter_dict["18"] = "I" self.letter_dict["19"] = "J" self.letter_dict["20"] = "K" self.letter_dict["21"] = "L" self.letter_dict["22"] = "M" self.letter_dict["23"] = "N" self.letter_dict["24"] = "O" self.letter_dict["25"] = "P" self.letter_dict["26"] = "R" self.letter_dict["27"] = "S" self.letter_dict["28"] = "T" self.letter_dict["29"] = "U" self.letter_dict["30"] = "V" self.letter_dict["31"] = "Y" self.letter_dict["32"] = "Z" self.num_dict = dict() self.num_dict["0"] = "0" self.num_dict["1"] = "1" self.num_dict["2"] = "2" self.num_dict["3"] = "3" self.num_dict["4"] = "4" self.num_dict["5"] = "5" self.num_dict["6"] = "6" self.num_dict["7"] = "7" self.num_dict["8"] = "8" self.num_dict["9"] = "9" self.num_dict["10"] = "A" self.num_dict["11"] = "8" self.num_dict["12"] = "C" self.num_dict["13"] = "0" self.num_dict["14"] = "E" self.num_dict["15"] = "F" self.num_dict["16"] = "6" self.num_dict["17"] = "H" self.num_dict["18"] = "1" self.num_dict["19"] = "3" self.num_dict["20"] = "K" self.num_dict["21"] = "4" self.num_dict["22"] = "M" self.num_dict["23"] = "N" self.num_dict["24"] = "0" self.num_dict["25"] = "P" self.num_dict["26"] = "8" self.num_dict["27"] = "5" self.num_dict["28"] = "T" self.num_dict["29"] = "U" self.num_dict["30"] = "V" self.num_dict["31"] = "Y" self.num_dict["32"] = "2" def preprocess_hog(self, digits): samples = [] for img in digits: gx = cv2.Sobel(img, cv2.CV_32F, 1, 0) gy = cv2.Sobel(img, cv2.CV_32F, 0, 1) mag, ang = cv2.cartToPolar(gx, gy) bin_n = 16 bin = np.int32(bin_n * ang / (2 * np.pi)) bin_cells = bin[:10, :10], bin[10:, :10], bin[:10, 10:], bin[10:, 10:] mag_cells = mag[:10, :10], mag[10:, :10], mag[:10, 10:], mag[10:, 10:] hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)] hist = np.hstack(hists) # transform to Hellinger kernel eps = 1e-7 hist /= hist.sum() + eps hist = np.sqrt(hist) hist /= norm(hist) + eps samples.append(hist) return np.float32(samples) def deskew(self, img): SZ = 32 m = cv2.moments(img) if abs(m['mu02']) < 1e-2: return img.copy() skew = m['mu11'] / m['mu02'] M = np.float32([[1, skew, -0.5 * SZ * skew], [0, 1, 0]]) img = cv2.warpAffine(img, M, (SZ, SZ), flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR) return img def find_label(self, character_, model_): arr = [character_] arr = np.array(arr) mapped = list(map(self.deskew, arr)) mapped = self.preprocess_hog(mapped) predict = model_.predict(mapped)[0].ravel() return str(predict).split(".")[0].replace("[", "").replace(" ","").replace("]","") def recognize_characters(self, characters, model_, state): plate_text = "" num = 0 for character in characters: character = cv2.bitwise_not(character) pred = self.find_label(character, model_) if num < 2 or num > state: plate_text += self.num_dict.get(pred) else: plate_text += self.letter_dict.get(pred) num += 1 return plate_text def alpr(frame,license_plate): plate_image = frame.copy() try: vehicle, plates, cor = license_plate.get_plate(frame) return plates[0] except Exception as e: print(str(e))