Upload alpr.py
alpr.py
ADDED
@@ -0,0 +1,287 @@
import os

import cv2
import numpy as np
import tensorflow.compat.v1 as tf
from numpy.linalg import norm
from local_utils import detect_lp
from os.path import splitext
from tensorflow.python.keras.backend import set_session
from tensorflow.keras.models import model_from_json
from tensorflow.compat.v1 import ConfigProto
#from tensorflow.compat.v1 import InteractiveSession
from vars import models_path


class DetectLicensePlate:
    def __init__(self):
        tf.compat.v1.disable_eager_execution()
        #config = ConfigProto()
        #config.gpu_options.allow_growth = False
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.433)
        self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
        #self.sess = tf.Session(config=config)
        self.graph = tf.get_default_graph()
        set_session(self.sess)

        self.wpod_net_path = models_path + "wpod-net.json"  # model path
        self.wpod_net = self.load_model(self.wpod_net_path)

    def load_model(self, path):
        try:
            path = splitext(path)[0]
            with open('%s.json' % path, 'r') as json_file:
                model_json = json_file.read()
            model = model_from_json(model_json, custom_objects={})
            model.load_weights('%s.h5' % path)
            print("Loading model successfully...")
            self.graph = tf.get_default_graph()
            return model
        except Exception as e:
            print(e)

    def preprocess_image(self, image_path, resize=False):
        img = image_path
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = img / 255
        if resize:
            img = cv2.resize(img, (224, 224))
        return img

    def get_plate(self, image_path, Dmax=608, Dmin=608):
        vehicle = self.preprocess_image(image_path)
        ratio = float(max(vehicle.shape[:2])) / min(vehicle.shape[:2])
        side = int(ratio * Dmin)
        bound_dim = min(side, Dmax)
        _, plates, _, cor = detect_lp(self.graph, self.sess, self.wpod_net, vehicle, bound_dim, lp_threshold=0.5)
        return vehicle, plates, cor


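# Usage sketch (illustrative only, not part of the original file): assuming the
# wpod-net.json / wpod-net.h5 files exist under models_path and "car.jpg" is a
# hypothetical BGR image, plate detection could be driven like this. detect_lp
# appears to return the warped plate crops as float images in [0, 1].
#
#   detector = DetectLicensePlate()
#   frame = cv2.imread("car.jpg")
#   vehicle, plates, cor = detector.get_plate(frame)
#   plate_bgr = cv2.cvtColor((plates[0] * 255).astype("uint8"), cv2.COLOR_RGB2BGR)
#   cv2.imwrite("plate.jpg", plate_bgr)
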
class Segmentation:
    # to grab the contour of each digit from left to right
    def sort_contours(self, cnts, reverse=False):
        i = 0
        boundingBoxes = [cv2.boundingRect(c) for c in cnts]
        # sort contours by the x coordinate of their bounding boxes
        # (the original passed reverse=False here, ignoring the parameter)
        (cnts, boundingBoxes) = zip(*sorted(zip(cnts, boundingBoxes),
                                            key=lambda b: b[1][i], reverse=reverse))
        return cnts

    def find_characters(self, thresh, plate_image):
        # for splitting the plate into 2- and 3-character groups
        tmp_space = None
        short_space = None
        letter_num = 0
        state_ = 0

        cont, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        # create a copy "test_roi" of the thresholded plate to draw bounding boxes on
        test_roi = thresh.copy()

        # initialize a list which will be used to collect character images
        crop_characters = []

        # define standard width and height of a character
        digit_w, digit_h = 32, 32
        try:
            conturs = self.sort_contours(cont)
        except:
            return
        for c in conturs:
            (x, y, w, h) = cv2.boundingRect(c)
            ratio = h / w
            if 1 <= ratio <= 5.7:  # only select contours with the defined aspect ratio
                if h / plate_image.shape[0] >= 0.3:  # only select contours taller than 30% of the plate
                    if letter_num == 1:
                        short_space = x - tmp_space
                    if letter_num == 3:
                        space = x - tmp_space
                        if space > short_space + 2:
                            # print("4th character is a digit")
                            state_ = 2
                            letter_num += 2
                    elif letter_num == 4:
                        space = x - tmp_space
                        if space > short_space + 2:
                            state_ = 3
                            # print("5th character is a digit")
                        else:
                            state_ = 4
                            # print("5th character is a letter")
                    tmp_space = x + w

                    # draw a bounding box around the character
                    cv2.rectangle(test_roi, (x, y), (x + w, y + h), (0, 255, 0), 2)
                    try:
                        # crop the character and resize it for prediction
                        curr_num = thresh[y - 1:y + h + 2, x - 1:x + w + 2]
                        curr_num = cv2.resize(curr_num, dsize=(digit_w, digit_h))
                        crop_characters.append(curr_num)
                        letter_num += 1
                    except Exception as e:
                        print(str(e))
        return crop_characters, state_


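# Usage sketch (illustrative only): the binarization of the plate crop is not
# part of this file, but one plausible way to obtain the `thresh` and
# `plate_image` arguments (threshold settings are assumptions) is:
#
#   plate_rgb = (plates[0] * 255).astype("uint8")
#   gray = cv2.cvtColor(plate_rgb, cv2.COLOR_RGB2GRAY)
#   blur = cv2.GaussianBlur(gray, (7, 7), 0)
#   _, thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
#   characters, state = Segmentation().find_characters(thresh, plate_rgb)
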
class SVM(object):
    def __init__(self, C=1.0, gamma=0.5):  # defaults: C = 1.0, gamma = 0.5
        self.model = cv2.ml.SVM_create()
        self.model.setGamma(gamma)
        self.model.setC(C)
        self.model.setKernel(cv2.ml.SVM_RBF)
        self.model.setType(cv2.ml.SVM_C_SVC)

    def train(self, samples, responses):
        self.model.train(samples, cv2.ml.ROW_SAMPLE, responses)

    def predict(self, samples):
        return self.model.predict(samples)[1].ravel()

    def load(self, fn):
        self.model = cv2.ml.SVM_load(fn)

    def save(self, fn):
        self.model.save(fn)


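# Usage sketch (illustrative only): the character classifier is a cv2.ml SVM;
# the file name below is an assumption, not something defined in this file.
#
#   svm_model = SVM()
#   svm_model.load(models_path + "svm.dat")   # hypothetical model file
#   # or, given float32 HOG samples of shape (N, 64) and int32 labels of shape (N,):
#   # svm_model.train(samples, labels)
#   # svm_model.save(models_path + "svm.dat")
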
class Ocr:
    def __init__(self):
        # SVM labels 0-9 correspond to digits and 10-32 to letters. Each lookup
        # table remaps a predicted label to the character used for that position:
        # letter_dict is applied in letter positions (digit labels that look like
        # a letter, e.g. 0 -> O or 8 -> B, are replaced by that letter), while
        # num_dict is applied in digit positions (letter labels that look like a
        # digit, e.g. O -> 0 or S -> 5, are replaced by that digit).
        self.letter_dict = {
            "0": "O", "1": "I", "2": "Z", "3": "3", "4": "L", "5": "S",
            "6": "G", "7": "7", "8": "B", "9": "9",
            "10": "A", "11": "B", "12": "C", "13": "D", "14": "E", "15": "F",
            "16": "G", "17": "H", "18": "I", "19": "J", "20": "K", "21": "L",
            "22": "M", "23": "N", "24": "O", "25": "P", "26": "R", "27": "S",
            "28": "T", "29": "U", "30": "V", "31": "Y", "32": "Z",
        }

        self.num_dict = {
            "0": "0", "1": "1", "2": "2", "3": "3", "4": "4", "5": "5",
            "6": "6", "7": "7", "8": "8", "9": "9",
            "10": "A", "11": "8", "12": "C", "13": "0", "14": "E", "15": "F",
            "16": "6", "17": "H", "18": "1", "19": "3", "20": "K", "21": "4",
            "22": "M", "23": "N", "24": "0", "25": "P", "26": "8", "27": "5",
            "28": "T", "29": "U", "30": "V", "31": "Y", "32": "2",
        }

    def preprocess_hog(self, digits):
        # simple HOG descriptor: 16 orientation bins over 4 image cells -> 64 features
        samples = []
        for img in digits:
            gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
            gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)
            mag, ang = cv2.cartToPolar(gx, gy)
            bin_n = 16
            bin = np.int32(bin_n * ang / (2 * np.pi))
            bin_cells = bin[:10, :10], bin[10:, :10], bin[:10, 10:], bin[10:, 10:]
            mag_cells = mag[:10, :10], mag[10:, :10], mag[:10, 10:], mag[10:, 10:]
            hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]
            hist = np.hstack(hists)

            # transform to Hellinger kernel
            eps = 1e-7
            hist /= hist.sum() + eps
            hist = np.sqrt(hist)
            hist /= norm(hist) + eps
            samples.append(hist)
        return np.float32(samples)

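    # Illustrative check (not part of the original file): for a 32x32 character
    # crop, preprocess_hog yields a 64-dimensional descriptor (16 bins x 4 cells),
    # which is the sample format the SVM above is expected to consume:
    #
    #   feats = Ocr().preprocess_hog([np.zeros((32, 32), np.uint8)])
    #   assert feats.shape == (1, 64)
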
    def deskew(self, img):
        SZ = 32
        m = cv2.moments(img)
        if abs(m['mu02']) < 1e-2:
            return img.copy()
        skew = m['mu11'] / m['mu02']
        M = np.float32([[1, skew, -0.5 * SZ * skew], [0, 1, 0]])
        img = cv2.warpAffine(img, M, (SZ, SZ), flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)
        return img

    def find_label(self, character_, model_):
        arr = [character_]
        arr = np.array(arr)
        mapped = list(map(self.deskew, arr))
        mapped = self.preprocess_hog(mapped)
        predict = model_.predict(mapped)[0].ravel()
        # turn a prediction such as "[23.]" into the plain label string "23"
        return str(predict).split(".")[0].replace("[", "").replace(" ", "").replace("]", "")

    def recognize_characters(self, characters, model_, state):
        plate_text = ""
        num = 0
        for character in characters:
            character = cv2.bitwise_not(character)
            pred = self.find_label(character, model_)
            # positions before index 2 and after `state` are read as digits, the
            # ones in between as letters (digits-letters-digits plate layout)
            if num < 2 or num > state:
                plate_text += self.num_dict.get(pred)
            else:
                plate_text += self.letter_dict.get(pred)
            num += 1
        return plate_text



def alpr(frame, license_plate):
    # run WPOD-net on the frame and return the first detected plate crop
    plate_image = frame.copy()

    try:
        vehicle, plates, cor = license_plate.get_plate(frame)
        return plates[0]

    except Exception as e:
        print(str(e))

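# End-to-end sketch (illustrative only; file names and the plate binarization
# step are assumptions, not defined in this file): wiring the pieces of this
# module together roughly as the classes above suggest.
#
#   detector = DetectLicensePlate()
#   svm_model = SVM()
#   svm_model.load(models_path + "svm.dat")      # hypothetical model file
#   frame = cv2.imread("car.jpg")                # hypothetical input image
#
#   plate = alpr(frame, detector)                # float RGB plate crop, or None
#   if plate is not None:
#       plate_u8 = (plate * 255).astype("uint8")
#       gray = cv2.cvtColor(plate_u8, cv2.COLOR_RGB2GRAY)
#       _, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
#       result = Segmentation().find_characters(thresh, plate_u8)
#       if result:
#           characters, state = result
#           print(Ocr().recognize_characters(characters, svm_model, state))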