import gradio as gr
import cv2
import numpy as np
import tensorflow as tf
CHARS = "ABCDEFGHIJKLMNPQRSTUVWXYZ0123456789"  # 'O' is excluded to avoid confusion with zero
CHARS_DICT = {char: i for i, char in enumerate(CHARS)}
DECODE_DICT = {i: char for i, char in enumerate(CHARS)}
# Load the plate-detection model and the character-recognition model.
interpreter = tf.lite.Interpreter(model_path='detection.tflite')
interpreter.allocate_tensors()
recog_interpreter = tf.lite.Interpreter(model_path='recognition.tflite')
recog_interpreter.allocate_tensors()

input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
recog_input_details = recog_interpreter.get_input_details()
recog_output_details = recog_interpreter.get_output_details()
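# Optional startup sanity check (a debugging aid, not part of the original app):
# printing the tensor layouts confirms which output index holds scores vs. boxes,
# since that ordering is model-specific. Uncomment if the Space hits shape errors.
# print("detector input:", input_details[0]['shape'])
# print("detector outputs:", [d['shape'] for d in output_details])
# print("recognizer input:", recog_input_details[0]['shape'])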
def execute_text_recognition_tflite(boxes, frame, interpreter, input_details, output_details):
    # Boxes are normalized [ymin, xmin, ymax, xmax]; crop the plate from the frame
    # using the frame's own dimensions rather than a hard-coded 1920x1080 size.
    y1, x1, y2, x2 = boxes[0], boxes[1], boxes[2], boxes[3]
    h, w = frame.shape[:2]
    save_frame = frame[
        max(0, int(y1 * h)) : min(h, int(y2 * h)),
        max(0, int(x1 * w)) : min(w, int(x2 * w)),
    ]
    if save_frame.size == 0:
        return None

    # Resize the crop to the recognition model's 94x24 input and normalize.
    test_image = cv2.resize(save_frame, (94, 24)) / 256
    test_image = np.expand_dims(test_image, axis=0).astype(np.float32)

    # Execute text recognition.
    interpreter.set_tensor(input_details[0]['index'], test_image)
    interpreter.invoke()
    output_data = interpreter.get_tensor(output_details[0]['index'])

    # CTC-decode the predicted character sequence; -1 marks blank positions.
    decoded = tf.keras.backend.ctc_decode(output_data, (24,), greedy=False)
    text = ""
    for i in np.array(decoded[0][0][0]):
        if i > -1:
            text += DECODE_DICT[int(i)]

    # Do nothing if no characters were decoded.
    if not text:
        return None

    # The leading positions are letters, so a decoded '0' there is really an 'O'.
    text = text[:3].replace("0", "O") + text[3:]
    return text
def greet(image):
    # Resize to the detector's 320x320 input, normalize to [0, 1], and add a batch dim.
    resized = cv2.resize(image, (320, 320), interpolation=cv2.INTER_AREA)
    input_data = resized.astype(np.float32) / 255.
    input_data = np.expand_dims(input_data, axis=0)

    # Run plate detection.
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()
    output_data = interpreter.get_tensor(output_details[0]['index'])  # confidence scores
    boxes = interpreter.get_tensor(output_details[1]['index'])        # bounding boxes

    text = None
    # Run text recognition on every detection above the confidence threshold;
    # the last confident detection wins, matching the original behaviour.
    for i, confidence in enumerate(output_data[0]):
        if confidence > .3:
            text = execute_text_recognition_tflite(
                boxes[0][i], image,
                recog_interpreter, recog_input_details, recog_output_details,
            )
    return text
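# Minimal local smoke test (a sketch, not part of the hosted Space): it assumes a
# hypothetical sample image "sample_plate.jpg" next to this script, converts it to
# the RGB array format Gradio passes in, and calls greet() directly. It is skipped
# silently when the file is absent, so it is harmless on the Space itself.
_sample = cv2.imread("sample_plate.jpg")
if _sample is not None:
    print(greet(cv2.cvtColor(_sample, cv2.COLOR_BGR2RGB)))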
image = gr.inputs.Image(shape=(320, 320))
iface = gr.Interface(fn=greet, inputs=image, outputs="text")
iface.launch()