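"""Gradio Space comparing a base and a fine-tuned SSD MobileNet V2 detector
(TensorFlow Object Detection API) on Luffy and Chopper face detection, with
tabs for image inference on both models and for pre-detected videos."""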
import matplotlib.pyplot as plt
import numpy as np
from io import BytesIO
from PIL import Image
import tensorflow as tf
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as viz_utils
from object_detection.utils import ops as utils_op
import tarfile
import wget
import gradio as gr
from huggingface_hub import snapshot_download
import os
import cv2
from tqdm import tqdm
PATH_TO_LABELS = 'data/label_map.pbtxt'
category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)
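# data/label_map.pbtxt is expected to follow the standard TFOD text format.
# A minimal sketch for this two-class detector (the exact class names below
# are assumptions; only the two characters are known from this app):
# item {
#   id: 1
#   name: 'luffy'
# }
# item {
#   id: 2
#   name: 'chopper'
# }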
def pil_image_as_numpy_array(pilimg):
    # Convert a PIL image to a batched array of shape [1, H, W, 3].
    img_array = tf.keras.utils.img_to_array(pilimg)
    img_array = np.expand_dims(img_array, axis=0)
    return img_array
def load_image_into_numpy_array(path):
    image_data = tf.io.gfile.GFile(path, 'rb').read()
    image = Image.open(BytesIO(image_data))
    return pil_image_as_numpy_array(image)


def load_model(model_repo_id):
    # Download the exported SavedModel from the Hugging Face Hub and load it.
    download_dir = snapshot_download(model_repo_id)
    saved_model_dir = os.path.join(download_dir, "saved_model")
    detection_model = tf.saved_model.load(saved_model_dir)
    return detection_model
def run_detection(model, image_np):
    # Run a SavedModel on a batched image array and draw its detections.
    results = model(image_np)
    # Different object detection models return additional outputs;
    # convert every tensor in the results dict to a NumPy array.
    result = {key: value.numpy() for key, value in results.items()}

    label_id_offset = 0
    image_np_with_detections = image_np.copy()

    viz_utils.visualize_boxes_and_labels_on_image_array(
        image_np_with_detections[0],
        result['detection_boxes'][0],
        (result['detection_classes'][0] + label_id_offset).astype(int),
        result['detection_scores'][0],
        category_index,
        use_normalized_coordinates=True,
        max_boxes_to_draw=200,
        min_score_thresh=.60,
        agnostic_mode=False,
        line_thickness=2)

    return tf.keras.utils.array_to_img(image_np_with_detections[0])


def predict(pilimg):
    # Inference with the base model.
    return run_detection(detection_model, pil_image_as_numpy_array(pilimg))


def predict3(pilimg):
    # Inference with the tuned model.
    return run_detection(detection_model2, pil_image_as_numpy_array(pilimg))
def detect_video(video):
    # Create a video capture object
    cap = cv2.VideoCapture(video)

    # Process frames in a loop
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        # Expand dimensions since the model expects images of shape [1, None, None, 3]
        image_np_expanded = np.expand_dims(frame, axis=0)

        # Run inference with the base model
        output_dict = detection_model(image_np_expanded)

        # Extract detections
        boxes = output_dict['detection_boxes'][0].numpy()
        scores = output_dict['detection_scores'][0].numpy()
        classes = output_dict['detection_classes'][0].numpy().astype(np.int64)

        # Draw bounding boxes and labels; the call returns the annotated frame
        image_np_with_detections = viz_utils.visualize_boxes_and_labels_on_image_array(
            frame,
            boxes,
            classes,
            scores,
            category_index,
            use_normalized_coordinates=True,
            max_boxes_to_draw=20,
            min_score_thresh=.5,
            agnostic_mode=False)

        # Yield the processed frame
        yield image_np_with_detections

    # Release resources
    cap.release()
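# detect_video is not wired into the Gradio UI below; the video tab plays two
# pre-rendered clips instead. A minimal sketch of how such clips could be
# produced offline from the generator above, using OpenCV's VideoWriter (the
# output path, the mp4v codec, and the 24 fps fallback are assumptions):
def write_detected_video(src_path, dst_path="data/detected.mp4"):
    cap = cv2.VideoCapture(src_path)
    fps = cap.get(cv2.CAP_PROP_FPS) or 24  # fall back to 24 fps if unknown
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    cap.release()

    writer = cv2.VideoWriter(dst_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height))
    for frame in detect_video(src_path):
        writer.write(frame)
    writer.release()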
a = os.path.join(os.path.dirname(__file__), "data/c_base_detected.mp4")  # Video pre-detected with the base model
b = os.path.join(os.path.dirname(__file__), "data/c_tuned_detected.mp4")  # Video pre-detected with the tuned model


def video_demo(video1, video2):
    # Pass the two pre-detected videos straight through to the video outputs.
    return [video1, video2]
REPO_ID = "apailang/mytfodmodel"
detection_model = load_model(REPO_ID)
REPO_ID2 = "apailang/mytfodmodeltuned"
detection_model2 = load_model(REPO_ID2)

samples_folder = 'data'

# pil_image = Image.open(image_path)
# image_arr = pil_image_as_numpy_array(pil_image)
# predicted_img = predict(image_arr)
# predicted_img.save('predicted.jpg')

tests = [os.path.join(os.path.dirname(__file__), f"data/test{i}.jpeg") for i in range(1, 13)]
base_image = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="pil"),
    outputs=gr.Image(type="pil"),
    title="Luffy and Chopper face detection (base MobileNet model)",
    description="Upload an image for prediction, or click one of the examples below.",
    examples=[[t] for t in tests],
    cache_examples=True
)  # .launch(share=True)

tuned_image = gr.Interface(
    fn=predict3,
    inputs=gr.Image(type="pil"),
    outputs=gr.Image(type="pil"),
    title="Luffy and Chopper face detection (tuned MobileNet model)",
    description="Upload an image for prediction, or click one of the examples below. MobileNet tuned with data augmentation.",
    examples=[[t] for t in tests],
    cache_examples=True
)  # .launch(share=True)
# a = os.path.join(os.path.dirname(__file__), "data/a.mp4")  # Video
# b = os.path.join(os.path.dirname(__file__), "data/b.mp4")  # Video
# c = os.path.join(os.path.dirname(__file__), "data/c.mp4")  # Video
# video_out_file = os.path.join(samples_folder, 'detected' + '.mp4')

# stt_demo = gr.Interface(
#     fn=display_two_videos,
#     inputs=gr.Video(),
#     outputs=gr.Video(type="mp4", label="Detected Video"),
#     examples=[
#         [a],
#         [b],
#         [c],
#     ],
#     cache_examples=False
# )
video = gr.Interface(
    fn=video_demo,
    inputs=[gr.Video(label="base model video"), gr.Video(label="tuned model video")],
    outputs=[gr.Video(label="base model"), gr.Video(label="tuned model")],  # two video outputs
    examples=[
        [a, b]
    ],
    title="Comparing base vs tuned model detection videos",
    description="Using SSD MobileNet V2 320x320. The model has been custom trained to detect the characters Luffy and Chopper."
)
demo = gr.TabbedInterface(
    [base_image, tuned_image, video],
    ["Image (Base Model)", "Image (Tuned Model)", "Display Detected Video"]
)

if __name__ == "__main__":
    demo.launch()