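"""Gradio demo: trespasser detection inside a restricted zone.

Samples frames from a local video, runs a pretrained YOLOv5x model on each
sampled frame, and raises an on-frame alert whenever a detected person's box
centre falls inside a hand-defined polygon. Annotated frames are streamed to
a Gradio tabbed interface.
"""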
import cv2
import gradio as gr
import numpy as np
import torch


# Load a pretrained YOLOv5x model from the Ultralytics hub.
model = torch.hub.load('ultralytics/yolov5', 'yolov5x', pretrained=True)

video_path = [['TresPass_Detection_1.mp4']]

# Restricted-zone polygon (closed: the first point is repeated at the end),
# defined in the coordinate space of the resized 1020x600 frames.
area = [(215, 180), (110, 80), (360, 55), (510, 140), (215, 180)]
def show_preds_video():
    # Stream detections frame by frame; Gradio renders each yielded image.
    cap = cv2.VideoCapture('TresPass_Detection_1.mp4')
    count = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        count += 1
        # Only run inference on every 10th frame to keep the stream responsive.
        if count % 10 != 0:
            continue

        frame = cv2.resize(frame, (1020, 600))
        frame_copy = frame.copy()

        # Draw the restricted zone on the frame that will be displayed.
        cv2.polylines(frame_copy, [np.array(area, np.int32)], True, (0, 255, 0), 2)

        results = model(frame)
        for _, row in results.pandas().xyxy[0].iterrows():
            x1 = int(row['xmin'])
            y1 = int(row['ymin'])
            x2 = int(row['xmax'])
            y2 = int(row['ymax'])
            label = row['name']

            # Centre of the detection box.
            cx = (x1 + x2) // 2
            cy = (y1 + y2) // 2

            if 'person' in label:
                # Positive return value: the box centre lies inside the polygon.
                inside = cv2.pointPolygonTest(np.array(area, np.int32), (cx, cy), False)
                if inside > 0:
                    cv2.rectangle(frame_copy, (x1, y1), (x2, y2), (0, 0, 255), 2)
                    cv2.putText(frame_copy, label, (x1, y1),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1)
                    cv2.putText(frame_copy, "Alert !!! Trespasser detected !!!", (50, 300),
                                cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 255), 3)

        # OpenCV frames are BGR; Gradio expects RGB.
        yield cv2.cvtColor(frame_copy, cv2.COLOR_BGR2RGB)

    cap.release()

# The demo takes no user inputs; it streams predictions for the bundled example video.
inputs_video = []

outputs_video = [
    gr.components.Image(type="numpy", label="Output Image"),
]

interface_video = gr.Interface(
    fn=show_preds_video,
    inputs=inputs_video,
    outputs=outputs_video,
    title="Security - Trespasser monitoring",
    examples=video_path,
    cache_examples=False,
)

gr.TabbedInterface(
    [interface_video],
    tab_names=['Video inference']
).queue().launch()