M.Shoaib Shafique committed
Commit · e19f940
1 Parent(s): 81edd14

let's deploy to huggingface spaces
Files changed:
- .gitattributes +1 -0
- .gitignore +167 -0
- README.md +9 -1
- app.py +102 -0
- camera_movement_estimator/__init__.py +1 -0
- camera_movement_estimator/camera_movement_estimator.py +108 -0
- input_videos/08fd33_4.mp4 +3 -0
- models/best.pt +3 -0
- player_ball_assigner/__init__.py +1 -0
- player_ball_assigner/player_ball_assigner.py +26 -0
- requirements.txt +8 -0
- speed_and_distance_estimator/__init__.py +1 -0
- speed_and_distance_estimator/speed_and_distance_estimator.py +73 -0
- team_assigner/__init__.py +1 -0
- team_assigner/team_assigner.py +72 -0
- trackers/__init__.py +1 -0
- trackers/tracker.py +241 -0
- utils/__init__.py +2 -0
- utils/bbox_utils.py +16 -0
- utils/video_utils.py +26 -0
- view_transformer/__init__.py +1 -0
- view_transformer/view_transformer.py +46 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.mp4 filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,167 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+
+.vscode/
+runs/
+development_and_analysis/
+stubs/
+training/
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/#use-with-ide
+.pdm.toml
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
README.md CHANGED
@@ -1,6 +1,6 @@
 ---
 title: Football Analysis
-emoji:
+emoji: ⚽
 colorFrom: purple
 colorTo: red
 sdk: gradio
@@ -11,3 +11,11 @@ license: apache-2.0
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+
+## Similar Input Videos
+
+<https://www.kaggle.com/competitions/dfl-bundesliga-data-shootout/data?select=clips>
+
+The aim of this project is to detect and track players, referees, and footballs in video footage using YOLO, a top-tier AI object detection model. ⚽ We will enhance the model's performance through training. Additionally, we will classify players into teams based on their t-shirt colors using K-means for pixel segmentation and clustering. 🎽📊 This data will enable us to calculate a team's ball possession percentage during a match.
+
+To accurately measure player movement, we will use optical flow to assess camera motion between frames and apply perspective transformation to represent the scene's depth and perspective. This allows us to convert player movement from pixels to meters. 📐 Finally, we will calculate each player's distance covered. This comprehensive project encompasses a range of concepts and tackles real-world problems, making it ideal for both novice and experienced machine learning engineers. 🚀
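The possession percentage described above reduces to a per-frame vote count over team labels. A minimal sketch of that computation, mirroring the logic in trackers/tracker.py below (the frame labels here are invented for illustration):

import numpy as np

# 1 = team 1 holds the ball in that frame, 2 = team 2 (invented sequence)
team_ball_control = np.array([1, 1, 2, 1, 2, 2, 2, 1])

team_1_share = (team_ball_control == 1).sum() / len(team_ball_control)
print(f"Team 1: {team_1_share:.0%}, Team 2: {1 - team_1_share:.0%}")  # Team 1: 50%, Team 2: 50%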
app.py ADDED
@@ -0,0 +1,102 @@
+import os
+from player_ball_assigner import PlayerBallAssigner
+from utils import read_video,save_video,check_video_resolution
+from trackers import Tracker
+from team_assigner import TeamAssigner
+import numpy as np
+from camera_movement_estimator import CameraMovementEstimator
+from view_transformer import ViewTransformer
+from speed_and_distance_estimator import SpeedAndDistance_Estimator
+import gradio as gr
+
+def annotate(input_video):
+
+    # Read video
+    video_frames = read_video(input_video)
+
+    # Initialize tracker
+    tracker = Tracker('models/best.pt')
+    tracks = tracker.get_object_tracks(video_frames,read_from_stub=False,stub_path=None)
+
+    # Get object positions
+    tracker.add_postion_to_tracks(tracks)
+
+    # Camera movement estimator
+    camera_movement_estimator = CameraMovementEstimator(video_frames[0])
+    camera_movement_per_frame = camera_movement_estimator.get_camera_movement(video_frames,
+                                                                              read_from_stub=False,
+                                                                              stub_path=None)
+    camera_movement_estimator.add_adjust_positions_to_tracks(tracks,camera_movement_per_frame)
+
+    # View transformer
+    view_transformer = ViewTransformer()
+    view_transformer.add_transformed_position_to_tracks(tracks)
+
+    # Interpolate ball positions
+    tracks['ball'] = tracker.interpolate_ball_positions(tracks['ball'])
+
+    # Speed and distance estimator
+    speed_and_distance_estimator = SpeedAndDistance_Estimator()
+    speed_and_distance_estimator.add_speed_and_distance_to_tracks(tracks)
+
+    # Assign player teams
+    team_assigner = TeamAssigner()
+    team_assigner.assign_team_color(video_frames[0],tracks['players'][0])
+
+    for frame_num, player_track in enumerate(tracks['players']):
+        for player_id,track in player_track.items():
+            team = team_assigner.get_player_team(video_frames[frame_num],
+                                                 track['bbox'],
+                                                 player_id)
+            tracks['players'][frame_num][player_id]['team'] = team
+            tracks['players'][frame_num][player_id]['team_color'] = team_assigner.team_colors[team]
+
+    # Assign ball acquisition
+    player_assigner = PlayerBallAssigner()
+    team_ball_control = []
+
+    for frame_num,player_track in enumerate(tracks['players']):
+        ball_bbox = tracks['ball'][frame_num][1]['bbox']
+        assigned_player = player_assigner.assign_ball_to_player(player_track,ball_bbox)
+
+        if assigned_player != -1:
+            tracks['players'][frame_num][assigned_player]['has_ball'] = True
+            team_ball_control.append(tracks['players'][frame_num][assigned_player]['team'])
+        else:
+            # Carry the previous team over; default to team 1 before the first assignment
+            team_ball_control.append(team_ball_control[-1] if team_ball_control else 1)
+
+    team_ball_control = np.array(team_ball_control)
+
+    # Draw output
+    ## Draw object tracks
+    output_video_frames = tracker.draw_annotations(video_frames,tracks,team_ball_control)
+
+    ## Draw camera movement
+    output_video_frames = camera_movement_estimator.draw_camera_movement(output_video_frames,camera_movement_per_frame)
+
+    ## Draw speed and distance
+    speed_and_distance_estimator.draw_speed_and_distance(output_video_frames,tracks)
+
+    # Save video (create the output folder if it does not exist yet)
+    output_path = 'output_videos/out.mp4'
+    os.makedirs('output_videos', exist_ok=True)
+    save_video(output_video_frames,output_path)
+
+    return output_path
+
+
+iface = gr.Interface(
+    fn=annotate,
+    inputs=['video'],
+    outputs=['video'],
+    title="Football Analysis",
+    examples=['input_videos/08fd33_4.mp4'],
+).queue(default_concurrency_limit=2).launch()
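For orientation, the `tracks` structure that `annotate` passes between the modules below is a dict of per-frame dicts keyed by track id. A sketch with invented values (the keys match those used in the code above; later pipeline stages add 'position', 'position_adjusted', 'position_transformed', 'team', 'team_color', 'has_ball', 'speed' and 'distance'):

# One frame shown per object class; bbox values are invented.
tracks = {
    "players":  [{7: {"bbox": [532.0, 310.2, 561.5, 388.9]}}],  # track_id -> info, per frame
    "referees": [{3: {"bbox": [110.4, 200.0, 138.8, 270.1]}}],
    "ball":     [{1: {"bbox": [640.2, 355.0, 652.7, 367.4]}}],  # the ball is always keyed 1
}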
camera_movement_estimator/__init__.py ADDED
@@ -0,0 +1 @@
+from .camera_movement_estimator import CameraMovementEstimator
camera_movement_estimator/camera_movement_estimator.py ADDED
@@ -0,0 +1,108 @@
+import pickle
+import cv2
+import numpy as np
+import os
+import sys
+sys.path.append('../')
+from utils import measure_distance,measure_xy_distance
+
+class CameraMovementEstimator():
+    def __init__(self,frame):
+        self.minimum_distance = 5
+
+        self.lk_params = dict(
+            winSize = (15,15),
+            maxLevel = 2,
+            criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,10,0.03)
+        )
+
+        first_frame_grayscale = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
+        # Restrict feature detection to vertical bands near the frame edges,
+        # where the static background (stands, sidelines) dominates
+        mask_features = np.zeros_like(first_frame_grayscale)
+        mask_features[:,0:20] = 1
+        mask_features[:,600:700] = 1
+
+        self.features = dict(
+            maxCorners = 100,
+            qualityLevel = 0.3,
+            minDistance = 3,
+            blockSize = 7,
+            mask = mask_features
+        )
+
+    def add_adjust_positions_to_tracks(self,tracks, camera_movement_per_frame):
+        for object, object_tracks in tracks.items():
+            for frame_num, track in enumerate(object_tracks):
+                for track_id, track_info in track.items():
+                    position = track_info['position']
+                    camera_movement = camera_movement_per_frame[frame_num]
+                    position_adjusted = (position[0]-camera_movement[0],position[1]-camera_movement[1])
+                    tracks[object][frame_num][track_id]['position_adjusted'] = position_adjusted
+
+    def get_camera_movement(self,frames,read_from_stub=False, stub_path=None):
+        # Read the stub
+        if read_from_stub and stub_path is not None and os.path.exists(stub_path):
+            with open(stub_path,'rb') as f:
+                return pickle.load(f)
+
+        camera_movement = [[0,0]]*len(frames)
+
+        old_gray = cv2.cvtColor(frames[0],cv2.COLOR_BGR2GRAY)
+        old_features = cv2.goodFeaturesToTrack(old_gray,**self.features)
+
+        for frame_num in range(1,len(frames)):
+            frame_gray = cv2.cvtColor(frames[frame_num],cv2.COLOR_BGR2GRAY)
+            new_features, _,_ = cv2.calcOpticalFlowPyrLK(old_gray,frame_gray,old_features,None,**self.lk_params)
+
+            max_distance = 0
+            camera_movement_x, camera_movement_y = 0,0
+
+            for i, (new,old) in enumerate(zip(new_features,old_features)):
+                new_features_point = new.ravel()
+                old_features_point = old.ravel()
+
+                distance = measure_distance(new_features_point,old_features_point)
+                if distance > max_distance:
+                    max_distance = distance
+                    camera_movement_x,camera_movement_y = measure_xy_distance(old_features_point, new_features_point)
+
+            if max_distance > self.minimum_distance:
+                camera_movement[frame_num] = [camera_movement_x,camera_movement_y]
+                old_features = cv2.goodFeaturesToTrack(frame_gray,**self.features)
+
+            old_gray = frame_gray.copy()
+
+        if stub_path is not None:
+            with open(stub_path,'wb') as f:
+                pickle.dump(camera_movement,f)
+
+        return camera_movement
+
+    def draw_camera_movement(self,frames, camera_movement_per_frame):
+        output_frames = []
+
+        for frame_num, frame in enumerate(frames):
+            overlay = frame.copy()
+            cv2.rectangle(overlay,(0,0),(350,70),(255,255,255),-1)
+            alpha = 0.6
+            cv2.addWeighted(overlay,alpha,frame,1-alpha,0,frame)
+
+            x_movement, y_movement = camera_movement_per_frame[frame_num]
+            frame = cv2.putText(frame,f"Camera Movement X: {x_movement:.2f}",(10,20), cv2.FONT_HERSHEY_SIMPLEX,0.7,(0,0,0),3)
+            frame = cv2.putText(frame,f"Camera Movement Y: {y_movement:.2f}",(10,60), cv2.FONT_HERSHEY_SIMPLEX,0.7,(0,0,0),3)
+
+            output_frames.append(frame)
+
+        return output_frames
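The Lucas-Kanade step above can be exercised in isolation. A minimal sketch with synthetic frames (all values invented) showing how a global pixel shift is recovered from tracked corners:

import cv2
import numpy as np

# Two synthetic grayscale frames: a bright block shifted 5 px to the right,
# standing in for a camera pan between consecutive video frames.
prev_frame = np.zeros((720, 1280), dtype=np.uint8)
cv2.rectangle(prev_frame, (100, 100), (200, 200), 255, -1)
next_frame = np.roll(prev_frame, 5, axis=1)

# Detect corners in the first frame, then track them into the second.
corners = cv2.goodFeaturesToTrack(prev_frame, maxCorners=50, qualityLevel=0.3, minDistance=3)
moved, status, _ = cv2.calcOpticalFlowPyrLK(prev_frame, next_frame, corners, None,
                                            winSize=(15, 15), maxLevel=2)

# Mean displacement of successfully tracked points ~ camera movement in pixels.
flow = (moved - corners).reshape(-1, 2)[status.ravel() == 1]
print(flow.mean(axis=0))  # approximately [5. 0.]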
input_videos/08fd33_4.mp4 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:077985a55b61fe2ca413221c082711655b7ba736a3bbd8ff97b4dfc0e5b03c57
+size 20341580
models/best.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5ffeb0ff661427e8cbe0763ebfba72d385f18737c5e0070d36e911a586f5e7bc
+size 194957941
player_ball_assigner/__init__.py ADDED
@@ -0,0 +1 @@
+from .player_ball_assigner import PlayerBallAssigner
player_ball_assigner/player_ball_assigner.py ADDED
@@ -0,0 +1,26 @@
+import sys
+sys.path.append('../')
+from utils import get_center_of_bbox,measure_distance
+
+class PlayerBallAssigner():
+    def __init__(self) -> None:
+        self.max_player_ball_dist = 70
+
+    def assign_ball_to_player(self,players,ball_bbox):
+        ball_position = get_center_of_bbox(ball_bbox)
+
+        minimum_dist = 99999
+        assigned_player = -1
+
+        for player_id,player in players.items():
+            player_bbox = player['bbox']
+
+            # Distance from the ball to each foot (bottom corners of the player bbox)
+            dist_left = measure_distance((player_bbox[0],player_bbox[-1]),ball_position)
+            dist_right = measure_distance((player_bbox[2],player_bbox[-1]),ball_position)
+            distance = min(dist_left,dist_right)
+
+            if distance < self.max_player_ball_dist and distance < minimum_dist:
+                minimum_dist = distance
+                assigned_player = player_id
+
+        return assigned_player
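A toy invocation of the rule above (all coordinates invented): the ball sits at the feet of player 7, well inside the 70-pixel threshold, so 7 is returned.

from player_ball_assigner import PlayerBallAssigner

assigner = PlayerBallAssigner()
players = {7: {'bbox': [100, 100, 140, 200]},   # feet about 20 px from the ball
           9: {'bbox': [600, 100, 640, 200]}}   # far away
ball_bbox = [115, 190, 125, 200]                # ball centre at (120, 195)

print(assigner.assign_ball_to_player(players, ball_bbox))  # 7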
requirements.txt ADDED
@@ -0,0 +1,8 @@
+ultralytics
+opencv-python
+gradio
+numpy
+pandas
+supervision
+scikit-learn
speed_and_distance_estimator/__init__.py ADDED
@@ -0,0 +1 @@
+from .speed_and_distance_estimator import SpeedAndDistance_Estimator
speed_and_distance_estimator/speed_and_distance_estimator.py ADDED
@@ -0,0 +1,73 @@
+import cv2
+import sys
+sys.path.append('../')
+from utils import measure_distance ,get_foot_position
+
+class SpeedAndDistance_Estimator():
+    def __init__(self):
+        self.frame_window = 5
+        self.frame_rate = 24
+
+    def add_speed_and_distance_to_tracks(self,tracks):
+        total_distance = {}
+
+        for object, object_tracks in tracks.items():
+            if object == "ball" or object == "referees":
+                continue
+            number_of_frames = len(object_tracks)
+            for frame_num in range(0,number_of_frames, self.frame_window):
+                last_frame = min(frame_num+self.frame_window,number_of_frames-1)
+                if last_frame == frame_num:
+                    # Window collapsed to a single frame; skip to avoid dividing by zero
+                    continue
+
+                for track_id,_ in object_tracks[frame_num].items():
+                    if track_id not in object_tracks[last_frame]:
+                        continue
+
+                    start_position = object_tracks[frame_num][track_id]['position_transformed']
+                    end_position = object_tracks[last_frame][track_id]['position_transformed']
+
+                    if start_position is None or end_position is None:
+                        continue
+
+                    distance_covered = measure_distance(start_position,end_position)
+                    time_elapsed = (last_frame-frame_num)/self.frame_rate
+                    speed_meters_per_second = distance_covered/time_elapsed
+                    speed_km_per_hour = speed_meters_per_second*3.6
+
+                    if object not in total_distance:
+                        total_distance[object] = {}
+
+                    if track_id not in total_distance[object]:
+                        total_distance[object][track_id] = 0
+
+                    total_distance[object][track_id] += distance_covered
+
+                    for frame_num_batch in range(frame_num,last_frame):
+                        if track_id not in tracks[object][frame_num_batch]:
+                            continue
+                        tracks[object][frame_num_batch][track_id]['speed'] = speed_km_per_hour
+                        tracks[object][frame_num_batch][track_id]['distance'] = total_distance[object][track_id]
+
+    def draw_speed_and_distance(self,frames,tracks):
+        output_frames = []
+        for frame_num, frame in enumerate(frames):
+            for object, object_tracks in tracks.items():
+                if object == "ball" or object == "referees":
+                    continue
+                for _, track_info in object_tracks[frame_num].items():
+                    if "speed" in track_info:
+                        speed = track_info.get('speed',None)
+                        distance = track_info.get('distance',None)
+                        if speed is None or distance is None:
+                            continue
+
+                        bbox = track_info['bbox']
+                        position = get_foot_position(bbox)
+                        position = list(position)
+                        position[1] += 40
+
+                        position = tuple(map(int,position))
+                        cv2.putText(frame, f"{distance:.2f} m",position,cv2.FONT_HERSHEY_SIMPLEX,0.4,(0,0,0),2)
+                        # cv2.putText(frame, f"{speed:.2f} km/h",(position[0],position[1]+20),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,0,0),2)
+            output_frames.append(frame)
+
+        return output_frames
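The speed computation above reduces to distance over a fixed frame window. A worked instance at the hard-coded 24 fps and 5-frame window (the 2 m figure is invented):

# 2 m covered across a 5-frame window at 24 fps.
distance_covered = 2.0                 # metres, from the transformed positions
time_elapsed = 5 / 24                  # seconds per window
speed_kmh = (distance_covered / time_elapsed) * 3.6
print(f"{speed_kmh:.1f} km/h")         # 34.6 km/h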
team_assigner/__init__.py ADDED
@@ -0,0 +1 @@
+from .team_assigner import TeamAssigner
team_assigner/team_assigner.py ADDED
@@ -0,0 +1,72 @@
+from sklearn.cluster import KMeans
+
+class TeamAssigner:
+    def __init__(self) -> None:
+        self.team_colors = {}
+        self.player_team_dict = {}
+
+    def get_clustering_model(self,img):
+        # Reshape the image to a 2D array of pixels
+        img_2d = img.reshape(-1,3)
+
+        # Perform K-means clustering with 2 clusters
+        kmeans = KMeans(n_clusters=2,init="k-means++",n_init=1).fit(img_2d)
+
+        return kmeans
+
+    def get_player_color(self,frame,bbox):
+        img = frame[int(bbox[1]):int(bbox[3]),int(bbox[0]):int(bbox[2])]
+
+        # Use only the top half of the crop, where the shirt is
+        top_img_hlf = img[0:int(img.shape[0]/2),:]
+
+        # Get clustering model
+        kmeans = self.get_clustering_model(top_img_hlf)
+
+        # Get the cluster labels for each pixel
+        labels = kmeans.labels_
+
+        # Reshape the labels to the original image shape
+        clustered_img = labels.reshape(top_img_hlf.shape[0],top_img_hlf.shape[1])
+
+        # The corners are assumed to be background; the other cluster is the player
+        corner_clusters = [clustered_img[0,0],clustered_img[0,-1],clustered_img[-1,0],clustered_img[-1,-1]]
+        np_cluster = max(set(corner_clusters),key=corner_clusters.count)
+        player_cluster = 1-np_cluster
+
+        player_color = kmeans.cluster_centers_[player_cluster]
+
+        return player_color
+
+    def assign_team_color(self,frame,player_detections):
+        player_colors = []
+        for _,player_detection in player_detections.items():
+            bbox = player_detection['bbox']
+            player_color = self.get_player_color(frame,bbox)
+            player_colors.append(player_color)
+
+        # Cluster the shirt colours of all players into the two teams
+        kmeans = KMeans(n_clusters=2,init='k-means++',n_init=10).fit(player_colors)
+
+        self.kmeans = kmeans
+
+        self.team_colors[1] = kmeans.cluster_centers_[0]
+        self.team_colors[2] = kmeans.cluster_centers_[1]
+
+    def get_player_team(self,frame,player_bbox,player_id):
+        if player_id in self.player_team_dict:
+            return self.player_team_dict[player_id]
+
+        player_color = self.get_player_color(frame,player_bbox)
+
+        team_id = self.kmeans.predict(player_color.reshape(1,-1))[0]
+        team_id += 1
+
+        self.player_team_dict[player_id] = team_id
+
+        return team_id
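The corner heuristic above assumes the crop's corners are background pixels. A self-contained toy (colours invented, BGR order as in OpenCV frames) showing the two-cluster split recovering a shirt colour:

import numpy as np
from sklearn.cluster import KMeans

# A 10x10 "top half of a player crop": green background, red shirt in the middle.
patch = np.zeros((10, 10, 3), dtype=np.uint8)
patch[:, :] = (40, 160, 40)        # grass green (BGR)
patch[2:8, 3:7] = (20, 20, 200)    # red shirt

kmeans = KMeans(n_clusters=2, init="k-means++", n_init=1).fit(patch.reshape(-1, 3))
labels = kmeans.labels_.reshape(10, 10)

# All four corners are background, so the other cluster is the player.
corner_clusters = [labels[0, 0], labels[0, -1], labels[-1, 0], labels[-1, -1]]
background_cluster = max(set(corner_clusters), key=corner_clusters.count)
print(kmeans.cluster_centers_[1 - background_cluster])  # ~[ 20.  20. 200.]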
trackers/__init__.py ADDED
@@ -0,0 +1 @@
+from .tracker import Tracker
trackers/tracker.py ADDED
@@ -0,0 +1,241 @@
+from ultralytics import YOLO
+import supervision as sv
+import pickle
+import numpy as np
+import pandas as pd
+import os
+import cv2
+import sys
+
+sys.path.append('../')
+from utils import get_center_of_bbox,get_bbox_width,get_foot_position
+
+class Tracker:
+    def __init__(self,model_path):
+        self.model = YOLO(model_path)
+        self.tracker = sv.ByteTrack()
+
+    def add_postion_to_tracks(self,tracks):
+        for object,object_tracks in tracks.items():
+            for frame_num,track in enumerate(object_tracks):
+                for tracks_id,track_info in track.items():
+                    bbox = track_info['bbox']
+                    if object == 'ball':
+                        position = get_center_of_bbox(bbox)
+                    else:
+                        position = get_foot_position(bbox)
+                    tracks[object][frame_num][tracks_id]['position'] = position
+
+    def interpolate_ball_positions(self,ball_positions):
+        ball_positions = [x.get(1,{}).get('bbox',[]) for x in ball_positions]
+        df_ball_positions = pd.DataFrame(ball_positions,columns=['x1','y1','x2','y2'])
+
+        # Interpolate missing ball positions, then back-fill any leading gaps
+        df_ball_positions = df_ball_positions.interpolate()
+        df_ball_positions = df_ball_positions.bfill()
+
+        ball_positions = [{1:{"bbox":x}} for x in df_ball_positions.to_numpy().tolist()]
+
+        return ball_positions
+
+    def detect_frames(self,frames):
+        batch_size = 50
+        detections = []
+        for i in range(0,len(frames),batch_size):
+            detections_batch = self.model.predict(frames[i:i+batch_size],conf=0.1)
+            detections += detections_batch
+
+        return detections
+
+    def get_object_tracks(self,frames,read_from_stub=False,stub_path=None):
+
+        if read_from_stub and stub_path is not None and os.path.exists(stub_path):
+            with open(stub_path,'rb') as f:
+                tracks = pickle.load(f)
+            return tracks
+
+        detections = self.detect_frames(frames)
+
+        tracks = {
+            "players":[],
+            "referees":[],
+            "ball":[]
+        }
+
+        for frame_num, detection in enumerate(detections):
+            class_names = detection.names
+            class_names_inv = {v:k for k,v in class_names.items()}
+
+            # Convert to supervision detection format
+            detection_supervision = sv.Detections.from_ultralytics(detection)
+
+            # Convert goalkeeper to player object
+            for index, class_id in enumerate(detection_supervision.class_id):
+                if class_names[class_id] == 'goalkeeper':
+                    detection_supervision.class_id[index] = class_names_inv['player']
+
+            # Track objects
+            detection_with_tracks = self.tracker.update_with_detections(detection_supervision)
+
+            tracks["players"].append({})
+            tracks["referees"].append({})
+            tracks['ball'].append({})
+
+            for frame_detection in detection_with_tracks:
+                bbox = frame_detection[0].tolist()
+                cls_id = frame_detection[3]
+                track_id = frame_detection[4]
+
+                if cls_id == class_names_inv['player']:
+                    tracks['players'][frame_num][track_id] = {"bbox":bbox}
+
+                if cls_id == class_names_inv['referee']:
+                    tracks['referees'][frame_num][track_id] = {"bbox":bbox}
+
+            # The ball comes from the raw detections; it is not tracked by ByteTrack
+            for frame_detection in detection_supervision:
+                bbox = frame_detection[0].tolist()
+                cls_id = frame_detection[3]
+
+                if cls_id == class_names_inv['ball']:
+                    tracks['ball'][frame_num][1] = {"bbox":bbox}
+
+        if stub_path is not None:
+            with open(stub_path,'wb') as f:
+                pickle.dump(tracks,f)
+
+        return tracks
+
+    def draw_ellipse(self,frame,bbox,color,track_id=None):
+        y2 = int(bbox[3])
+        x_center,_ = get_center_of_bbox(bbox)
+        width = get_bbox_width(bbox)
+
+        cv2.ellipse(
+            frame,
+            center=(int(x_center),int(y2)),
+            axes=(int(width),int(0.35*width)),
+            angle=0.0,
+            startAngle=-45.0,
+            endAngle=235.0,
+            color=color,
+            thickness=2,
+            lineType=cv2.LINE_4,
+        )
+
+        rect_width = 40
+        rect_height = 20
+        x1_rect = x_center - rect_width//2
+        x2_rect = x_center + rect_width//2
+        y1_rect = (y2 - rect_height//2) + 15
+        y2_rect = (y2 + rect_height//2) + 15
+
+        if track_id is not None:
+            cv2.rectangle(
+                frame,
+                (int(x1_rect),int(y1_rect)),
+                (int(x2_rect),int(y2_rect)),
+                color,
+                cv2.FILLED
+            )
+
+            x1_text = x1_rect + 12
+            if track_id > 99:
+                x1_text -= 10
+
+            cv2.putText(
+                frame,
+                f"{track_id}",
+                (int(x1_text),int(y1_rect+15)),
+                cv2.FONT_HERSHEY_SIMPLEX,
+                0.6,
+                (0,0,0),
+                2
+            )
+
+        return frame
+
+    def draw_triangle(self,frame,bbox,color):
+        y = int(bbox[1])
+        x,_ = get_center_of_bbox(bbox)
+
+        triangle_points = np.array([
+            [x,y],
+            [x-10,y-20],
+            [x+10,y-20],
+        ])
+
+        cv2.drawContours(frame,[triangle_points],0,color,cv2.FILLED)
+        cv2.drawContours(frame,[triangle_points],0,(0,0,0),2)
+
+        return frame
+
+    def draw_team_ball_control(self,frame,frame_num,team_ball_control):
+        # Draw a semi-transparent rectangle
+        overlay = frame.copy()
+        cv2.rectangle(overlay,(900,570),(1250,660),(255,255,255),-1)
+        alpha = 0.4
+        cv2.addWeighted(overlay,alpha,frame,1-alpha,0,frame)
+
+        team_ball_control_till_frame = team_ball_control[:frame_num+1]
+
+        # Get the number of frames each team had the ball
+        team_1_num_frames = team_ball_control_till_frame[team_ball_control_till_frame==1].shape[0]
+        team_2_num_frames = team_ball_control_till_frame[team_ball_control_till_frame==2].shape[0]
+        team_1 = team_1_num_frames/(team_1_num_frames+team_2_num_frames)
+        team_2 = team_2_num_frames/(team_1_num_frames+team_2_num_frames)
+
+        cv2.putText(frame,f"Team 1 Ball Control {team_1*100:.2f}%",(920,600),cv2.FONT_HERSHEY_SIMPLEX,0.7,(0,0,0),3)
+        cv2.putText(frame,f"Team 2 Ball Control {team_2*100:.2f}%",(920,640),cv2.FONT_HERSHEY_SIMPLEX,0.7,(0,0,0),3)
+
+        return frame
+
+    def draw_annotations(self,video_frames,tracks,team_ball_control):
+        output_video_frames = []
+        for frame_num,frame in enumerate(video_frames):
+            # frame = frame.copy()
+
+            player_dict = tracks['players'][frame_num]
+            referee_dict = tracks['referees'][frame_num]
+            ball_dict = tracks['ball'][frame_num]
+
+            # Draw player annotations
+            for track_id,player in player_dict.items():
+                color = player.get('team_color',(0,0,255))
+                frame = self.draw_ellipse(frame,player['bbox'],color,track_id)
+
+                if player.get('has_ball',False):
+                    frame = self.draw_triangle(frame,player['bbox'],(0,0,255))
+
+            # Draw referee annotations
+            for _,referee in referee_dict.items():
+                frame = self.draw_ellipse(frame,referee['bbox'],(0,255,255))
+
+            # Draw ball annotation
+            for _,ball in ball_dict.items():
+                frame = self.draw_triangle(frame,ball['bbox'],(0,255,0))
+
+            # Draw team ball control
+            frame = self.draw_team_ball_control(frame,frame_num,team_ball_control)
+
+            output_video_frames.append(frame)
+
+        return output_video_frames
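The interpolation step above is easiest to see on a toy sequence. A sketch where the middle detection is missing (bbox values invented):

import pandas as pd

# Three frames of ball bboxes; the middle frame's detection is missing.
ball_positions = [[100, 100, 110, 110], [], [120, 120, 130, 130]]
df = pd.DataFrame([b if b else [None] * 4 for b in ball_positions],
                  columns=['x1', 'y1', 'x2', 'y2'])
df = df.interpolate().bfill()
print(df.values.tolist())  # middle frame filled as [110.0, 110.0, 120.0, 120.0]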
utils/__init__.py ADDED
@@ -0,0 +1,2 @@
+from .video_utils import save_video,read_video,check_video_resolution
+from .bbox_utils import get_center_of_bbox,get_bbox_width,measure_distance,measure_xy_distance,get_foot_position
utils/bbox_utils.py ADDED
@@ -0,0 +1,16 @@
+def get_center_of_bbox(bbox):
+    x1,y1,x2,y2 = bbox
+    return int((x1+x2)/2),int((y1+y2)/2)
+
+def get_bbox_width(bbox):
+    return bbox[2]-bbox[0]
+
+def measure_distance(p1,p2):
+    return ((p1[0]-p2[0])**2 + (p1[1]-p2[1])**2)**0.5
+
+def measure_xy_distance(p1,p2):
+    return p1[0]-p2[0],p1[1]-p2[1]
+
+def get_foot_position(bbox):
+    x1,y1,x2,y2 = bbox
+    return int((x1+x2)/2),int(y2)
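Quick sanity checks for the helpers above, with values chosen so the answers are obvious:

from utils.bbox_utils import (get_center_of_bbox, get_bbox_width,
                              measure_distance, get_foot_position)

print(get_center_of_bbox([0, 0, 100, 50]))  # (50, 25)
print(get_bbox_width([0, 0, 100, 50]))      # 100
print(measure_distance((0, 0), (3, 4)))     # 5.0  (3-4-5 triangle)
print(get_foot_position([0, 0, 100, 50]))   # (50, 50): bottom-centre of the box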
utils/video_utils.py ADDED
@@ -0,0 +1,26 @@
+import cv2
+
+
+def check_video_resolution(video_path):
+    vid = cv2.VideoCapture(video_path)
+    height = vid.get(cv2.CAP_PROP_FRAME_HEIGHT)
+    width = vid.get(cv2.CAP_PROP_FRAME_WIDTH)
+    return height,width
+
+def read_video(video_path):
+    cap = cv2.VideoCapture(video_path)
+    frames = []
+    while True:
+        ret, frame = cap.read()
+        if not ret:
+            break
+        frame = cv2.resize(frame,(1280,720))
+        frames.append(frame)
+    return frames
+
+def save_video(output_video_frames,output_video_path):
+    fourcc = cv2.VideoWriter_fourcc(*'XVID')
+    out = cv2.VideoWriter(output_video_path, fourcc, 24, (output_video_frames[0].shape[1], output_video_frames[0].shape[0]))
+    for frame in output_video_frames:
+        out.write(frame)
+    out.release()
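One deployment note: the XVID FourCC is conventionally paired with .avi containers, while app.py writes an .mp4. If the saved file does not play in the browser's video element, a hedged variant using the 'mp4v' FourCC (which OpenCV also accepts for .mp4 output; function name and fps default are assumptions, not part of this commit) is:

import cv2

def save_video_mp4(output_video_frames, output_video_path, fps=24):
    # MPEG-4 FourCC, a more natural match for .mp4 containers
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    height, width = output_video_frames[0].shape[:2]
    out = cv2.VideoWriter(output_video_path, fourcc, fps, (width, height))
    for frame in output_video_frames:
        out.write(frame)
    out.release()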
view_transformer/__init__.py ADDED
@@ -0,0 +1 @@
+from .view_transformer import ViewTransformer
view_transformer/view_transformer.py ADDED
@@ -0,0 +1,46 @@
+import numpy as np
+import cv2
+
+class ViewTransformer():
+    def __init__(self):
+        court_width = 68
+        court_length = 29.15
+
+        # Trapezoid of the visible pitch in pixel coordinates
+        self.pixel_vertices = np.array([
+            [78,689],
+            [177,182],
+            [699,169],
+            [1280,599]])
+
+        # Matching rectangle in court coordinates (metres)
+        self.target_vertices = np.array([
+            [0,court_width],
+            [0, 0],
+            [court_length, 0],
+            [court_length, court_width]
+        ])
+
+        self.pixel_vertices = self.pixel_vertices.astype(np.float32)
+        self.target_vertices = self.target_vertices.astype(np.float32)
+
+        self.perspective_transformer = cv2.getPerspectiveTransform(self.pixel_vertices, self.target_vertices)
+
+    def transform_point(self,point):
+        p = (int(point[0]),int(point[1]))
+        is_inside = cv2.pointPolygonTest(self.pixel_vertices,p,False) >= 0
+        if not is_inside:
+            return None
+
+        reshaped_point = point.reshape(-1,1,2).astype(np.float32)
+        transformed_point = cv2.perspectiveTransform(reshaped_point,self.perspective_transformer)
+        return transformed_point.reshape(-1,2)
+
+    def add_transformed_position_to_tracks(self,tracks):
+        for object, object_tracks in tracks.items():
+            for frame_num, track in enumerate(object_tracks):
+                for track_id, track_info in track.items():
+                    position = track_info['position_adjusted']
+                    position = np.array(position)
+                    position_transformed = self.transform_point(position)
+                    if position_transformed is not None:
+                        position_transformed = position_transformed.squeeze().tolist()
+                    tracks[object][frame_num][track_id]['position_transformed'] = position_transformed