sahandv committed
Commit 04491d3 · 1 Parent(s): 70d1449

Added installation instructions and a local-run Python script; added .gitkeep to keep the output directory in the repository.

Files changed (3)
  1. INSTALL.md +34 -0
  2. app-local.py +140 -0
  3. output/.gitkeep +0 -0
INSTALL.md ADDED
@@ -0,0 +1,34 @@
+ ## Installation Instructions
+
+ ### Create a virtual environment
+
+ ```bash
+ conda create -n smplerx39 python=3.9
+ conda activate smplerx39
+ ```
+
+ ### Install the dependencies
+
+ ```bash
+ conda install pytorch==2.0.0 torchvision==0.15.0 torchaudio==2.0.0 -c pytorch -c nvidia
+ pip install -r requirements.txt
+ ```
+
+ ### Troubleshooting the installation
+
+ If the installation fails, try installing the following packages separately, pinning mkl and numpy to the versions shown:
+
+ ```bash
+ pip install gradio
+ pip install spaces
+ pip install mmpose
+ pip install mkl==2024.0
+ pip install numpy==1.23
+ ```
app-local.py ADDED
@@ -0,0 +1,140 @@
+ import os
+ import sys
+ import os.path as osp
+ from pathlib import Path
+ import cv2
+ import gradio as gr
+ import torch
+ import math
+ import spaces
+ from huggingface_hub import hf_hub_download
+
+ try:
+     import mmpose
+ except ImportError:
+     # Fall back to the bundled fork; os.system returns a non-zero exit status on failure.
+     if os.system('pip install ./main/transformer_utils') != 0:
+         raise ImportError("Please install the mmpose library from the transformer_utils folder")
+
+ # hf_hub_download(repo_id="caizhongang/SMPLer-X", filename="smpler_x_h32.pth.tar", local_dir="/home/user/app/pretrained_models")
+ # os.system('cp -rf /home/user/app/assets/conversions.py /home/user/.pyenv/versions/3.9.19/lib/python3.9/site-packages/torchgeometry/core/conversions.py')
+ DEFAULT_MODEL = 'smpler_x_h32'
+ OUT_FOLDER = 'output'
+ os.makedirs(OUT_FOLDER, exist_ok=True)
+ num_gpus = 1 if torch.cuda.is_available() else -1
+
+ # Report the CUDA setup before loading the model.
+ print("CUDA available:", torch.cuda.is_available())
+ if torch.cuda.is_available():
+     print("Device count:", torch.cuda.device_count())
+     print("CUDA version:", torch.version.cuda)
+     index = torch.cuda.current_device()
+     print("Current device:", index, torch.cuda.get_device_name(index))
+
+ from main.inference import Inferer
+ inferer = Inferer(DEFAULT_MODEL, num_gpus, OUT_FOLDER)
+
+ @spaces.GPU(enable_queue=True, duration=300)
+ def infer(video_input, in_threshold=0.5, num_people="Single person", render_mesh=False):
+     # Clear previous outputs so the zip archives only contain this run.
+     os.system(f'rm -rf {OUT_FOLDER}/*')
+     multi_person = (num_people != "Single person")
+     cap = cv2.VideoCapture(video_input)
+     fps = math.ceil(cap.get(cv2.CAP_PROP_FPS))
+     width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+     height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+     fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+     video_path = osp.join(OUT_FOLDER, 'out.m4v')
+     final_video_path = osp.join(OUT_FOLDER, 'out.mp4')
+     video_output = cv2.VideoWriter(video_path, fourcc, fps, (width, height))
+     frame = 0
+     while True:
+         success, original_img = cap.read()
+         if not success:
+             break
+         frame += 1
+         img, mesh_paths, smplx_paths = inferer.infer(original_img, in_threshold, frame, multi_person, not render_mesh)
+         video_output.write(img)
+         # Stream the latest processed frame to the UI while rendering continues.
+         yield img, None, None, None
+     cap.release()
+     video_output.release()
+     cv2.destroyAllWindows()
+     # Remux the .m4v stream into an .mp4 container without re-encoding.
+     os.system(f'ffmpeg -i {video_path} -c copy {final_video_path}')
+
+     # Compress the mesh and SMPL-X outputs for download.
+     save_path_mesh = os.path.join(OUT_FOLDER, 'mesh')
+     save_mesh_file = os.path.join(OUT_FOLDER, 'mesh.zip')
+     os.makedirs(save_path_mesh, exist_ok=True)
+     save_path_smplx = os.path.join(OUT_FOLDER, 'smplx')
+     save_smplx_file = os.path.join(OUT_FOLDER, 'smplx.zip')
+     os.makedirs(save_path_smplx, exist_ok=True)
+     os.system(f'zip -r {save_mesh_file} {save_path_mesh}')
+     os.system(f'zip -r {save_smplx_file} {save_path_smplx}')
+     yield img, final_video_path, save_mesh_file, save_smplx_file
+
+ TITLE = '''<h1 align="center">SMPLer-X: Scaling Up Expressive Human Pose and Shape Estimation</h1>'''
+ VIDEO = '''
+ <center><iframe width="960" height="540"
+ src="https://www.youtube.com/embed/DepTqbPpVzY?si=qSeQuX-bgm_rON7E" title="SMPLer-X: Scaling Up Expressive Human Pose and Shape Estimation" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" allowfullscreen>
+ </iframe>
+ </center><br>'''
+ DESCRIPTION = '''
+ <b>Official Gradio demo</b> for <a href="https://caizhongang.com/projects/SMPLer-X/"><b>SMPLer-X: Scaling Up Expressive Human Pose and Shape Estimation</b></a>.<br>
+ <p>
+ Note: You can drop a video onto the panel (or select one of the examples)
+ to obtain the 3D parametric reconstructions of the detected humans.
+ </p>
+ '''
+
+ with gr.Blocks(title="SMPLer-X", css=".gradio-container") as demo:
+     gr.Markdown(TITLE)
+     gr.HTML(VIDEO)
+     gr.Markdown(DESCRIPTION)
+
+     with gr.Row():
+         with gr.Column():
+             video_input = gr.Video(label="Input video", elem_classes="video")
+             threshold = gr.Slider(0, 1.0, value=0.5, label='BBox detection threshold')
+         with gr.Column(scale=2):
+             num_people = gr.Radio(
+                 choices=["Single person", "Multiple people"],
+                 value="Single person",
+                 label="Number of people",
+                 info="Choose how many people are in the video. Choose 'Single person' for faster inference.",
+                 interactive=True,
+                 scale=1,
+             )
+             gr.HTML("""<br/>""")
+             mesh_as_vertices = gr.Checkbox(
+                 label="Render as mesh",
+                 info="By default, the estimated SMPL-X parameters are rendered as vertices for faster visualization. Check this option to visualize meshes instead.",
+                 interactive=True,
+                 scale=1,
+             )
+
+     send_button = gr.Button("Infer")
+     gr.HTML("""<br/>""")
+
+     with gr.Row():
+         with gr.Column():
+             processed_frames = gr.Image(label="Last processed frame")
+             video_output = gr.Video(elem_classes="video")
+         with gr.Column():
+             meshes_output = gr.File(label="3D meshes")
+             smplx_output = gr.File(label="SMPL-X models")
+
+     send_button.click(fn=infer,
+                       inputs=[video_input, threshold, num_people, mesh_as_vertices],
+                       outputs=[processed_frames, video_output, meshes_output, smplx_output])
+
+     # Example inputs must be components; the detection threshold keeps its default.
+     example_videos = gr.Examples([
+         ['/home/user/app/assets/01.mp4'],
+         ['/home/user/app/assets/02.mp4'],
+         ['/home/user/app/assets/03.mp4'],
+         ['/home/user/app/assets/04.mp4'],
+         ['/home/user/app/assets/05.mp4'],
+         ['/home/user/app/assets/06.mp4'],
+         ['/home/user/app/assets/07.mp4'],
+         ['/home/user/app/assets/08.mp4'],
+         ['/home/user/app/assets/09.mp4'],
+     ], inputs=[video_input])
+
+ # queue() is required so the generator-based infer() can stream frames to the UI.
+ demo.queue().launch(debug=True)
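
A note on running this locally: the script is launched directly with `python app-local.py`, and the `demo.queue()` call must stay, because `infer` is a generator and Gradio only streams generator output when the queue is enabled. The stripped-down sketch below shows the same yield-per-frame pattern in isolation; `stream_frames` and its color-convert step are illustrative stand-ins for the real `inferer.infer` call:

```python
import cv2
import gradio as gr

def stream_frames(video_path):
    """Yield each frame so Gradio updates the preview image live."""
    cap = cv2.VideoCapture(video_path)
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        # Stand-in for per-frame inference; OpenCV reads BGR, Gradio expects RGB.
        yield cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    cap.release()

with gr.Blocks() as demo:
    inp = gr.Video(label="Input video")
    out = gr.Image(label="Last processed frame")
    gr.Button("Run").click(fn=stream_frames, inputs=inp, outputs=out)

demo.queue().launch()  # queue() enables streaming from generator handlers
```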
output/.gitkeep ADDED
File without changes