hysts (HF staff) committed
Commit 82b20ab · 1 parent: 7ab4c5c
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.jpg filter=lfs diff=lfs merge=lfs -text
.pre-commit-config.yaml ADDED
@@ -0,0 +1,34 @@
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v5.0.0
+    hooks:
+      - id: check-executables-have-shebangs
+      - id: check-json
+      - id: check-merge-conflict
+      - id: check-shebang-scripts-are-executable
+      - id: check-toml
+      - id: check-yaml
+      - id: end-of-file-fixer
+      - id: mixed-line-ending
+        args: ["--fix=lf"]
+      - id: requirements-txt-fixer
+      - id: trailing-whitespace
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    rev: v0.8.6
+    hooks:
+      - id: ruff
+        args: ["--fix"]
+      - id: ruff-format
+        args: ["--line-length", "119"]
+  - repo: https://github.com/pre-commit/mirrors-mypy
+    rev: v1.14.1
+    hooks:
+      - id: mypy
+        args: ["--ignore-missing-imports"]
+        additional_dependencies:
+          [
+            "types-python-slugify",
+            "types-requests",
+            "types-PyYAML",
+            "types-pytz",
+          ]
.python-version ADDED
@@ -0,0 +1 @@
+3.10
.vscode/extensions.json ADDED
@@ -0,0 +1,8 @@
+{
+  "recommendations": [
+    "ms-python.python",
+    "charliermarsh.ruff",
+    "streetsidesoftware.code-spell-checker",
+    "tamasfe.even-better-toml"
+  ]
+}
.vscode/settings.json ADDED
@@ -0,0 +1,17 @@
+{
+  "editor.formatOnSave": true,
+  "files.insertFinalNewline": false,
+  "[python]": {
+    "editor.defaultFormatter": "charliermarsh.ruff",
+    "editor.formatOnType": true,
+    "editor.codeActionsOnSave": {
+      "source.fixAll.ruff": "explicit",
+      "source.organizeImports": "explicit"
+    }
+  },
+  "[jupyter]": {
+    "files.insertFinalNewline": false
+  },
+  "notebook.output.scrolling": true,
+  "notebook.formatOnSave.enabled": true
+}
LICENSE ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2023 hysts
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
README.md CHANGED
@@ -1,8 +1,8 @@
 ---
 title: ViTPose Transformers
-emoji: 💻
-colorFrom: indigo
-colorTo: red
+emoji:
+colorFrom: red
+colorTo: purple
 sdk: gradio
 sdk_version: 5.11.0
 app_file: app.py
app.py ADDED
@@ -0,0 +1,127 @@
+#!/usr/bin/env python
+
+"""A demo of the ViTPose model.
+
+This code is based on the implementation from the Colab notebook:
+https://colab.research.google.com/drive/1e8fcby5rhKZWcr9LSN8mNbQ0TU4Dxxpo
+"""
+
+import pathlib
+
+import gradio as gr
+import PIL.Image
+import spaces
+import supervision as sv
+import torch
+from transformers import AutoProcessor, RTDetrForObjectDetection, VitPoseForPoseEstimation
+
+DESCRIPTION = "# ViTPose"
+
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+person_detector_name = "PekingU/rtdetr_r50vd_coco_o365"
+person_image_processor = AutoProcessor.from_pretrained(person_detector_name)
+person_model = RTDetrForObjectDetection.from_pretrained(person_detector_name, device_map=device)
+
+pose_model_name = "usyd-community/vitpose-base-simple"
+pose_image_processor = AutoProcessor.from_pretrained(pose_model_name)
+pose_model = VitPoseForPoseEstimation.from_pretrained(pose_model_name, device_map=device)
+
+
+@spaces.GPU
+@torch.inference_mode()
+def run(image: PIL.Image.Image) -> tuple[PIL.Image.Image, list[dict]]:
+    inputs = person_image_processor(images=image, return_tensors="pt").to(device)
+    outputs = person_model(**inputs)
+    results = person_image_processor.post_process_object_detection(
+        outputs, target_sizes=torch.tensor([(image.height, image.width)]), threshold=0.3
+    )
+    result = results[0]  # take first image results
+
+    # The "person" label is index 0 in the COCO dataset
+    person_boxes_xyxy = result["boxes"][result["labels"] == 0]
+    person_boxes_xyxy = person_boxes_xyxy.cpu().numpy()
+
+    # Convert boxes from VOC (x1, y1, x2, y2) to COCO (x1, y1, w, h) format
+    person_boxes = person_boxes_xyxy.copy()
+    person_boxes[:, 2] = person_boxes[:, 2] - person_boxes[:, 0]
+    person_boxes[:, 3] = person_boxes[:, 3] - person_boxes[:, 1]
+
+    inputs = pose_image_processor(image, boxes=[person_boxes], return_tensors="pt").to(device)
+
+    # for the vitpose-plus-base checkpoint we should additionally provide dataset_index
+    # to specify which MoE experts to use for inference
+    if pose_model.config.backbone_config.num_experts > 1:
+        dataset_index = torch.tensor([0] * len(inputs["pixel_values"]))
+        dataset_index = dataset_index.to(inputs["pixel_values"].device)
+        inputs["dataset_index"] = dataset_index
+
+    outputs = pose_model(**inputs)
+
+    pose_results = pose_image_processor.post_process_pose_estimation(outputs, boxes=[person_boxes])
+    image_pose_result = pose_results[0]  # results for first image
+
+    # make results more human-readable
+    human_readable_results = []
+    for i, person_pose in enumerate(image_pose_result):
+        data = {
+            "person_id": i,
+            "bbox": person_pose["bbox"].numpy().tolist(),
+            "keypoints": [],
+        }
+        for keypoint, label, score in zip(
+            person_pose["keypoints"], person_pose["labels"], person_pose["scores"], strict=True
+        ):
+            keypoint_name = pose_model.config.id2label[label.item()]
+            x, y = keypoint
+            data["keypoints"].append({"name": keypoint_name, "x": x.item(), "y": y.item(), "score": score.item()})
+        human_readable_results.append(data)
+
+    # preprocess to torch tensor of shape (n_objects, n_keypoints, 2)
+    xy = [pose_result["keypoints"] for pose_result in image_pose_result]
+    xy = torch.stack(xy).cpu().numpy()
+
+    scores = [pose_result["scores"] for pose_result in image_pose_result]
+    scores = torch.stack(scores).cpu().numpy()
+
+    keypoints = sv.KeyPoints(xy=xy, confidence=scores)
+    detections = sv.Detections(xyxy=person_boxes_xyxy)
+
+    edge_annotator = sv.EdgeAnnotator(color=sv.Color.GREEN, thickness=1)
+    vertex_annotator = sv.VertexAnnotator(color=sv.Color.RED, radius=2)
+    bounding_box_annotator = sv.BoxAnnotator(color=sv.Color.WHITE, color_lookup=sv.ColorLookup.INDEX, thickness=1)
+
+    annotated_frame = image.copy()
+
+    # annotate bounding boxes
+    annotated_frame = bounding_box_annotator.annotate(scene=image.copy(), detections=detections)
+
+    # annotate edges and vertices
+    annotated_frame = edge_annotator.annotate(scene=annotated_frame, key_points=keypoints)
+    return vertex_annotator.annotate(scene=annotated_frame, key_points=keypoints), human_readable_results
+
+
+paths = sorted(pathlib.Path("images").glob("*.jpg"))
+
+
+with gr.Blocks(css_paths="style.css") as demo:
+    gr.Markdown(DESCRIPTION)
+    with gr.Row():
+        with gr.Column():
+            input_image = gr.Image(label="Input Image", type="pil")
+            run_button = gr.Button()
+        with gr.Column():
+            output_image = gr.Image(label="Output Image")
+            output_json = gr.JSON(label="Output JSON")
+
+    gr.Examples(examples=paths, inputs=input_image, outputs=[output_image, output_json], fn=run)
+
+    run_button.click(
+        fn=run,
+        inputs=input_image,
+        outputs=[output_image, output_json],
+    )
+
+
+if __name__ == "__main__":
+    demo.queue(max_size=20).launch()
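For reference, a minimal sketch (not part of this commit) of how the two-stage pipeline added in app.py above — RT-DETR person detection followed by ViTPose pose estimation — could be exercised outside the Gradio UI. It assumes the Space's dependencies are installed locally and reuses one of the example images added in this commit; the output filename is a placeholder.

```python
# Minimal sketch: call app.py's run() on one image and print the keypoint summary.
# Importing app loads both models; outside a Space the @spaces.GPU decorator is a no-op.
import PIL.Image

from app import run  # reuses the detector + ViTPose models set up in app.py

# One of the example images committed under images/; any photo with people works.
image = PIL.Image.open("images/pexels-cottonbro-5770445.jpg")

annotated, results = run(image)  # (annotated PIL image, list of per-person dicts)
annotated.save("annotated.jpg")  # placeholder output path

for person in results:
    print("person", person["person_id"], "bbox", person["bbox"])
    for kp in person["keypoints"][:3]:  # first few keypoints per person
        print(f"  {kp['name']}: ({kp['x']:.1f}, {kp['y']:.1f}) score={kp['score']:.2f}")
```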
images/README.md ADDED
@@ -0,0 +1,9 @@
+These images are from the following public domain sources:
+
+- https://www.pexels.com/photo/women-in-active-wear-balancing-their-body-while-leaning-by-the-doorway-5770445/
+- https://www.pexels.com/photo/woman-balancing-her-body-on-a-handstand-using-one-hand-5770708/
+- https://www.pexels.com/photo/persons-in-black-shirt-and-pants-690598/
+- https://www.pexels.com/photo/photo-of-woman-doing-a-ballet-dance-1164975/
+- https://www.pexels.com/photo/beautiful-woman-in-a-red-dress-wearing-red-lipstick-7909580/
+- https://www.pexels.com/photo/girl-in-red-jacket-riding-bicycle-5792907/
+- https://www.pexels.com/photo/woman-wearing-a-white-gown-walking-on-grass-field-8574605/
images/pexels-cottonbro-5770445.jpg ADDED

Git LFS Details

  • SHA256: b4548cd4a16238f559a149670c6ad2606b3b2147c92e5a2a380dd12fd922f276
  • Pointer size: 131 Bytes
  • Size of remote file: 379 kB
images/pexels-cottonbro-5770708.jpg ADDED

Git LFS Details

  • SHA256: 951720e6bb6053756ef555e5fcae4b54927582c4974e5908ea1984a9f14d7843
  • Pointer size: 131 Bytes
  • Size of remote file: 478 kB
images/pexels-haste-leart-v-690598.jpg ADDED

Git LFS Details

  • SHA256: 90009cbaceb3c3802d0df460862434e446e5cfad7892986444146ce73a02f61c
  • Pointer size: 131 Bytes
  • Size of remote file: 329 kB
images/pexels-luis-gallegos-alvarez-1164975.jpg ADDED

Git LFS Details

  • SHA256: 05cb7605dbac48915eee1b6ef0de3aba386abb7ab06ef27d58c092df2c76a176
  • Pointer size: 131 Bytes
  • Size of remote file: 553 kB
images/pexels-victoria-borodinova-7909580.jpg ADDED

Git LFS Details

  • SHA256: c05ceaf9c468dd21d24977f2c50e3f3b9b1ba83474d93180f66496635216b573
  • Pointer size: 131 Bytes
  • Size of remote file: 279 kB
images/pexels-yan-krukov-5792907.jpg ADDED

Git LFS Details

  • SHA256: 0500121b9044cb1d4c7913e48ebe5e2374848d57d6a2905f3b7c9469f959f2fe
  • Pointer size: 131 Bytes
  • Size of remote file: 648 kB
images/pexels-лиза-медведева-8574605.jpg ADDED

Git LFS Details

  • SHA256: 85cf4db499f0c5b11397af648e66178a4e40e6d478f1e6b31ade35e225ff6ceb
  • Pointer size: 131 Bytes
  • Size of remote file: 816 kB
pyproject.toml ADDED
@@ -0,0 +1,58 @@
+[project]
+name = "vitpose-transformers"
+version = "0.1.0"
+description = ""
+readme = "README.md"
+requires-python = ">=3.10"
+dependencies = [
+    "accelerate>=1.2.1",
+    "gradio>=5.11.0",
+    "hf-transfer>=0.1.9",
+    "setuptools>=75.8.0",
+    "spaces>=0.32.0",
+    "supervision>=0.25.1",
+    "torch==2.4.0",
+    "transformers>=4.47.1",
+]
+
+[tool.ruff]
+line-length = 119
+
+[tool.ruff.lint]
+select = ["ALL"]
+ignore = [
+    "COM812", # missing-trailing-comma
+    "D203", # one-blank-line-before-class
+    "D213", # multi-line-summary-second-line
+    "E501", # line-too-long
+    "SIM117", # multiple-with-statements
+]
+extend-ignore = [
+    "D100", # undocumented-public-module
+    "D101", # undocumented-public-class
+    "D102", # undocumented-public-method
+    "D103", # undocumented-public-function
+    "D104", # undocumented-public-package
+    "D105", # undocumented-magic-method
+    "D107", # undocumented-public-init
+    "EM101", # raw-string-in-exception
+    "FBT001", # boolean-type-hint-positional-argument
+    "FBT002", # boolean-default-value-positional-argument
+    "PD901", # pandas-df-variable-name
+    "PGH003", # blanket-type-ignore
+    "PLR0913", # too-many-arguments
+    "PLR0915", # too-many-statements
+    "TRY003", # raise-vanilla-args
+]
+unfixable = [
+    "F401", # unused-import
+]
+
+[tool.ruff.lint.per-file-ignores]
+"*.ipynb" = ["T201"]
+
+[tool.ruff.format]
+docstring-code-format = true
+
+[tool.uv.sources]
+transformers = { git = "https://github.com/huggingface/transformers.git", rev = "a6256ec0982fee2c57cc41237bff7e64ed4dcda9" }
requirements.txt ADDED
@@ -0,0 +1,278 @@
+# This file was autogenerated by uv via the following command:
+#    uv pip compile pyproject.toml -o requirements.txt
+accelerate==1.2.1
+    # via vitpose-transformers (pyproject.toml)
+aiofiles==23.2.1
+    # via gradio
+annotated-types==0.7.0
+    # via pydantic
+anyio==4.8.0
+    # via
+    #   gradio
+    #   httpx
+    #   starlette
+certifi==2024.12.14
+    # via
+    #   httpcore
+    #   httpx
+    #   requests
+charset-normalizer==3.4.1
+    # via requests
+click==8.1.8
+    # via
+    #   typer
+    #   uvicorn
+contourpy==1.3.1
+    # via
+    #   matplotlib
+    #   supervision
+cycler==0.12.1
+    # via matplotlib
+defusedxml==0.7.1
+    # via supervision
+exceptiongroup==1.2.2
+    # via anyio
+fastapi==0.115.6
+    # via gradio
+ffmpy==0.5.0
+    # via gradio
+filelock==3.16.1
+    # via
+    #   huggingface-hub
+    #   torch
+    #   transformers
+    #   triton
+fonttools==4.55.3
+    # via matplotlib
+fsspec==2024.12.0
+    # via
+    #   gradio-client
+    #   huggingface-hub
+    #   torch
+gradio==5.11.0
+    # via
+    #   vitpose-transformers (pyproject.toml)
+    #   spaces
+gradio-client==1.5.3
+    # via gradio
+h11==0.14.0
+    # via
+    #   httpcore
+    #   uvicorn
+hf-transfer==0.1.9
+    # via vitpose-transformers (pyproject.toml)
+httpcore==1.0.7
+    # via httpx
+httpx==0.28.1
+    # via
+    #   gradio
+    #   gradio-client
+    #   safehttpx
+    #   spaces
+huggingface-hub==0.27.1
+    # via
+    #   accelerate
+    #   gradio
+    #   gradio-client
+    #   tokenizers
+    #   transformers
+idna==3.10
+    # via
+    #   anyio
+    #   httpx
+    #   requests
+jinja2==3.1.5
+    # via
+    #   gradio
+    #   torch
+kiwisolver==1.4.8
+    # via matplotlib
+markdown-it-py==3.0.0
+    # via rich
+markupsafe==2.1.5
+    # via
+    #   gradio
+    #   jinja2
+matplotlib==3.10.0
+    # via supervision
+mdurl==0.1.2
+    # via markdown-it-py
+mpmath==1.3.0
+    # via sympy
+networkx==3.4.2
+    # via torch
+numpy==2.2.1
+    # via
+    #   accelerate
+    #   contourpy
+    #   gradio
+    #   matplotlib
+    #   opencv-python
+    #   pandas
+    #   scipy
+    #   supervision
+    #   transformers
+nvidia-cublas-cu12==12.1.3.1
+    # via
+    #   nvidia-cudnn-cu12
+    #   nvidia-cusolver-cu12
+    #   torch
+nvidia-cuda-cupti-cu12==12.1.105
+    # via torch
+nvidia-cuda-nvrtc-cu12==12.1.105
+    # via torch
+nvidia-cuda-runtime-cu12==12.1.105
+    # via torch
+nvidia-cudnn-cu12==9.1.0.70
+    # via torch
+nvidia-cufft-cu12==11.0.2.54
+    # via torch
+nvidia-curand-cu12==10.3.2.106
+    # via torch
+nvidia-cusolver-cu12==11.4.5.107
+    # via torch
+nvidia-cusparse-cu12==12.1.0.106
+    # via
+    #   nvidia-cusolver-cu12
+    #   torch
+nvidia-nccl-cu12==2.20.5
+    # via torch
+nvidia-nvjitlink-cu12==12.6.85
+    # via
+    #   nvidia-cusolver-cu12
+    #   nvidia-cusparse-cu12
+nvidia-nvtx-cu12==12.1.105
+    # via torch
+opencv-python==4.10.0.84
+    # via supervision
+orjson==3.10.14
+    # via gradio
+packaging==24.2
+    # via
+    #   accelerate
+    #   gradio
+    #   gradio-client
+    #   huggingface-hub
+    #   matplotlib
+    #   spaces
+    #   transformers
+pandas==2.2.3
+    # via gradio
+pillow==11.1.0
+    # via
+    #   gradio
+    #   matplotlib
+    #   supervision
+psutil==5.9.8
+    # via
+    #   accelerate
+    #   spaces
+pydantic==2.10.4
+    # via
+    #   fastapi
+    #   gradio
+    #   spaces
+pydantic-core==2.27.2
+    # via pydantic
+pydub==0.25.1
+    # via gradio
+pygments==2.19.1
+    # via rich
+pyparsing==3.2.1
+    # via matplotlib
+python-dateutil==2.9.0.post0
+    # via
+    #   matplotlib
+    #   pandas
+python-multipart==0.0.20
+    # via gradio
+pytz==2024.2
+    # via pandas
+pyyaml==6.0.2
+    # via
+    #   accelerate
+    #   gradio
+    #   huggingface-hub
+    #   supervision
+    #   transformers
+regex==2024.11.6
+    # via transformers
+requests==2.32.3
+    # via
+    #   huggingface-hub
+    #   spaces
+    #   supervision
+    #   transformers
+rich==13.9.4
+    # via typer
+ruff==0.8.6
+    # via gradio
+safehttpx==0.1.6
+    # via gradio
+safetensors==0.5.2
+    # via
+    #   accelerate
+    #   transformers
+scipy==1.15.0
+    # via supervision
+semantic-version==2.10.0
+    # via gradio
+setuptools==75.8.0
+    # via vitpose-transformers (pyproject.toml)
+shellingham==1.5.4
+    # via typer
+six==1.17.0
+    # via python-dateutil
+sniffio==1.3.1
+    # via anyio
+spaces==0.32.0
+    # via vitpose-transformers (pyproject.toml)
+starlette==0.41.3
+    # via
+    #   fastapi
+    #   gradio
+supervision==0.25.1
+    # via vitpose-transformers (pyproject.toml)
+sympy==1.13.3
+    # via torch
+tokenizers==0.21.0
+    # via transformers
+tomlkit==0.13.2
+    # via gradio
+torch==2.4.0
+    # via
+    #   vitpose-transformers (pyproject.toml)
+    #   accelerate
+tqdm==4.67.1
+    # via
+    #   huggingface-hub
+    #   supervision
+    #   transformers
+transformers @ git+https://github.com/huggingface/transformers.git@a6256ec0982fee2c57cc41237bff7e64ed4dcda9
+    # via vitpose-transformers (pyproject.toml)
+triton==3.0.0
+    # via torch
+typer==0.15.1
+    # via gradio
+typing-extensions==4.12.2
+    # via
+    #   anyio
+    #   fastapi
+    #   gradio
+    #   gradio-client
+    #   huggingface-hub
+    #   pydantic
+    #   pydantic-core
+    #   rich
+    #   spaces
+    #   torch
+    #   typer
+    #   uvicorn
+tzdata==2024.2
+    # via pandas
+urllib3==2.3.0
+    # via requests
+uvicorn==0.34.0
+    # via gradio
+websockets==14.1
+    # via gradio-client
style.css ADDED
@@ -0,0 +1,11 @@
+h1 {
+  text-align: center;
+  display: block;
+}
+
+#duplicate-button {
+  margin: auto;
+  color: #fff;
+  background: #1565c0;
+  border-radius: 100vh;
+}
uv.lock ADDED
The diff for this file is too large to render.