mattdeitke
committed on
Commit
•
62cd5a4
1
Parent(s):
9b62880
refactor
Browse files- objaverse_xl/__init__.py +1 -150
- objaverse_xl/smithsonian.py +138 -0
- objaverse_xl/utils.py +14 -0
- requirements.txt +1 -1
objaverse_xl/__init__.py
CHANGED
@@ -1,150 +1 @@
|
|
1 |
-
import
|
2 |
-
import os
|
3 |
-
import uuid
|
4 |
-
from functools import partial
|
5 |
-
from multiprocessing import Pool
|
6 |
-
from typing import Dict, List, Optional
|
7 |
-
|
8 |
-
import fsspec
|
9 |
-
import pandas as pd
|
10 |
-
import requests
|
11 |
-
from loguru import logger
|
12 |
-
from tqdm import tqdm
|
13 |
-
|
14 |
-
|
15 |
-
def get_uid_from_str(string: str) -> str:
|
16 |
-
"""Generates a UUID from a string.
|
17 |
-
|
18 |
-
Args:
|
19 |
-
string (str): String to generate a UUID from.
|
20 |
-
|
21 |
-
Returns:
|
22 |
-
str: UUID generated from the string.
|
23 |
-
"""
|
24 |
-
namespace = uuid.NAMESPACE_DNS
|
25 |
-
return str(uuid.uuid5(namespace, string))
|
26 |
-
|
27 |
-
|
28 |
-
def load_smithsonian_metadata(
|
29 |
-
download_dir: str = "~/.objaverse-xl",
|
30 |
-
) -> pd.DataFrame:
|
31 |
-
"""Loads the Smithsonian Object Metadata dataset as a Pandas DataFrame.
|
32 |
-
|
33 |
-
Args:
|
34 |
-
download_dir (str, optional): Directory to download the parquet metadata file.
|
35 |
-
Supports all file systems supported by fsspec. Defaults to
|
36 |
-
"~/.objaverse-xl".
|
37 |
-
|
38 |
-
Returns:
|
39 |
-
pd.DataFrame: Smithsonian Object Metadata dataset as a Pandas DataFrame with
|
40 |
-
columns for the object "title", "url", "quality", "file_type", "uid", and
|
41 |
-
"license". The quality is always Medium and the file_type is always glb.
|
42 |
-
"""
|
43 |
-
dirname = os.path.expanduser(os.path.join(download_dir, "smithsonian"))
|
44 |
-
filename = os.path.join(dirname, "object-metadata.parquet")
|
45 |
-
fs, path = fsspec.core.url_to_fs(filename)
|
46 |
-
if fs.protocol == "file":
|
47 |
-
os.makedirs(dirname, exist_ok=True)
|
48 |
-
|
49 |
-
if fs.exists(filename):
|
50 |
-
df = pd.read_parquet(filename)
|
51 |
-
return df
|
52 |
-
else:
|
53 |
-
url = "https://huggingface.co/datasets/allenai/objaverse-xl/resolve/main/smithsonian/object-metadata.parquet"
|
54 |
-
response = requests.get(url)
|
55 |
-
response.raise_for_status()
|
56 |
-
with fs.open(filename, "wb") as file:
|
57 |
-
file.write(response.content)
|
58 |
-
df = pd.read_parquet(filename)
|
59 |
-
|
60 |
-
df["uid"] = df["url"].apply(get_uid_from_str)
|
61 |
-
df["license"] = "CC0"
|
62 |
-
return df
|
63 |
-
|
64 |
-
|
65 |
-
def download_smithsonian_object(url: str, download_dir: str = "~/.objaverse-xl") -> str:
|
66 |
-
"""Downloads a Smithsonian Object from a URL.
|
67 |
-
|
68 |
-
Args:
|
69 |
-
url (str): URL to download the Smithsonian Object from.
|
70 |
-
download_dir (str, optional): Directory to download the Smithsonian Object to.
|
71 |
-
Supports all file systems supported by fsspec. Defaults to
|
72 |
-
"~/.objaverse-xl".
|
73 |
-
|
74 |
-
Returns:
|
75 |
-
str: Path to the downloaded Smithsonian Object.
|
76 |
-
"""
|
77 |
-
uid = get_uid_from_str(url)
|
78 |
-
|
79 |
-
dirname = os.path.expanduser(os.path.join(download_dir, "smithsonian", "objects"))
|
80 |
-
filename = os.path.join(dirname, f"{uid}.glb")
|
81 |
-
fs, path = fsspec.core.url_to_fs(filename)
|
82 |
-
if fs.protocol == "file":
|
83 |
-
os.makedirs(dirname, exist_ok=True)
|
84 |
-
|
85 |
-
if not fs.exists(filename):
|
86 |
-
tmp_path = os.path.join(dirname, f"{uid}.glb.tmp")
|
87 |
-
response = requests.get(url)
|
88 |
-
|
89 |
-
# check if the path is valid
|
90 |
-
if response.status_code == 404:
|
91 |
-
logger.warning(f"404 for {url}")
|
92 |
-
return None
|
93 |
-
|
94 |
-
# write to tmp path
|
95 |
-
with fs.open(tmp_path, "wb") as file:
|
96 |
-
for chunk in response.iter_content(chunk_size=8192):
|
97 |
-
file.write(chunk)
|
98 |
-
|
99 |
-
# rename to final path
|
100 |
-
fs.rename(tmp_path, filename)
|
101 |
-
|
102 |
-
return filename
|
103 |
-
|
104 |
-
|
105 |
-
def download_smithsonian_objects(
|
106 |
-
urls: Optional[str] = None,
|
107 |
-
processes: Optional[int] = None,
|
108 |
-
download_dir: str = "~/.objaverse-xl",
|
109 |
-
) -> List[Dict[str, str]]:
|
110 |
-
"""Downloads all Smithsonian Objects.
|
111 |
-
|
112 |
-
Args:
|
113 |
-
urls (Optional[str], optional): List of URLs to download the Smithsonian Objects
|
114 |
-
from. If None, all Smithsonian Objects will be downloaded. Defaults to None.
|
115 |
-
processes (Optional[int], optional): Number of processes to use for downloading
|
116 |
-
the Smithsonian Objects. If None, the number of processes will be set to the
|
117 |
-
number of CPUs on the machine (multiprocessing.cpu_count()). Defaults to
|
118 |
-
None.
|
119 |
-
download_dir (str, optional): Directory to download the Smithsonian Objects to.
|
120 |
-
Supports all file systems supported by fsspec. Defaults to
|
121 |
-
"~/.objaverse-xl".
|
122 |
-
|
123 |
-
Returns:
|
124 |
-
List[Dict[str, str]]: List of dictionaries with keys "download_path" and "url"
|
125 |
-
for each downloaded object.
|
126 |
-
"""
|
127 |
-
if processes is None:
|
128 |
-
processes = multiprocessing.cpu_count()
|
129 |
-
if urls is None:
|
130 |
-
df = load_smithsonian_metadata(download_dir=download_dir)
|
131 |
-
urls = df["url"].tolist()
|
132 |
-
|
133 |
-
logger.info(f"Downloading {len(urls)} Smithsonian Objects with {processes=}")
|
134 |
-
with Pool(processes=processes) as pool:
|
135 |
-
results = list(
|
136 |
-
tqdm(
|
137 |
-
pool.imap_unordered(
|
138 |
-
partial(download_smithsonian_object, download_dir=download_dir),
|
139 |
-
urls,
|
140 |
-
),
|
141 |
-
total=len(urls),
|
142 |
-
desc="Downloading Smithsonian Objects",
|
143 |
-
)
|
144 |
-
)
|
145 |
-
out = [
|
146 |
-
{"download_path": download_path, "url": url}
|
147 |
-
for download_path, url in zip(results, urls)
|
148 |
-
if download_path is not None
|
149 |
-
]
|
150 |
-
return out
|
|
|
# Re-export the public API from the smithsonian submodule.
# NOTE: the import must be *relative* — a bare `from smithsonian import ...`
# only resolves if the package directory itself is on sys.path, and fails with
# ModuleNotFoundError when `objaverse_xl` is imported as an installed package.
from .smithsonian import download_smithsonian_objects, load_smithsonian_metadata
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
objaverse_xl/smithsonian.py
ADDED
@@ -0,0 +1,138 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import multiprocessing
|
2 |
+
import os
|
3 |
+
import uuid
|
4 |
+
from functools import partial
|
5 |
+
from multiprocessing import Pool
|
6 |
+
from typing import Dict, List, Optional
|
7 |
+
|
8 |
+
import fsspec
|
9 |
+
import pandas as pd
|
10 |
+
import requests
|
11 |
+
from loguru import logger
|
12 |
+
from tqdm import tqdm
|
13 |
+
from utils import get_uid_from_str
|
14 |
+
|
15 |
+
|
16 |
+
def load_smithsonian_metadata(
    download_dir: str = "~/.objaverse-xl",
) -> pd.DataFrame:
    """Loads the Smithsonian Object Metadata dataset as a Pandas DataFrame.

    Args:
        download_dir (str, optional): Directory to download the parquet metadata file.
            Supports all file systems supported by fsspec. Defaults to
            "~/.objaverse-xl".

    Returns:
        pd.DataFrame: Smithsonian Object Metadata dataset as a Pandas DataFrame with
            columns for the object "title", "url", "quality", "file_type", "uid", and
            "license". The quality is always Medium and the file_type is always glb.
    """
    dirname = os.path.expanduser(os.path.join(download_dir, "smithsonian"))
    filename = os.path.join(dirname, "object-metadata.parquet")
    fs, path = fsspec.core.url_to_fs(filename)
    if fs.protocol == "file":
        os.makedirs(dirname, exist_ok=True)

    # Download the parquet file only if it is not already cached.
    if not fs.exists(filename):
        url = "https://huggingface.co/datasets/allenai/objaverse-xl/resolve/main/smithsonian/object-metadata.parquet"
        response = requests.get(url)
        response.raise_for_status()
        with fs.open(filename, "wb") as file:
            file.write(response.content)

    df = pd.read_parquet(filename)

    # BUGFIX: the derived "uid"/"license" columns are not stored in the parquet
    # file, so they must be (re)computed on *every* load. The previous version
    # returned the cached DataFrame early, before these columns were added,
    # giving a different schema on the second call than on the first.
    df["uid"] = df["url"].apply(get_uid_from_str)
    df["license"] = "CC0"
    return df
|
51 |
+
|
52 |
+
|
53 |
+
def download_smithsonian_object(
    url: str, download_dir: str = "~/.objaverse-xl"
) -> Optional[str]:
    """Downloads a Smithsonian Object from a URL.

    Args:
        url (str): URL to download the Smithsonian Object from.
        download_dir (str, optional): Directory to download the Smithsonian Object to.
            Supports all file systems supported by fsspec. Defaults to
            "~/.objaverse-xl".

    Returns:
        Optional[str]: Path to the downloaded Smithsonian Object, or None if the
            URL returned a 404.
    """
    # Deterministic filename derived from the URL, so re-runs hit the cache.
    uid = get_uid_from_str(url)

    dirname = os.path.expanduser(os.path.join(download_dir, "smithsonian", "objects"))
    filename = os.path.join(dirname, f"{uid}.glb")
    fs, path = fsspec.core.url_to_fs(filename)
    if fs.protocol == "file":
        os.makedirs(dirname, exist_ok=True)

    if not fs.exists(filename):
        tmp_path = os.path.join(dirname, f"{uid}.glb.tmp")
        # BUGFIX: stream=True so the body is fetched in chunks by iter_content
        # below; without it requests buffers the entire file in memory first,
        # making the chunked write pointless.
        response = requests.get(url, stream=True)

        # check if the path is valid
        if response.status_code == 404:
            logger.warning(f"404 for {url}")
            return None

        # write to a tmp path first so a partially-downloaded file can never be
        # mistaken for a complete object by a later run
        with fs.open(tmp_path, "wb") as file:
            for chunk in response.iter_content(chunk_size=8192):
                file.write(chunk)

        # promote the finished download to its final path
        fs.rename(tmp_path, filename)

    return filename
|
91 |
+
|
92 |
+
|
93 |
+
def download_smithsonian_objects(
    urls: Optional[List[str]] = None,
    processes: Optional[int] = None,
    download_dir: str = "~/.objaverse-xl",
) -> List[Dict[str, str]]:
    """Downloads all Smithsonian Objects.

    Args:
        urls (Optional[List[str]], optional): List of URLs to download the Smithsonian
            Objects from. If None, all Smithsonian Objects will be downloaded.
            Defaults to None.
        processes (Optional[int], optional): Number of processes to use for downloading
            the Smithsonian Objects. If None, the number of processes will be set to the
            number of CPUs on the machine (multiprocessing.cpu_count()). Defaults to
            None.
        download_dir (str, optional): Directory to download the Smithsonian Objects to.
            Supports all file systems supported by fsspec. Defaults to
            "~/.objaverse-xl".

    Returns:
        List[Dict[str, str]]: List of dictionaries with keys "download_path" and "url"
            for each downloaded object.
    """
    if processes is None:
        processes = multiprocessing.cpu_count()
    if urls is None:
        df = load_smithsonian_metadata(download_dir=download_dir)
        urls = df["url"].tolist()

    logger.info(f"Downloading {len(urls)} Smithsonian Objects with {processes=}")
    with Pool(processes=processes) as pool:
        # BUGFIX: use the *ordered* imap, not imap_unordered. The zip below
        # pairs each result with its URL by position; imap_unordered yields
        # results in completion order, which silently associated download
        # paths with the wrong URLs.
        results = list(
            tqdm(
                pool.imap(
                    partial(download_smithsonian_object, download_dir=download_dir),
                    urls,
                ),
                total=len(urls),
                desc="Downloading Smithsonian Objects",
            )
        )
    # Drop 404s (download_smithsonian_object returns None for those).
    out = [
        {"download_path": download_path, "url": url}
        for download_path, url in zip(results, urls)
        if download_path is not None
    ]
    return out
|
objaverse_xl/utils.py
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import uuid
|
2 |
+
|
3 |
+
|
4 |
+
def get_uid_from_str(string: str) -> str:
    """Deterministically derive a UUID string from a given string.

    Args:
        string (str): String to generate a UUID from.

    Returns:
        str: UUID (version 5, DNS namespace) generated from the string. The same
            input always yields the same UUID.
    """
    return str(uuid.uuid5(uuid.NAMESPACE_DNS, string))
|
requirements.txt
CHANGED
@@ -3,4 +3,4 @@ pandas
|
|
3 |
pyarrow
|
4 |
tqdm
|
5 |
loguru
|
6 |
-
fsspec
|
|
|
3 |
pyarrow
|
4 |
tqdm
|
5 |
loguru
|
6 |
+
fsspec>=2022.11.0
|