import json
import os
import shutil
from dataclasses import dataclass
from pathlib import Path
from threading import Thread
from typing import List, Optional

import requests
from tqdm import tqdm


@dataclass
class ModelConfig:
    base_model_path: str
    base_inpaint_model_path: str
    is_sdxl: bool = False
    base_dimension: int = 512
    low_gpu_mem: bool = False
    base_model_variant: Optional[str] = None
    base_inpaint_model_variant: Optional[str] = None


def load_model_from_config(path: str) -> ModelConfig:
    m_config = ModelConfig(path, path)
    config_path = os.path.join(path, "inference.json")
    if os.path.exists(config_path):
        with open(config_path, "r") as f:
            config = json.load(f)
        model_path = config.get("model_path", path)
        m_config.base_model_path = model_path
        m_config.base_inpaint_model_path = config.get("inpaint_model_path", model_path)
        m_config.is_sdxl = config.get("is_sdxl", False)
        m_config.base_dimension = config.get("base_dimension", 512)
        m_config.low_gpu_mem = config.get("low_gpu_mem", False)
        m_config.base_model_variant = config.get("base_model_variant", None)
        m_config.base_inpaint_model_variant = config.get(
            "base_inpaint_model_variant", None
        )

        # if config.get("model_type") == "huggingface":
        #     model_dir = config["model_path"]
        # if config.get("model_type") == "s3":
        #     s3_config = config["model_path"]["s3"]
        #     base_url = s3_config["base_url"]
        #
        #     urls = [base_url + item for item in s3_config["paths"]]
        #     out_dir = Path.home() / ".cache" / "base_model"
        #     if out_dir.exists():
        #         print("Model already exists")
        #     else:
        #         print("Downloading model")
        #         BaseModelDownloader(urls, s3_config["paths"], out_dir).download()
        #     model_dir = str(out_dir)
    return m_config
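

# For reference, a minimal inference.json that load_model_from_config would
# parse might look like the following. The keys mirror the ones read above;
# the paths and values are illustrative, not taken from a real deployment.
#
# {
#     "model_path": "/models/my-sdxl-base",
#     "inpaint_model_path": "/models/my-sdxl-inpaint",
#     "is_sdxl": true,
#     "base_dimension": 1024,
#     "low_gpu_mem": false,
#     "base_model_variant": "fp16",
#     "base_inpaint_model_variant": "fp16"
# }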


class BaseModelDownloader:
    """
    A utility for fast downloads of a base model from S3 or any CDN-backed
    storage. It downloads multiple files in parallel, splitting large files
    into smaller chunks and recombining them at the end.

    It currently uses multithreading (not multiprocessing) on the assumption
    that the GIL does not get in the way of network/disk IO.

    Created by: KP
    """

    def __init__(self, urls: List[str], url_paths: List[str], out_dir: Path):
        self.urls = urls
        self.url_paths = url_paths
        # Start from a clean slate so stale or partial files from a previous
        # run cannot get mixed in with this download.
        shutil.rmtree(out_dir, ignore_errors=True)
        out_dir.mkdir(parents=True, exist_ok=True)
        self.out_dir = out_dir
    def download(self):
        threads = []
        batch_urls = {}
        for url, url_path in zip(self.urls, self.url_paths):
            out_path = self.out_dir / url_path
            out_path.parent.mkdir(parents=True, exist_ok=True)
            if url.endswith(".bin"):
                if "unet/" in url_path:
                    # The UNet weights are by far the largest file, so split
                    # them into byte ranges downloaded in parallel.
                    thread = Thread(
                        target=self.__download_parallel, args=(url, out_path, 6)
                    )
                else:
                    thread = Thread(
                        target=self.__download_files, args=([url], [out_path])
                    )
                thread.start()
                threads.append(thread)
            else:
                # Small files are batched together onto a single thread.
                batch_urls[url] = out_path
        if batch_urls:
            thread = Thread(
                target=self.__download_files,
                args=(list(batch_urls.keys()), list(batch_urls.values())),
            )
            thread.start()
            threads.append(thread)
        for thread in threads:
            thread.join()
    def __download_parallel(self, url, output_filename, num_parts=4):
        # Determine the total size, then split it into byte ranges of roughly
        # equal size. HTTP Range end offsets are inclusive, so the last valid
        # byte is total_size - 1.
        response = requests.head(url)
        total_size = int(response.headers.get("content-length", 0))
        print("total_size", total_size)
        chunk_size = total_size // num_parts
        ranges = [
            (i * chunk_size, (i + 1) * chunk_size - 1) for i in range(num_parts - 1)
        ]
        ranges.append((ranges[-1][1] + 1, total_size - 1))
        print(ranges)
        # Keep each file's parts in their own directory so concurrent parallel
        # downloads cannot overwrite each other's part files.
        save_dir = Path.home() / ".cache" / "download_parts" / output_filename.name
        os.makedirs(save_dir, exist_ok=True)
        threads = []
        for i, (start, end) in enumerate(ranges):
            thread = Thread(
                target=self.__download_part, args=(url, start, end, i, save_dir)
            )
            thread.start()
            threads.append(thread)
        for thread in threads:
            thread.join()
        self.__combine_parts(save_dir, output_filename, num_parts)
        os.rmdir(save_dir)
    def __combine_parts(self, save_dir, output_filename, num_parts):
        # Concatenate the parts in order into the final file, then delete them.
        part_files = [os.path.join(save_dir, f"part_{i}.tmp") for i in range(num_parts)]
        output_filename.parent.mkdir(parents=True, exist_ok=True)
        with open(output_filename, "wb") as output_file:
            for part_file in part_files:
                print("combining:", part_file)
                with open(part_file, "rb") as part:
                    output_file.write(part.read())
            out_file_size = output_file.tell()
            print("out_file_size", out_file_size)
        for part_file in part_files:
            os.remove(part_file)
    def __download_part(self, url, start_byte, end_byte, part_num, save_dir):
        # Fetch one inclusive byte range of the file and stream it to a
        # temporary part file.
        headers = {"Range": f"bytes={start_byte}-{end_byte}"}
        response = requests.get(url, headers=headers, stream=True)
        part_filename = os.path.join(save_dir, f"part_{part_num}.tmp")
        print("Downloading part:", url, part_filename, end_byte - start_byte + 1)
        with open(part_filename, "wb") as part_file, tqdm(
            desc=str(part_filename),
            total=end_byte - start_byte + 1,
            unit="B",
            unit_scale=True,
            unit_divisor=1024,
        ) as bar:
            for chunk in response.iter_content(chunk_size=8192):
                if chunk:
                    size = part_file.write(chunk)
                    bar.update(size)
        return part_filename
    def __download_files(self, urls, out_paths: List[Path]):
        # Download each URL sequentially, streaming it to disk behind a
        # progress bar.
        for url, out_path in zip(urls, out_paths):
            out_path.parent.mkdir(parents=True, exist_ok=True)
            with requests.get(url, stream=True) as r:
                print("Downloading:", url)
                total_size = int(r.headers.get("content-length", 0))
                chunk_size = 8192
                r.raise_for_status()
                with open(out_path, "wb") as f, tqdm(
                    desc=str(out_path),
                    total=total_size,
                    unit="B",
                    unit_scale=True,
                    unit_divisor=1024,
                ) as bar:
                    for data in r.iter_content(chunk_size=chunk_size):
                        size = f.write(data)
                        bar.update(size)
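

if __name__ == "__main__":
    # Minimal usage sketch. The base URL and file list below are placeholders
    # for whatever the CDN actually serves, not real endpoints.
    paths = ["model_index.json", "unet/diffusion_pytorch_model.bin"]
    base_url = "https://cdn.example.com/base_model/"
    urls = [base_url + p for p in paths]
    out_dir = Path.home() / ".cache" / "base_model"
    # The .bin file under unet/ is downloaded in parallel byte-range chunks;
    # everything else goes through the sequential batch path.
    BaseModelDownloader(urls, paths, out_dir).download()
    m_config = load_model_from_config(str(out_dir))
    print(m_config)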