fix dl civitai files with params in url
files_cells/notebooks/en/downloading_en.ipynb
CHANGED
@@ -333,7 +333,7 @@
     "    ]\n",
     "}\n",
     "\n",
-    "
+    "url = \"\"\n",
     "prefixes = {\n",
     "    \"model\": models_dir,\n",
     "    \"vae\": vaes_dir,\n",
@@ -344,11 +344,10 @@
     "    \"adetailer\": adetailer_dir\n",
     "}\n",
     "\n",
-    "
+    "extension_repo = []\n",
     "directories = [value for key, value in prefixes.items()] # for unpucking zip files\n",
     "!mkdir -p {\" \".join(directories)}\n",
     "\n",
-    "url = \"\"\n",
     "hf_token = optional_huggingface_token if optional_huggingface_token else \"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\"\n",
     "user_header = f\"\\\"Authorization: Bearer {hf_token}\\\"\"\n",
     "\n",
@@ -361,9 +360,6 @@
     "        response = requests.get(base_url, headers=headers)\n",
     "        if response.status_code == 200:\n",
     "            return response.json()\n",
-    "        else:\n",
-    "            print(f\"Failed to retrieve data. Status code: {response.status_code}\")\n",
-    "            return None\n",
     "    except requests.exceptions.RequestException as e:\n",
     "        print(f\"An error occurred: {e}\")\n",
     "        return None\n",
@@ -373,9 +369,11 @@
     "    image_url = data['images'][0]['url'] # get preview: first image\n",
     "    return model_name, image_url\n",
     "\n",
-    "def generate_preview_filename(model_name):\n",
-    "
-    "
+    "def generate_preview_filename(model_name, image_url):\n",
+    "    name = model_name.split('.')\n",
+    "    img_ext = image_url.split('.')\n",
+    "    print(f\"\\n\\033[31m[Preview DL]\\033[0m: {name[0]}.preview.{img_ext[-1]}\")\n",
+    "    return f\"{name[0]}.preview.{img_ext[-1]}\" # assigning the original image format\n",
     "\n",
     "''' main download code '''\n",
     "\n",
@@ -399,27 +397,26 @@
     "    basename = url.split(\"/\")[-1] if file_name is None else file_name\n",
     "    aria2_args = '--optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -j5 -x16 -s16 -k1M -c'\n",
     "\n",
-    "    print(\"\\033[32m
+    "    print(f\"\\n\\033[32m{'---'*45}\\n\\033[33mURL: \\033[34m{url}\\n\\033[33mSAVE DIR: \\033[34m{dst_dir}\\n\\033[33mFILE NAME: \\033[34m{file_name}033[0m\")\n",
     "    # print(url, dst_dir, file_name)\n",
     "\n",
     "    # === CivitAi API ===\n",
     "    civitai_token = \"62c0c5956b2f9defbd844d754000180b\"\n",
-    "    if 'civitai' in url
+    "    if 'civitai' in url:\n",
     "        model_id = url.split('/')[-1]\n",
-    "
-    "
-    "\n",
-    "
-    "
-    "
-    "
-    "
-    "
-    "
+    "        url = f\"{url}&token={civitai_token}\" if '?' in url else f\"{url}?token={civitai_token}\"\n",
+    "\n",
+    "        if dst_dir not in [vaes_dir, control_dir, adetailer_dir]: # filter\n",
+    "            data = get_data_from_api(model_id, civitai_token)\n",
+    "            if data:\n",
+    "                model_name, image_url = extract_file_and_image_info(data)\n",
+    "                if model_name and image_url:\n",
+    "                    image_file_name = generate_preview_filename(model_name if not file_name else file_name, image_url)\n",
+    "                    with capture.capture_output() as cap: # clear shit\n",
+    "                        !aria2c {aria2_args} -d {dst_dir} -o {image_file_name} {image_url}\n",
+    "                    del cap\n",
     "            else:\n",
-    "                print(\"
-    "        else:\n",
-    "            print(\"Failed to retrieve data from the API.\")\n",
+    "                print(\"\\033[31m[Preview DL]\\033[0m: Failed to retrieve data from the API.\")\n",
     "\n",
     "    # -- GDrive --\n",
     "    if 'drive.google' in url:\n",
@@ -463,7 +460,7 @@
     "\n",
     "## unpucking zip files\n",
     "def unpucking_zip_files():\n",
-    "    # directories
+    "    # directories - above\n",
     "    for directory in directories:\n",
     "        for root, dirs, files in os.walk(directory):\n",
     "            for file in files:\n",
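The heart of the fix is the new token line: a CivitAI download link that already carries query parameters (the "params in url" case from the commit title) must have &token=... appended, while a bare link needs ?token=.... A minimal standalone sketch of that rule, where append_civitai_token is a hypothetical helper name and the sample URLs and token are illustrative, not taken from the commit:

# sketch only: pick the separator based on whether a query string is already present
def append_civitai_token(url: str, token: str) -> str:
    sep = '&' if '?' in url else '?'
    return f"{url}{sep}token={token}"

print(append_civitai_token("https://civitai.com/api/download/models/12345", "TOKEN"))
# -> https://civitai.com/api/download/models/12345?token=TOKEN
print(append_civitai_token("https://civitai.com/api/download/models/12345?type=Model&format=SafeTensor", "TOKEN"))
# -> https://civitai.com/api/download/models/12345?type=Model&format=SafeTensor&token=TOKEN

Either way the result contains exactly one question mark, which is what the previous code broke on.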
files_cells/notebooks/ru/downloading_ru.ipynb
CHANGED
@@ -333,7 +333,7 @@
     "    ]\n",
     "}\n",
     "\n",
-    "
+    "url = \"\"\n",
     "prefixes = {\n",
     "    \"model\": models_dir,\n",
     "    \"vae\": vaes_dir,\n",
@@ -344,11 +344,10 @@
     "    \"adetailer\": adetailer_dir\n",
     "}\n",
     "\n",
-    "
+    "extension_repo = []\n",
     "directories = [value for key, value in prefixes.items()] # for unpucking zip files\n",
     "!mkdir -p {\" \".join(directories)}\n",
     "\n",
-    "url = \"\"\n",
     "hf_token = optional_huggingface_token if optional_huggingface_token else \"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\"\n",
     "user_header = f\"\\\"Authorization: Bearer {hf_token}\\\"\"\n",
     "\n",
@@ -361,9 +360,6 @@
     "        response = requests.get(base_url, headers=headers)\n",
     "        if response.status_code == 200:\n",
     "            return response.json()\n",
-    "        else:\n",
-    "            print(f\"Failed to retrieve data. Status code: {response.status_code}\")\n",
-    "            return None\n",
     "    except requests.exceptions.RequestException as e:\n",
     "        print(f\"An error occurred: {e}\")\n",
     "        return None\n",
@@ -373,9 +369,11 @@
     "    image_url = data['images'][0]['url'] # get preview: first image\n",
     "    return model_name, image_url\n",
     "\n",
-    "def generate_preview_filename(model_name):\n",
-    "
-    "
+    "def generate_preview_filename(model_name, image_url):\n",
+    "    name = model_name.split('.')\n",
+    "    img_ext = image_url.split('.')\n",
+    "    print(f\"\\n\\033[31m[Preview DL]\\033[0m: {name[0]}.preview.{img_ext[-1]}\")\n",
+    "    return f\"{name[0]}.preview.{img_ext[-1]}\" # assigning the original image format\n",
     "\n",
     "''' main download code '''\n",
     "\n",
@@ -399,27 +397,26 @@
     "    basename = url.split(\"/\")[-1] if file_name is None else file_name\n",
     "    aria2_args = '--optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -j5 -x16 -s16 -k1M -c'\n",
     "\n",
-    "    print(\"\\033[32m
+    "    print(f\"\\n\\033[32m{'---'*45}\\n\\033[33mURL: \\033[34m{url}\\n\\033[33mSAVE DIR: \\033[34m{dst_dir}\\n\\033[33mFILE NAME: \\033[34m{file_name}033[0m\")\n",
     "    # print(url, dst_dir, file_name)\n",
     "\n",
     "    # === CivitAi API ===\n",
     "    civitai_token = \"62c0c5956b2f9defbd844d754000180b\"\n",
-    "    if 'civitai' in url
+    "    if 'civitai' in url:\n",
     "        model_id = url.split('/')[-1]\n",
-    "
-    "
-    "\n",
-    "
-    "
-    "
-    "
-    "
-    "
-    "
+    "        url = f\"{url}&token={civitai_token}\" if '?' in url else f\"{url}?token={civitai_token}\"\n",
+    "\n",
+    "        if dst_dir not in [vaes_dir, control_dir, adetailer_dir]: # filter\n",
+    "            data = get_data_from_api(model_id, civitai_token)\n",
+    "            if data:\n",
+    "                model_name, image_url = extract_file_and_image_info(data)\n",
+    "                if model_name and image_url:\n",
+    "                    image_file_name = generate_preview_filename(model_name if not file_name else file_name, image_url)\n",
+    "                    with capture.capture_output() as cap: # clear shit\n",
+    "                        !aria2c {aria2_args} -d {dst_dir} -o {image_file_name} {image_url}\n",
+    "                    del cap\n",
     "            else:\n",
-    "                print(\"
-    "        else:\n",
-    "            print(\"Failed to retrieve data from the API.\")\n",
+    "                print(\"\\033[31m[Preview DL]\\033[0m: Failed to retrieve data from the API.\")\n",
     "\n",
     "    # -- GDrive --\n",
     "    if 'drive.google' in url:\n",
@@ -463,7 +460,7 @@
     "\n",
     "## unpucking zip files\n",
     "def unpucking_zip_files():\n",
-    "    # directories
+    "    # directories - above\n",
     "    for directory in directories:\n",
     "        for root, dirs, files in os.walk(directory):\n",
     "            for file in files:\n",
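generate_preview_filename now also takes the preview image URL, so the saved preview keeps the image's own extension: it pairs the part of the model name before its first dot with the part of the image URL after its last dot. A trimmed copy of the helper with a made-up example call (the ANSI logging line is omitted; the file name and URL below are illustrative only):

def generate_preview_filename(model_name, image_url):
    name = model_name.split('.')    # name[0]: model file name up to its first '.'
    img_ext = image_url.split('.')  # img_ext[-1]: extension of the preview image
    return f"{name[0]}.preview.{img_ext[-1]}"

# e.g. "epicrealism.safetensors" + ".../preview.jpeg" -> "epicrealism.preview.jpeg"
print(generate_preview_filename("epicrealism.safetensors",
                                "https://image.civitai.com/some-id/preview.jpeg"))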
files_cells/python/en/downloading_en.py
CHANGED
@@ -314,7 +314,7 @@ controlnet_list = {
     ]
 }
 
-
+url = ""
 prefixes = {
     "model": models_dir,
     "vae": vaes_dir,
@@ -325,11 +325,10 @@ prefixes = {
     "adetailer": adetailer_dir
 }
 
-
+extension_repo = []
 directories = [value for key, value in prefixes.items()] # for unpucking zip files
 get_ipython().system('mkdir -p {" ".join(directories)}')
 
-url = ""
 hf_token = optional_huggingface_token if optional_huggingface_token else "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"
 user_header = f"\"Authorization: Bearer {hf_token}\""
 
@@ -342,9 +341,6 @@ def get_data_from_api(model_id, api_key): # get model data
         response = requests.get(base_url, headers=headers)
         if response.status_code == 200:
             return response.json()
-        else:
-            print(f"Failed to retrieve data. Status code: {response.status_code}")
-            return None
     except requests.exceptions.RequestException as e:
         print(f"An error occurred: {e}")
         return None
@@ -354,9 +350,11 @@ def extract_file_and_image_info(data):
     image_url = data['images'][0]['url'] # get preview: first image
     return model_name, image_url
 
-def generate_preview_filename(model_name):
-
-
+def generate_preview_filename(model_name, image_url):
+    name = model_name.split('.')
+    img_ext = image_url.split('.')
+    print(f"\n\033[31m[Preview DL]\033[0m: {name[0]}.preview.{img_ext[-1]}")
+    return f"{name[0]}.preview.{img_ext[-1]}" # assigning the original image format
 
 ''' main download code '''
 
@@ -380,27 +378,26 @@ def manual_download(url, dst_dir, file_name):
     basename = url.split("/")[-1] if file_name is None else file_name
     aria2_args = '--optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -j5 -x16 -s16 -k1M -c'
 
-    print("\033[32m---
+    print(f"\n\033[32m{'---'*45}\n\033[33mURL: \033[34m{url}\n\033[33mSAVE DIR: \033[34m{dst_dir}\n\033[33mFILE NAME: \033[34m{file_name}033[0m")
     # print(url, dst_dir, file_name)
 
     # === CivitAi API ===
     civitai_token = "62c0c5956b2f9defbd844d754000180b"
-    if 'civitai' in url
+    if 'civitai' in url:
         model_id = url.split('/')[-1]
-
-
-
-
-
-
-
-
-
-
+        url = f"{url}&token={civitai_token}" if '?' in url else f"{url}?token={civitai_token}"
+
+        if dst_dir not in [vaes_dir, control_dir, adetailer_dir]: # filter
+            data = get_data_from_api(model_id, civitai_token)
+            if data:
+                model_name, image_url = extract_file_and_image_info(data)
+                if model_name and image_url:
+                    image_file_name = generate_preview_filename(model_name if not file_name else file_name, image_url)
+                    with capture.capture_output() as cap: # clear shit
+                        get_ipython().system('aria2c {aria2_args} -d {dst_dir} -o {image_file_name} {image_url}')
+                    del cap
             else:
-                print("
-        else:
-            print("Failed to retrieve data from the API.")
+                print("\033[31m[Preview DL]\033[0m: Failed to retrieve data from the API.")
 
     # -- GDrive --
     if 'drive.google' in url:
@@ -444,7 +441,7 @@ def download(url):
 
 ## unpucking zip files
 def unpucking_zip_files():
-    # directories
+    # directories - above
    for directory in directories:
        for root, dirs, files in os.walk(directory):
            for file in files:
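With the else branch removed from get_data_from_api, a non-200 response no longer prints the status code; it simply falls through the if and the function returns None implicitly, which is what callers already check for. A reduced, runnable sketch of that control flow, where the endpoint and header are placeholders rather than the script's real values:

import requests

def get_data_from_api(model_id, api_key):
    try:
        # placeholder endpoint and header; only the control flow matters here
        response = requests.get(f"https://civitai.com/api/v1/model-versions/{model_id}",
                                headers={"Authorization": f"Bearer {api_key}"})
        if response.status_code == 200:
            return response.json()
        # non-200: fall through to an implicit return None
    except requests.exceptions.RequestException as e:
        print(f"An error occurred: {e}")
        return None

data = get_data_from_api("12345", "dummy-key")  # hypothetical arguments
print("no data" if data is None else "got data")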
files_cells/python/ru/downloading_ru.py
CHANGED
@@ -314,7 +314,7 @@ controlnet_list = {
     ]
 }
 
-
+url = ""
 prefixes = {
     "model": models_dir,
     "vae": vaes_dir,
@@ -325,11 +325,10 @@ prefixes = {
     "adetailer": adetailer_dir
 }
 
-
+extension_repo = []
 directories = [value for key, value in prefixes.items()] # for unpucking zip files
 get_ipython().system('mkdir -p {" ".join(directories)}')
 
-url = ""
 hf_token = optional_huggingface_token if optional_huggingface_token else "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"
 user_header = f"\"Authorization: Bearer {hf_token}\""
 
@@ -342,9 +341,6 @@ def get_data_from_api(model_id, api_key): # get model data
         response = requests.get(base_url, headers=headers)
         if response.status_code == 200:
             return response.json()
-        else:
-            print(f"Failed to retrieve data. Status code: {response.status_code}")
-            return None
    except requests.exceptions.RequestException as e:
        print(f"An error occurred: {e}")
        return None
@@ -354,9 +350,11 @@ def extract_file_and_image_info(data):
     image_url = data['images'][0]['url'] # get preview: first image
     return model_name, image_url
 
-def generate_preview_filename(model_name):
-
-
+def generate_preview_filename(model_name, image_url):
+    name = model_name.split('.')
+    img_ext = image_url.split('.')
+    print(f"\n\033[31m[Preview DL]\033[0m: {name[0]}.preview.{img_ext[-1]}")
+    return f"{name[0]}.preview.{img_ext[-1]}" # assigning the original image format
 
 ''' main download code '''
 
@@ -380,27 +378,26 @@ def manual_download(url, dst_dir, file_name):
     basename = url.split("/")[-1] if file_name is None else file_name
     aria2_args = '--optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -j5 -x16 -s16 -k1M -c'
 
-    print("\033[32m---
+    print(f"\n\033[32m{'---'*45}\n\033[33mURL: \033[34m{url}\n\033[33mSAVE DIR: \033[34m{dst_dir}\n\033[33mFILE NAME: \033[34m{file_name}033[0m")
     # print(url, dst_dir, file_name)
 
     # === CivitAi API ===
     civitai_token = "62c0c5956b2f9defbd844d754000180b"
-    if 'civitai' in url
+    if 'civitai' in url:
         model_id = url.split('/')[-1]
-
-
-
-
-
-
-
-
-
-
+        url = f"{url}&token={civitai_token}" if '?' in url else f"{url}?token={civitai_token}"
+
+        if dst_dir not in [vaes_dir, control_dir, adetailer_dir]: # filter
+            data = get_data_from_api(model_id, civitai_token)
+            if data:
+                model_name, image_url = extract_file_and_image_info(data)
+                if model_name and image_url:
+                    image_file_name = generate_preview_filename(model_name if not file_name else file_name, image_url)
+                    with capture.capture_output() as cap: # clear shit
+                        get_ipython().system('aria2c {aria2_args} -d {dst_dir} -o {image_file_name} {image_url}')
+                    del cap
            else:
-                print("
-        else:
-            print("Failed to retrieve data from the API.")
+                print("\033[31m[Preview DL]\033[0m: Failed to retrieve data from the API.")
 
     # -- GDrive --
     if 'drive.google' in url:
@@ -444,7 +441,7 @@ def download(url):
 
 ## unpucking zip files
 def unpucking_zip_files():
-    # directories
+    # directories - above
    for directory in directories:
        for root, dirs, files in os.walk(directory):
            for file in files:
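One more detail worth noting: the preview image download is wrapped in capture.capture_output() so aria2c's progress output does not flood the cell, and the captured object is deleted right away. Assuming the scripts' capture comes from IPython.utils.capture (the import sits outside these hunks), the pattern in isolation looks like this:

from IPython.utils import capture  # assumption: source of the scripts' `capture` name

with capture.capture_output() as cap:
    print("anything printed here is captured instead of shown in the cell")
del cap  # discard the captured output, mirroring the del cap in the diff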