updated download code & added zip support & added adetailer model download support
- files_cells/notebooks/en/downloading_en.ipynb +42 -20
- files_cells/notebooks/en/launch_en.ipynb +1 -0
- files_cells/notebooks/ru/downloading_ru.ipynb +42 -20
- files_cells/notebooks/ru/launch_ru.ipynb +1 -0
- files_cells/python/en/downloading_en.py +42 -20
- files_cells/python/en/launch_en.py +1 -0
- files_cells/python/ru/downloading_ru.py +42 -20
- files_cells/python/ru/launch_ru.py +1 -0
files_cells/notebooks/en/downloading_en.ipynb
CHANGED
(Diff shown at code level; the .ipynb stores each of these lines as a JSON source string.)

@@ -23,6 +23,8 @@
 import re
 import time
 import json
+import shutil
+import zipfile
 import requests
 import subprocess
 from datetime import timedelta

@@ -134,6 +136,7 @@
 loras_dir = f"{webui_path}/models/Lora"
 extensions_dir = f"{webui_path}/extensions"
 control_dir = f"{webui_path}/models/ControlNet"
+adetailer_dir = f"{webui_path}/models/adetailer/"
 
 
 # ================= MAIN CODE =================

@@ -326,19 +329,18 @@
 
 extension_repo = []
 prefixes = {
-    … (seven old entries, truncated in the page source)
+    "model": models_dir,
+    "vae": vaes_dir,
+    "lora": loras_dir,
+    "embed": embeddings_dir,
+    "extension": extensions_dir,
+    "control": control_dir,
+    "adetailer": adetailer_dir
 }
 
-!mkdir -p {models_dir} {vaes_dir} {loras_dir} {embeddings_dir} {extensions_dir} {control_dir}
+!mkdir -p {models_dir} {vaes_dir} {loras_dir} {embeddings_dir} {extensions_dir} {control_dir} {adetailer_dir}
 
 url = ""
-ControlNet_url = ""
 hf_token = optional_huggingface_token if optional_huggingface_token else "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"
 user_header = f"\"Authorization: Bearer {hf_token}\""
 

@@ -352,7 +354,7 @@
     if file_name:
         url = re.sub(r'\[.*?\]', '', url)
 
-    for prefix, … (rest of the old line truncated in the page source)
+    for prefix, dir in prefixes.items():
         if original_url.startswith(f"{prefix}:"):
             if prefix != "extension":
                 manual_download(url, dir, file_name=file_name)

@@ -363,7 +365,8 @@
     basename = url.split("/")[-1] if file_name is None else file_name
     header_option = f"--header={user_header}"
 
-    print("\033[32m---"*45 + f"\n\033[33mURL: \033[34m{url}\n\033[33mSAVE DIR: \033[34m{dst_dir}\n\033[33mFILE NAME: \033[34m{file_name}\033[32m\n~~~\033[0m")
+    # print("\033[32m---"*45 + f"\n\033[33mURL: \033[34m{url}\n\033[33mSAVE DIR: \033[34m{dst_dir}\n\033[33mFILE NAME: \033[34m{file_name}\033[32m\n~~~\033[0m")
+    print(url, dst_dir, file_name)
 
     # I do it at my own risk..... Fucking CivitAi >:(
     civitai_token = "62c0c5956b2f9defbd844d754000180b"

@@ -410,6 +413,22 @@
             url, dst_dir, file_name = link_or_path.split()
             manual_download(url, dst_dir, file_name)
 
+    unpucking_zip_files()
+
+## unpucking zip files
+def unpucking_zip_files():
+    directories = [models_dir, vaes_dir, embeddings_dir, loras_dir , extensions_dir, control_dir , adetailer_dir]
+
+    for directory in directories:
+        for root, dirs, files in os.walk(directory):
+            for file in files:
+                if file.endswith(".zip"):
+                    zip_path = os.path.join(root, file)
+                    extract_path = os.path.splitext(zip_path)[0]
+                    with zipfile.ZipFile(zip_path, 'r') as zip_ref:
+                        zip_ref.extractall(extract_path)
+                    os.remove(zip_path)
+
 ''' submodels - added urls '''
 
 submodels = []

@@ -452,6 +471,8 @@
 unique_urls = []
 
 def process_file_download(file_url):
+    files_urls = ""
+
     if file_url.startswith("http"):
         if "blob" in file_url:
             file_url = file_url.replace("blob", "raw")

@@ -466,13 +487,17 @@
             if any(f'# {tag}' in line.lower() for tag in prefixes):
                 current_tag = next((tag for tag in prefixes if tag in line.lower()))
 
-            urls = [url.strip() for url in line.split(',')]
+            urls = [url.split('#')[0].strip() for url in line.split(',')]  # filter urls
             for url in urls:
                 if url.startswith("http") and url not in unique_urls:
-                    … (old line truncated in the page source)
+                    # handle_manual(f"{current_tag}:{url}")
+                    files_urls += f"{current_tag}:{url}, "
                     unique_urls.append(url)
 
+    return files_urls
+
 # fix all possible errors/options and function call
+file_urls = ""
 if custom_file_urls:
     for custom_file_url in custom_file_urls.replace(',', '').split():
         if not custom_file_url.endswith('.txt'):

@@ -482,17 +507,14 @@
             custom_file_url = f'{root_path}/{custom_file_url}'
 
         try:
-            process_file_download(custom_file_url)
+            file_urls += process_file_download(custom_file_url)
         except FileNotFoundError:
             pass
 
 # url prefixing
-urls = [… (old block truncated in the page source)
-for …
-
-    prefixed_urls = [f"{prefix}:{url}" for url in urls[i].replace(',', '').split()]
-    if prefixed_urls:
-        url += ", ".join(prefixed_urls) + ", "
+urls = [Model_url, Vae_url, LoRA_url, Embedding_url, Extensions_url]
+prefixed_urls = [f"{prefix}:{url}" for prefix, url in zip(prefixes.keys(), urls) if url]
+url += ", ".join(prefixed_urls) + ", " + file_urls
 
 if detailed_download == "on":
     print("\n\n\033[33m# ====== Detailed Download ====== #\n\033[0m")
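A quick, self-contained way to see the new zip handling in action, using a throwaway temp directory and made-up file names (the committed code runs the same walk over the models/vaes/embeddings/... directories):

import os
import tempfile
import zipfile

# Build a demo directory containing one archive.
demo_dir = tempfile.mkdtemp()
archive = os.path.join(demo_dir, "bundle.zip")
with zipfile.ZipFile(archive, "w") as zf:
    zf.writestr("model_a.safetensors", "stand-in weights")

# Same walk / extract / delete loop the commit adds in unpucking_zip_files().
for root, dirs, files in os.walk(demo_dir):
    for file in files:
        if file.endswith(".zip"):
            zip_path = os.path.join(root, file)
            extract_path = os.path.splitext(zip_path)[0]  # bundle.zip -> bundle/
            with zipfile.ZipFile(zip_path, "r") as zip_ref:
                zip_ref.extractall(extract_path)
            os.remove(zip_path)

print(os.listdir(demo_dir))                          # ['bundle']
print(os.listdir(os.path.join(demo_dir, "bundle")))  # ['model_a.safetensors']

Each archive unpacks into a folder named after itself (extension stripped) and is deleted afterwards; members that collide with existing files are overwritten, per ZipFile.extractall semantics.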
files_cells/notebooks/en/launch_en.ipynb
CHANGED
@@ -92,6 +92,7 @@
 # automatic fixing path V2
 !sed -i 's#"tagger_hf_cache_dir": ".*models/interrogators"#"tagger_hf_cache_dir": "{root_path}/sdw/models/interrogators"#' {webui_path}/config.json
 !sed -i 's#"additional_networks_extra_lora_path": ".*models/Lora/"#"additional_networks_extra_lora_path": "{root_path}/sdw/models/Lora/"#' {webui_path}/config.json
+!sed -i 's#"ad_extra_models_dir": "*"#"ad_extra_models_dir": "{root_path}/sdw/models/adetailer/"#' {webui_path}/config.json
 # ---
 !sed -i 's/"sd_checkpoint_hash": ".*"/"sd_checkpoint_hash": ""/g; s/"sd_model_checkpoint": ".*"/"sd_model_checkpoint": ""/g; s/"sd_vae": ".*"/"sd_vae": "None"/g' {webui_path}/config.json
 
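One subtlety in the added sed line: in the pattern "ad_extra_models_dir": "*", the "* atom matches only a run of quote characters, so the substitution rewrites the value cleanly only while it is still the empty string "". A JSON-level rewrite sidesteps that; a minimal sketch, assuming config.json already exists and using stand-in values for the notebook's root_path/webui_path:

import json

# Stand-in paths for illustration; the notebook supplies the real values.
root_path = "/content"
webui_path = f"{root_path}/sdw"
config_path = f"{webui_path}/config.json"

with open(config_path) as f:
    config = json.load(f)

# Point ADetailer's extra-models directory at the shared folder,
# mirroring what the added sed line intends.
config["ad_extra_models_dir"] = f"{root_path}/sdw/models/adetailer/"

with open(config_path, "w") as f:
    json.dump(config, f, indent=4)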
files_cells/notebooks/ru/downloading_ru.ipynb
CHANGED
Same diff as files_cells/notebooks/en/downloading_en.ipynb above; the Russian variant differs only in the final context line, which prints the localized banner "# ====== Подробная Загрузка ====== #" ("Detailed Download").
files_cells/notebooks/ru/launch_ru.ipynb
CHANGED
Same one-line diff as files_cells/notebooks/en/launch_en.ipynb above.
files_cells/python/en/downloading_en.py
CHANGED
Same change as the files_cells/notebooks/en/downloading_en.ipynb diff above, in plain-script form: the notebook's !mkdir -p ... cell-magic line appears here as get_ipython().system('mkdir -p {models_dir} {vaes_dir} {loras_dir} {embeddings_dir} {extensions_dir} {control_dir} {adetailer_dir}').
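Taken together, the rewritten prefixing block and the handle_manual() loop turn the five widget fields, plus any tagged lines collected from custom .txt files, into one comma-separated string of prefix:link items. A minimal sketch of that round trip, with stand-in URLs and directories (note that zip() pairs only the first five prefixes with widget fields; control and adetailer entries arrive via file tags or hand-written prefixes):

prefixes = {
    "model": "models/Stable-diffusion",
    "vae": "models/VAE",
    "lora": "models/Lora",
    "embed": "embeddings",
    "extension": "extensions",
    "control": "models/ControlNet",
    "adetailer": "models/adetailer/",
}

# Widget-style inputs; only non-empty fields receive a prefix.
Model_url = "https://example.com/model.safetensors"
Vae_url = ""
LoRA_url = "https://example.com/style.safetensors"
Embedding_url = ""
Extensions_url = ""

urls = [Model_url, Vae_url, LoRA_url, Embedding_url, Extensions_url]
prefixed_urls = [f"{prefix}:{url}" for prefix, url in zip(prefixes.keys(), urls) if url]
url = ", ".join(prefixed_urls) + ", "

# Downstream, each "prefix:link" item is routed to its directory the same
# way the new handle_manual() loop does it.
for item in map(str.strip, url.split(',')):
    if not item:
        continue
    for prefix, dst_dir in prefixes.items():
        if item.startswith(f"{prefix}:"):
            link = item[len(prefix) + 1:]
            print(f"download {link} -> {dst_dir}")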
files_cells/python/en/launch_en.py
CHANGED
Same one-line diff as files_cells/notebooks/en/launch_en.ipynb above, with the sed call wrapped as get_ipython().system('sed -i ...').
files_cells/python/ru/downloading_ru.py
CHANGED
Same change as the downloading diff above, in plain-script form and with the localized "Подробная Загрузка" banner.
files_cells/python/ru/launch_ru.py
CHANGED
Same one-line diff as files_cells/notebooks/en/launch_en.ipynb above, in plain-script form.