{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": [],
"gpuType": "T4"
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
},
"accelerator": "GPU"
},
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "RXcT0H3RYk9j"
},
"outputs": [],
"source": [
"!git clone https://github.com/MooreThreads/Moore-AnimateAnyone.git"
]
},
{
"cell_type": "code",
"source": [
"%cd /content/Moore-AnimateAnyone\n",
"!pip install -r requirements.txt\n",
"!pip install https://github.com/karaokenerds/python-audio-separator/releases/download/v0.12.1/onnxruntime_gpu-1.17.0-cp310-cp310-linux_x86_64.whl"
],
"metadata": {
"id": "Yi_MgKanYq--"
},
"execution_count": null,
"outputs": []
},
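{
"cell_type": "markdown",
"metadata": {},
"source": [
"Optional sanity check (not part of the original notebook): confirm a GPU runtime is active before downloading several GB of weights."
]
},
{
"cell_type": "code",
"source": [
"# Hedged sketch: verify CUDA is visible to PyTorch (the notebook targets a T4).\n",
"import torch\n",
"\n",
"if torch.cuda.is_available():\n",
"    print(\"GPU:\", torch.cuda.get_device_name(0))\n",
"else:\n",
"    print(\"No GPU detected - switch the Colab runtime type to GPU\")"
],
"metadata": {},
"execution_count": null,
"outputs": []
},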
{
"cell_type": "code",
"source": [
"#!mkdir /content/Moore-AnimateAnyone/pretrainedweights\n",
"!mkdir /content/Moore-AnimateAnyone/pretrainedweights/DWpose\n",
"!mkdir /content/Moore-AnimateAnyone/pretrainedweights/image_encoder\n",
"\n",
"print(\"done\")"
],
"metadata": {
"id": "KQkefoBbbZ0f"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"!git lfs install\n",
"!git clone https://huggingface.co/patrolli/AnimateAnyone"
],
"metadata": {
"id": "KF4KUup3aXNG"
},
"execution_count": null,
"outputs": []
},
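{
"cell_type": "markdown",
"metadata": {},
"source": [
"The clone above leaves the checkpoints in `./AnimateAnyone`, while the config below expects them under `./pretrained_weights`. A minimal sketch, assuming the `.pth` files published at the top level of the `patrolli/AnimateAnyone` repo (denoising_unet, reference_unet, pose_guider, motion_module):"
]
},
{
"cell_type": "code",
"source": [
"# Assumption: the HF clone holds the four .pth checkpoints at its top level.\n",
"%cd /content/Moore-AnimateAnyone\n",
"!mv AnimateAnyone/*.pth pretrained_weights/\n",
"!ls pretrained_weights"
],
"metadata": {},
"execution_count": null,
"outputs": []
},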
{
"cell_type": "code",
"source": [
"!apt -y install -qq aria2\n",
"BaseModelUrl = \"https://huggingface.co/runwayml/stable-diffusion-v1-5\"\n",
"BaseModelDir = \"/content/Moore-AnimateAnyone/pretrainedweights/stable-diffusion-v1-5\"\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/model_index.json -d {BaseModelDir} -o model_index.json\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/resolve/main/vae/diffusion_pytorch_model.bin -d {BaseModelDir}/vae -o diffusion_pytorch_model.bin\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/vae/config.json -d {BaseModelDir}/vae -o config.json\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/resolve/main/unet/diffusion_pytorch_model.bin -d {BaseModelDir}/unet -o diffusion_pytorch_model.bin\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/unet/config.json -d {BaseModelDir}/unet -o config.json\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/tokenizer/vocab.json -d {BaseModelDir}/tokenizer -o vocab.json\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/tokenizer/tokenizer_config.json -d {BaseModelDir}/tokenizer -o tokenizer_config.json\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/tokenizer/special_tokens_map.json -d {BaseModelDir}/tokenizer -o special_tokens_map.json\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/tokenizer/merges.txt -d {BaseModelDir}/tokenizer -o merges.txt\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/resolve/main/text_encoder/pytorch_model.bin -d {BaseModelDir}/text_encoder -o pytorch_model.bin\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/text_encoder/config.json -d {BaseModelDir}/text_encoder -o config.json\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/scheduler/scheduler_config.json -d {BaseModelDir}/scheduler -o scheduler_config.json\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/resolve/main/safety_checker/pytorch_model.bin -d {BaseModelDir}/safety_checker -o pytorch_model.bin\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/safety_checker/config.json -d {BaseModelDir}/safety_checker -o config.json\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/feature_extractor/preprocessor_config.json -d {BaseModelDir}/feature_extractor -o preprocessor_config.json"
],
"metadata": {
"id": "JAnAFo-GaKvH"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"%cd /content/Moore-AnimateAnyone/pretrainedweights/DWpose\n",
"!wget https://huggingface.co/yzd-v/DWPose/resolve/main/yolox_l.onnx #yolox\n",
"!wget https://huggingface.co/yzd-v/DWPose/resolve/main/dw-ll_ucoco_384.onnx #dwpose"
],
"metadata": {
"id": "y4KuiWsvc5NF"
},
"execution_count": null,
"outputs": []
},
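{
"cell_type": "markdown",
"metadata": {},
"source": [
"The config below also points at `./pretrained_weights/sd-vae-ft-mse` and `./pretrained_weights/image_encoder`, which nothing above fetches. A hedged sketch, assuming the sources named in the Moore-AnimateAnyone README: `stabilityai/sd-vae-ft-mse` for the VAE and the `image_encoder` subfolder of `lambdalabs/sd-image-variations-diffusers` for the CLIP image encoder."
]
},
{
"cell_type": "code",
"source": [
"%cd /content/Moore-AnimateAnyone/pretrained_weights\n",
"# VAE: clone the repo so config.json and weights land together\n",
"# (needs git-lfs, installed in the earlier clone cell).\n",
"!git clone https://huggingface.co/stabilityai/sd-vae-ft-mse\n",
"# CLIP image encoder: fetch just the image_encoder subfolder's two files.\n",
"%cd image_encoder\n",
"!wget https://huggingface.co/lambdalabs/sd-image-variations-diffusers/resolve/main/image_encoder/config.json\n",
"!wget https://huggingface.co/lambdalabs/sd-image-variations-diffusers/resolve/main/image_encoder/pytorch_model.bin"
],
"metadata": {},
"execution_count": null,
"outputs": []
},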
{
"cell_type": "code",
"source": [
"#@title Yaml file - alter model paths to suit here then copy and paste into the original yaml file or simply alter the original file\n",
"\n",
"pretrained_base_model_path: \"./pretrained_weights/stable-diffusion-v1-5/\"\n",
"pretrained_vae_path: \"./pretrained_weights/sd-vae-ft-mse\"\n",
"image_encoder_path: \"./pretrained_weights/image_encoder\"\n",
"denoising_unet_path: \"./pretrained_weights/denoising_unet.pth\"\n",
"reference_unet_path: \"./pretrained_weights/reference_unet.pth\"\n",
"pose_guider_path: \"./pretrained_weights/pose_guider.pth\"\n",
"motion_module_path: \"./pretrained_weights/motion_module.pth\"\n",
"\n",
"inference_config: \"./configs/inference/inference_v2.yaml\"\n",
"weight_dtype: 'fp16'\n",
"\n",
"test_cases:\n",
" \"./configs/inference/ref_images/anyone-2.png\":\n",
" - \"./configs/inference/pose_videos/anyone-video-2_kps.mp4\"\n",
" - \"./configs/inference/pose_videos/anyone-video-5_kps.mp4\"\n",
" \"./configs/inference/ref_images/anyone-10.png\":\n",
" - \"./configs/inference/pose_videos/anyone-video-1_kps.mp4\"\n",
" - \"./configs/inference/pose_videos/anyone-video-2_kps.mp4\"\n",
" \"./configs/inference/ref_images/anyone-11.png\":\n",
" - \"./configs/inference/pose_videos/anyone-video-1_kps.mp4\"\n",
" - \"./configs/inference/pose_videos/anyone-video-2_kps.mp4\"\n",
" \"./configs/inference/ref_images/anyone-3.png\":\n",
" - \"./configs/inference/pose_videos/anyone-video-2_kps.mp4\"\n",
" - \"./configs/inference/pose_videos/anyone-video-5_kps.mp4\"\n",
" \"./configs/inference/ref_images/anyone-5.png\":\n",
" - \"./configs/inference/pose_videos/anyone-video-2_kps.mp4\"\n"
],
"metadata": {
"id": "EZZvbXF3ewhF"
},
"execution_count": null,
"outputs": []
},
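{
"cell_type": "markdown",
"metadata": {},
"source": [
"Before launching the demo, it is worth checking that every path the config references actually exists; a small hypothetical helper:"
]
},
{
"cell_type": "code",
"source": [
"# Hypothetical pre-flight check: report which expected weight files are present.\n",
"import os\n",
"\n",
"os.chdir(\"/content/Moore-AnimateAnyone\")\n",
"required = [\n",
"    \"pretrained_weights/stable-diffusion-v1-5/model_index.json\",\n",
"    \"pretrained_weights/sd-vae-ft-mse\",\n",
"    \"pretrained_weights/image_encoder/pytorch_model.bin\",\n",
"    \"pretrained_weights/denoising_unet.pth\",\n",
"    \"pretrained_weights/reference_unet.pth\",\n",
"    \"pretrained_weights/pose_guider.pth\",\n",
"    \"pretrained_weights/motion_module.pth\",\n",
"    \"pretrained_weights/DWpose/yolox_l.onnx\",\n",
"    \"pretrained_weights/DWpose/dw-ll_ucoco_384.onnx\",\n",
"]\n",
"for p in required:\n",
"    print((\"OK      \" if os.path.exists(p) else \"MISSING \") + p)"
],
"metadata": {},
"execution_count": null,
"outputs": []
},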
{
"cell_type": "code",
"source": [
"!python /content/Moore-AnimateAnyone/app.py"
],
"metadata": {
"id": "z_qg7x4yZ_CN"
},
"execution_count": null,
"outputs": []
}
]
}