Drexubery committed
Commit 29dd4d7
1 Parent(s): c30fe0b
Files changed (4)
  1. app.py +3 -10
  2. app_bad.py +0 -114
  3. app_new.py +0 -146
  4. pytorch3d +0 -1
app.py CHANGED
@@ -3,9 +3,6 @@ import torch
 import sys
 import spaces
 
-# os.system('pip install iopath')
-# os.system("pip install -v -v -v 'git+https://github.com/facebookresearch/pytorch3d.git@stable'")
-# os.system("cd pytorch3d && pip install -e . && cd ..")
 
 import gradio as gr
 import random
@@ -37,11 +34,8 @@ opts.save_dir = './'
 os.makedirs(opts.save_dir,exist_ok=True)
 test_tensor = torch.Tensor([0]).cuda()
 opts.device = str(test_tensor.device)
-# os.system('pip install iopath') FORCE_CUDA=1
-# os.system('curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz')
-# os.system('tar xzf 1.10.0.tar.gz')
-# os.system('export CUB_HOME=$PWD/cub-1.10.0')
-# spaces.GPU(os.system("pip install 'git+https://github.com/facebookresearch/pytorch3d.git'"))
+
+# install pytorch3d
 pyt_version_str=torch.__version__.split("+")[0].replace(".", "")
 version_str="".join([
     f"py3{sys.version_info.minor}_cu",
@@ -51,7 +45,6 @@ version_str="".join([
 print(version_str)
 os.system(f"{sys.executable} -m pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html")
 os.system("mkdir -p checkpoints/ && wget https://download.europe.naverlabs.com/ComputerVision/DUSt3R/DUSt3R_ViTLarge_BaseDecoder_512_dpt.pth -P checkpoints/")
-
 print(f'>>> System info: {version_str}')
 
 
@@ -110,7 +103,7 @@ def viewcrafter_demo(opts):
                     inputs=[i2v_input_image, i2v_elevation, i2v_center_scale, i2v_d_phi, i2v_d_theta, i2v_d_r, i2v_steps, i2v_seed],
                     outputs=[i2v_traj_video,i2v_output_video],
                     fn = image2video.run_gradio,
-                    cache_examples=False,
+                    cache_examples='lazy',
         )
 
         # image2video.run_gradio(i2v_input_image='test/images/boy.png', i2v_elevation='10', i2v_d_phi='0 40', i2v_d_theta='0 0', i2v_d_r='0 0', i2v_center_scale=1, i2v_steps=50, i2v_seed=123)
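Note on the change: app.py now installs PyTorch3D from FAIR's prebuilt wheel index instead of compiling it from source at startup. A minimal standalone sketch of that wheel-selection logic follows; the middle lines of version_str fall outside the hunks above and are assumed here to match PyTorch3D's published install snippet.

# Sketch of the wheel selection app.py relies on. Assumption: the elided
# middle of version_str follows PyTorch3D's documented install snippet.
import os
import sys
import torch

pyt_version_str = torch.__version__.split("+")[0].replace(".", "")
version_str = "".join([
    f"py3{sys.version_info.minor}_cu",
    torch.version.cuda.replace(".", ""),  # assumed line: CUDA version, e.g. '121'
    f"_pyt{pyt_version_str}",             # assumed line: torch version, e.g. '_pyt210'
])
# e.g. 'py310_cu121_pyt210' selects the wheel index for Python 3.10 / CUDA 12.1 / torch 2.1.0
os.system(
    f"{sys.executable} -m pip install --no-index --no-cache-dir pytorch3d "
    f"-f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html"
)

The other functional change, cache_examples='lazy', makes gr.Examples compute example outputs on first request rather than ahead of time at launch, so the GPU pipeline is not exercised while the Space is still starting.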
app_bad.py DELETED
@@ -1,114 +0,0 @@
-import os
-import torch
-import sys
-
-# os.system('pip install iopath')
-# os.system("pip install -v -v -v 'git+https://github.com/facebookresearch/pytorch3d.git@stable'")
-# os.system("cd pytorch3d && pip install -e . && cd ..")
-os.system("pip install 'git+https://github.com/facebookresearch/pytorch3d.git'")
-
-
-import gradio as gr
-import random
-from viewcrafter import ViewCrafter
-from configs.infer_config import get_parser
-from huggingface_hub import hf_hub_download
-
-i2v_examples = [
-    ['test/images/boy.png', 0, 1.0, '0 40', '0 0', '0 0', 50, 123],
-    ['test/images/car.jpeg', 0, 1.0, '0 -35', '0 0', '0 -0.1', 50, 123],
-    ['test/images/fruit.jpg', 0, 1.0, '0 -3 -15 -20 -17 -5 0', '0 -2 -5 -10 -8 -5 0 2 5 3 0', '0 0', 50, 123],
-    ['test/images/room.png', 5, 1.0, '0 3 10 20 17 10 0', '0 -2 -8 -6 0 2 5 3 0', '0 -0.02 -0.09 -0.16 -0.09 0', 50, 123],
-    ['test/images/castle.png', 0, 1.0, '0 30', '0 -1 -5 -4 0 1 5 4 0', '0 -0.2', 50, 123],
-]
-
-max_seed = 2 ** 31
-
-def download_model():
-    REPO_ID = 'Drexubery/ViewCrafter_25'
-    filename_list = ['model.ckpt']
-    for filename in filename_list:
-        local_file = os.path.join('./checkpoints/', filename)
-        if not os.path.exists(local_file):
-            hf_hub_download(repo_id=REPO_ID, filename=filename, local_dir='./checkpoints/', force_download=True)
-
-    REPO_ID = 'naver/DUSt3R_ViTLarge_BaseDecoder_512_dpt'
-
-download_model()
-
-def viewcrafter_demo(opts):
-    css = """#input_img {max-width: 1024px !important} #output_vid {max-width: 1024px; max-height:576px} #random_button {max-width: 100px !important}"""
-    image2video = ViewCrafter(opts, gradio = True)
-    with gr.Blocks(analytics_enabled=False, css=css) as viewcrafter_iface:
-        gr.Markdown("<div align='center'> <h1> ViewCrafter: Taming Video Diffusion Models for High-fidelity Novel View Synthesis </span> </h1> \
-                    <h2 style='font-weight: 450; font-size: 1rem; margin: 0rem'>\
-                    <a href='https://scholar.google.com/citations?user=UOE8-qsAAAAJ&hl=zh-CN'>Wangbo Yu</a>, \
-                    <a href='https://doubiiu.github.io/'>Jinbo Xing</a>, <a href=''>Li Yuan</a>, \
-                    <a href='https://wbhu.github.io/'>Wenbo Hu</a>, <a href='https://xiaoyu258.github.io/'>Xiaoyu Li</a>,\
-                    <a href=''>Zhipeng Huang</a>, <a href='https://scholar.google.com/citations?user=qgdesEcAAAAJ&hl=en/'>Xiangjun Gao</a>,\
-                    <a href='https://www.cse.cuhk.edu.hk/~ttwong/myself.html/'>Tien-Tsin Wong</a>,\
-                    <a href='https://scholar.google.com/citations?hl=en&user=4oXBp9UAAAAJ&view_op=list_works&sortby=pubdate/'>Ying Shan</a>\
-                    <a href=''>Yonghong Tian</a>\
-                    </h2> \
-                    <a style='font-size:18px;color: #FF5DB0' href='https://github.com/Drexubery/ViewCrafter/blob/main/docs/render_help.md'> [Guideline] </a>\
-                    <a style='font-size:18px;color: #000000' href=''> [ArXiv] </a>\
-                    <a style='font-size:18px;color: #000000' href='https://drexubery.github.io/ViewCrafter/'> [Project Page] </a>\
-                    <a style='font-size:18px;color: #000000' href='https://github.com/Drexubery/ViewCrafter'> [Github] </a> </div>")
-
-        #######image2video######
-        with gr.Tab(label="ViewCrafter_25, 'single_view_txt' mode"):
-            with gr.Column():
-                with gr.Row():
-                    with gr.Column():
-                        with gr.Row():
-                            i2v_input_image = gr.Image(label="Input Image",elem_id="input_img")
-                        with gr.Row():
-                            i2v_elevation = gr.Slider(minimum=-45, maximum=45, step=1, elem_id="elevation", label="elevation", value=5)
-                        with gr.Row():
-                            i2v_center_scale = gr.Slider(minimum=0.1, maximum=2, step=0.1, elem_id="i2v_center_scale", label="center_scale", value=1)
-                        with gr.Row():
-                            i2v_d_phi = gr.Text(label='d_phi sequence, should start with 0')
-                        with gr.Row():
-                            i2v_d_theta = gr.Text(label='d_theta sequence, should start with 0')
-                        with gr.Row():
-                            i2v_d_r = gr.Text(label='d_r sequence, should start with 0')
-                        with gr.Row():
-                            i2v_steps = gr.Slider(minimum=1, maximum=50, step=1, elem_id="i2v_steps", label="Sampling steps", value=50)
-                        with gr.Row():
-                            i2v_seed = gr.Slider(label='Random Seed', minimum=0, maximum=max_seed, step=1, value=123)
-                        i2v_end_btn = gr.Button("Generate")
-                    # with gr.Tab(label='Result'):
-                    with gr.Column():
-                        with gr.Row():
-                            i2v_traj_video = gr.Video(label="Camera Trajectory",elem_id="traj_vid",autoplay=True,show_share_button=True)
-                        with gr.Row():
-                            i2v_output_video = gr.Video(label="Generated Video",elem_id="output_vid",autoplay=True,show_share_button=True)
-
-            gr.Examples(examples=i2v_examples,
-                        inputs=[i2v_input_image, i2v_elevation, i2v_center_scale, i2v_d_phi, i2v_d_theta, i2v_d_r, i2v_steps, i2v_seed],
-                        outputs=[i2v_traj_video,i2v_output_video],
-                        fn = image2video.run_gradio,
-                        cache_examples=False,
-            )
-
-            # image2video.run_gradio(i2v_input_image='test/images/boy.png', i2v_elevation='10', i2v_d_phi='0 40', i2v_d_theta='0 0', i2v_d_r='0 0', i2v_center_scale=1, i2v_steps=50, i2v_seed=123)
-            i2v_end_btn.click(inputs=[i2v_input_image, i2v_elevation, i2v_center_scale, i2v_d_phi, i2v_d_theta, i2v_d_r, i2v_steps, i2v_seed],
-                              outputs=[i2v_traj_video,i2v_output_video],
-                              fn = image2video.run_gradio
-            )
-
-    return viewcrafter_iface
-
-
-if __name__ == "__main__":
-    parser = get_parser() # infer_config.py
-    opts = parser.parse_args() # default device: 'cuda:0'
-    opts.save_dir = './'
-    os.makedirs(opts.save_dir,exist_ok=True)
-    test_tensor = torch.Tensor([0]).cuda()
-    opts.device = str(test_tensor.device)
-    viewcrafter_iface = viewcrafter_demo(opts)
-    viewcrafter_iface.queue(max_size=10)
-    viewcrafter_iface.launch()
-    # viewcrafter_iface.launch(server_name='127.0.0.1', server_port=80, max_threads=1,debug=False)
-
app_new.py DELETED
@@ -1,146 +0,0 @@
-import os
-import torch
-import sys
-import gradio as gr
-import random
-from configs.infer_config import get_parser
-from huggingface_hub import hf_hub_download
-sys.path.append('./extern/dust3r')
-from dust3r.inference import inference, load_model
-from omegaconf import OmegaConf
-from pytorch_lightning import seed_everything
-from utils.diffusion_utils import instantiate_from_config,load_model_checkpoint,image_guided_synthesis
-import torchvision.transforms as transforms
-import copy
-
-i2v_examples = [
-    ['test/images/boy.png', 0, 1.0, '0 40', '0 0', '0 0', 50, 123],
-    ['test/images/car.jpeg', 0, 1.0, '0 -35', '0 0', '0 -0.1', 50, 123],
-    ['test/images/fruit.jpg', 0, 1.0, '0 -3 -15 -20 -17 -5 0', '0 -2 -5 -10 -8 -5 0 2 5 3 0', '0 0', 50, 123],
-    ['test/images/room.png', 5, 1.0, '0 3 10 20 17 10 0', '0 -2 -8 -6 0 2 5 3 0', '0 -0.02 -0.09 -0.16 -0.09 0', 50, 123],
-    ['test/images/castle.png', 0, 1.0, '0 30', '0 -1 -5 -4 0 1 5 4 0', '0 -0.2', 50, 123],
-]
-
-max_seed = 2 ** 31
-
-def download_model():
-    REPO_ID = 'Drexubery/ViewCrafter_25'
-    filename_list = ['model.ckpt']
-    for filename in filename_list:
-        local_file = os.path.join('./checkpoints/', filename)
-        if not os.path.exists(local_file):
-            hf_hub_download(repo_id=REPO_ID, filename=filename, local_dir='./checkpoints/', force_download=True)
-
-download_model()
-
-
-css = """#input_img {max-width: 1024px !important} #output_vid {max-width: 1024px; max-height:576px} #random_button {max-width: 100px !important}"""
-parser = get_parser() # infer_config.py
-opts = parser.parse_args() # default device: 'cuda:0'
-opts.save_dir = './'
-os.makedirs(opts.save_dir,exist_ok=True)
-test_tensor = torch.Tensor([0]).cuda()
-opts.device = str(test_tensor.device)
-
-dust3r = load_model(opts.model_path, opts.device)
-config = OmegaConf.load(opts.config)
-model_config = config.pop("model", OmegaConf.create())
-model_config['params']['unet_config']['params']['use_checkpoint'] = False
-model = instantiate_from_config(model_config)
-model = model.to(opts.device)
-model.cond_stage_model.device = opts.device
-model.perframe_ae = opts.perframe_ae
-assert os.path.exists(opts.ckpt_path), "Error: checkpoint Not Found!"
-model = load_model_checkpoint(model, opts.ckpt_path)
-model.eval()
-diffusion = model
-transform = transforms.Compose([
-    transforms.Resize(576),
-    transforms.CenterCrop((576,1024)),
-])
-
-def infer(opts,i2v_input_image, i2v_elevation, i2v_center_scale, i2v_d_phi, i2v_d_theta, i2v_d_r, i2v_steps, i2v_seed):
-    elevation = float(i2v_elevation)
-    center_scale = float(i2v_center_scale)
-    ddim_steps = i2v_steps
-    gradio_traj = [float(i) for i in i2v_d_phi.split()],[float(i) for i in i2v_d_theta.split()],[float(i) for i in i2v_d_r.split()]
-    seed_everything(i2v_seed)
-
-    torch.cuda.empty_cache()
-    img_tensor = torch.from_numpy(i2v_input_image).permute(2, 0, 1).unsqueeze(0).float().to(self.device)
-    img_tensor = (img_tensor / 255. - 0.5) * 2
-    image_tensor_resized = transform(img_tensor) #1,3,h,w
-    images = get_input_dict(image_tensor_resized,idx = 0,dtype = torch.float32)
-    images = [images, copy.deepcopy(images)]
-    images[1]['idx'] = 1
-    se_images = images
-    se_img_ori = (image_tensor_resized.squeeze(0).permute(1,2,0) + 1.)/2.
-
-    run_dust3r(input_images=self.images)
-    nvs_single_view(gradio=True)
-
-    traj_dir = os.path.join(self.opts.save_dir, "viz_traj.mp4")
-    gen_dir = os.path.join(self.opts.save_dir, "diffusion0.mp4")
-    return i2v_traj_path,i2v_output_path
-
-with gr.Blocks(analytics_enabled=False, css=css) as viewcrafter_iface:
-    gr.Markdown("<div align='center'> <h1> ViewCrafter: Taming Video Diffusion Models for High-fidelity Novel View Synthesis </span> </h1> \
-                <h2 style='font-weight: 450; font-size: 1rem; margin: 0rem'>\
-                <a href='https://scholar.google.com/citations?user=UOE8-qsAAAAJ&hl=zh-CN'>Wangbo Yu</a>, \
-                <a href='https://doubiiu.github.io/'>Jinbo Xing</a>, <a href=''>Li Yuan</a>, \
-                <a href='https://wbhu.github.io/'>Wenbo Hu</a>, <a href='https://xiaoyu258.github.io/'>Xiaoyu Li</a>,\
-                <a href=''>Zhipeng Huang</a>, <a href='https://scholar.google.com/citations?user=qgdesEcAAAAJ&hl=en/'>Xiangjun Gao</a>,\
-                <a href='https://www.cse.cuhk.edu.hk/~ttwong/myself.html/'>Tien-Tsin Wong</a>,\
-                <a href='https://scholar.google.com/citations?hl=en&user=4oXBp9UAAAAJ&view_op=list_works&sortby=pubdate/'>Ying Shan</a>\
-                <a href=''>Yonghong Tian</a>\
-                </h2> \
-                <a style='font-size:18px;color: #FF5DB0' href='https://github.com/Drexubery/ViewCrafter/blob/main/docs/render_help.md'> [Guideline] </a>\
-                <a style='font-size:18px;color: #000000' href=''> [ArXiv] </a>\
-                <a style='font-size:18px;color: #000000' href='https://drexubery.github.io/ViewCrafter/'> [Project Page] </a>\
-                <a style='font-size:18px;color: #000000' href='https://github.com/Drexubery/ViewCrafter'> [Github] </a> </div>")
-
-    #######image2video######
-    with gr.Tab(label="ViewCrafter_25, 'single_view_txt' mode"):
-        with gr.Column():
-            with gr.Row():
-                with gr.Column():
-                    with gr.Row():
-                        i2v_input_image = gr.Image(label="Input Image",elem_id="input_img")
-                    with gr.Row():
-                        i2v_elevation = gr.Slider(minimum=-45, maximum=45, step=1, elem_id="elevation", label="elevation", value=5)
-                    with gr.Row():
-                        i2v_center_scale = gr.Slider(minimum=0.1, maximum=2, step=0.1, elem_id="i2v_center_scale", label="center_scale", value=1)
-                    with gr.Row():
-                        i2v_d_phi = gr.Text(label='d_phi sequence, should start with 0')
-                    with gr.Row():
-                        i2v_d_theta = gr.Text(label='d_theta sequence, should start with 0')
-                    with gr.Row():
-                        i2v_d_r = gr.Text(label='d_r sequence, should start with 0')
-                    with gr.Row():
-                        i2v_steps = gr.Slider(minimum=1, maximum=50, step=1, elem_id="i2v_steps", label="Sampling steps", value=50)
-                    with gr.Row():
-                        i2v_seed = gr.Slider(label='Random Seed', minimum=0, maximum=max_seed, step=1, value=123)
-                    i2v_end_btn = gr.Button("Generate")
-                # with gr.Tab(label='Result'):
-                with gr.Column():
-                    with gr.Row():
-                        i2v_traj_video = gr.Video(label="Camera Trajectory",elem_id="traj_vid",autoplay=True,show_share_button=True)
-                    with gr.Row():
-                        i2v_output_video = gr.Video(label="Generated Video",elem_id="output_vid",autoplay=True,show_share_button=True)
-
-        gr.Examples(examples=i2v_examples,
-                    inputs=[opts,i2v_input_image, i2v_elevation, i2v_center_scale, i2v_d_phi, i2v_d_theta, i2v_d_r, i2v_steps, i2v_seed],
-                    outputs=[i2v_traj_video,i2v_output_video],
-                    fn = infer,
-                    cache_examples=False,
-        )
-
-        # image2video.run_gradio(i2v_input_image='test/images/boy.png', i2v_elevation='10', i2v_d_phi='0 40', i2v_d_theta='0 0', i2v_d_r='0 0', i2v_center_scale=1, i2v_steps=50, i2v_seed=123)
-        i2v_end_btn.click(inputs=[opts,i2v_input_image, i2v_elevation, i2v_center_scale, i2v_d_phi, i2v_d_theta, i2v_d_r, i2v_steps, i2v_seed],
-                          outputs=[i2v_traj_video,i2v_output_video],
-                          fn = infer
-        )
-
-viewcrafter_iface.queue(max_size=12).launch(show_api=True)
-
-
pytorch3d DELETED
@@ -1 +0,0 @@
-Subproject commit 05cbea115acbbcbea77999c03d55155b23479991
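Removing the pytorch3d submodule above completes the switch to prebuilt wheels: the vendored source tree is no longer needed once app.py installs from the wheel index. For reference, a hedged sketch of how such a submodule is typically dropped, written in the same os.system idiom the app uses; the exact commands are an assumption, not part of this commit.

# Assumed cleanup sequence for a vendored git submodule (not taken from
# the commit itself); run from the repository root.
import os

os.system("git submodule deinit -f pytorch3d")  # unregister the working-tree checkout
os.system("git rm -f pytorch3d")                # remove the gitlink and .gitmodules entry
os.system("rm -rf .git/modules/pytorch3d")      # delete the cached module clone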