Spaces:
Build error
Duplicate from kazuk/image-to-video-film
Co-authored-by: Kazuki Nakayashiki <[email protected]>
- .gitattributes +34 -0
- README.md +14 -0
- app.py +106 -0
- packages.txt +1 -0
- requirements.txt +16 -0
.gitattributes
ADDED
@@ -0,0 +1,34 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,14 @@
+---
+title: Images to Video
+emoji: 👁
+colorFrom: pink
+colorTo: green
+sdk: gradio
+sdk_version: 3.16.1
+app_file: app.py
+pinned: false
+license: unknown
+duplicated_from: kazuk/image-to-video-film
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,106 @@
+import gradio as gr
+from transformers import pipeline
+import io, base64
+from PIL import Image
+import numpy as np
+import tensorflow as tf
+import mediapy
+import os
+import sys
+from huggingface_hub import snapshot_download
+from image_tools.sizes import resize_and_crop
+
+os.system("git clone https://github.com/google-research/frame-interpolation")
+sys.path.append("frame-interpolation")
+from eval import interpolator, util
+
+ffmpeg_path = util.get_ffmpeg_path()
+mediapy.set_ffmpeg(ffmpeg_path)
+
+model = snapshot_download(repo_id="akhaliq/frame-interpolation-film-style")
+interpolator = interpolator.Interpolator(model, None)
+
+def resize(width, img):
+    basewidth = width
+    img = Image.open(img)
+    wpercent = (basewidth / float(img.size[0]))
+    hsize = int((float(img.size[1]) * float(wpercent)))
+    img = img.resize((basewidth, hsize), Image.ANTIALIAS)
+    return img
+
+def resize_img(img1, img2, output_name):
+    img_target_size = Image.open(img1)
+    img_to_resize = resize_and_crop(
+        img2,
+        (img_target_size.size[0], img_target_size.size[1]),
+        crop_origin="middle"
+    )
+    img_to_resize.save(output_name)
+
+def generate_interpolation(frame1, frame2, frame3, frame4, frame5, frame6, times_to_interpolate, fps):
+
+    frame1 = resize(256, frame1)
+    frame2 = resize(256, frame2)
+    frame3 = resize(256, frame3)
+    frame4 = resize(256, frame4)
+    frame5 = resize(256, frame5)
+    frame6 = resize(256, frame6)
+
+    frame1.save("test1.png")
+    frame2.save("test2.png")
+    frame3.save("test3.png")
+    frame4.save("test4.png")
+    frame5.save("test5.png")
+    frame6.save("test6.png")
+
+    resize_img("test1.png", "test2.png", "resized_img2.png")
+    resize_img("test1.png", "test3.png", "resized_img3.png")
+    resize_img("test1.png", "test4.png", "resized_img4.png")
+    resize_img("test1.png", "test5.png", "resized_img5.png")
+    resize_img("test1.png", "test6.png", "resized_img6.png")
+
+    input_frames = ["test1.png", "resized_img2.png", "resized_img3.png", "resized_img4.png", "resized_img5.png", "resized_img6.png"]
+
+    frames = list(util.interpolate_recursively_from_files(input_frames, times_to_interpolate, interpolator))
+
+    mediapy.write_video("out.mp4", frames, fps=fps)
+
+    return "out.mp4"
+
+demo = gr.Blocks()
+
+with demo:
+    with gr.Row():
+
+        # Left column (inputs)
+        with gr.Column():
+
+            with gr.Row():
+                # upload images and get image strings
+                input_arr = [
+                    gr.inputs.Image(type='filepath', label="Frame 1"),
+                    gr.inputs.Image(type='filepath', label="Frame 2"),
+                    gr.inputs.Image(type='filepath', label="Frame 3"),
+                    gr.inputs.Image(type='filepath', label="Frame 4"),
+                    gr.inputs.Image(type='filepath', label="Frame 5"),
+                    gr.inputs.Image(type='filepath', label="Frame 6"),
+                ]
+
+            with gr.Row():
+                input_arr.append(gr.inputs.Slider(minimum=2, maximum=10, step=1, label="Times to Interpolate"))
+                input_arr.append(gr.inputs.Slider(minimum=15, maximum=60, step=1, label="fps"))
+
+            # Rows of instructions & buttons
+            with gr.Row():
+                gr.Markdown("After uploading some images, hit the 'Generate Video' button to create a short video!")
+                button_gen_video = gr.Button("Generate Video")
+
+
+        # Right column (outputs)
+        with gr.Column():
+            output_interpolation = gr.Video(label="Generated Video")
+
+    # Bind functions to buttons
+    button_gen_video.click(fn=generate_interpolation, inputs=input_arr, outputs=output_interpolation)
+
+demo.launch(debug=True, enable_queue=True)
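For reference, the heart of app.py is the FILM frame-interpolation pipeline: download the pretrained model snapshot, wrap it in `interpolator.Interpolator`, recursively interpolate between the input frames, and encode the result with `mediapy`. The sketch below isolates that pipeline outside of Gradio, reusing only names that appear in app.py; the two input paths (`frame_a.png`, `frame_b.png`) are hypothetical placeholders, and it assumes the `frame-interpolation` repo has already been cloned onto `sys.path` as app.py does.

```python
# Minimal sketch of app.py's interpolation pipeline, assuming the
# google-research/frame-interpolation checkout is on sys.path and the
# input frame paths below are placeholders supplied by the caller.
import sys

import mediapy
from huggingface_hub import snapshot_download

sys.path.append("frame-interpolation")
from eval import interpolator as interpolator_lib, util

# Fetch the pretrained FILM (style) model, exactly as app.py does.
model_path = snapshot_download(repo_id="akhaliq/frame-interpolation-film-style")
film = interpolator_lib.Interpolator(model_path, None)

# Two hypothetical input frames; app.py uses six uploaded, resized frames.
input_frames = ["frame_a.png", "frame_b.png"]

# Recursively interpolate between adjacent frames; higher values of
# times_to_interpolate insert more in-between frames per pair.
times_to_interpolate = 4
frames = list(
    util.interpolate_recursively_from_files(input_frames, times_to_interpolate, film)
)

# Encode the frame sequence to a video, matching app.py's output step.
mediapy.set_ffmpeg(util.get_ffmpeg_path())
mediapy.write_video("out.mp4", frames, fps=24)
```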
packages.txt
ADDED
@@ -0,0 +1 @@
+ffmpeg
requirements.txt
ADDED
@@ -0,0 +1,16 @@
+transformers
+torch
+
+tensorflow==2.11.0
+tensorflow-gpu==2.11.0
+tensorflow-datasets==4.4.0
+tensorflow-addons==0.15.0
+absl-py==1.4.0
+gin-config==0.5.0
+parameterized==0.8.1
+mediapy==1.0.3
+scikit-image==0.19.1
+apache-beam==2.34.0
+google-cloud-bigquery-storage==1.1.0
+natsort==8.1.0
+image-tools