from diffsynth import ModelManager, SDVideoPipeline, ControlNetConfigUnit, VideoData, save_video
import torch
import spaces  # Hugging Face Spaces helper; provides the @spaces.GPU decorator for ZeroGPU
import gradio as gr

# Load models: textual inversions, the base Stable Diffusion checkpoint, the AnimateDiff motion module, and two ControlNets
model_manager = ModelManager(torch_dtype=torch.float16, device="cuda")
model_manager.load_textual_inversions("models/textual_inversion")
model_manager.load_models([
    "models/stable_diffusion/flat2DAnimerge_v45Sharp.safetensors",
    "models/AnimateDiff/mm_sd_v15_v2.ckpt",
    "models/ControlNet/control_v11p_sd15_lineart.pth",
    "models/ControlNet/control_v11f1e_sd15_tile.pth",
])
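# Build the video pipeline with two ControlNets: lineart keeps each frame's line structure,
# tile keeps colour and detail from the source frames.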
pipe = SDVideoPipeline.from_model_manager(
    model_manager,
    [
        ControlNetConfigUnit(
            processor_id="lineart",
            model_path="models/ControlNet/control_v11p_sd15_lineart.pth",
            scale=0.5
        ),
        ControlNetConfigUnit(
            processor_id="tile",
            model_path="models/ControlNet/control_v11f1e_sd15_tile.pth",
            scale=0.5
        )
    ]
)
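
# Convert an uploaded video to an anime-style rendering.
# @spaces.GPU(duration=500) requests a ZeroGPU worker for up to 500 seconds per call.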

@spaces.GPU(duration=500)
def generate_video(inp_vid):
    # Read the upload and take its first 60 frames at 1024x1024
    video = VideoData(video_file=inp_vid, height=1024, width=1024)
    input_video = [video[i] for i in range(60)]
    
    # Toon shading (requires roughly 20 GB of VRAM)
    torch.manual_seed(0)  # fixed seed for reproducible results
    output_video = pipe(
        prompt="best quality, perfect anime illustration, light, a girl is dancing, smile, solo",
        negative_prompt="verybadimagenegative_v1.3",
        cfg_scale=3, clip_skip=2,
        controlnet_frames=input_video, num_frames=len(input_video),
        num_inference_steps=10, height=1024, width=1024,
        animatediff_batch_size=32, animatediff_stride=16,
        vram_limit_level=0,
    )
    
    # Save the stylized frames to an MP4 at 60 fps
    save_video(output_video, "output_video.mp4", fps=60)

    return "output_video.mp4"


# Gradio UI
app = gr.Blocks(theme="JohnSmith9982/small_and_pretty")
with app:
    gr.Markdown("# <center>🌊💕🎶 Diffutoon</center>")
    inp_vid = gr.Video(label="Please upload a video file")
    btn = gr.Button("Start video-to-anime conversion with one click", variant="primary")
    out_vid = gr.Video(label="Converted video")

    btn.click(generate_video, inp_vid, out_vid)

    gr.Markdown("### <center>注意❗:请不要生成会对个人以及组织造成侵害的内容,此程序仅供科研、学习及个人娱乐使用。请自觉合规使用此程序,程序开发者不负有任何责任。</center>")
    gr.HTML('''
        <div class="footer">
                    <p>🌊🏞️🎶 - "The river rushes eastward, its endless torrent roaring on." - Gu Lin (Ming dynasty)
                    </p>
        </div>
    ''')
#app.queue(max_size=40, api_open=False)
app.launch(show_error=True)