from typing import Optional

import torch
from diffusers import ControlNetModel, StableDiffusionControlNetImg2ImgPipeline
from PIL import Image

import internals.util.image as ImageUtil
from internals.pipelines.commons import AbstractPipeline
from internals.pipelines.controlnets import ControlNet
from internals.util.config import get_hf_cache_dir


class RealtimeDraw(AbstractPipeline):
    def load(self, pipeline: AbstractPipeline):
        # Already loaded; avoid re-initializing the models.
        if hasattr(self, "pipe"):
            return

        # Scribble and segmentation ControlNets for Stable Diffusion 1.5.
        self.__controlnet_scribble = ControlNetModel.from_pretrained(
            "lllyasviel/control_v11p_sd15_scribble",
            torch_dtype=torch.float16,
            cache_dir=get_hf_cache_dir(),
        )

        self.__controlnet_seg = ControlNetModel.from_pretrained(
            "lllyasviel/control_v11p_sd15_seg",
            torch_dtype=torch.float16,
            cache_dir=get_hf_cache_dir(),
        )

        # Reuse the base pipeline's components so the underlying models stay shared.
        kwargs = {**pipeline.pipe.components}  # pyright: ignore
        kwargs.pop("image_encoder", None)

        # pipe: segmentation-only control; pipe2: scribble + segmentation control.
        self.pipe = StableDiffusionControlNetImg2ImgPipeline(
            **kwargs, controlnet=self.__controlnet_seg
        ).to("cuda")
        self.pipe.safety_checker = None
        self.pipe2 = StableDiffusionControlNetImg2ImgPipeline(
            **kwargs, controlnet=[self.__controlnet_scribble, self.__controlnet_seg]
        ).to("cuda")
        self.pipe2.safety_checker = None

    def process_seg(
        self,
        image: Image.Image,
        prompt: str,
        negative_prompt: str,
        seed: int,
    ):
        torch.manual_seed(seed)

        image = ImageUtil.resize_image(image, 512)

        # The input serves both as the init image and as the segmentation
        # control image for the single-ControlNet pipeline.
        img = self.pipe(
            image=image,
            control_image=image,
            prompt=prompt,
            num_inference_steps=15,
            negative_prompt=negative_prompt,
            guidance_scale=10,
            strength=0.8,
        ).images[0]

        return img

    def process_img(
        self,
        prompt: str,
        negative_prompt: str,
        seed: int,
        image: Optional[Image.Image] = None,
        image2: Optional[Image.Image] = None,
    ):
        torch.manual_seed(seed)

        # When either input is missing, fall back to a black canvas sized to
        # match the other image (or 512x512 if both are missing).
        if not image:
            size = (512, 512)
            if image2:
                size = image2.size
            image = Image.new("RGB", size, color=0)

        if not image2:
            size = (512, 512)
            if image:
                size = image.size
            image2 = Image.new("RGB", size, color=0)

        image = ImageUtil.resize_image(image, 512)

        # Derive a scribble map from the first image; the second image acts as
        # the segmentation control image.
        scribble = ControlNet.scribble_image(image)

        image2 = ImageUtil.resize_image(image2, 512)

        img = self.pipe2(
            image=image,
            control_image=[scribble, image2],
            prompt=prompt,
            num_inference_steps=15,
            negative_prompt=negative_prompt,
            guidance_scale=10,
            strength=0.9,
            width=image.size[0],
            height=image.size[1],
            controlnet_conditioning_scale=[1.0, 0.8],
        ).images[0]

        return img
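

# Example usage (a minimal sketch, not part of the module API): assumes a
# `base_pipeline` AbstractPipeline wrapping a loaded Stable Diffusion 1.5
# pipeline on GPU; the file names and prompts below are hypothetical.
#
#     drawer = RealtimeDraw()
#     drawer.load(base_pipeline)
#
#     sketch = Image.open("rough_sketch.png").convert("RGB")
#     result = drawer.process_img(
#         prompt="a watercolor landscape with mountains",
#         negative_prompt="blurry, low quality",
#         seed=42,
#         image=sketch,
#     )
#     result.save("realtime_draw_result.png")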