from typing import Optional

import torch
from diffusers import ControlNetModel, StableDiffusionControlNetImg2ImgPipeline
from PIL import Image

import internals.util.image as ImageUtil
from internals.pipelines.commons import AbstractPipeline
from internals.pipelines.controlnets import ControlNet
from internals.pipelines.high_res import HighRes
from internals.pipelines.sdxl_llite_pipeline import SDXLLLiteImg2ImgPipeline
from internals.util.config import get_base_dimension, get_hf_cache_dir, get_is_sdxl


class RealtimeDraw(AbstractPipeline):
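    """Realtime drawing pipeline.

    On SDXL the shared base pipeline is wrapped in an SDXL-LLite img2img
    pipeline; on SD 1.5 two ControlNet img2img pipelines (seg, and
    scribble + seg) are built on top of the base pipeline's components.
    """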
    def load(self, pipeline: AbstractPipeline):
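        # Already loaded; nothing to do.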
        if hasattr(self, "pipe"):
            return

        if get_is_sdxl():
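            # SDXL: reuse the shared base pipeline through an SDXL-LLite img2img wrapper.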
            lite_pipe = SDXLLLiteImg2ImgPipeline()
            lite_pipe.load(
                pipeline,
                [
                    "https://s3.ap-south-1.amazonaws.com/autodraft.model.assets/models/replicate-xl-llite.safetensors"
                ],
            )
            self.pipe = lite_pipe
        else:
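            # SD 1.5: load the scribble and seg ControlNets and build img2img pipelines around them.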
            self.__controlnet_scribble = ControlNetModel.from_pretrained(
                "lllyasviel/control_v11p_sd15_scribble",
                torch_dtype=torch.float16,
                cache_dir=get_hf_cache_dir(),
            )

            self.__controlnet_seg = ControlNetModel.from_pretrained(
                "lllyasviel/control_v11p_sd15_seg",
                torch_dtype=torch.float16,
                cache_dir=get_hf_cache_dir(),
            )

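            # Share the base pipeline's already-loaded components; the image encoder is not passed on.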
            kwargs = {**pipeline.pipe.components}  # pyright: ignore
            kwargs.pop("image_encoder", None)
            self.pipe = StableDiffusionControlNetImg2ImgPipeline(
                **kwargs, controlnet=self.__controlnet_seg
            ).to("cuda")
            self.pipe.safety_checker = None
            self.pipe2 = StableDiffusionControlNetImg2ImgPipeline(
                **kwargs, controlnet=[self.__controlnet_scribble, self.__controlnet_seg]
            ).to("cuda")
            self.pipe2.safety_checker = None

    def process_seg(
        self,
        image: Image.Image,
        prompt: str,
        negative_prompt: str,
        seed: int,
    ):
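        """Img2img pass guided by the seg ControlNet, using `image` as both the
        init image and the control image (SD 1.5 only).
        """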
        if get_is_sdxl():
            raise Exception("SDXL is not supported for this method")

        torch.manual_seed(seed)

        image = ImageUtil.resize_image(image, 512)

        img = self.pipe.__call__(
            image=image,
            control_image=image,
            prompt=prompt,
            num_inference_steps=15,
            negative_prompt=negative_prompt,
            guidance_scale=10,
            strength=0.8,
        ).images[0]

        return img

    def process_img(
        self,
        prompt: str,
        negative_prompt: str,
        seed: int,
        image: Optional[Image.Image] = None,
        image2: Optional[Image.Image] = None,
    ):
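        """Generate an image from a sketch.

        `image` is the user's drawing; on SD 1.5, `image2` additionally
        conditions the seg ControlNet. Missing inputs default to a black canvas.
        """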
        torch.manual_seed(seed)

        b_dimen = get_base_dimension()

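        # Fall back to a black canvas for any missing input, sized to match the other image when available.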
        if not image:
            size = (b_dimen, b_dimen)
            if image2:
                size = image2.size
            image = Image.new("RGB", size, color=0)

        if not image2:
            size = (b_dimen, b_dimen)
            if image:
                size = image.size
            image2 = Image.new("RGB", size, color=0)

        if get_is_sdxl():
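            # SDXL: snap to the closest supported resolution and run the llite pipeline on the sketch alone.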
            size = HighRes.find_closest_sdxl_aspect_ratio(image.size[0], image.size[1])
            image = image.resize(size)

            images = self.pipe.__call__(
                image=image,
                condition_image=image,
                negative_prompt=negative_prompt,
                prompt=prompt,
                seed=seed,
                num_inference_steps=10,
                width=image.size[0],
                height=image.size[1],
            )
            img = images[0]
        else:
            image = ImageUtil.resize_image(image, b_dimen)

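            # Turn the sketch into a scribble map for the scribble ControlNet; image2 conditions the seg ControlNet.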
            scribble = ControlNet.scribble_image(image)

            image2 = ImageUtil.resize_image(image2, b_dimen)

            img = self.pipe2.__call__(
                image=image,
                control_image=[scribble, image2],
                prompt=prompt,
                num_inference_steps=15,
                negative_prompt=negative_prompt,
                guidance_scale=10,
                strength=0.9,
                width=image.size[0],
                height=image.size[1],
                controlnet_conditioning_scale=[1.0, 0.8],
            ).images[0]

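        # Normalise the output size to 512px before returning.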
        img = ImageUtil.resize_image(img, 512)

        return img