import math
from typing import List, Optional

from PIL import Image

from internals.data.result import Result
from internals.pipelines.commons import AbstractPipeline, Img2Img
from internals.util.cache import clear_cuda_and_gc
from internals.util.config import get_base_dimension, get_model_dir


class HighRes(AbstractPipeline):
    def load(self, img2img: Optional[Img2Img] = None):
        # Reuse the already-loaded pipeline if one exists.
        if hasattr(self, "pipe"):
            return

        if not img2img:
            img2img = Img2Img()
            img2img.load(get_model_dir())

        self.pipe = img2img.pipe
        self.img2img = img2img

    def apply(
        self,
        prompt: List[str],
        negative_prompt: List[str],
        images: List[Image.Image],
        width: int,
        height: int,
        num_inference_steps: int,
        strength: float = 0.5,
        guidance_scale: int = 9,
        **kwargs,
    ):
        clear_cuda_and_gc()

        # Upscale the first-pass outputs to the target resolution before
        # running the img2img refinement pass over them.
        images = [image.resize((width, height)) for image in images]

        kwargs = {
            "prompt": prompt,
            "image": images,
            "strength": strength,
            "negative_prompt": negative_prompt,
            "guidance_scale": guidance_scale,
            "num_inference_steps": num_inference_steps,
            **kwargs,
        }
        result = self.pipe.__call__(**kwargs)
        return Result.from_result(result)

    @staticmethod
    def get_intermediate_dimension(target_width: int, target_height: int):
        # Pick a first-pass size whose pixel count matches the model's base
        # dimension while keeping the target aspect ratio, rounded up to
        # multiples of 64 as required by the diffusion pipeline.
        def_size = get_base_dimension()

        desired_pixel_count = def_size * def_size
        actual_pixel_count = target_width * target_height

        scale = math.sqrt(desired_pixel_count / actual_pixel_count)

        firstpass_width = math.ceil(scale * target_width / 64) * 64
        firstpass_height = math.ceil(scale * target_height / 64) * 64

        return firstpass_width, firstpass_height
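

# Minimal usage sketch (illustrative only, kept as comments): generate at the
# intermediate resolution first, then refine to the final size with HighRes.
# The `base_images` variable, prompts, and concrete sizes below are assumptions
# for the example, not part of this module.
#
#   highres = HighRes()
#   highres.load()
#
#   first_w, first_h = HighRes.get_intermediate_dimension(1536, 1536)
#   # ... run the base text-to-image pipeline at (first_w, first_h)
#   #     to produce `base_images` (a list of PIL images) ...
#
#   result = highres.apply(
#       prompt=["a detailed landscape"],
#       negative_prompt=[""],
#       images=base_images,
#       width=1536,
#       height=1536,
#       num_inference_steps=30,
#   )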