import os
import random

import numpy as np
import torch
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import *

# Community pipeline providing StableDiffusionControlNetInpaintPipeline
from pipeline_stable_diffusion_controlnet_inpaint import *
# model2: OpenPose ControlNet + Any-inpainting base model, loaded once at import time
controlnet = ControlNetModel.from_pretrained("hirol/control_any5_openpose", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
    "hirol/Any-inpainting", controlnet=controlnet, torch_dtype=torch.float16
)
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
def make_inpaint_condition(image, image_mask):
    """Build an inpainting control image: masked pixels are set to -1.0."""
    image = np.array(image.convert("RGB")).astype(np.float32) / 255.0
    image_mask = np.array(image_mask.convert("L"))
    assert image.shape[0:2] == image_mask.shape[0:2], "image and image_mask must have the same image size"
    image[image_mask > 128] = -1.0  # set as masked pixel
    image = np.expand_dims(image, 0).transpose(0, 3, 1, 2)  # HWC -> NCHW
    image = torch.from_numpy(image)
    return image
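
# Usage sketch (an assumption, not part of the original app, which keeps this path
# commented out below): make_inpaint_condition expects two PIL images of matching
# size and returns a (1, 3, H, W) float tensor with masked pixels set to -1.0,
# suitable as a ControlNet control image. The path arguments are hypothetical.
def _example_inpaint_condition(image_path: str, mask_path: str) -> torch.Tensor:
    from PIL import Image  # local import to keep the sketch self-contained
    init_image = Image.open(image_path)  # image to be inpainted
    mask_image = Image.open(mask_path)   # white (>128) pixels mark the masked region
    return make_inpaint_condition(init_image, mask_image)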
def generate_image(prompt: str, negative_prompt: str, openpose_image, original_image, mask_image):
    # model1: OpenPose ControlNet + local Any-inpainting checkpoint
    controlnet = ControlNetModel.from_pretrained(
        "lllyasviel/control_v11p_sd15_openpose", torch_dtype=torch.float16, cache_dir="./models"
    )
    pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
        "./models/Any-inpainting", controlnet=controlnet, torch_dtype=torch.float16, cache_dir="./models"
    )
    pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
    pipe.to("cuda")

    seed = random.randint(10000, 90000)
    generator = torch.manual_seed(seed)

    # Optional multi-ControlNet path (kept disabled in the original code):
    # control_image = make_inpaint_condition(original_image, mask_image)
    # images = [openpose_image, control_image]
    image = pipe(
        prompt=prompt,
        image=original_image,
        control_image=openpose_image,
        mask_image=mask_image,
        num_inference_steps=20,
        generator=generator,
        negative_prompt=negative_prompt,
        # controlnet_conditioning_scale=[1.0, 0.8],
    ).images[0]
    return image
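
# Usage sketch (file names and prompts are illustrative assumptions):
# generate_image takes PIL images for the pose map, the photo to inpaint,
# and the mask, and returns a single PIL image.
def _example_generate(pose_path: str, photo_path: str, mask_path: str):
    from PIL import Image
    pose = Image.open(pose_path)    # OpenPose conditioning image
    photo = Image.open(photo_path)  # image to inpaint
    mask = Image.open(mask_path)    # white = region to repaint
    result = generate_image(
        "a person in a red jacket, best quality",
        "lowres, bad anatomy, bad hands",
        pose, photo, mask,
    )
    result.save("result.png")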
def generate_image_sketch(prompt: str, negative_prompt: str, openpose_image, original_image, mask_image):
    seed = random.randint(10000, 90000)
    generator = torch.manual_seed(seed)

    # model2: scribble/sketch ControlNet + local Any-inpainting checkpoint
    controlnet1 = ControlNetModel.from_pretrained(
        "lllyasviel/control_v11p_sd15_scribble", torch_dtype=torch.float16, cache_dir="./models"
    )
    pipe1 = StableDiffusionControlNetInpaintPipeline.from_pretrained(
        "./models/Any-inpainting", controlnet=controlnet1, torch_dtype=torch.float16, cache_dir="./models"
    )
    pipe1.scheduler = UniPCMultistepScheduler.from_config(pipe1.scheduler.config)
    pipe1.to("cuda")

    image = pipe1(
        prompt=prompt,
        image=original_image,
        control_image=openpose_image,  # here the "openpose_image" argument carries the sketch/scribble map
        mask_image=mask_image,
        num_inference_steps=20,
        generator=generator,
        negative_prompt=negative_prompt,
        # controlnet_conditioning_scale=[1.0, 0.8],
    ).images[0]
    return [image]
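
# Minimal Gradio wiring sketch. This is an assumption about how the Space could
# expose the functions above; the original UI code is not shown in this file,
# and the labels and layout here are illustrative only. generate_image_sketch
# returns a list, so a Gallery output fits it.
def _build_demo():
    import gradio as gr
    return gr.Interface(
        fn=generate_image_sketch,
        inputs=[
            gr.Textbox(label="prompt"),
            gr.Textbox(label="negative prompt"),
            gr.Image(type="pil", label="sketch / control image"),
            gr.Image(type="pil", label="original image"),
            gr.Image(type="pil", label="mask"),
        ],
        outputs=gr.Gallery(label="result"),
    )

if __name__ == "__main__":
    _build_demo().launch()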