# HuggingFace Spaces demo script (page-scrape header removed; the Space was
# listed with status "Runtime error" at capture time).
import gc
import os
import random

import cv2
import diffusers
import gradio as gr
import numpy as np
import torch
from accelerate import Accelerator
from controlnet_aux import OpenposeDetector
from diffusers import (
    ControlNetModel,
    StableDiffusionControlNetPipeline,
    StableDiffusionPipeline,
    UniPCMultistepScheduler,
)
from diffusers.utils import load_image
from PIL import Image
from transformers import pipeline
# Force CPU execution for the whole app (this Space has no GPU).
accelerator = Accelerator(cpu=True)
# Largest value accepted as an int32 RNG seed.
MAX_SEED = np.iinfo(np.int32).max
# Preprocessor that converts a reference photo into an OpenPose skeleton image.
openpose = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
# Two ControlNets used together: index 0 conditions on OpenPose skeletons,
# index 1 on Canny edge maps. The conditioning-image list and the
# conditioning-scale list passed at inference must follow this same order.
controlnet = [
    ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose", torch_dtype=torch.float32),
    ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float32),
]
# Hub ids of the Stable Diffusion checkpoints selectable from the UI dropdown.
# The first entry is the default selection. NOTE(review): some entries look
# like non-diffusers repos (e.g. coreml-*, *-lora) — loading them with
# StableDiffusionPipeline.from_pretrained may fail; verify each id.
models = [
    "runwayml/stable-diffusion-v1-5",
    "prompthero/openjourney-v4",
    "CompVis/stable-diffusion-v1-4",
    "stabilityai/stable-diffusion-2-1",
    "stablediffusionapi/disney-pixal-cartoon",
    "stablediffusionapi/edge-of-realism",
    "MirageML/fantasy-scene",
    "wavymulder/lomo-diffusion",
    "sd-dreambooth-library/fashion",
    "DucHaiten/DucHaitenDreamWorld",
    "VegaKH/Ultraskin",
    "kandinsky-community/kandinsky-2-1",
    "MirageML/lowpoly-cyberpunk",
    "thehive/everyjourney-sdxl-0.9-finetuned",
    "plasmo/woolitize-768sd1-5",
    "plasmo/food-crit",
    "johnslegers/epic-diffusion-v1.1",
    "Fictiverse/ElRisitas",
    "robotjung/SemiRealMix",
    "herpritts/FFXIV-Style",
    "prompthero/linkedin-diffusion",
    "RayHell/popupBook-diffusion",
    "MirageML/lowpoly-world",
    "deadman44/SD_Photoreal_Merged_Models",
    "Conflictx/CGI_Animation",
    "johnslegers/epic-diffusion",
    "tilake/China-Chic-illustration",
    "wavymulder/modelshoot",
    "prompthero/openjourney-lora",
    "Fictiverse/Stable_Diffusion_VoxelArt_Model",
    "nousr/robo-diffusion-2-base",
    "darkstorm2150/Protogen_v2.2_Official_Release",
    "hassanblend/HassanBlend1.5.1.2",
    "hassanblend/hassanblend1.4",
    "nitrosocke/redshift-diffusion",
    "prompthero/openjourney-v2",
    "nitrosocke/Arcane-Diffusion",
    "Lykon/DreamShaper",
    "wavymulder/Analog-Diffusion",
    "nitrosocke/mo-di-diffusion",
    "dreamlike-art/dreamlike-diffusion-1.0",
    "dreamlike-art/dreamlike-photoreal-2.0",
    "digiplay/RealismEngine_v1",
    "digiplay/AIGEN_v1.4_diffusers",
    "stablediffusionapi/dreamshaper-v6",
    "JackAnon/GorynichMix",
    "p1atdev/liminal-space-diffusion",
    "nadanainone/gigaschizonegs",
    "darkVOYAGE/dvMJv4",
    "lckidwell/album-cover-style",
    "axolotron/ice-cream-animals",
    "perion/ai-avatar",
    "FFusion/FFXL400",
    "digiplay/GhostMix",
    "ThePioneer/MISA",
    "TheLastBen/froggy-style-v21-768",
    "FloydianSound/Nixeu_Diffusion_v1-5",
    "diffusers/sdxl-instructpix2pix-768",
    "kakaobrain/karlo-v1-alpha-image-variations",
    "coreml-community/coreml-HassanBlend",
    "digiplay/PotoPhotoRealism_v1",
    "ConsistentFactor/Aurora-By_Consistent_Factor",
    "coreml/coreml-ghostmix-v11",
    "rim0/quadruped_mechas",
    "Akumetsu971/SD_Samurai_Anime_Model",
    "Bojaxxx/Fantastic-Mr-Fox-Diffusion",
    "sd-dreambooth-library/original-character-cyclps",
    "AIArtsChannel/steampunk-diffusion",
]
# Scheduler class names offered in the UI dropdown; the first entry is the
# default. NOTE(review): a few entries do not match actual diffusers class
# names (e.g. "DPMSolverMultistepInverse" lacks the "Scheduler" suffix) —
# resolution by name should fall back gracefully; verify against diffusers.
sdulers = [
    "UniPCMultistepScheduler",
    "DDIMScheduler",
    "DDPMScheduler",
    "DDIMInverseScheduler",
    "CMStochasticIterativeScheduler",
    "DEISMultistepScheduler",
    "DPMSolverMultistepInverse",
    "DPMSolverMultistepScheduler",
    "DPMSolverSDEScheduler",
    "DPMSolverSinglestepScheduler",
    "EulerAncestralDiscreteScheduler",
    "EulerDiscreteScheduler",
    "HeunDiscreteScheduler",
    "IPNDMScheduler",
    "KarrasVeScheduler",
    "KDPM2AncestralDiscreteScheduler",
    "KDPM2DiscreteScheduler",
    "LMSDiscreteScheduler",
    "PNDMScheduler",
    "RePaintScheduler",
    "ScoreSdeVeScheduler",
    "ScoreSdeVpScheduler",
    "VQDiffusionScheduler",
]
generator = torch.Generator(device="cpu").manual_seed(random.randint(0, MAX_SEED)) | |
def plex(mput, prompt, neg_prompt, stips, modal_id, dula, blip, blop):
    """Generate a 512x512 image conditioned on an OpenPose skeleton and a Canny edge map.

    A plain Stable Diffusion pipeline first renders a quick 5-step draft of
    the prompt; its Canny edges (center columns blanked) plus the OpenPose
    skeleton extracted from the user's reference photo then drive a
    multi-ControlNet pass with the same checkpoint.

    Parameters
    ----------
    mput : str
        Filepath of the pose reference image (``gr.Image(type="filepath")``).
    prompt, neg_prompt : str
        Positive and negative text prompts.
    stips : int | float
        Inference steps for the ControlNet pass (slider value; coerced to int).
    modal_id : str
        Hub id of the Stable Diffusion checkpoint to load (one of ``models``).
    dula : str
        Name of the diffusers scheduler class to use (one of ``sdulers``).
    blip, blop : float
        Conditioning scales for the Canny and OpenPose ControlNets respectively
        (matching the UI slider labels).

    Returns
    -------
    PIL.Image.Image
        The generated image.
    """
    # Draft pipeline for the base image. The original passed
    # safety_checker=True, which is invalid (diffusers expects a safety-checker
    # module or None); omitting it keeps the checkpoint's default checker.
    pope = accelerator.prepare(
        StableDiffusionPipeline.from_pretrained(
            modal_id, use_safetensors=False, torch_dtype=torch.float32
        )
    )
    pope.unet.to(memory_format=torch.channels_last)
    pope = accelerator.prepare(pope.to("cpu"))

    # ControlNet pipeline that consumes the [openpose, canny] conditioning pair.
    pipe = accelerator.prepare(
        StableDiffusionControlNetPipeline.from_pretrained(
            modal_id,
            use_safetensors=False,
            controlnet=controlnet,
            torch_dtype=torch.float32,
        )
    )
    pipe.unet.to(memory_format=torch.channels_last)
    # Honor the scheduler selected in the UI (the original ignored `dula` and
    # always forced UniPCMultistepScheduler). Fall back to UniPC when the
    # dropdown entry does not name a real diffusers class.
    sched_cls = getattr(diffusers, dula, UniPCMultistepScheduler)
    pipe.scheduler = sched_cls.from_config(pipe.scheduler.config)
    pipe = accelerator.prepare(pipe.to("cpu"))

    # Fast low-step draft used only as the source of the Canny edges.
    tilage = pope(
        prompt, num_inference_steps=5, height=512, width=512, generator=generator
    ).images[0]
    tilage.save('til.png', 'PNG')

    # Build the Canny conditioning image; the middle half of the columns is
    # blanked so the pose ControlNet dominates the center of the frame.
    low_threshold = 100
    high_threshold = 200
    cannyimage = cv2.Canny(np.array(tilage), low_threshold, high_threshold)
    zero_start = cannyimage.shape[1] // 4
    zero_end = zero_start + cannyimage.shape[1] // 2
    cannyimage[:, zero_start:zero_end] = 0
    # Replicate the single edge channel to RGB for the pipeline.
    canny_image = Image.fromarray(np.stack([cannyimage] * 3, axis=2))
    canny_image.save('can.png', 'PNG')

    # Extract the OpenPose skeleton from the user's reference photo.
    pose_image = load_image(mput).resize((512, 512))
    pose_image.save('./pos.png', 'PNG')
    openpose_image = openpose(pose_image)
    openpose_image.save('./fin.png', 'PNG')

    # Conditioning images and scales must follow the `controlnet` order
    # [openpose, canny]: the original passed [blip, blop], which applied the
    # "condition_scale_canny" slider to the pose net and vice versa.
    imoge = pipe(
        prompt,
        [openpose_image, canny_image],
        num_inference_steps=int(stips),
        negative_prompt=neg_prompt,
        controlnet_conditioning_scale=[float(blop), float(blip)],
        height=512,
        width=512,
        generator=generator,
    ).images[0]
    return imoge
# Assemble the Gradio UI: pose photo, prompts, and sampling controls in;
# a single generated image out.
_ui_inputs = [
    gr.Image(type="filepath"),
    gr.Textbox(label="prompt"),
    gr.Textbox(label="neg_prompt", value="monochrome, lowres, bad anatomy, worst quality, low quality"),
    gr.Slider(label="infer_steps", value=20, minimum=1, step=1, maximum=100),
    gr.Dropdown(choices=models, value=models[0], type="value", label="select a model"),
    gr.Dropdown(choices=sdulers, value=sdulers[0], type="value", label="schedulrs"),
    gr.Slider(label="condition_scale_canny", value=0.5, minimum=0.05, step=0.05, maximum=0.95),
    gr.Slider(label="condition_scale_pose", value=0.5, minimum=0.05, step=0.05, maximum=0.95),
]
iface = gr.Interface(
    fn=plex,
    inputs=_ui_inputs,
    outputs=gr.Image(),
    title="Img2Img Guided Multi-Conditioned Canny/Pose Controlnet Selectable StableDiffusion Model Demo",
    description="by JoPmt.",
)
iface.launch()