Update app.py
app.py CHANGED
@@ -5,7 +5,7 @@ import numpy as np
 from transformers import pipeline
 from diffusers.utils import load_image
 from accelerate import Accelerator
-import torch, os, random
+import torch, os, random, gc
 from diffusers import StableDiffusionControlNetPipeline, StableDiffusionPipeline, ControlNetModel, UniPCMultistepScheduler
 from controlnet_aux import OpenposeDetector
 
@@ -126,6 +126,7 @@ def plex(mput, prompt, neg_prompt, stips, modal_id, dula, blip, blop):
     pipe = accelerator.prepare(pipe.to("cpu"))
 
     tilage = pope(prompt,num_inference_steps=5,height=512,width=512,generator=generator).images[0]
+    tilage.save('til.png', 'PNG')
     cannyimage = np.array(tilage)
     low_threshold = 100
     high_threshold = 200
@@ -135,12 +136,15 @@ def plex(mput, prompt, neg_prompt, stips, modal_id, dula, blip, blop):
     cannyimage[:, zero_start:zero_end] = 0
     cannyimage = cannyimage[:, :, None]
     cannyimage = np.concatenate([cannyimage, cannyimage, cannyimage], axis=2)
-    canny_image = Image.fromarray(cannyimage)
+    canny_image = Image.fromarray(cannyimage)
+    canny_image.save('can.png', 'PNG')
     pose_image = load_image(mput).resize((512, 512))
+    pose_image.save('./pos.png', 'PNG')
     openpose_image = openpose(pose_image)
-
+    openpose_image.save('./fin.png','PNG')
+    ##images = [openpose_image, canny_image]
 
-    imoge = pipe(prompt,
+    imoge = pipe(prompt,[openpose_image, canny_image],num_inference_steps=stips,negative_prompt=neg_prompt,controlnet_conditioning_scale=[blip, blop],height=512,width=512,generator=generator).images[0]
     return imoge
 
 iface = gr.Interface(fn=plex,inputs=[gr.Image(type="filepath"), gr.Textbox(label="prompt"), gr.Textbox(label="neg_prompt", value="monochrome, lowres, bad anatomy, worst quality, low quality"), gr.Slider(label="infer_steps", value=20, minimum=1, step=1, maximum=100), gr.Dropdown(choices=models, value=models[0], type="value", label="select a model"), gr.Dropdown(choices=sdulers, value=sdulers[0], type="value", label="schedulrs"), gr.Slider(label="condition_scale_canny", value=0.5, minimum=0.05, step=0.05, maximum=0.95), gr.Slider(label="condition_scale_pose", value=0.5, minimum=0.05, step=0.05, maximum=0.95)], outputs=gr.Image(), title="Img2Img Guided Multi-Conditioned Canny/Pose Controlnet Selectable StableDiffusion Model Demo", description="by JoPmt.")
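For context on the second and third hunks: the Canny conditioning image is built from the freshly generated `tilage`, and a band of middle columns is blanked so the pose conditioning can control that region. The `cv2.Canny` call and the `zero_start`/`zero_end` values sit outside the hunk, so the sketch below fills them in with the usual choices from the diffusers multi-ControlNet example; treat those specifics as assumptions, not what app.py necessarily contains.

```python
import cv2
import numpy as np
from PIL import Image

def make_canny_condition(tilage, low_threshold=100, high_threshold=200):
    # Edge-detect the seed image; cv2.Canny returns a single-channel uint8 map.
    cannyimage = cv2.Canny(np.array(tilage), low_threshold, high_threshold)
    # Blank the middle columns so the pose ControlNet owns that region.
    # zero_start/zero_end are not visible in this hunk; quarter to
    # three-quarter width is the common choice and is assumed here.
    zero_start = cannyimage.shape[1] // 4
    zero_end = zero_start + cannyimage.shape[1] // 2
    cannyimage[:, zero_start:zero_end] = 0
    # Stack the edge map to 3 channels so it can be passed as an RGB image.
    cannyimage = cannyimage[:, :, None]
    cannyimage = np.concatenate([cannyimage, cannyimage, cannyimage], axis=2)
    return Image.fromarray(cannyimage)
```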
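The rewritten `imoge = pipe(...)` call passes a list of two conditioning images together with a matching list of `controlnet_conditioning_scale` values, which is how diffusers drives a pipeline built from more than one ControlNet: images and scales pair positionally with the ControlNets the pipeline was constructed with. Below is a minimal sketch of the setup that call assumes; the checkpoint names are the standard `lllyasviel` ones and the base model is a placeholder for whatever app.py loads from the `modal_id` dropdown.

```python
import torch
from diffusers import (
    ControlNetModel,
    StableDiffusionControlNetPipeline,
    UniPCMultistepScheduler,
)

# One ControlNet per conditioning signal, in the same order as the image list.
controlnets = [
    ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose", torch_dtype=torch.float32),
    ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float32),
]

pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",  # placeholder for the user-selected model
    controlnet=controlnets,
    torch_dtype=torch.float32,
)
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)

def run_multi_controlnet(prompt, openpose_image, canny_image,
                         pose_scale=0.5, canny_scale=0.5, steps=20):
    generator = torch.Generator("cpu").manual_seed(0)
    return pipe(
        prompt,
        [openpose_image, canny_image],  # one image per ControlNet, same order as `controlnets`
        negative_prompt="monochrome, lowres, bad anatomy, worst quality, low quality",
        num_inference_steps=steps,
        controlnet_conditioning_scale=[pose_scale, canny_scale],  # per-ControlNet weights (the two sliders)
        height=512,
        width=512,
        generator=generator,
    ).images[0]
```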