mokady committed
Commit 5fd09d2 · verified · 1 Parent(s): cfffa53

Update app.py

Files changed (1): app.py (+15 -8)
app.py CHANGED
@@ -1,3 +1,4 @@
+import spaces
 from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline, AutoencoderKL, EulerAncestralDiscreteScheduler
 from diffusers.utils import load_image
 from PIL import Image
@@ -28,7 +29,7 @@ pipe.scheduler = EulerAncestralDiscreteScheduler(
     steps_offset=1
 )
 # pipe.enable_freeu(b1=1.1, b2=1.1, s1=0.5, s2=0.7)
-pipe.enable_xformers_memory_efficient_attention()
+# pipe.enable_xformers_memory_efficient_attention()
 pipe.force_zeros_for_empty_prompt = False
 
 def resize_image(image):
@@ -42,20 +43,26 @@ def resize_image(image):
     return resized_image
 
 
+@spaces.GPU
+def generate_(prompt, negative_prompt, canny_image, num_steps, controlnet_conditioning_scale, seed):
+    generator = torch.Generator("cuda").manual_seed(seed)
+    images = pipe(
+        prompt, negative_prompt=negative_prompt, image=canny_image, num_inference_steps=num_steps, controlnet_conditioning_scale=float(controlnet_conditioning_scale),
+        generator=generator,
+    ).images
+    return images
+
+@spaces.GPU
 def process(input_image, prompt, negative_prompt, num_steps, controlnet_conditioning_scale, seed):
-    generator = torch.manual_seed(seed)
 
     # resize input_image to 1024x1024
     input_image = resize_image(input_image)
 
-    grayscale_image = input_image.convert('L').convert('RGB')
+    canny_image = get_canny_filter(input_image)
 
-    images = pipe(
-        prompt, negative_prompt=negative_prompt, image=grayscale_image, num_inference_steps=num_steps, controlnet_conditioning_scale=float(controlnet_conditioning_scale),
-        generator=generator,
-    ).images
+    images = generate_(prompt, negative_prompt, canny_image, num_steps, controlnet_conditioning_scale, seed)
 
-    return [grayscale_image, images[0]]
+    return [canny_image,images[0]]
 
 block = gr.Blocks().queue()
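
For context, a minimal sketch of the ZeroGPU pattern this commit adopts (the function name run_on_gpu and the toy tensor work below are illustrative, not part of the Space's app.py): GPU work is moved into a function decorated with @spaces.GPU, and the seeded generator is created on "cuda" inside that call, since on a ZeroGPU Space the GPU is only attached while the decorated function runs.

import spaces
import torch

@spaces.GPU
def run_on_gpu(seed: int) -> torch.Tensor:
    # Build the seeded CUDA generator inside the decorated call,
    # while the GPU is actually attached, then do the GPU work.
    generator = torch.Generator("cuda").manual_seed(seed)
    return torch.randn(4, generator=generator, device="cuda")

Note that get_canny_filter, which the updated process function calls, is assumed to be defined elsewhere in app.py; it is not part of this diff.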