Correct example with new pipeline

#4
Files changed (1)
  1. README.md +44 -28
README.md CHANGED
@@ -63,57 +63,73 @@ It is recommended to use the checkpoint with [Stable Diffusion v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5) as the checkpoint
  has been trained on it.
  Experimentally, the checkpoint can be used with other diffusion models such as dreamboothed stable diffusion.

- ```
  1. Let's install `diffusers` and related packages:
  ```
  $ pip install diffusers transformers accelerate
  ```
  2. Run code:
  ```python
- import torch
- import os
  from diffusers.utils import load_image
- from PIL import Image
  import numpy as np
- from diffusers import (
-     ControlNetModel,
-     StableDiffusionControlNetPipeline,
-     UniPCMultistepScheduler,
- )
- checkpoint = "lllyasviel/control_v11p_sd15_inpaint"
- original_image = load_image(
-     "https://huggingface.co/lllyasviel/control_v11p_sd15_inpaint/resolve/main/images/original.png"
  )
  mask_image = load_image(
-     "https://huggingface.co/lllyasviel/control_v11p_sd15_inpaint/resolve/main/images/mask.png"
  )

  def make_inpaint_condition(image, image_mask):
      image = np.array(image.convert("RGB")).astype(np.float32) / 255.0
-     image_mask = np.array(image_mask.convert("L"))
      assert image.shape[0:1] == image_mask.shape[0:1], "image and image_mask must have the same image size"
-     image[image_mask < 128] = -1.0 # set as masked pixel
      image = np.expand_dims(image, 0).transpose(0, 3, 1, 2)
      image = torch.from_numpy(image)
      return image

- control_image = make_inpaint_condition(original_image, mask_image)
- prompt = "best quality"
- negative_prompt="lowres, bad anatomy, bad hands, cropped, worst quality"
- controlnet = ControlNetModel.from_pretrained(checkpoint, torch_dtype=torch.float16)
- pipe = StableDiffusionControlNetPipeline.from_pretrained(
      "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
  )
- pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
  pipe.enable_model_cpu_offload()
- generator = torch.manual_seed(2)
- image = pipe(prompt, negative_prompt=negative_prompt, num_inference_steps=30,
-     generator=generator, image=control_image).images[0]
- image.save('images/output.png')
  ```
- ![original](./images/original.png)
- ![mask](./images/mask.png)
- ![inpaint_output](./images/output.png)

  ## Other released checkpoints v1-1
  The authors released 14 different checkpoints, each trained with [Stable Diffusion v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5)
 
  has been trained on it.
  Experimentally, the checkpoint can be used with other diffusion models such as dreamboothed stable diffusion.

+
  1. Let's install `diffusers` and related packages:
  ```
  $ pip install diffusers transformers accelerate
  ```
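
Note that the example below also imports `torch`, which the command above does not install on its own; if it is not already present, something along these lines is needed (the exact command depends on your platform and CUDA setup):

```
$ pip install torch
```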
+
  2. Run code:
+
  ```python
+ # !pip install transformers accelerate
+ from diffusers import StableDiffusionControlNetInpaintPipeline, ControlNetModel, DDIMScheduler
  from diffusers.utils import load_image
  import numpy as np
+ import torch
+
+ init_image = load_image(
+     "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy.png"
  )
+ init_image = init_image.resize((512, 512))
+
+ generator = torch.Generator(device="cpu").manual_seed(1)
+
  mask_image = load_image(
+     "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy_mask.png"
  )
+ mask_image = mask_image.resize((512, 512))
+

  def make_inpaint_condition(image, image_mask):
      image = np.array(image.convert("RGB")).astype(np.float32) / 255.0
+     image_mask = np.array(image_mask.convert("L")).astype(np.float32) / 255.0
+
      assert image.shape[0:1] == image_mask.shape[0:1], "image and image_mask must have the same image size"
+     image[image_mask > 0.5] = -1.0 # set as masked pixel
      image = np.expand_dims(image, 0).transpose(0, 3, 1, 2)
      image = torch.from_numpy(image)
      return image

+
+ control_image = make_inpaint_condition(init_image, mask_image)
+
+ controlnet = ControlNetModel.from_pretrained(
+     "lllyasviel/control_v11p_sd15_inpaint", torch_dtype=torch.float16
+ )
+ pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
      "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
  )
+
+ pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
  pipe.enable_model_cpu_offload()
+
+ # generate image
+ image = pipe(
+     "a handsome man with ray-ban sunglasses",
+     num_inference_steps=20,
+     generator=generator,
+     eta=1.0,
+     image=init_image,
+     mask_image=mask_image,
+     control_image=control_image,
+ ).images[0]
  ```
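
Two things are worth spelling out about the corrected example. First, it switches from the generic `StableDiffusionControlNetPipeline` to the dedicated `StableDiffusionControlNetInpaintPipeline`, which consumes the raw `image` and `mask_image` in addition to the ControlNet's `control_image`. Second, the mask is now normalized to [0, 1], and pixels where the mask is white (`> 0.5`) are overwritten with `-1.0`, the sentinel value this inpaint ControlNet uses for masked pixels. A quick sanity check, plus saving the result as the old example did, might look like this (the output path is illustrative, not part of the model card):

```python
# Sanity-check the control image: NCHW layout, masked pixels set to -1.0.
print(control_image.shape)         # torch.Size([1, 3, 512, 512])
print(control_image.min().item())  # -1.0 wherever the mask is white

# Persist the result, as the previous version of the example did.
image.save("output.png")  # illustrative output path
```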
+
+ ![original](https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy.png)
+ ![mask](https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy_with_mask.png)
+ ![inpaint_output](https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/boy_ray_ban.png)
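
As the card notes, the checkpoint can experimentally be driven by other diffusion models such as dreamboothed stable diffusion. A minimal sketch, assuming any Stable Diffusion v1-5-derived checkpoint on the Hub; the model id below is a placeholder, not a tested recommendation:

```python
import torch
from diffusers import StableDiffusionControlNetInpaintPipeline, ControlNetModel, DDIMScheduler

controlnet = ControlNetModel.from_pretrained(
    "lllyasviel/control_v11p_sd15_inpaint", torch_dtype=torch.float16
)
pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
    "your-username/your-dreamboothed-sd15-model",  # placeholder: any SD 1.5-derived checkpoint
    controlnet=controlnet,
    torch_dtype=torch.float16,
)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
# From here, the same make_inpaint_condition / pipe(...) call as above applies.
```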

  ## Other released checkpoints v1-1
  The authors released 14 different checkpoints, each trained with [Stable Diffusion v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5)