adamelliotfields committed on
Commit
ca2f5d2
·
verified ·
1 Parent(s): 972fe7d

Remove negative embedding checkbox

Browse files
Files changed (4) hide show
  1. DOCS.md +2 -2
  2. app.py +1 -7
  3. lib/config.py +2 -3
  4. lib/inference.py +11 -17
DOCS.md CHANGED
@@ -49,13 +49,13 @@ Currently, the only annotator available is [Canny](https://huggingface.co/lllyas
49
 
50
  In an image-to-image pipeline, the input image is used as the initial latent representation. With [IP-Adapter](https://github.com/tencent-ailab/IP-Adapter), the image is processed by a separate image encoder and the encoded features are used as conditioning along with the text prompt.
51
 
52
- For capturing faces, enable `IP-Adapter Face` to use the full-face model. You should use an input image that is mostly a face and it should be high quality. You can generate fake portraits with Realistic Vision to experiment.
53
 
54
  ### Advanced
55
 
56
  #### Textual Inversion
57
 
58
- Enable `Use negative TI` to append [`fast_negative`](https://civitai.com/models/71961?modelVersionId=94057) to your negative prompt. Read [An Image is Worth One Word](https://huggingface.co/papers/2208.01618) to learn more.
59
 
60
  #### DeepCache
61
 
 
49
 
50
  In an image-to-image pipeline, the input image is used as the initial latent representation. With [IP-Adapter](https://github.com/tencent-ailab/IP-Adapter), the image is processed by a separate image encoder and the encoded features are used as conditioning along with the text prompt.
51
 
52
+ For capturing faces, enable `IP-Adapter Face` to use the full-face model. You should use an input image that is mostly a face and it should be high quality.
53
 
54
  ### Advanced
55
 
56
  #### Textual Inversion
57
 
58
+ Add `<fast_negative>` anywhere in your negative prompt to apply the [`fast_negative`](https://civitai.com/models/71961?modelVersionId=94057) textual inversion embedding. Read [An Image is Worth One Word](https://huggingface.co/papers/2208.01618) to learn more.
59
 
60
  #### DeepCache
61
 
app.py CHANGED
@@ -184,7 +184,7 @@ with gr.Blocks(
184
  with gr.Row():
185
  negative_prompt = gr.Textbox(
186
  label="Negative Prompt",
187
- value="nsfw",
188
  lines=1,
189
  )
190
 
@@ -289,11 +289,6 @@ with gr.Blocks(
289
  label="Karras σ",
290
  value=True,
291
  )
292
- use_negative_embedding = gr.Checkbox(
293
- elem_classes=["checkbox"],
294
- label="Use negative TI",
295
- value=False,
296
- )
297
 
298
  # Image-to-Image settings
299
  gr.HTML("<h3>Image-to-Image</h3>")
@@ -434,7 +429,6 @@ with gr.Blocks(
434
  num_images,
435
  use_karras,
436
  use_ip_face,
437
- use_negative_embedding,
438
  DISABLE_IMAGE_PROMPT,
439
  DISABLE_CONTROL_IMAGE_PROMPT,
440
  DISABLE_IP_IMAGE_PROMPT,
 
184
  with gr.Row():
185
  negative_prompt = gr.Textbox(
186
  label="Negative Prompt",
187
+ value="nsfw, <fast_negative>",
188
  lines=1,
189
  )
190
 
 
289
  label="Karras σ",
290
  value=True,
291
  )
 
 
 
 
 
292
 
293
  # Image-to-Image settings
294
  gr.HTML("<h3>Image-to-Image</h3>")
 
429
  num_images,
430
  use_karras,
431
  use_ip_face,
 
432
  DISABLE_IMAGE_PROMPT,
433
  DISABLE_CONTROL_IMAGE_PROMPT,
434
  DISABLE_IP_IMAGE_PROMPT,
lib/config.py CHANGED
@@ -121,14 +121,13 @@ Config = SimpleNamespace(
121
  ANNOTATORS={
122
  "canny": "lllyasviel/control_v11p_sd15_canny",
123
  },
124
- NEGATIVE_EMBEDDING="fast_negative",
125
  WIDTH=512,
126
  HEIGHT=512,
127
  NUM_IMAGES=1,
128
  SEED=-1,
129
- GUIDANCE_SCALE=7.5,
130
  INFERENCE_STEPS=40,
131
- DENOISING_STRENGTH=0.7,
132
  DEEPCACHE_INTERVAL=1,
133
  SCALE=1,
134
  SCALES=[1, 2, 4],
 
121
  ANNOTATORS={
122
  "canny": "lllyasviel/control_v11p_sd15_canny",
123
  },
 
124
  WIDTH=512,
125
  HEIGHT=512,
126
  NUM_IMAGES=1,
127
  SEED=-1,
128
+ GUIDANCE_SCALE=6,
129
  INFERENCE_STEPS=40,
130
+ DENOISING_STRENGTH=0.8,
131
  DEEPCACHE_INTERVAL=1,
132
  SCALE=1,
133
  SCALES=[1, 2, 4],
lib/inference.py CHANGED
@@ -50,7 +50,7 @@ def generate(
50
  annotator="canny",
51
  width=512,
52
  height=512,
53
- guidance_scale=7.5,
54
  inference_steps=40,
55
  denoising_strength=0.8,
56
  deepcache=1,
@@ -58,7 +58,6 @@ def generate(
58
  num_images=1,
59
  karras=False,
60
  ip_face=False,
61
- negative_embedding=False,
62
  Error=Exception,
63
  Info=None,
64
  progress=None,
@@ -85,6 +84,8 @@ def generate(
85
 
86
  EMBEDDINGS_TYPE = ReturnedEmbeddingsType.LAST_HIDDEN_STATES_NORMALIZED
87
 
 
 
88
  if ip_image_prompt:
89
  IP_ADAPTER = "full-face" if ip_face else "plus"
90
  else:
@@ -123,19 +124,15 @@ def generate(
123
  pipe = loader.pipe
124
  upscaler = loader.upscaler
125
 
126
- # Load negative embedding if requested
127
- if negative_embedding:
128
  embeddings_dir = os.path.abspath(
129
  os.path.join(os.path.dirname(__file__), "..", "embeddings")
130
  )
131
- embedding = Config.NEGATIVE_EMBEDDING
132
- try:
133
- pipe.load_textual_inversion(
134
- pretrained_model_name_or_path=f"{embeddings_dir}/{embedding}.pt",
135
- token=f"<{embedding}>",
136
- )
137
- except (EnvironmentError, HFValidationError, RepositoryNotFoundError):
138
- raise Error(f"Invalid embedding: {embedding}")
139
 
140
  # Embed prompts with weights
141
  compel = Compel(
@@ -155,10 +152,6 @@ def generate(
155
  for i in range(num_images):
156
  try:
157
  generator = torch.Generator(device=pipe.device).manual_seed(current_seed)
158
-
159
- if negative_embedding:
160
- negative_prompt += f", <{Config.NEGATIVE_EMBEDDING}>"
161
-
162
  positive_embeds, negative_embeds = compel.pad_conditioning_tensors_to_same_length(
163
  [compel(positive_prompt), compel(negative_prompt)]
164
  )
@@ -198,8 +191,9 @@ def generate(
198
  images.append((image, str(current_seed)))
199
  current_seed += 1
200
  finally:
201
- if negative_embedding:
202
  pipe.unload_textual_inversion()
 
203
  CURRENT_STEP = 0
204
  CURRENT_IMAGE += 1
205
 
 
50
  annotator="canny",
51
  width=512,
52
  height=512,
53
+ guidance_scale=6.0,
54
  inference_steps=40,
55
  denoising_strength=0.8,
56
  deepcache=1,
 
58
  num_images=1,
59
  karras=False,
60
  ip_face=False,
 
61
  Error=Exception,
62
  Info=None,
63
  progress=None,
 
84
 
85
  EMBEDDINGS_TYPE = ReturnedEmbeddingsType.LAST_HIDDEN_STATES_NORMALIZED
86
 
87
+ FAST_NEGATIVE = "<fast_negative>" in negative_prompt
88
+
89
  if ip_image_prompt:
90
  IP_ADAPTER = "full-face" if ip_face else "plus"
91
  else:
 
124
  pipe = loader.pipe
125
  upscaler = loader.upscaler
126
 
127
+ # Load fast negative embedding
128
+ if FAST_NEGATIVE:
129
  embeddings_dir = os.path.abspath(
130
  os.path.join(os.path.dirname(__file__), "..", "embeddings")
131
  )
132
+ pipe.load_textual_inversion(
133
+ pretrained_model_name_or_path=f"{embeddings_dir}/fast_negative.pt",
134
+ token="<fast_negative>",
135
+ )
 
 
 
 
136
 
137
  # Embed prompts with weights
138
  compel = Compel(
 
152
  for i in range(num_images):
153
  try:
154
  generator = torch.Generator(device=pipe.device).manual_seed(current_seed)
 
 
 
 
155
  positive_embeds, negative_embeds = compel.pad_conditioning_tensors_to_same_length(
156
  [compel(positive_prompt), compel(negative_prompt)]
157
  )
 
191
  images.append((image, str(current_seed)))
192
  current_seed += 1
193
  finally:
194
+ if FAST_NEGATIVE:
195
  pipe.unload_textual_inversion()
196
+
197
  CURRENT_STEP = 0
198
  CURRENT_IMAGE += 1
199