Commit a447492 · 1 Parent(s): 1ff2dfa
committed by n42

adding trigger token config param

Files changed (3):
  1. app.py +15 -4
  2. appConfig.json +18 -9
  3. config.py +3 -1
app.py CHANGED
@@ -29,6 +29,7 @@ def models_change(model, scheduler, config):
 
     use_safetensors = False
     refiner = "none"
+    trigger_token = ""
 
     # no model selected (because this is UI init run)
    if type(model) != list and str(model) != 'None':
@@ -36,7 +37,8 @@ def models_change(model, scheduler, config):
         use_safetensors = str(models[model]['use_safetensors'])
         model_description = models[model]['description']
         refiner = models[model]['refiner']
-
+        trigger_token = models[model]['trigger_token']
+
         # if no scheduler is selected, choose the default one for this model
         if scheduler == None:
 
@@ -53,7 +55,7 @@ def models_change(model, scheduler, config):
     # safety_checker_change(in_safety_checker.value, config)
     # requires_safety_checker_change(in_requires_safety_checker.value, config)
 
-    return model_description, refiner, use_safetensors, scheduler, config, str(config), assemble_code(config)
+    return model_description, refiner, trigger_token, use_safetensors, scheduler, config, str(config), assemble_code(config)
 
 def data_type_change(data_type, config):
 
@@ -91,6 +93,12 @@ def prompt_change(prompt, config):
 
     return config, str(config), assemble_code(config)
 
+def trigger_token_change(trigger_token, config):
+
+    config = set_config(config, 'trigger_token', trigger_token)
+
+    return config, str(config), assemble_code(config)
+
 def negative_prompt_change(negative_prompt, config):
 
     config = set_config(config, 'negative_prompt', negative_prompt)
@@ -226,10 +234,11 @@ with gr.Blocks(analytics_enabled=False) as demo:
                 out_model_description = gr.Textbox(value="", label="Description")
         with gr.Row():
             with gr.Column(scale=1):
+                in_trigger_token = gr.Textbox(value=config.value["trigger_token"], label="Trigger Token")
                 in_use_safetensors = gr.Radio(label="Use safe tensors:", choices=["True", "False"], interactive=False)
-                in_cpu_offload = gr.Radio(label="CPU Offload:", value=config.value["cpu_offload"], choices=["True", "False"], info="This may increase performance, as it offloads computations from the GPU to the CPU. But this can also lead to slower executions and lower effectiveness. Compare running time and outputs before making sure, that this setting will help you")
                 in_model_refiner = gr.Dropdown(value=config.value["refiner"], choices=["none"], label="Refiner", allow_custom_value=True, multiselect=False)
            with gr.Column(scale=1):
+                in_cpu_offload = gr.Radio(label="CPU Offload:", value=config.value["cpu_offload"], choices=["True", "False"], info="This may increase performance, as it offloads computations from the GPU to the CPU. But this can also lead to slower executions and lower effectiveness. Compare running time and outputs before making sure, that this setting will help you")
                in_safety_checker = gr.Radio(label="Enable safety checker:", value=config.value["safety_checker"], choices=["True", "False"])
                in_requires_safety_checker = gr.Radio(label="Requires safety checker:", value=config.value["requires_safety_checker"], choices=["True", "False"])
 
@@ -268,7 +277,7 @@ with gr.Blocks(analytics_enabled=False) as demo:
     in_data_type.change(data_type_change, inputs=[in_data_type, config], outputs=[config, out_config, out_code])
     in_allow_tensorfloat32.change(tensorfloat32_change, inputs=[in_allow_tensorfloat32, config], outputs=[config, out_config, out_code])
     in_variant.change(variant_change, inputs=[in_variant, config], outputs=[config, out_config, out_code])
-    in_models.change(models_change, inputs=[in_models, in_schedulers, config], outputs=[out_model_description, in_model_refiner, in_use_safetensors, in_schedulers, config, out_config, out_code])
+    in_models.change(models_change, inputs=[in_models, in_schedulers, config], outputs=[out_model_description, in_model_refiner, in_trigger_token, in_use_safetensors, in_schedulers, config, out_config, out_code])
     in_model_refiner.change(model_refiner_change, inputs=[in_model_refiner, config], outputs=[config, out_config, out_code])
     in_cpu_offload.change(cpu_offload_change, inputs=[in_cpu_offload, config], outputs=[config, out_config, out_code])
     in_safety_checker.change(safety_checker_change, inputs=[in_safety_checker, config], outputs=[config, out_config, out_code])
@@ -278,6 +287,7 @@ with gr.Blocks(analytics_enabled=False) as demo:
     in_manual_seed.change(manual_seed_change, inputs=[in_manual_seed, config], outputs=[config, out_config, out_code])
     in_guidance_scale.change(guidance_scale_change, inputs=[in_guidance_scale, config], outputs=[config, out_config, out_code])
     in_prompt.change(prompt_change, inputs=[in_prompt, config], outputs=[config, out_config, out_code])
+    in_trigger_token.change(trigger_token_change, inputs=[in_trigger_token, config], outputs=[config, out_config, out_code])
     in_negative_prompt.change(negative_prompt_change, inputs=[in_negative_prompt, config], outputs=[config, out_config, out_code])
     ev_run_inference = btn_start_pipeline.click(run_inference, inputs=[config, config_history], outputs=[out_image, out_config_history, config_history])
     btn_stop_pipeline.click(fn=None, inputs=None, outputs=None, cancels=[ev_run_inference])
@@ -298,6 +308,7 @@ with gr.Blocks(analytics_enabled=False) as demo:
         in_requires_safety_checker,
         in_schedulers,
         in_prompt,
+        in_trigger_token,
         in_negative_prompt,
         in_inference_steps,
         in_manual_seed,
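
Note on the wiring above: every input component follows the same pattern — its .change event passes the component value plus the shared config state through a small handler that updates the config and re-renders the derived outputs (out_config, out_code). A minimal standalone sketch of that pattern, stripped down to the new trigger-token field (the set_config helper and the output wiring here are simplified stand-ins for the app's own code, not taken from this commit):

    import json
    import gradio as gr

    # Stand-in for the app's set_config helper (assumed behavior: store the
    # value under the key and return the updated config).
    def set_config(config, key, value):
        config[key] = value
        return config

    def trigger_token_change(trigger_token, config):
        config = set_config(config, 'trigger_token', trigger_token)
        return config, json.dumps(config)

    with gr.Blocks() as demo:
        config = gr.State({"trigger_token": ""})  # shared state, as in app.py
        in_trigger_token = gr.Textbox(label="Trigger Token")
        out_config = gr.Textbox(label="Config")
        # Same .change wiring as the in_trigger_token.change line in the diff.
        in_trigger_token.change(trigger_token_change,
                                inputs=[in_trigger_token, config],
                                outputs=[config, out_config])

    demo.launch()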
appConfig.json CHANGED
@@ -5,55 +5,64 @@
         "use_safetensors": false,
         "description": "see https://huggingface.co/sd-dreambooth-library/solo-levelling-art-style",
         "scheduler": "DDIMScheduler",
-        "refiner": "none"
+        "refiner": "none",
+        "trigger_token": ""
     },
     "CompVis/stable-diffusion-v1-4": {
         "use_safetensors": true,
         "description": "see https://huggingface.co/CompVis/stable-diffusion-v1-4",
         "scheduler": "EulerDiscreteScheduler",
-        "refiner": "none"
+        "refiner": "none",
+        "trigger_token": ""
     },
     "runwayml/stable-diffusion-v1-5": {
         "use_safetensors": true,
         "description": "see https://huggingface.co/runwayml/stable-diffusion-v1-5",
         "scheduler": "DDPMScheduler",
-        "refiner": "none"
+        "refiner": "none",
+        "trigger_token": ""
     },
     "stabilityai/stable-diffusion-2-1": {
         "use_safetensors": true,
         "description": "see https://huggingface.co/stabilityai/stable-diffusion-2-1",
         "scheduler": "DPMSolverMultistepScheduler",
-        "refiner": "none"
+        "refiner": "none",
+        "trigger_token": ""
     },
     "stabilityai/stable-diffusion-xl-base-1.0": {
         "use_safetensors": true,
         "description": "see https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0",
         "scheduler": "DDPMScheduler",
-        "refiner": "stabilityai/stable-diffusion-xl-refiner-1.0"
+        "refiner": "stabilityai/stable-diffusion-xl-refiner-1.0",
+        "trigger_token": ""
     },
     "sd-dreambooth-library/house-emblem": {
         "use_safetensors": false,
         "description": "see https://huggingface.co/sd-dreambooth-library/house-emblem",
         "scheduler": "DDPMScheduler",
-        "refiner": "none"
+        "refiner": "none",
+        "trigger_token": ""
     },
     "Envvi/Inkpunk-Diffusion": {
         "use_safetensors": false,
         "description": "see https://huggingface.co/Envvi/Inkpunk-Diffusion",
         "scheduler": "DDPMScheduler",
-        "refiner": "none"
+        "refiner": "none",
+        "trigger_token": ""
     },
     "Stelath/textual_inversion_comic_strip_fp16": {
         "use_safetensors": true,
         "description": "see https://huggingface.co/Stelath/textual_inversion_comic_strip_fp16",
         "scheduler": "DDPMScheduler",
-        "refiner": "none"
+        "refiner": "none",
+        "trigger_token": ""
     },
     "sd-dreambooth-library/herge-style": {
         "use_safetensors": false,
         "description": "see https://huggingface.co/sd-dreambooth-library/herge-style",
         "scheduler": "DDPMScheduler",
-        "refiner": "none"
+        "refiner": "none",
+        "trigger_token": "herge_style"
     }
 
 },
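
Every model entry now carries both a "refiner" and a "trigger_token" key, and models_change in app.py indexes the new key directly (models[model]['trigger_token']), so an appConfig.json that predates this commit would raise a KeyError on model selection. A defensive read would tolerate that (a suggestion, not part of this commit; the "models" root key is an assumption, since the file's top-level structure falls outside this hunk):

    import json

    with open("appConfig.json") as f:
        app_config = json.load(f)

    models = app_config["models"]  # assumed root key, not shown in this diff
    model = "sd-dreambooth-library/herge-style"  # example key from the config above

    # .get() with a default keeps entries that lack the new key working.
    trigger_token = models[model].get("trigger_token", "")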
config.py CHANGED
@@ -50,6 +50,7 @@ def get_initial_config():
         "inference_steps": 10,
         "guidance_scale": 0.5,
         "prompt": 'A white rabbit',
+        "trigger_token": '',
         "negative_prompt": 'lowres, cropped, worst quality, low quality, chat bubble, chat bubbles, ugly',
     }
 
@@ -87,6 +88,7 @@ def get_config_from_url(initial_config, request: Request):
         return_config['requires_safety_checker'],
         return_config['scheduler'],
         return_config['prompt'],
+        return_config['trigger_token'],
         return_config['negative_prompt'],
         return_config['inference_steps'],
         return_config['manual_seed'],
@@ -173,7 +175,7 @@ def assemble_code(str_config):
     code.append(f'manual_seed = {config["manual_seed"]}')
     code.append(f'generator = torch.manual_seed(manual_seed)')
 
-    code.append(f'prompt = "{config["prompt"]}"')
+    code.append(f'prompt = "{config["prompt"]} {config["trigger_token"]}"')
     code.append(f'negative_prompt = "{config["negative_prompt"]}"')
     code.append(f'inference_steps = {config["inference_steps"]}')
     code.append(f'guidance_scale = {config["guidance_scale"]}')
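
The assemble_code change is what actually feeds the token into generation: the emitted prompt line now concatenates the trigger token onto the user prompt. A quick check of what the new f-string produces, using this commit's default prompt and the herge-style token as example values:

    config = {"prompt": "A white rabbit", "trigger_token": "herge_style"}
    line = f'prompt = "{config["prompt"]} {config["trigger_token"]}"'
    print(line)  # -> prompt = "A white rabbit herge_style"

    # Edge case worth noting: with the default empty trigger_token the emitted
    # prompt ends in a trailing space ("A white rabbit "). A strip() would
    # avoid that (a possible follow-up, not part of this commit):
    line = f'prompt = "{(config["prompt"] + " " + config["trigger_token"]).strip()}"'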