n42 committed on
Commit
a5097b6
·
1 Parent(s): 80c0bfe

refactoring

Browse files
Files changed (2) hide show
  1. app.py +22 -43
  2. config.py +64 -33
app.py CHANGED
@@ -24,32 +24,31 @@ import pandas as pd
24
  import base64
25
  from config import *
26
 
27
- # code output order
28
- code = {}
29
- code_pos_device = '001_code'
30
- code_pos_data_type = '002_data_type'
31
- code_pos_tf32 = '003_tf32'
32
- code_pos_variant = '004_variant'
33
- code_pos_init_pipeline = '050_init_pipe'
34
- code_pos_requires_safety_checker = '054_requires_safety_checker'
35
- code_pos_safety_checker = '055_safety_checker'
36
- code_pos_scheduler = '060_scheduler'
37
- code_pos_generator = '070_generator'
38
- code_pos_prompt = '080_prompt'
39
- code_pos_negative_prompt = '085_negative_prompt'
40
- code_pos_inference_steps = '090_inference_steps'
41
- code_pos_guidance_scale = '095_guidance_scale'
42
- code_pos_run_inference = '100_run_inference'
43
-
44
- initial_config, devices, models, schedulers = get_inital_config()
 
 
 
 
45
 
46
  config_history = []
47
 
48
- def get_sorted_code():
49
-
50
- return '\r\n'.join(value[1] for value in sorted(code.items()))
51
-
52
- # change methods
53
  def device_change(device):
54
 
55
  code[code_pos_device] = f'''device = "{device}"'''
@@ -225,26 +224,6 @@ def run_inference(model,
225
 
226
  return "Please select a model AND a scheduler.", None
227
 
228
- code[code_pos_device] = f'device = "{device}"'
229
- code[code_pos_variant] = f'variant = {variant}'
230
- code[code_pos_tf32] = f'torch.backends.cuda.matmul.allow_tf32 = {allow_tensorfloat32}'
231
- code[code_pos_data_type] = 'data_type = torch.bfloat16'
232
- code[code_pos_init_pipeline] = 'sys.exit("No model selected!")'
233
- code[code_pos_safety_checker] = 'pipeline.safety_checker = None'
234
- code[code_pos_requires_safety_checker] = f'pipeline.requires_safety_checker = {requires_safety_checker}'
235
- code[code_pos_scheduler] = 'sys.exit("No scheduler selected!")'
236
- code[code_pos_generator] = f'generator = torch.Generator("{device}")'
237
- code[code_pos_prompt] = f'prompt = "{prompt}"'
238
- code[code_pos_negative_prompt] = f'negative_prompt = "{negative_prompt}"'
239
- code[code_pos_inference_steps] = f'inference_steps = {inference_steps}'
240
- code[code_pos_guidance_scale] = f'guidance_scale = {guidance_scale}'
241
- code[code_pos_run_inference] = f'''image = pipeline(
242
- prompt=prompt,
243
- negative_prompt=negative_prompt,
244
- generator=generator.manual_seed(manual_seed),
245
- num_inference_steps=inference_steps,
246
- guidance_scale=guidance_scale).images[0]'''
247
-
248
  def dict_list_to_markdown_table(config_history):
249
 
250
  if not config_history:
 
24
  import base64
25
  from config import *
26
 
27
+ # get
28
+ # - initial configuration,
29
+ # - a list of available devices from the config file
30
+ # - a list of available models from the config file
31
+ # - a list of available schedulers from the config file
32
+ # - a dict that contains code for reproduction
33
+ initial_config, devices, models, schedulers, code = get_inital_config()
34
+
35
+ device = initial_config["device"]
36
+ model = initial_config["model"]
37
+ scheduler = initial_config["scheduler"]
38
+ variant = initial_config["variant"]
39
+ allow_tensorfloat32 = initial_config["allow_tensorfloat32"]
40
+ use_safetensors = initial_config["use_safetensors"]
41
+ data_type = initial_config["data_type"]
42
+ safety_checker = initial_config["safety_checker"]
43
+ requires_safety_checker = initial_config["requires_safety_checker"]
44
+ manual_seed = initial_config["manual_seed"]
45
+ inference_steps = initial_config["inference_steps"]
46
+ guidance_scale = initial_config["guidance_scale"]
47
+ prompt = initial_config["prompt"]
48
+ negative_prompt = initial_config["negative_prompt"]
49
 
50
  config_history = []
51
 
 
 
 
 
 
52
  def device_change(device):
53
 
54
  code[code_pos_device] = f'''device = "{device}"'''
 
224
 
225
  return "Please select a model AND a scheduler.", None
226
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
227
  def dict_list_to_markdown_table(config_history):
228
 
229
  if not config_history:
config.py CHANGED
@@ -3,6 +3,21 @@ import base64
3
  import json
4
  import torch
5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
  def load_app_config():
7
  try:
8
  with open('appConfig.json', 'r') as f:
@@ -16,34 +31,26 @@ def load_app_config():
16
 
17
  return appConfig
18
 
19
- appConfig = load_app_config()
20
-
21
  def get_inital_config():
22
-
23
- # model config
 
 
24
  model_configs = appConfig.get("models", {})
 
25
  models = list(model_configs.keys())
26
  model = None
 
 
27
  scheduler_configs = appConfig.get("schedulers", {})
28
  schedulers = list(scheduler_configs.keys())
29
  scheduler = None
30
-
 
31
  devices = appConfig.get("devices", [])
32
  device = None
33
-
34
- variant = None
35
- allow_tensorfloat32 = False
36
- use_safetensors = False
37
  data_type = 'float16'
38
- safety_checker = False
39
- requires_safety_checker = False
40
- manual_seed = 42
41
- inference_steps = 10
42
- guidance_scale = 0.5
43
- prompt = 'A white rabbit'
44
- negative_prompt = 'lowres, cropped, worst quality, low quality, chat bubble, chat bubbles, ugly'
45
-
46
- # # init device parameters
47
  if torch.cuda.is_available():
48
  device = "cuda"
49
  data_type = 'bfloat16'
@@ -53,26 +60,47 @@ def get_inital_config():
53
  else:
54
  device = "cpu"
55
 
56
- # inference config
57
  initial_config = {
58
  "device": device,
59
- "model": model,
60
- "scheduler": scheduler,
61
- "variant": variant,
62
- "allow_tensorflow": allow_tensorfloat32,
63
- "use_safetensors": use_safetensors,
64
  "data_type": data_type,
65
- "safety_checker": safety_checker,
66
- "requires_safety_checker": requires_safety_checker,
67
- "manual_seed": manual_seed,
68
- "inference_steps": inference_steps,
69
- "guidance_scale": guidance_scale,
70
- "prompt": prompt,
71
- "negative_prompt": negative_prompt,
72
  }
73
 
 
 
74
 
75
- return initial_config, devices, models, schedulers
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
76
 
77
  def init_config(request: gr.Request, inital_config):
78
 
@@ -110,4 +138,7 @@ def init_config(request: gr.Request, inital_config):
110
  return_config['manual_seed'],
111
  return_config['guidance_scale']
112
  ]
113
-
 
 
 
 
3
  import json
4
  import torch
5
 
6
+ code_pos_device = '001_code'
7
+ code_pos_data_type = '002_data_type'
8
+ code_pos_tf32 = '003_tf32'
9
+ code_pos_variant = '004_variant'
10
+ code_pos_init_pipeline = '050_init_pipe'
11
+ code_pos_requires_safety_checker = '054_requires_safety_checker'
12
+ code_pos_safety_checker = '055_safety_checker'
13
+ code_pos_scheduler = '060_scheduler'
14
+ code_pos_generator = '070_generator'
15
+ code_pos_prompt = '080_prompt'
16
+ code_pos_negative_prompt = '085_negative_prompt'
17
+ code_pos_inference_steps = '090_inference_steps'
18
+ code_pos_guidance_scale = '095_guidance_scale'
19
+ code_pos_run_inference = '100_run_inference'
20
+
21
  def load_app_config():
22
  try:
23
  with open('appConfig.json', 'r') as f:
 
31
 
32
  return appConfig
33
 
 
 
34
  def get_inital_config():
35
+
36
+ appConfig = load_app_config()
37
+
38
+ # default model is None
39
  model_configs = appConfig.get("models", {})
40
+ # list of available model names
41
  models = list(model_configs.keys())
42
  model = None
43
+
44
+ # default scheduler is None
45
  scheduler_configs = appConfig.get("schedulers", {})
46
  schedulers = list(scheduler_configs.keys())
47
  scheduler = None
48
+
49
+ # default device
50
  devices = appConfig.get("devices", [])
51
  device = None
 
 
 
 
52
  data_type = 'float16'
53
+ allow_tensorfloat32 = False
 
 
 
 
 
 
 
 
54
  if torch.cuda.is_available():
55
  device = "cuda"
56
  data_type = 'bfloat16'
 
60
  else:
61
  device = "cpu"
62
 
 
63
  initial_config = {
64
  "device": device,
65
+ "model": None,
66
+ "scheduler": None,
67
+ "variant": None,
68
+ "allow_tensorflow32": allow_tensorfloat32,
69
+ "use_safetensors": False,
70
  "data_type": data_type,
71
+ "safety_checker": False,
72
+ "requires_safety_checker": False,
73
+ "manual_seed": 42,
74
+ "inference_steps": 10,
75
+ "guidance_scale": 0.5,
76
+ "prompt": 'A white rabbit',
77
+ "negative_prompt": 'lowres, cropped, worst quality, low quality, chat bubble, chat bubbles, ugly',
78
  }
79
 
80
+ # code output order
81
+ code = {}
82
 
83
+ code[code_pos_device] = f'device = "{device}"'
84
+ code[code_pos_variant] = f'variant = {initial_config["variant"]}'
85
+ code[code_pos_tf32] = f'torch.backends.cuda.matmul.allow_tf32 = {initial_config["allow_tensorfloat32"]}'
86
+ code[code_pos_data_type] = 'data_type = torch.bfloat16'
87
+ code[code_pos_init_pipeline] = 'sys.exit("No model selected!")'
88
+ code[code_pos_safety_checker] = 'pipeline.safety_checker = None'
89
+ code[code_pos_requires_safety_checker] = f'pipeline.requires_safety_checker = {initial_config["requires_safety_checker"]}'
90
+ code[code_pos_scheduler] = 'sys.exit("No scheduler selected!")'
91
+ code[code_pos_generator] = f'generator = torch.Generator("{device}")'
92
+ code[code_pos_prompt] = f'prompt = "{initial_config["prompt"]}"'
93
+ code[code_pos_negative_prompt] = f'negative_prompt = "{initial_config["negative_prompt"]}"'
94
+ code[code_pos_inference_steps] = f'inference_steps = {initial_config["inference_steps"]}'
95
+ code[code_pos_guidance_scale] = f'guidance_scale = {initial_config["guidance_scale"]}'
96
+ code[code_pos_run_inference] = f'''image = pipeline(
97
+ prompt=prompt,
98
+ negative_prompt=negative_prompt,
99
+ generator=generator.manual_seed(manual_seed),
100
+ num_inference_steps=inference_steps,
101
+ guidance_scale=guidance_scale).images[0]'''
102
+
103
+ return initial_config, devices, models, schedulers, code
104
 
105
  def init_config(request: gr.Request, inital_config):
106
 
 
138
  return_config['manual_seed'],
139
  return_config['guidance_scale']
140
  ]
141
+
142
+ def get_sorted_code():
143
+
144
+ return '\r\n'.join(value[1] for value in sorted(code.items()))