Raman Dutt committed on
Commit
a2d5835
1 Parent(s): d86745c

minor changes

Browse files
Files changed (1) hide show
  1. app.py +99 -100
app.py CHANGED
@@ -30,14 +30,13 @@ EXAMPLE_TEXT_PROMPTS = [
30
  ]
31
 
32
 
33
- def load_adapted_unet(unet_pretraining_type, exp_path, pipe):
34
 
35
  """
36
  Loads the adapted U-Net for the selected PEFT Type
37
 
38
  Parameters:
39
  unet_pretraining_type (str): The type of PEFT to use for generating the X-ray
40
- exp_path (str): The path to the best trained model for the selected PEFT Type
41
  pipe (StableDiffusionPipeline): The Stable Diffusion Pipeline to use for generating the X-ray
42
 
43
  Returns:
@@ -45,6 +44,7 @@ def load_adapted_unet(unet_pretraining_type, exp_path, pipe):
45
  """
46
 
47
  sd_folder_path = "runwayml/stable-diffusion-v1-5"
 
48
 
49
  if unet_pretraining_type == "freeze":
50
  pass
@@ -67,19 +67,19 @@ def load_adapted_unet(unet_pretraining_type, exp_path, pipe):
67
  exp_path = os.path.join(exp_path, "pytorch_lora_weights.safetensors")
68
  pipe.unet.load_attn_procs(exp_path)
69
  else:
70
- exp_path = unet_pretraining_type + "_" + "diffusion_pytorch_model.safetensors"
71
- state_dict = load_file(exp_path)
 
72
  print(pipe.unet.load_state_dict(state_dict, strict=False))
73
 
74
 
75
- def loadSDModel(unet_pretraining_type, exp_path, cuda_device):
76
 
77
  """
78
  Loads the Stable Diffusion Model for the selected PEFT Type
79
 
80
  Parameters:
81
  unet_pretraining_type (str): The type of PEFT to use for generating the X-ray
82
- exp_path (str): The path to the best trained model for the selected PEFT Type
83
  cuda_device (str): The CUDA device to use for generating the X-ray
84
 
85
  Returns:
@@ -90,104 +90,104 @@ def loadSDModel(unet_pretraining_type, exp_path, cuda_device):
90
 
91
  pipe = StableDiffusionPipeline.from_pretrained(sd_folder_path, revision="fp16")
92
 
93
- load_adapted_unet(unet_pretraining_type, exp_path, pipe)
94
  pipe.safety_checker = None
95
 
96
  return pipe
97
 
98
 
99
- def load_all_pipelines():
100
-
101
- """
102
- Loads all the Stable Diffusion Pipelines for each PEFT Type for efficient caching (Design Choice 2)
103
-
104
- Parameters:
105
- None
106
-
107
- Returns:
108
- sd_pipeline_full (StableDiffusionPipeline): The Stable Diffusion Pipeline for Full Fine-Tuning
109
- sd_pipeline_norm (StableDiffusionPipeline): The Stable Diffusion Pipeline for Norm Fine-Tuning
110
- sd_pipeline_bias (StableDiffusionPipeline): The Stable Diffusion Pipeline for Bias Fine-Tuning
111
- sd_pipeline_attention (StableDiffusionPipeline): The Stable Diffusion Pipeline for Attention Fine-Tuning
112
- sd_pipeline_NBA (StableDiffusionPipeline): The Stable Diffusion Pipeline for NBA Fine-Tuning
113
- sd_pipeline_difffit (StableDiffusionPipeline): The Stable Diffusion Pipeline for Difffit Fine-Tuning
114
- """
115
-
116
- # Dictionary containing the path to the best trained models for each PEFT type
117
- MODEL_PATH_DICT = {
118
- "full": "full_diffusion_pytorch_model.safetensors",
119
- "norm": "norm_diffusion_pytorch_model.safetensors",
120
- "bias": "bias_diffusion_pytorch_model.safetensors",
121
- "attention": "attention_diffusion_pytorch_model.safetensors",
122
- "norm_bias_attention": "norm_bias_attention_diffusion_pytorch_model.safetensors",
123
- "difffit": "difffit_diffusion_pytorch_model.safetensors",
124
- }
125
-
126
- device = "0"
127
- cuda_device = f"cuda:{device}" if torch.cuda.is_available() else "cpu"
128
-
129
- # Full FT
130
- unet_pretraining_type = "full"
131
- print("Loading Pipeline for Full Fine-Tuning")
132
- sd_pipeline_full = loadSDModel(
133
- unet_pretraining_type=unet_pretraining_type,
134
- exp_path=MODEL_PATH_DICT[unet_pretraining_type],
135
- cuda_device=cuda_device,
136
- )
137
-
138
- # Norm
139
- unet_pretraining_type = "norm"
140
- print("Loading Pipeline for Norm Fine-Tuning")
141
- sd_pipeline_norm = loadSDModel(
142
- unet_pretraining_type=unet_pretraining_type,
143
- exp_path=MODEL_PATH_DICT[unet_pretraining_type],
144
- cuda_device=cuda_device,
145
- )
146
-
147
- # bias
148
- unet_pretraining_type = "bias"
149
- print("Loading Pipeline for Bias Fine-Tuning")
150
- sd_pipeline_bias = loadSDModel(
151
- unet_pretraining_type=unet_pretraining_type,
152
- exp_path=MODEL_PATH_DICT[unet_pretraining_type],
153
- cuda_device=cuda_device,
154
- )
155
-
156
- # attention
157
- unet_pretraining_type = "attention"
158
- print("Loading Pipeline for Attention Fine-Tuning")
159
- sd_pipeline_attention = loadSDModel(
160
- unet_pretraining_type=unet_pretraining_type,
161
- exp_path=MODEL_PATH_DICT[unet_pretraining_type],
162
- cuda_device=cuda_device,
163
- )
164
-
165
- # NBA
166
- unet_pretraining_type = "norm_bias_attention"
167
- print("Loading Pipeline for NBA Fine-Tuning")
168
- sd_pipeline_NBA = loadSDModel(
169
- unet_pretraining_type=unet_pretraining_type,
170
- exp_path=MODEL_PATH_DICT[unet_pretraining_type],
171
- cuda_device=cuda_device,
172
- )
173
-
174
- # difffit
175
- unet_pretraining_type = "difffit"
176
- print("Loading Pipeline for Difffit Fine-Tuning")
177
- sd_pipeline_difffit = loadSDModel(
178
- unet_pretraining_type=unet_pretraining_type,
179
- exp_path=MODEL_PATH_DICT[unet_pretraining_type],
180
- cuda_device=cuda_device,
181
- )
182
-
183
- return (
184
- sd_pipeline_full,
185
- sd_pipeline_norm,
186
- sd_pipeline_bias,
187
- sd_pipeline_attention,
188
- sd_pipeline_NBA,
189
- sd_pipeline_difffit,
190
- )
191
 
192
 
193
  # LOAD ALL PIPELINES FIRST AND CACHE THEM
@@ -235,7 +235,6 @@ def predict(
235
  print("Loading Pipeline for {} Fine-Tuning".format(unet_pretraining_type))
236
  sd_pipeline_norm = loadSDModel(
237
  unet_pretraining_type=unet_pretraining_type,
238
- exp_path=MODEL_PATH_DICT[unet_pretraining_type],
239
  cuda_device=cuda_device,
240
  )
241
 
 
30
  ]
31
 
32
 
33
+ def load_adapted_unet(unet_pretraining_type, pipe):
34
 
35
  """
36
  Loads the adapted U-Net for the selected PEFT Type
37
 
38
  Parameters:
39
  unet_pretraining_type (str): The type of PEFT to use for generating the X-ray
 
40
  pipe (StableDiffusionPipeline): The Stable Diffusion Pipeline to use for generating the X-ray
41
 
42
  Returns:
 
44
  """
45
 
46
  sd_folder_path = "runwayml/stable-diffusion-v1-5"
47
+ exp_path = ''
48
 
49
  if unet_pretraining_type == "freeze":
50
  pass
 
67
  exp_path = os.path.join(exp_path, "pytorch_lora_weights.safetensors")
68
  pipe.unet.load_attn_procs(exp_path)
69
  else:
70
+ # exp_path = unet_pretraining_type + "_" + "diffusion_pytorch_model.safetensors"
71
+ # state_dict = load_file(exp_path)
72
+ state_dict = load_file(unet_pretraining_type + "_" + "diffusion_pytorch_model.safetensors")
73
  print(pipe.unet.load_state_dict(state_dict, strict=False))
74
 
75
 
76
+ def loadSDModel(unet_pretraining_type, cuda_device):
77
 
78
  """
79
  Loads the Stable Diffusion Model for the selected PEFT Type
80
 
81
  Parameters:
82
  unet_pretraining_type (str): The type of PEFT to use for generating the X-ray
 
83
  cuda_device (str): The CUDA device to use for generating the X-ray
84
 
85
  Returns:
 
90
 
91
  pipe = StableDiffusionPipeline.from_pretrained(sd_folder_path, revision="fp16")
92
 
93
+ load_adapted_unet(unet_pretraining_type, pipe)
94
  pipe.safety_checker = None
95
 
96
  return pipe
97
 
98
 
99
+ # def load_all_pipelines():
100
+
101
+ # """
102
+ # Loads all the Stable Diffusion Pipelines for each PEFT Type for efficient caching (Design Choice 2)
103
+
104
+ # Parameters:
105
+ # None
106
+
107
+ # Returns:
108
+ # sd_pipeline_full (StableDiffusionPipeline): The Stable Diffusion Pipeline for Full Fine-Tuning
109
+ # sd_pipeline_norm (StableDiffusionPipeline): The Stable Diffusion Pipeline for Norm Fine-Tuning
110
+ # sd_pipeline_bias (StableDiffusionPipeline): The Stable Diffusion Pipeline for Bias Fine-Tuning
111
+ # sd_pipeline_attention (StableDiffusionPipeline): The Stable Diffusion Pipeline for Attention Fine-Tuning
112
+ # sd_pipeline_NBA (StableDiffusionPipeline): The Stable Diffusion Pipeline for NBA Fine-Tuning
113
+ # sd_pipeline_difffit (StableDiffusionPipeline): The Stable Diffusion Pipeline for Difffit Fine-Tuning
114
+ # """
115
+
116
+ # # Dictionary containing the path to the best trained models for each PEFT type
117
+ # MODEL_PATH_DICT = {
118
+ # "full": "full_diffusion_pytorch_model.safetensors",
119
+ # "norm": "norm_diffusion_pytorch_model.safetensors",
120
+ # "bias": "bias_diffusion_pytorch_model.safetensors",
121
+ # "attention": "attention_diffusion_pytorch_model.safetensors",
122
+ # "norm_bias_attention": "norm_bias_attention_diffusion_pytorch_model.safetensors",
123
+ # "difffit": "difffit_diffusion_pytorch_model.safetensors",
124
+ # }
125
+
126
+ # device = "0"
127
+ # cuda_device = f"cuda:{device}" if torch.cuda.is_available() else "cpu"
128
+
129
+ # # Full FT
130
+ # unet_pretraining_type = "full"
131
+ # print("Loading Pipeline for Full Fine-Tuning")
132
+ # sd_pipeline_full = loadSDModel(
133
+ # unet_pretraining_type=unet_pretraining_type,
134
+ # exp_path=MODEL_PATH_DICT[unet_pretraining_type],
135
+ # cuda_device=cuda_device,
136
+ # )
137
+
138
+ # # Norm
139
+ # unet_pretraining_type = "norm"
140
+ # print("Loading Pipeline for Norm Fine-Tuning")
141
+ # sd_pipeline_norm = loadSDModel(
142
+ # unet_pretraining_type=unet_pretraining_type,
143
+ # exp_path=MODEL_PATH_DICT[unet_pretraining_type],
144
+ # cuda_device=cuda_device,
145
+ # )
146
+
147
+ # # bias
148
+ # unet_pretraining_type = "bias"
149
+ # print("Loading Pipeline for Bias Fine-Tuning")
150
+ # sd_pipeline_bias = loadSDModel(
151
+ # unet_pretraining_type=unet_pretraining_type,
152
+ # exp_path=MODEL_PATH_DICT[unet_pretraining_type],
153
+ # cuda_device=cuda_device,
154
+ # )
155
+
156
+ # # attention
157
+ # unet_pretraining_type = "attention"
158
+ # print("Loading Pipeline for Attention Fine-Tuning")
159
+ # sd_pipeline_attention = loadSDModel(
160
+ # unet_pretraining_type=unet_pretraining_type,
161
+ # exp_path=MODEL_PATH_DICT[unet_pretraining_type],
162
+ # cuda_device=cuda_device,
163
+ # )
164
+
165
+ # # NBA
166
+ # unet_pretraining_type = "norm_bias_attention"
167
+ # print("Loading Pipeline for NBA Fine-Tuning")
168
+ # sd_pipeline_NBA = loadSDModel(
169
+ # unet_pretraining_type=unet_pretraining_type,
170
+ # exp_path=MODEL_PATH_DICT[unet_pretraining_type],
171
+ # cuda_device=cuda_device,
172
+ # )
173
+
174
+ # # difffit
175
+ # unet_pretraining_type = "difffit"
176
+ # print("Loading Pipeline for Difffit Fine-Tuning")
177
+ # sd_pipeline_difffit = loadSDModel(
178
+ # unet_pretraining_type=unet_pretraining_type,
179
+ # exp_path=MODEL_PATH_DICT[unet_pretraining_type],
180
+ # cuda_device=cuda_device,
181
+ # )
182
+
183
+ # return (
184
+ # sd_pipeline_full,
185
+ # sd_pipeline_norm,
186
+ # sd_pipeline_bias,
187
+ # sd_pipeline_attention,
188
+ # sd_pipeline_NBA,
189
+ # sd_pipeline_difffit,
190
+ # )
191
 
192
 
193
  # LOAD ALL PIPELINES FIRST AND CACHE THEM
 
235
  print("Loading Pipeline for {} Fine-Tuning".format(unet_pretraining_type))
236
  sd_pipeline_norm = loadSDModel(
237
  unet_pretraining_type=unet_pretraining_type,
 
238
  cuda_device=cuda_device,
239
  )
240