Raman Dutt
committed on
Commit
·
87daa1c
1
Parent(s):
a9e7413
add app.py
Browse files
app.py
CHANGED
@@ -98,120 +98,6 @@ def loadSDModel(unet_pretraining_type, cuda_device):
|
|
98 |
return pipe
|
99 |
|
100 |
|
101 |
-
# def load_all_pipelines():
|
102 |
-
|
103 |
-
# """
|
104 |
-
# Loads all the Stable Diffusion Pipelines for each PEFT Type for efficient caching (Design Choice 2)
|
105 |
-
|
106 |
-
# Parameters:
|
107 |
-
# None
|
108 |
-
|
109 |
-
# Returns:
|
110 |
-
# sd_pipeline_full (StableDiffusionPipeline): The Stable Diffusion Pipeline for Full Fine-Tuning
|
111 |
-
# sd_pipeline_norm (StableDiffusionPipeline): The Stable Diffusion Pipeline for Norm Fine-Tuning
|
112 |
-
# sd_pipeline_bias (StableDiffusionPipeline): The Stable Diffusion Pipeline for Bias Fine-Tuning
|
113 |
-
# sd_pipeline_attention (StableDiffusionPipeline): The Stable Diffusion Pipeline for Attention Fine-Tuning
|
114 |
-
# sd_pipeline_NBA (StableDiffusionPipeline): The Stable Diffusion Pipeline for NBA Fine-Tuning
|
115 |
-
# sd_pipeline_difffit (StableDiffusionPipeline): The Stable Diffusion Pipeline for Difffit Fine-Tuning
|
116 |
-
# """
|
117 |
-
|
118 |
-
# # Dictionary containing the path to the best trained models for each PEFT type
|
119 |
-
# MODEL_PATH_DICT = {
|
120 |
-
# "full": "full_diffusion_pytorch_model.safetensors",
|
121 |
-
# "norm": "norm_diffusion_pytorch_model.safetensors",
|
122 |
-
# "bias": "bias_diffusion_pytorch_model.safetensors",
|
123 |
-
# "attention": "attention_diffusion_pytorch_model.safetensors",
|
124 |
-
# "norm_bias_attention": "norm_bias_attention_diffusion_pytorch_model.safetensors",
|
125 |
-
# "difffit": "difffit_diffusion_pytorch_model.safetensors",
|
126 |
-
# }
|
127 |
-
|
128 |
-
# device = "0"
|
129 |
-
# cuda_device = f"cuda:{device}" if torch.cuda.is_available() else "cpu"
|
130 |
-
|
131 |
-
# # Full FT
|
132 |
-
# unet_pretraining_type = "full"
|
133 |
-
# print("Loading Pipeline for Full Fine-Tuning")
|
134 |
-
# sd_pipeline_full = loadSDModel(
|
135 |
-
# unet_pretraining_type=unet_pretraining_type,
|
136 |
-
# exp_path=MODEL_PATH_DICT[unet_pretraining_type],
|
137 |
-
# cuda_device=cuda_device,
|
138 |
-
# )
|
139 |
-
|
140 |
-
# # Norm
|
141 |
-
# unet_pretraining_type = "norm"
|
142 |
-
# print("Loading Pipeline for Norm Fine-Tuning")
|
143 |
-
# sd_pipeline_norm = loadSDModel(
|
144 |
-
# unet_pretraining_type=unet_pretraining_type,
|
145 |
-
# exp_path=MODEL_PATH_DICT[unet_pretraining_type],
|
146 |
-
# cuda_device=cuda_device,
|
147 |
-
# )
|
148 |
-
|
149 |
-
# # bias
|
150 |
-
# unet_pretraining_type = "bias"
|
151 |
-
# print("Loading Pipeline for Bias Fine-Tuning")
|
152 |
-
# sd_pipeline_bias = loadSDModel(
|
153 |
-
# unet_pretraining_type=unet_pretraining_type,
|
154 |
-
# exp_path=MODEL_PATH_DICT[unet_pretraining_type],
|
155 |
-
# cuda_device=cuda_device,
|
156 |
-
# )
|
157 |
-
|
158 |
-
# # attention
|
159 |
-
# unet_pretraining_type = "attention"
|
160 |
-
# print("Loading Pipeline for Attention Fine-Tuning")
|
161 |
-
# sd_pipeline_attention = loadSDModel(
|
162 |
-
# unet_pretraining_type=unet_pretraining_type,
|
163 |
-
# exp_path=MODEL_PATH_DICT[unet_pretraining_type],
|
164 |
-
# cuda_device=cuda_device,
|
165 |
-
# )
|
166 |
-
|
167 |
-
# # NBA
|
168 |
-
# unet_pretraining_type = "norm_bias_attention"
|
169 |
-
# print("Loading Pipeline for NBA Fine-Tuning")
|
170 |
-
# sd_pipeline_NBA = loadSDModel(
|
171 |
-
# unet_pretraining_type=unet_pretraining_type,
|
172 |
-
# exp_path=MODEL_PATH_DICT[unet_pretraining_type],
|
173 |
-
# cuda_device=cuda_device,
|
174 |
-
# )
|
175 |
-
|
176 |
-
# # difffit
|
177 |
-
# unet_pretraining_type = "difffit"
|
178 |
-
# print("Loading Pipeline for Difffit Fine-Tuning")
|
179 |
-
# sd_pipeline_difffit = loadSDModel(
|
180 |
-
# unet_pretraining_type=unet_pretraining_type,
|
181 |
-
# exp_path=MODEL_PATH_DICT[unet_pretraining_type],
|
182 |
-
# cuda_device=cuda_device,
|
183 |
-
# )
|
184 |
-
|
185 |
-
# return (
|
186 |
-
# sd_pipeline_full,
|
187 |
-
# sd_pipeline_norm,
|
188 |
-
# sd_pipeline_bias,
|
189 |
-
# sd_pipeline_attention,
|
190 |
-
# sd_pipeline_NBA,
|
191 |
-
# sd_pipeline_difffit,
|
192 |
-
# )
|
193 |
-
|
194 |
-
|
195 |
-
# LOAD ALL PIPELINES FIRST AND CACHE THEM
|
196 |
-
# (
|
197 |
-
# sd_pipeline_full,
|
198 |
-
# sd_pipeline_norm,
|
199 |
-
# sd_pipeline_bias,
|
200 |
-
# sd_pipeline_attention,
|
201 |
-
# sd_pipeline_NBA,
|
202 |
-
# sd_pipeline_difffit,
|
203 |
-
# ) = load_all_pipelines()
|
204 |
-
|
205 |
-
# PIPELINE_DICT = {
|
206 |
-
# "full": sd_pipeline_full,
|
207 |
-
# "norm": sd_pipeline_norm,
|
208 |
-
# "bias": sd_pipeline_bias,
|
209 |
-
# "attention": sd_pipeline_attention,
|
210 |
-
# "norm_bias_attention": sd_pipeline_NBA,
|
211 |
-
# "difffit": sd_pipeline_difffit,
|
212 |
-
# }
|
213 |
-
|
214 |
-
|
215 |
def predict(
|
216 |
unet_pretraining_type,
|
217 |
input_text,
|
|
|
98 |
return pipe
|
99 |
|
100 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
101 |
def predict(
|
102 |
unet_pretraining_type,
|
103 |
input_text,
|