Upload DDPMPipeline
- __pycache__/config.cpython-310.pyc +0 -0
- config.py +3 -2
- logs/ddpm-paintings-128-finetuned-celebahq/events.out.tfevents.1701732706.coffee.29296.0 +3 -0
- logs/ddpm-paintings-128-finetuned-celebahq/events.out.tfevents.1701734614.coffee.30162.0 +3 -0
- logs/ddpm-paintings-128-finetuned-celebahq/events.out.tfevents.1701734735.coffee.30262.0 +3 -0
- logs/ddpm-paintings-128-finetuned-celebahq/events.out.tfevents.1701735986.coffee.30996.0 +3 -0
- logs/ddpm-paintings-128-finetuned-celebahq/events.out.tfevents.1701744188.coffee.33147.0 +3 -0
- logs/ddpm-paintings-128-finetuned-celebahq/events.out.tfevents.1701745393.coffee.33519.0 +3 -0
- logs/ddpm-paintings-128-finetuned-cifar10/events.out.tfevents.1701726567.coffee.25846.0 +3 -0
- logs/ddpm-paintings-128-finetuned-cifar10/events.out.tfevents.1701726710.coffee.25924.0 +3 -0
- logs/ddpm-paintings-128-finetuned-cifar10/events.out.tfevents.1701728614.coffee.26917.0 +3 -0
- logs/ddpm-paintings-128-finetuned-cifar10/events.out.tfevents.1701729930.coffee.27724.0 +3 -0
- logs/ddpm-paintings-128-finetuned-cifar10/events.out.tfevents.1701731364.coffee.28580.0 +3 -0
- main.py +37 -13
- prev_samples/0000.png +0 -0
- prev_samples/0001.png +0 -0
- prev_samples/0002.png +0 -0
- prev_samples/0003.png +0 -0
- prev_samples/0004.png +0 -0
- prev_samples/0005.png +0 -0
- samples/0000.png +0 -0
- samples/0001.png +0 -0
- samples/0002.png +0 -0
- samples/0003.png +0 -0
- samples/0004.png +0 -0
- samples/0005.png +0 -0
- scheduler/scheduler_config.json +18 -0
- unet/config.json +52 -0
- unet/diffusion_pytorch_model.safetensors +3 -0
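
The quickest way to inspect this commit locally is to pull the whole repository with huggingface_hub. A minimal sketch, assuming the current head of the repo is wanted (snapshot_download also accepts a revision argument to pin a specific commit hash, which this page does not show):

# Sketch: download the full repo this commit belongs to.
from huggingface_hub import snapshot_download

local_dir = snapshot_download(repo_id='jmemon/ddpm-paintings-128-finetuned-celebahq')
print(local_dir)  # cached path containing unet/, scheduler/, samples/, logs/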
__pycache__/config.cpython-310.pyc
CHANGED
Binary files a/__pycache__/config.cpython-310.pyc and b/__pycache__/config.cpython-310.pyc differ
config.py
CHANGED
@@ -1,4 +1,5 @@
 from dataclasses import dataclass
+from pathlib import Path
 
 
 @dataclass
@@ -13,10 +14,10 @@ class TrainingConfig:
     save_image_epochs = 1
     save_model_epochs = 3
     mixed_precision = 'fp16'  # `no` for float32, `fp16` for automatic mixed precision
-    output_dir = …
+    output_dir = str(Path(__file__).parent)
 
     push_to_hub = True  # whether to upload the saved model to the HF Hub
-    hub_model_id = 'jmemon/ddpm-paintings-128-finetuned-…
+    hub_model_id = 'jmemon/ddpm-paintings-128-finetuned-celebahq'  # the name of the repository to create on the HF Hub
     hub_private_repo = False
     overwrite_output_dir = True  # overwrite the old model when re-running the notebook
     seed = 0
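
The two new fields make the config self-locating: output_dir now resolves to whatever directory config.py lives in, so the upload step later in main.py pushes the repository root itself. A minimal sketch of what they evaluate to, assuming the other TrainingConfig fields are unchanged:

# Sketch: the two fields added by this commit.
from config import TrainingConfig

config = TrainingConfig()
print(config.output_dir)    # the directory containing config.py
print(config.hub_model_id)  # 'jmemon/ddpm-paintings-128-finetuned-celebahq'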
logs/ddpm-paintings-128-finetuned-celebahq/events.out.tfevents.1701732706.coffee.29296.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c5912e77da72dd8bd563084f3ada0edf41dea0392f8d384d98cb7fa4b456da8e
+size 142446
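
This and the following event files are stored through Git LFS, so the diff records only a three-line pointer: the LFS spec version, the content's sha256, and its size in bytes. A sketch of fetching the real file with hf_hub_download, which resolves LFS pointers transparently:

# Sketch: fetch one LFS-tracked event file from the Hub.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id='jmemon/ddpm-paintings-128-finetuned-celebahq',
    filename='logs/ddpm-paintings-128-finetuned-celebahq/events.out.tfevents.1701732706.coffee.29296.0')
print(path)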
logs/ddpm-paintings-128-finetuned-celebahq/events.out.tfevents.1701734614.coffee.30162.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:31762b0fa4b007fc54274f8dc73cded876a75a821c2619015d7e2780c041b1e5
+size 15210
logs/ddpm-paintings-128-finetuned-celebahq/events.out.tfevents.1701734735.coffee.30262.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0ba051400ec9f6e4e55f1256d80d63317c053075a60714dcc6007e41f7132ec1
+size 144351
logs/ddpm-paintings-128-finetuned-celebahq/events.out.tfevents.1701735986.coffee.30996.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9c79241e87f7b4f3fdc35a86b240746241544c6f8446535730f7a80f4b3832b2
+size 427942
logs/ddpm-paintings-128-finetuned-celebahq/events.out.tfevents.1701744188.coffee.33147.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:01e1d454cda6e51db8844dc7603c7397a4bc3065164b6d81e07f8c68ab0fcc4c
+size 12606
logs/ddpm-paintings-128-finetuned-celebahq/events.out.tfevents.1701745393.coffee.33519.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bf28970ad4895e532f0be9f53e31281835b41351597320aa8fc356d206bd5f7c
+size 427942
logs/ddpm-paintings-128-finetuned-cifar10/events.out.tfevents.1701726567.coffee.25846.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:875339e3b6c592fa363b5cc43e8f5e1e9df798a96652355a93f3cce222f17092
+size 88
logs/ddpm-paintings-128-finetuned-cifar10/events.out.tfevents.1701726710.coffee.25924.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:71f8980721bc9fe39834b0dbb3474b49fd620884d6064f16f8d09e9798e16cb9
+size 142446
logs/ddpm-paintings-128-finetuned-cifar10/events.out.tfevents.1701728614.coffee.26917.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e7d2550b132b946b8fece5b66d407aea95d9cbf26485e520d51d67fd418ce2c8
+size 142446
logs/ddpm-paintings-128-finetuned-cifar10/events.out.tfevents.1701729930.coffee.27724.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:866209d3cb123b5b93f96d7cea4260a9e713224abb3e07a10d924afeeea221c7
+size 142446
logs/ddpm-paintings-128-finetuned-cifar10/events.out.tfevents.1701731364.coffee.28580.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:10c8caf26e607a361ed3a2053285d67f61f77fbe308e73b888d003eb375df733
+size 142446
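
These are the TensorBoard runs created by accelerator.init_trackers in main.py (both the celebahq and cifar10 run names appear, presumably because the tracker name changed between runs). A sketch of reading one back once downloaded; the scalar tag name is an assumption, since the diff does not show the logging call:

# Sketch: inspect a downloaded run directory. List Tags() first; the
# tag 'loss' below is a guess at what the training loop logged.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator('logs/ddpm-paintings-128-finetuned-celebahq')
acc.Reload()
print(acc.Tags())
for event in acc.Scalars('loss'):
    print(event.step, event.value)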
main.py
CHANGED
@@ -17,7 +17,7 @@ from config import TrainingConfig
 
 
 """
-Or diffusion for simple images
+Or diffusion for simple images and explore subtly different
 x_T's and what the output is.
 
 Denoise each x_T multiple times to get a better picture of the distribution.
@@ -81,27 +81,37 @@ if __name__ == '__main__':
     valid_dataloader = torch.utils.data.DataLoader(ds_dict['validation'], batch_size=config.eval_batch_size, shuffle=False)
     test_dataloader = torch.utils.data.DataLoader(ds_dict['test'], batch_size=config.eval_batch_size, shuffle=False)
 
+    """
     unet = UNet2DModel.from_pretrained(
-        'google/ddpm-celebahq-256'
-        safetensors=True
+        'google/ddpm-celebahq-256'
     ).to('mps')
 
     scheduler = DDPMScheduler.from_pretrained(
         'google/ddpm-celebahq-256'
     )
+    """
 
-    """
+    """
+    unet=UNet2DModel.from_pretrained(
         'jmemon/ddpm-paintings-128-finetuned-celebahq'
     ).to('mps')
 
     scheduler = DDPMScheduler.from_pretrained(
         'jmemon/ddpm-paintings-128-finetuned-celebahq'
-    )
+    )
+    """
+
+    unet = UNet2DModel.from_pretrained(
+        str(Path(__file__).parent / 'unet')
+    ).to('mps')
+
+    scheduler = DDPMScheduler.from_pretrained(
+        str(Path(__file__).parent / 'scheduler')
+    )
 
     lora_config = LoraConfig(
         r=8,
         lora_alpha=8,
-        #modules_to_save=['model'],
         target_modules=['to_k','to_v'],
         lora_dropout=0.1,
         bias='none')
@@ -127,11 +137,11 @@ if __name__ == '__main__':
     if config.push_to_hub:
        repo_id = create_repo(repo_id=config.hub_model_id, exist_ok=True).repo_id
 
-    accelerator.init_trackers('ddpm-paintings-128-finetuned-…
+    accelerator.init_trackers('ddpm-paintings-128-finetuned-celebahq')
 
    global_step = 0
 
-    for epoch in range(config.num_epochs):
+    for epoch in range(3, config.num_epochs + 3):
        pbar = tqdm(total=len(train_dataloader), disable=not accelerator.is_local_main_process)
        pbar.set_description(f'Epoch {epoch}')
 
@@ -145,7 +155,7 @@
 
            noisy_images = scheduler.add_noise(clean_images, noise, ts)
 
-            with accelerator.accumulate(…
+            with accelerator.accumulate(lora_unet):
                noise_pred = lora_unet(noisy_images, ts, return_dict=False)[0]
                loss = F.mse_loss(noise_pred, noise)
                accelerator.backward(loss)
@@ -164,6 +174,7 @@ if __name__ == '__main__':
        pbar.close()
 
        if accelerator.is_main_process:
+            #pipeline = DDPMPipeline(unet=accelerator.unwrap_model(lora_unet).merge_and_unload(), scheduler=scheduler)
            pipeline = DDPMPipeline(unet=accelerator.unwrap_model(lora_unet), scheduler=scheduler)
 
            if (epoch + 1) % config.save_image_epochs == 0 or epoch == config.num_epochs - 1:
@@ -171,16 +182,29 @@ if __name__ == '__main__':
                evaluate(config, epoch, pipeline)
 
            if (epoch + 1) % config.save_model_epochs == 0 or epoch == config.num_epochs - 1:
+                _pipeline = DDPMPipeline(
+                    unet=accelerator.unwrap_model(lora_unet).merge_and_unload(),
+                    scheduler=scheduler)
                if config.push_to_hub:
-                    …
+                    _pipeline.save_pretrained(
+                        config.output_dir,
+                        push_to_hub=True,
+                        repo_id=repo_id,
+                        token='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'
+                    )
 
                    upload_folder(
                        repo_id=repo_id,
-                        folder_path=…
+                        folder_path=config.output_dir,
                        commit_message=f'Epoch {epoch}',
-                        ignore_patterns=['logs', '…
+                        ignore_patterns=['logs/*', '*/.DS_Store'],
                        token='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'
                    )
 
+                    model_loc = 'jmemon/ddpm-paintings-128-finetuned-celebahq'
                else:
-                    …
+                    _pipeline.save_pretrained(config.output_dir)
+                    model_loc = str(Path(__file__).parent / 'diffusion_model_pytorch.bin')
+
+                unet = UNet2DModel.from_pretrained(model_loc)
+                lora_unet = get_peft_model(unet, lora_config)
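
The key change in main.py: the pipeline that gets saved and pushed is built from merge_and_unload(), so the uploaded unet/ holds a plain UNet2DModel with the LoRA weights folded in, and no PEFT dependency is needed at inference time. A sampling sketch against the layout this commit uploads (unet/ and scheduler/ subfolders):

# Sketch: sample from the uploaded weights. Runs on CPU by default;
# move the pipeline to 'cuda' or 'mps' for speed.
import torch
from diffusers import DDPMPipeline, DDPMScheduler, UNet2DModel

repo = 'jmemon/ddpm-paintings-128-finetuned-celebahq'
unet = UNet2DModel.from_pretrained(repo, subfolder='unet')
scheduler = DDPMScheduler.from_pretrained(repo, subfolder='scheduler')
pipeline = DDPMPipeline(unet=unet, scheduler=scheduler)

images = pipeline(
    batch_size=4,
    generator=torch.Generator().manual_seed(0),  # seed = 0, as in config.py
).images
for i, image in enumerate(images):
    image.save(f'{i:04d}.png')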
prev_samples/0000.png
ADDED
prev_samples/0001.png
ADDED
prev_samples/0002.png
ADDED
prev_samples/0003.png
ADDED
prev_samples/0004.png
ADDED
prev_samples/0005.png
ADDED
samples/0000.png
CHANGED
samples/0001.png
CHANGED
samples/0002.png
CHANGED
samples/0003.png
CHANGED
samples/0004.png
CHANGED
samples/0005.png
CHANGED
scheduler/scheduler_config.json
ADDED
@@ -0,0 +1,18 @@
+{
+  "_class_name": "DDPMScheduler",
+  "_diffusers_version": "0.24.0",
+  "beta_end": 0.02,
+  "beta_schedule": "linear",
+  "beta_start": 0.0001,
+  "clip_sample": true,
+  "clip_sample_range": 1.0,
+  "dynamic_thresholding_ratio": 0.995,
+  "num_train_timesteps": 1000,
+  "prediction_type": "epsilon",
+  "sample_max_value": 1.0,
+  "steps_offset": 0,
+  "thresholding": false,
+  "timestep_spacing": "leading",
+  "trained_betas": null,
+  "variance_type": "fixed_small"
+}
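
This is the stock DDPM noise schedule: 1000 training timesteps with a linear beta ramp from 1e-4 to 0.02 and epsilon (noise) prediction, matching what main.py trains against. A sketch of restoring it on its own from the subfolder:

# Sketch: load just the scheduler and confirm it matches the JSON above.
from diffusers import DDPMScheduler

scheduler = DDPMScheduler.from_pretrained(
    'jmemon/ddpm-paintings-128-finetuned-celebahq', subfolder='scheduler')
print(scheduler.config.num_train_timesteps)  # 1000
print(scheduler.config.beta_schedule)        # 'linear'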
unet/config.json
ADDED
@@ -0,0 +1,52 @@
+{
+  "_class_name": "UNet2DModel",
+  "_diffusers_version": "0.24.0",
+  "_name_or_path": "/Users/jpmemon/Desktop/School/Fall-2023/Capstone/DiffusionEx/unet",
+  "act_fn": "silu",
+  "add_attention": true,
+  "attention_head_dim": null,
+  "attn_norm_num_groups": null,
+  "block_out_channels": [
+    128,
+    128,
+    256,
+    256,
+    512,
+    512
+  ],
+  "center_input_sample": false,
+  "class_embed_type": null,
+  "down_block_types": [
+    "DownBlock2D",
+    "DownBlock2D",
+    "DownBlock2D",
+    "DownBlock2D",
+    "AttnDownBlock2D",
+    "DownBlock2D"
+  ],
+  "downsample_padding": 0,
+  "downsample_type": "conv",
+  "dropout": 0.0,
+  "flip_sin_to_cos": false,
+  "freq_shift": 1,
+  "in_channels": 3,
+  "layers_per_block": 2,
+  "mid_block_scale_factor": 1,
+  "norm_eps": 1e-06,
+  "norm_num_groups": 32,
+  "num_class_embeds": null,
+  "num_train_timesteps": null,
+  "out_channels": 3,
+  "resnet_time_scale_shift": "default",
+  "sample_size": 256,
+  "time_embedding_type": "positional",
+  "up_block_types": [
+    "UpBlock2D",
+    "AttnUpBlock2D",
+    "UpBlock2D",
+    "UpBlock2D",
+    "UpBlock2D",
+    "UpBlock2D"
+  ],
+  "upsample_type": "conv"
+}
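
The architecture apparently carries over from the google/ddpm-celebahq-256 base that main.py originally loaded; note sample_size is 256 here even though the repo name says 128. A sketch of loading it standalone:

# Sketch: load just the UNet. The parameter count follows from the LFS
# pointer below: 454741108 bytes / 4 bytes per fp32 weight ≈ 113.7M.
from diffusers import UNet2DModel

unet = UNet2DModel.from_pretrained(
    'jmemon/ddpm-paintings-128-finetuned-celebahq', subfolder='unet')
print(unet.config.sample_size)                    # 256
print(sum(p.numel() for p in unet.parameters()))  # ≈ 113.7M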
unet/diffusion_pytorch_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b749f8f5afcbd0a78881e2e6fecb6b75b2834f61a29fdea56180a1f71d52fcb3
+size 454741108
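
To continue fine-tuning from these merged weights, main.py re-wraps the reloaded UNet in a fresh adapter with get_peft_model. A self-contained sketch of that step, mirroring the LoraConfig from the diff:

# Sketch: re-attach a fresh LoRA adapter to the uploaded UNet, using the
# same LoraConfig as main.py; only the adapter weights will be trainable.
from diffusers import UNet2DModel
from peft import LoraConfig, get_peft_model

unet = UNet2DModel.from_pretrained(
    'jmemon/ddpm-paintings-128-finetuned-celebahq', subfolder='unet')

lora_config = LoraConfig(
    r=8,                              # low-rank dimension
    lora_alpha=8,                     # scaling factor
    target_modules=['to_k', 'to_v'],  # attention key/value projections
    lora_dropout=0.1,
    bias='none')

lora_unet = get_peft_model(unet, lora_config)
lora_unet.print_trainable_parameters()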