osmunphotography committed on
Commit
aa5f933
·
verified ·
1 Parent(s): 511130e

Upload 6 files

Browse files
Fooocus-release.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c9beafbde4fb74451b28e9af211f978362810782b6d7513df4844203b2ab5aa3
3
+ size 4588066
default_pipeline.py ADDED
@@ -0,0 +1,268 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import modules.core as core
2
+ import os
3
+ import torch
4
+ import modules.path
5
+ import modules.virtual_memory as virtual_memory
6
+ import comfy.model_management
7
+
8
+ from comfy.model_base import SDXL, SDXLRefiner
9
+ from modules.patch import cfg_patched, patched_model_function
10
+ from modules.expansion import FooocusExpansion
11
+
12
+
13
+ xl_base: core.StableDiffusionModel = None
14
+ xl_base_hash = ''
15
+
16
+ xl_refiner: core.StableDiffusionModel = None
17
+ xl_refiner_hash = ''
18
+
19
+ xl_base_patched: core.StableDiffusionModel = None
20
+ xl_base_patched_hash = ''
21
+
22
+
23
+ @torch.no_grad()
24
+ @torch.inference_mode()
25
+ def refresh_base_model(name):
26
+ global xl_base, xl_base_hash, xl_base_patched, xl_base_patched_hash
27
+
28
+ filename = os.path.abspath(os.path.realpath(os.path.join(modules.path.modelfile_path, name)))
29
+ model_hash = filename
30
+
31
+ if xl_base_hash == model_hash:
32
+ return
33
+
34
+ if xl_base is not None:
35
+ xl_base.to_meta()
36
+ xl_base = None
37
+
38
+ xl_base = core.load_model(filename)
39
+ if not isinstance(xl_base.unet.model, SDXL):
40
+ print('Model not supported. Fooocus only support SDXL model as the base model.')
41
+ xl_base = None
42
+ xl_base_hash = ''
43
+ refresh_base_model(modules.path.default_base_model_name)
44
+ xl_base_hash = model_hash
45
+ xl_base_patched = xl_base
46
+ xl_base_patched_hash = ''
47
+ return
48
+
49
+ xl_base_hash = model_hash
50
+ xl_base_patched = xl_base
51
+ xl_base_patched_hash = ''
52
+ print(f'Base model loaded: {model_hash}')
53
+ return
54
+
55
+
56
@torch.no_grad()
@torch.inference_mode()
def refresh_refiner_model(name):
    """Load the SDXL refiner checkpoint *name*, or unload when name == 'None'.

    The resolved absolute path serves as the identity "hash", so repeated
    calls with the same file are no-ops. Checkpoints that are not SDXLRefiner
    models are rejected. The refiner's own VAE is discarded because decoding
    elsewhere in this module uses the base model's VAE.
    """
    global xl_refiner, xl_refiner_hash

    filename = os.path.abspath(os.path.realpath(os.path.join(modules.path.modelfile_path, name)))
    # Identity key is just the resolved path, not a content digest.
    model_hash = filename

    if xl_refiner_hash == model_hash:
        return

    if name == 'None':
        xl_refiner = None
        xl_refiner_hash = ''
        print('Refiner unloaded.')
        return

    if xl_refiner is not None:
        # Park the old weights on the meta device before dropping the reference.
        xl_refiner.to_meta()
        xl_refiner = None

    xl_refiner = core.load_model(filename)
    if not isinstance(xl_refiner.unet.model, SDXLRefiner):
        print('Model not supported. Fooocus only support SDXL refiner as the refiner.')
        xl_refiner = None
        xl_refiner_hash = ''
        print('Refiner unloaded.')
        return

    xl_refiner_hash = model_hash
    print(f'Refiner model loaded: {model_hash}')

    # Free the refiner's VAE; it is never used for decoding.
    xl_refiner.vae.first_stage_model.to('meta')
    xl_refiner.vae = None
    return
91
+
92
+
93
@torch.no_grad()
@torch.inference_mode()
def refresh_loras(loras):
    """Apply the given (name, weight) LoRA list on top of the current base model.

    Re-patching is skipped when the same LoRA list (keyed by str(loras)) has
    already been applied. Entries named 'None' are ignored.
    """
    global xl_base, xl_base_patched, xl_base_patched_hash

    cache_key = str(loras)
    if xl_base_patched_hash == cache_key:
        return

    patched = xl_base
    for lora_name, lora_weight in loras:
        if lora_name == 'None':
            continue

        # Accept either an existing path or a name inside the LoRA folder.
        if os.path.exists(lora_name):
            lora_path = lora_name
        else:
            lora_path = os.path.join(modules.path.lorafile_path, lora_name)

        assert os.path.exists(lora_path), 'Lora file not found!'

        patched = core.load_sd_lora(patched, lora_path, strength_model=lora_weight, strength_clip=lora_weight)

    xl_base_patched = patched
    xl_base_patched_hash = cache_key
    print(f'LoRAs loaded: {xl_base_patched_hash}')

    return
118
+
119
+
120
@torch.no_grad()
@torch.inference_mode()
def clip_encode_single(clip, text, verbose=False):
    """Encode one prompt with *clip*, memoizing results in clip.fcs_cond_cache."""
    hit = clip.fcs_cond_cache.get(text, None)
    if hit is not None:
        if verbose:
            print(f'[CLIP Cached] {text}')
        return hit

    encoded = clip.encode_from_tokens(clip.tokenize(text), return_pooled=True)
    clip.fcs_cond_cache[text] = encoded
    if verbose:
        print(f'[CLIP Encoded] {text}')
    return encoded
134
+
135
+
136
@torch.no_grad()
@torch.inference_mode()
def clip_encode(sd, texts, pool_top_k=1):
    """Encode a list of prompts with sd.clip.

    Conditionings are concatenated along dim 1; the pooled outputs of the
    first *pool_top_k* prompts are summed. Returns None for any invalid
    input (missing model/clip, non-list or empty *texts*).
    """
    if sd is None or sd.clip is None:
        return None
    if not isinstance(texts, list) or len(texts) == 0:
        return None

    clip = sd.clip
    conds = []
    pooled_sum = 0

    for index, prompt in enumerate(texts):
        cond, pooled = clip_encode_single(clip, prompt)
        conds.append(cond)
        if index < pool_top_k:
            pooled_sum += pooled

    return [[torch.cat(conds, dim=1), {"pooled_output": pooled_sum}]]
159
+
160
+
161
@torch.no_grad()
@torch.inference_mode()
def clear_sd_cond_cache(sd):
    """Drop the CLIP conditioning cache of *sd*, if it has a clip."""
    if sd is None or sd.clip is None:
        return None
    sd.clip.fcs_cond_cache = {}
    return
170
+
171
+
172
@torch.no_grad()
@torch.inference_mode()
def clear_all_caches():
    """Clear CLIP conditioning caches on the patched base and the refiner."""
    for model in (xl_base_patched, xl_refiner):
        clear_sd_cond_cache(model)
177
+
178
+
179
+ @torch.no_grad()
180
+ @torch.inference_mode()
181
+ def refresh_everything(refiner_model_name, base_model_name, loras):
182
+ refresh_refiner_model(refiner_model_name)
183
+ if xl_refiner is not None:
184
+ virtual_memory.try_move_to_virtual_memory(xl_refiner.unet.model)
185
+ virtual_memory.try_move_to_virtual_memory(xl_refiner.clip.cond_stage_model)
186
+
187
+ refresh_base_model(base_model_name)
188
+ virtual_memory.load_from_virtual_memory(xl_base.unet.model)
189
+
190
+ refresh_loras(loras)
191
+ clear_all_caches()
192
+ return
193
+
194
+
195
+ refresh_everything(
196
+ refiner_model_name=modules.path.default_refiner_model_name,
197
+ base_model_name=modules.path.default_base_model_name,
198
+ loras=[(modules.path.default_lora_name, 0.5), ('None', 0.5), ('None', 0.5), ('None', 0.5), ('None', 0.5)]
199
+ )
200
+
201
+ expansion = FooocusExpansion()
202
+
203
+
204
@torch.no_grad()
@torch.inference_mode()
def patch_all_models():
    """Install the patched CFG and model-function hooks on every loaded model."""
    assert xl_base is not None
    assert xl_base_patched is not None

    models = [xl_base, xl_base_patched]
    if xl_refiner is not None:
        models.append(xl_refiner)

    for model in models:
        options = model.unet.model_options
        options['sampler_cfg_function'] = cfg_patched
        options['model_function_wrapper'] = patched_model_function

    return
221
+
222
+
223
+ @torch.no_grad()
224
+ @torch.inference_mode()
225
+ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height, image_seed, callback, latent=None, denoise=1.0, tiled=False):
226
+ patch_all_models()
227
+
228
+ if xl_refiner is not None:
229
+ virtual_memory.try_move_to_virtual_memory(xl_refiner.unet.model)
230
+ virtual_memory.load_from_virtual_memory(xl_base.unet.model)
231
+
232
+ if latent is None:
233
+ empty_latent = core.generate_empty_latent(width=width, height=height, batch_size=1)
234
+ else:
235
+ empty_latent = latent
236
+
237
+ if xl_refiner is not None:
238
+ sampled_latent = core.ksampler_with_refiner(
239
+ model=xl_base_patched.unet,
240
+ positive=positive_cond[0],
241
+ negative=negative_cond[0],
242
+ refiner=xl_refiner.unet,
243
+ refiner_positive=positive_cond[1],
244
+ refiner_negative=negative_cond[1],
245
+ refiner_switch_step=switch,
246
+ latent=empty_latent,
247
+ steps=steps, start_step=0, last_step=steps, disable_noise=False, force_full_denoise=True,
248
+ seed=image_seed,
249
+ denoise=denoise,
250
+ callback_function=callback
251
+ )
252
+ else:
253
+ sampled_latent = core.ksampler(
254
+ model=xl_base_patched.unet,
255
+ positive=positive_cond[0],
256
+ negative=negative_cond[0],
257
+ latent=empty_latent,
258
+ steps=steps, start_step=0, last_step=steps, disable_noise=False, force_full_denoise=True,
259
+ seed=image_seed,
260
+ denoise=denoise,
261
+ callback_function=callback
262
+ )
263
+
264
+ decoded_latent = core.decode_vae(vae=xl_base_patched.vae, latent_image=sampled_latent, tiled=tiled)
265
+ images = core.pytorch_to_numpy(decoded_latent)
266
+
267
+ comfy.model_management.soft_empty_cache()
268
+ return images
fooocus_version 2.py ADDED
@@ -0,0 +1 @@
 
 
1
+ version = '2.0.78'
sd_xl_base_1.0_0.9vae.safetensors.download ADDED
File without changes
sd_xl_refiner_1.0_0.9vae.safetensors.download ADDED
File without changes