yijin928 committed
Commit 8972c51 · verified · 1 Parent(s): 5cbf4e7

Update app.py

Files changed (1)
  1. app.py +193 -74
app.py CHANGED
@@ -7,17 +7,29 @@ import gradio as gr
 from huggingface_hub import hf_hub_download
 import spaces
 from comfy import model_management
-from nodes import NODE_CLASS_MAPPINGS
-
-sys.path.append(os.path.abspath(os.path.dirname(__file__)))
-
-print("Current working directory:", os.getcwd())
-print("Absolute path of app.py:", os.path.abspath(os.path.dirname(__file__)))
+
+from huggingface_hub import hf_hub_download
+
 
 hf_hub_download(
-    repo_id="Comfy-Org/stable-diffusion-v1-5-archive",
-    filename="v1-5-pruned-emaonly-fp16.safetensors",
-    local_dir="models/checkpoints"
+    repo_id="Madespace/clip",
+    filename="google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors",
+    local_dir="models/clip"
+)
+hf_hub_download(
+    repo_id="ezioruan/inswapper_128.onnx",
+    filename="inswapper_128.onnx",
+    local_dir="models/insightface"
+)
+hf_hub_download(
+    repo_id="gmk123/GFPGAN",
+    filename="GFPGANv1.4.pth",
+    local_dir="models/facerestore_models"
+)
+hf_hub_download(
+    repo_id="gemasai/4x_NMKD-Superscale-SP_178000_G",
+    filename="4x_NMKD-Superscale-SP_178000_G.pth",
+    local_dir="models/upscale_models"
 )
 
@@ -89,18 +101,18 @@ def add_extra_model_paths() -> None:
         from main import load_extra_path_config
     except ImportError:
         print(
-            "Could not import load_extra_path_config from main.py. Looking in util.extra_config instead."
+            "Could not import load_extra_path_config from main.py. Looking in utils.extra_config instead."
         )
         from utils.extra_config import load_extra_path_config
-    # sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "utils")))
-    # from extra_config import load_extra_path_config
 
     extra_model_paths = find_path("extra_model_paths.yaml")
+
     if extra_model_paths is not None:
         load_extra_path_config(extra_model_paths)
     else:
         print("Could not find the extra_model_paths config file.")
 
+
 add_comfyui_directory_to_sys_path()
 add_extra_model_paths()
 
@@ -127,86 +139,193 @@ def import_custom_nodes() -> None:
     # Initializing custom nodes
     init_extra_nodes()
 
-import_custom_nodes()
+from nodes import NODE_CLASS_MAPPINGS
+
+# To be added to "model_loaders" as it loads a model
+downloadandloadcogvideomodel = NODE_CLASS_MAPPINGS[
+    "DownloadAndLoadCogVideoModel"
+]()
+downloadandloadcogvideomodel_1 = downloadandloadcogvideomodel.loadmodel(
+    model="THUDM/CogVideoX-5b",
+    precision="bf16",
+    quantization="disabled",
+    enable_sequential_cpu_offload=True,
+    attention_mode="sdpa",
+    load_device="main_device",
+)
+loadimage = NODE_CLASS_MAPPINGS["LoadImage"]()
+cliploader = NODE_CLASS_MAPPINGS["CLIPLoader"]()
+cliploader_20 = cliploader.load_clip(
+    clip_name="t5/google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors",
+    type="sd3",
+    device="default",
+)
+emptylatentimage = NODE_CLASS_MAPPINGS["EmptyLatentImage"]()
+
+cogvideotextencode = NODE_CLASS_MAPPINGS["CogVideoTextEncode"]()
+cogvideosampler = NODE_CLASS_MAPPINGS["CogVideoSampler"]()
+cogvideodecode = NODE_CLASS_MAPPINGS["CogVideoDecode"]()
+reactorfaceswap = NODE_CLASS_MAPPINGS["ReActorFaceSwap"]()
+cr_upscale_image = NODE_CLASS_MAPPINGS["CR Upscale Image"]()
+vhs_videocombine = NODE_CLASS_MAPPINGS["VHS_VideoCombine"]()
+
+# Add all the models that load a safetensors file
+model_loaders = [downloadandloadcogvideomodel_1, cliploader_20]
+
+# Check which models are valid and how to best load them
+valid_models = [
+    getattr(loader[0], 'patcher', loader[0])
+    for loader in model_loaders
+    if not isinstance(loader[0], dict) and not isinstance(getattr(loader[0], 'patcher', None), dict)
+]
+
+# Finally load the models
+model_management.load_models_gpu(valid_models)
 
+# Run the ComfyUI workflow
 @spaces.GPU(duration=60)
-def generate_image(positive_prompt, negative_prompt):
+def generate_video(positive_prompt, num_frames, input_image):
+
+    print("Positive Prompt:", positive_prompt)
+    print("Number of Frames:", num_frames)
+    print("Input Image:", input_image)
+
     import_custom_nodes()
     with torch.inference_mode():
-        checkpointloadersimple = NODE_CLASS_MAPPINGS["CheckpointLoaderSimple"]()
-        checkpointloadersimple_4 = checkpointloadersimple.load_checkpoint(
-            ckpt_name="v1-5-pruned-emaonly-fp16.safetensors"
+        # downloadandloadcogvideomodel = NODE_CLASS_MAPPINGS[
+        #     "DownloadAndLoadCogVideoModel"
+        # ]()
+        # downloadandloadcogvideomodel_1 = downloadandloadcogvideomodel.loadmodel(
+        #     model="THUDM/CogVideoX-5b",
+        #     precision="bf16",
+        #     quantization="disabled",
+        #     enable_sequential_cpu_offload=True,
+        #     attention_mode="sdpa",
+        #     load_device="main_device",
+        # )
+
+        # loadimage = NODE_CLASS_MAPPINGS["LoadImage"]()
+        loadimage_8 = loadimage.load_image(image=input_image)
+
+        # cliploader = NODE_CLASS_MAPPINGS["CLIPLoader"]()
+        # cliploader_20 = cliploader.load_clip(
+        #     clip_name="t5/google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors",
+        #     type="sd3",
+        #     device="default",
+        # )
+
+        # emptylatentimage = NODE_CLASS_MAPPINGS["EmptyLatentImage"]()
+        emptylatentimage_161 = emptylatentimage.generate(
+            width=720, height=480, batch_size=1
         )
 
-        emptylatentimage = NODE_CLASS_MAPPINGS["EmptyLatentImage"]()
-        emptylatentimage_5 = emptylatentimage.generate(
-            width=512, height=512, batch_size=1
-        )
+        # cogvideotextencode = NODE_CLASS_MAPPINGS["CogVideoTextEncode"]()
+        # cogvideosampler = NODE_CLASS_MAPPINGS["CogVideoSampler"]()
+        # cogvideodecode = NODE_CLASS_MAPPINGS["CogVideoDecode"]()
+        # reactorfaceswap = NODE_CLASS_MAPPINGS["ReActorFaceSwap"]()
+        # cr_upscale_image = NODE_CLASS_MAPPINGS["CR Upscale Image"]()
+        # vhs_videocombine = NODE_CLASS_MAPPINGS["VHS_VideoCombine"]()
 
-        cliptextencode = NODE_CLASS_MAPPINGS["CLIPTextEncode"]()
-        cliptextencode_6 = cliptextencode.encode(
-            text=positive_prompt,
-            clip=get_value_at_index(checkpointloadersimple_4, 1),
-        )
-
-        cliptextencode_7 = cliptextencode.encode(
-            text=negative_prompt, clip=get_value_at_index(checkpointloadersimple_4, 1)
-        )
+        for q in range(1):
+            cogvideotextencode_30 = cogvideotextencode.process(
+                prompt=positive_prompt,
+                strength=1,
+                force_offload=True,
+                clip=get_value_at_index(cliploader_20, 0),
+            )
 
-        ksampler = NODE_CLASS_MAPPINGS["KSampler"]()
-        vaedecode = NODE_CLASS_MAPPINGS["VAEDecode"]()
-        saveimage = NODE_CLASS_MAPPINGS["SaveImage"]()
+            cogvideotextencode_31 = cogvideotextencode.process(
+                prompt='',
+                strength=1,
+                force_offload=True,
+                clip=get_value_at_index(cogvideotextencode_30, 1),
+            )
 
-        for q in range(1):
-            ksampler_3 = ksampler.sample(
+            cogvideosampler_155 = cogvideosampler.process(
+                num_frames=num_frames,
+                steps=50,
+                cfg=6,
                 seed=random.randint(1, 2**64),
-                steps=20,
-                cfg=8,
-                sampler_name="euler",
-                scheduler="normal",
-                denoise=1,
-                model=get_value_at_index(checkpointloadersimple_4, 0),
-                positive=get_value_at_index(cliptextencode_6, 0),
-                negative=get_value_at_index(cliptextencode_7, 0),
-                latent_image=get_value_at_index(emptylatentimage_5, 0),
+                scheduler="CogVideoXDDIM",
+                denoise_strength=1,
+                model=get_value_at_index(downloadandloadcogvideomodel_1, 0),
+                positive=get_value_at_index(cogvideotextencode_30, 0),
+                negative=get_value_at_index(cogvideotextencode_31, 0),
+                samples=get_value_at_index(emptylatentimage_161, 0),
             )
 
-            vaedecode_8 = vaedecode.decode(
-                samples=get_value_at_index(ksampler_3, 0),
-                vae=get_value_at_index(checkpointloadersimple_4, 2),
+            cogvideodecode_11 = cogvideodecode.decode(
+                enable_vae_tiling=False,
+                tile_sample_min_height=240,
+                tile_sample_min_width=360,
+                tile_overlap_factor_height=0.2,
+                tile_overlap_factor_width=0.2,
+                auto_tile_size=True,
+                vae=get_value_at_index(downloadandloadcogvideomodel_1, 1),
+                samples=get_value_at_index(cogvideosampler_155, 0),
             )
 
-            saveimage_9 = saveimage.save_images(
-                filename_prefix="ComfyUI",
-                images=get_value_at_index(vaedecode_8, 0),
+            reactorfaceswap_3 = reactorfaceswap.execute(
+                enabled=True,
+                swap_model="inswapper_128.onnx",
+                facedetection="retinaface_resnet50",
+                face_restore_model="GFPGANv1.4.pth",
+                face_restore_visibility=1,
+                codeformer_weight=0.75,
+                detect_gender_input="no",
+                detect_gender_source="no",
+                input_faces_index="0",
+                source_faces_index="0",
+                console_log_level=1,
+                input_image=get_value_at_index(cogvideodecode_11, 0),
+                source_image=get_value_at_index(loadimage_8, 0),
             )
-        saved_path=f"output/{saveimage_9['ui']['images'][0]['filename']}"
+
+            cr_upscale_image_151 = cr_upscale_image.upscale(
+                upscale_model="4x_NMKD-Superscale-SP_178000_G.pth",
+                mode="rescale",
+                rescale_factor=4,
+                resize_width=720,
+                resampling_method="lanczos",
+                supersample="true",
+                rounding_modulus=16,
+                image=get_value_at_index(reactorfaceswap_3, 0),
+            )
+
+            vhs_videocombine_154 = vhs_videocombine.combine_video(
+                frame_rate=8,
+                loop_count=0,
+                filename_prefix="AnimateDiff",
+                format="video/h264-mp4",
+                pix_fmt="yuv420p",
+                crf=19,
+                save_metadata=True,
+                trim_to_audio=False,
+                pingpong=True,
+                save_output=True,
+                images=get_value_at_index(cr_upscale_image_151, 0),
+                unique_id=7214086815220268849,
+            )
+        saved_path = f"output/{vhs_videocombine_154['ui']['images'][0]['filename']}"
         return saved_path
-
-# Start your Gradio app
-with gr.Blocks() as app:
-    # Add a title
-    gr.Markdown("# FLUX Style Shaping")
-
-    with gr.Row():
-        with gr.Column():
-            # Add an input
-            positive_prompt_input = gr.Textbox(label="Positive Prompt", placeholder="Enter your positive prompt here...")
-            negative_prompt_input = gr.Textbox(label="Negative Prompt", placeholder="Enter your negative prompt here...")
-            # The generate button
-            generate_btn = gr.Button("Generate")
+
+
+if __name__ == "__main__":
+
+    with gr.Blocks() as app:
+        with gr.Row():
+            positive_prompt = gr.Textbox(label="Positive Prompt", value="", lines=2)
+        with gr.Row():
+            num_frames = gr.Number(label="Number of Frames", value=60)
+        with gr.Row():
+            input_image = gr.Image(label="Input Image", type="pil")
+        submit = gr.Button("Submit")
+        output_video = gr.Video(label="Output Video")
 
-        with gr.Column():
-            # The output image
-            output_image = gr.Image(label="Generated Image")
-
-    # When clicking the button, it will trigger the `generate_image` function, with the respective inputs
-    # and the output an image
-    generate_btn.click(
-        fn=generate_image,
-        inputs=[positive_prompt_input, negative_prompt_input],
-        outputs=[output_image]
-    )
+        submit.click(
+            fn=generate_video,
+            inputs=[positive_prompt, num_frames, input_image],
+            outputs=[output_video]
+        )
 
-if __name__ == "__main__":
-    app.launch(share=True)
+    app.launch(share=True)
 
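Note on the download hunk: with local_dir set, hf_hub_download places each file at local_dir/filename, so the four calls in the diff should yield the paths checked below. The hunk also adds a second `from huggingface_hub import hf_hub_download` even though the same import is already in context at line 7; harmless, but redundant. A quick sanity check for the downloads, assuming the Space runs from the repo root (the paths simply mirror the arguments in the diff):

import os

expected = [
    "models/clip/google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors",
    "models/insightface/inswapper_128.onnx",
    "models/facerestore_models/GFPGANv1.4.pth",
    "models/upscale_models/4x_NMKD-Superscale-SP_178000_G.pth",
]
for path in expected:
    # hf_hub_download also returns the resolved path; this just re-checks on disk
    print(("ok      " if os.path.isfile(path) else "MISSING ") + path)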
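add_extra_model_paths() depends on find_path, which is defined earlier in app.py and does not appear in this diff. In the stock ComfyUI-to-Python-Extension template that this file appears to follow, the helper walks up parent directories from the current working directory until it finds the named file. A sketch under that assumption:

import os

def find_path(name: str, path: str = None) -> str:
    # Start from the current working directory unless a path is given
    if path is None:
        path = os.getcwd()
    # Return the match if the file or folder lives in this directory
    if name in os.listdir(path):
        path_name = os.path.join(path, name)
        print(f"{name} found: {path_name}")
        return path_name
    # Stop once the filesystem root is reached
    parent_directory = os.path.dirname(path)
    if parent_directory == path:
        return None
    # Otherwise keep walking up
    return find_path(name, parent_directory)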
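Every node invocation in generate_video reads its inputs through get_value_at_index(output, i). That helper is also part of the template (defined earlier in app.py, outside this diff): ComfyUI nodes return tuples of outputs, but some wrap them in a dict under "result", and the accessor handles both. A sketch, assuming the stock template definition:

from typing import Any, Mapping, Sequence, Union

def get_value_at_index(obj: Union[Sequence, Mapping], index: int) -> Any:
    # Most node outputs are plain tuples indexed by output slot...
    try:
        return obj[index]
    # ...but some nodes wrap them as {"result": (...)}
    except KeyError:
        return obj["result"][index]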
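The module-level block builds every node once at import time and hands the real model objects to model_management.load_models_gpu. The comprehension reads as follows: each loader result is a tuple, so loader[0] is its first output; objects carrying a .patcher attribute are unwrapped to that patcher, and plain dicts (configuration outputs rather than models) are filtered out. Doing the setup at import fits the ZeroGPU model of Spaces, where the process boots without a GPU and one is attached only while a @spaces.GPU-decorated function runs. A minimal sketch of that split, with a toy model standing in for the CogVideo pipeline (illustrative names only, not from app.py):

import spaces
import torch

# One-time setup at import: executes on CPU, before any GPU is attached
model = torch.nn.Linear(8, 8)

@spaces.GPU(duration=60)  # a GPU is leased only while this function runs
def run(batch):
    with torch.inference_mode():
        x = torch.as_tensor(batch, dtype=torch.float32)
        return model(x).tolist()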
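A nit that predates this commit but is kept by it: seed=random.randint(1, 2**64) survives as a context line. random.randint is inclusive at both ends, so it can return 2**64, one past the largest unsigned 64-bit value (2**64 - 1); a sampler that validates its seed range would reject that draw. The safer equivalent:

import random

seed = random.randint(0, 2**64 - 1)  # full unsigned 64-bit range, both ends inclusive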
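saved_path assumes the VHS_VideoCombine result exposes its preview under ["ui"]["images"][0]["filename"] and that the file lands in output/. Both are assumptions about the node's return shape rather than a documented contract; some Video Helper Suite versions are reported to publish previews under a "gifs" key instead. A defensive variant (hypothetical key names, to be verified against the installed node):

def extract_saved_path(result: dict) -> str:
    # VHS_VideoCombine returns {"ui": {...}}; the preview key varies by version
    ui = result.get("ui", {})
    previews = ui.get("images") or ui.get("gifs") or []
    if not previews:
        raise RuntimeError(f"no previews in node result: {list(ui)}")
    return f"output/{previews[0]['filename']}"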
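Last note, on the Gradio wiring: gr.Number hands the callback a float and gr.Image(type="pil") hands it a PIL image, so generate_video receives (str, float, PIL.Image). If CogVideoSampler expects an integer frame count, constraining the input is a one-argument change; precision=0 makes gr.Number deliver an int (a sketch, not part of the commit):

import gradio as gr

# precision=0 casts the value to int before it reaches the callback
num_frames = gr.Number(label="Number of Frames", value=60, precision=0)

Two smaller things worth verifying rather than asserting: ComfyUI's stock LoadImage expects a filename from its input folder, so passing a PIL object relies on this Space's node accepting it, and share=True is unnecessary on Spaces, where the app is already served publicly.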