camenduru committed on
Commit
c6846c1
·
verified ·
1 Parent(s): 2845655

Update worker_runpod.py

Browse files
Files changed (1) hide show
  1. worker_runpod.py +5 -6
worker_runpod.py CHANGED
@@ -5,6 +5,11 @@ from hyvideo.utils.file_utils import save_videos_grid
5
  from hyvideo.config import parse_args
6
  from hyvideo.inference import HunyuanVideoSampler
7
 
 
 
 
 
 
8
  @torch.inference_mode()
9
  def generate(input):
10
  values = input["input"]
@@ -21,17 +26,11 @@ def generate(input):
21
  flow_shift = values['flow_shift']
22
  batch_size = values['batch_size']
23
  embedded_guidance_scale = values['embedded_guidance_scale']
24
- flow_reverse = values['flow_reverse']
25
 
26
  if seed == 0:
27
  random.seed(int(time.time()))
28
  seed = random.randint(0, 18446744073709551615)
29
 
30
- args = parse_args()
31
- args.flow_reverse = flow_reverse
32
- hunyuan_video_sampler = HunyuanVideoSampler.from_pretrained("/content/HunyuanVideo/ckpts", args=args)
33
- args = hunyuan_video_sampler.args
34
-
35
  outputs = hunyuan_video_sampler.predict(
36
  prompt=positive_prompt,
37
  height=height,
 
5
  from hyvideo.config import parse_args
6
  from hyvideo.inference import HunyuanVideoSampler
7
 
8
+ with torch.inference_mode():
9
+ args = parse_args()
10
+ args.flow_reverse = True
11
+ hunyuan_video_sampler = HunyuanVideoSampler.from_pretrained("/content/HunyuanVideo/ckpts", args=args)
12
+
13
  @torch.inference_mode()
14
  def generate(input):
15
  values = input["input"]
 
26
  flow_shift = values['flow_shift']
27
  batch_size = values['batch_size']
28
  embedded_guidance_scale = values['embedded_guidance_scale']
 
29
 
30
  if seed == 0:
31
  random.seed(int(time.time()))
32
  seed = random.randint(0, 18446744073709551615)
33
 
 
 
 
 
 
34
  outputs = hunyuan_video_sampler.predict(
35
  prompt=positive_prompt,
36
  height=height,