vilarin committed
Commit: 37a0a26
Parent: 98488ef

Update app.py

Files changed (1)
  1. app.py (+2, -2)
app.py CHANGED
```diff
@@ -22,6 +22,8 @@ CSS = """
 """
 
 # Ensure model and scheduler are initialized in GPU-enabled function
+unet = UNet2DConditionModel.from_config(base, subfolder="unet").to("cuda", torch.float16)
+unet.load_state_dict(torch.load(hf_hub_download(repo, checkpoints)), map_location="cuda")
 if torch.cuda.is_available():
     pipe = DiffusionPipeline.from_pretrained(base, unet=unet, torch_dtype=torch.float16, variant="fp16").to("cuda")
 
@@ -36,8 +38,6 @@ def generate_image(prompt, ckpt):
     num_inference_steps = checkpoints[ckpt][1]
 
     if loaded != num_inference_steps:
-        unet = UNet2DConditionModel.from_config(base, subfolder="unet").to("cuda", torch.float16)
-        unet.load_state_dict(torch.load(hf_hub_download(repo, checkpoints)), map_location="cuda")
         pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing", prediction_type="sample" if num_inference_steps==1 else "epsilon")
         loaded = num_inference_steps
 
```
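For context, here is a minimal sketch of how the surrounding app.py fits together after this commit: the UNet is built and its weights loaded once at module import, and generate_image only swaps the scheduler when the requested step count changes. The definitions of base, repo, checkpoints, and loaded are not shown in this diff, so the placeholder values below, the final pipe(...) call, and the choice to pass map_location to torch.load are illustrative assumptions rather than the Space's actual code.

```python
# Illustrative sketch only: base, repo, checkpoints, and the final pipe() call
# are assumed placeholders; app.py defines its own values outside this diff.
import torch
from diffusers import DiffusionPipeline, LCMScheduler, UNet2DConditionModel
from huggingface_hub import hf_hub_download

base = "<base-sdxl-model-id>"          # assumed; e.g. an SDXL base checkpoint with an fp16 variant
repo = "<repo-with-distilled-unet>"    # assumed; Hub repo holding the distilled UNet weights
checkpoints = {                        # assumed structure: UI label -> (weights file, inference steps)
    "1-Step": ("unet_1step.pth", 1),
    "4-Step": ("unet_4step.pth", 4),
}
default_ckpt = checkpoints["4-Step"][0]
loaded = -1                            # step count the pipeline's scheduler is currently set up for

# Build the UNet once at import time and load the distilled weights into it
# (in this sketch map_location is given to torch.load so tensors land on the GPU).
unet = UNet2DConditionModel.from_config(base, subfolder="unet").to("cuda", torch.float16)
unet.load_state_dict(torch.load(hf_hub_download(repo, default_ckpt), map_location="cuda"))

# The pipeline reuses that UNet; per request only the scheduler is reconfigured.
if torch.cuda.is_available():
    pipe = DiffusionPipeline.from_pretrained(
        base, unet=unet, torch_dtype=torch.float16, variant="fp16"
    ).to("cuda")


def generate_image(prompt, ckpt):
    global loaded
    num_inference_steps = checkpoints[ckpt][1]

    if loaded != num_inference_steps:
        # A 1-step distilled model predicts x0 ("sample"); multi-step ones predict noise ("epsilon").
        pipe.scheduler = LCMScheduler.from_config(
            pipe.scheduler.config,
            timestep_spacing="trailing",
            prediction_type="sample" if num_inference_steps == 1 else "epsilon",
        )
        loaded = num_inference_steps

    # Assumed call; the diff does not show how app.py actually invokes the pipeline.
    return pipe(prompt, num_inference_steps=num_inference_steps, guidance_scale=0).images[0]
```

With the UNet construction and state-dict load moved to module scope, the checkpoint download and weight load happen once at startup instead of on every step-count change, and the per-request branch in generate_image is reduced to a cheap scheduler swap.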