Commit e1ad51f by multimodalart (parent: 7da21f5)

Update app.py

Files changed (1): app.py (+5 -1)
app.py CHANGED

@@ -24,7 +24,7 @@ pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell",
                                      torch_dtype=torch.bfloat16)
 
 pipe.transformer.to(memory_format=torch.channels_last)
-#pipe.transformer = torch.compile(pipe.transformer, mode="max-autotune", fullgraph=True)
+pipe.transformer = torch.compile(pipe.transformer, mode="max-autotune", fullgraph=True)
 #pipe.enable_model_cpu_offload()
 clip_slider = CLIPSliderFlux(pipe, device=torch.device("cuda"))
 
@@ -74,6 +74,8 @@ def generate(concept_1, concept_2, scale, prompt, seed, recalc_directions, itera
     for i in range(interm_steps):
         cur_scale = low_scale + (high_scale - low_scale) * i / (interm_steps - 1)
         image = clip_slider.generate(prompt,
+                                     width=768,
+                                     height=768,
                                      #guidance_scale=guidance_scale,
                                      scale=cur_scale, seed=seed, num_inference_steps=steps, avg_diff=avg_diff)
         images.append(image)
@@ -115,6 +117,8 @@ def update_scales(x,prompt,seed, steps, interm_steps, guidance_scale,
     for i in range(interm_steps):
         cur_scale = low_scale + (high_scale - low_scale) * i / (steps - 1)
         image = clip_slider.generate(prompt,
+                                     width=768,
+                                     height=768,
                                      #guidance_scale=guidance_scale,
                                      scale=cur_scale, seed=seed, num_inference_steps=steps, avg_diff=avg_diff)
         images.append(image)
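For context, below is a minimal sketch of the setup this commit arrives at: torch.compile is enabled on the FLUX transformer and generation is pinned to 768x768. The prompt and the warm-up call are illustrative assumptions, not part of the commit.

import torch
from diffusers import FluxPipeline

# Load FLUX.1-schnell in bfloat16, as in the surrounding app.py code.
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-schnell",
    torch_dtype=torch.bfloat16,
).to("cuda")

# channels_last memory format can enable faster CUDA kernels for
# image-shaped tensors.
pipe.transformer.to(memory_format=torch.channels_last)

# The line this commit uncomments: compile the transformer with the most
# aggressive autotuning mode. The first forward pass triggers (slow)
# compilation; later calls with the same tensor shapes reuse the compiled graph.
pipe.transformer = torch.compile(pipe.transformer, mode="max-autotune", fullgraph=True)

# Illustrative warm-up at the fixed 768x768 resolution the commit also pins,
# so compilation happens once, before user requests arrive.
_ = pipe("warm-up prompt", width=768, height=768, num_inference_steps=4).images

A plausible reading of the width=768, height=768 additions: with fullgraph=True, torch.compile specializes the compiled graph to the tensor shapes it first sees, so fixing the resolution in both generate() and update_scales() keeps the latent shapes constant and avoids recompilation stalls when the app re-renders images at different slider values.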