KingNish committed on
Commit 92ae8b4 · verified · 1 Parent(s): ed38e7e

Update app.py

Files changed (1)
  1. app.py +35 -29
app.py CHANGED
@@ -29,41 +29,47 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
 pipe = FluxWithCFGPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=dtype)
 pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype)
 
-apply_group_offloading(
-    pipe.transformer,
-    offload_type="leaf_level",
-    offload_device=torch.device("cpu"),
-    onload_device=torch.device("cuda"),
-    use_stream=True,
-)
-apply_group_offloading(
-    pipe.text_encoder,
-    offload_device=torch.device("cpu"),
-    onload_device=torch.device("cuda"),
-    offload_type="leaf_level",
-    use_stream=True,
-)
-apply_group_offloading(
-    pipe.text_encoder_2,
-    offload_device=torch.device("cpu"),
-    onload_device=torch.device("cuda"),
-    offload_type="leaf_level",
-    use_stream=True,
-)
-apply_group_offloading(
-    pipe.vae,
-    offload_device=torch.device("cpu"),
-    onload_device=torch.device("cuda"),
-    offload_type="leaf_level",
-    use_stream=True,
-)
-
 pipe.to(device)
 
+group_offloading = None
+
 # --- Inference Function ---
 @spaces.GPU
 def generate_image(prompt: str, seed: int = 42, width: int = DEFAULT_WIDTH, height: int = DEFAULT_HEIGHT, randomize_seed: bool = False, num_inference_steps: int = DEFAULT_INFERENCE_STEPS, is_enhance: bool = False):
     """Generates an image using the FLUX pipeline with error handling."""
+
+    global group_offloading
+    if not group_offloading:
+        apply_group_offloading(
+            pipe.transformer,
+            offload_type="leaf_level",
+            offload_device=torch.device("cpu"),
+            onload_device=torch.device("cuda"),
+            use_stream=True,
+        )
+        apply_group_offloading(
+            pipe.text_encoder,
+            offload_device=torch.device("cpu"),
+            onload_device=torch.device("cuda"),
+            offload_type="leaf_level",
+            use_stream=True,
+        )
+        apply_group_offloading(
+            pipe.text_encoder_2,
+            offload_device=torch.device("cpu"),
+            onload_device=torch.device("cuda"),
+            offload_type="leaf_level",
+            use_stream=True,
+        )
+        apply_group_offloading(
+            pipe.vae,
+            offload_device=torch.device("cpu"),
+            onload_device=torch.device("cuda"),
+            offload_type="leaf_level",
+            use_stream=True,
+        )
+
+        group_offloading = True
 
     if pipe is None:
         raise gr.Error("Diffusion pipeline failed to load. Cannot generate images.")
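
In effect, the commit defers diffusers' group offloading from import time to the first call of the @spaces.GPU-decorated generate_image, guarded by a module-level flag, presumably so the offloading hooks are only installed once a CUDA device is actually attached. Below is a minimal sketch of the same lazy-setup pattern, assuming a pipe exposing transformer, text_encoder, text_encoder_2 and vae components; the helper name, the flag name, and the diffusers.hooks import path are illustrative rather than taken from the commit:

import torch
from diffusers.hooks import apply_group_offloading  # assumed import path

_offloading_applied = False  # illustrative stand-in for the commit's `group_offloading` flag

def ensure_group_offloading(pipe):
    """Install leaf-level group offloading hooks once, on first use."""
    global _offloading_applied
    if _offloading_applied:
        return
    # The commit repeats the call for each component; a loop is equivalent here.
    for module in (pipe.transformer, pipe.text_encoder, pipe.text_encoder_2, pipe.vae):
        apply_group_offloading(
            module,
            offload_type="leaf_level",           # offload at per-module (leaf) granularity
            offload_device=torch.device("cpu"),  # weights rest on CPU between uses
            onload_device=torch.device("cuda"),  # moved onto the GPU just before they are needed
            use_stream=True,                     # overlap host-to-device copies with compute
        )
    _offloading_applied = True

Calling ensure_group_offloading(pipe) at the top of generate_image reproduces the behaviour of the diff: the first request pays the one-time hook installation cost, and subsequent requests skip it.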