fantaxy committed
Commit c43a736 · verified · 1 Parent(s): 8d65abf

Update app.py

Files changed (1): app.py (+50 −85)
app.py CHANGED
@@ -4,18 +4,22 @@ import warnings
 import os
 import gradio as gr
 import numpy as np
-import spaces
 import torch
 from diffusers import FluxControlNetModel
 from diffusers.pipelines import FluxControlNetPipeline
 from gradio_imageslider import ImageSlider
 from PIL import Image
 from huggingface_hub import snapshot_download
-
-# Add gc for memory management
 import gc
+
+# Force CPU usage
+device = "cpu"
+dtype = torch.float32
+
+# Clear memory
 gc.collect()
-torch.cuda.empty_cache()
+if torch.cuda.is_available():
+    torch.cuda.empty_cache()
 
 css = """
 #col-container {
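
Note: the guarded cleanup introduced in this hunk is worth factoring out. A minimal sketch of the same pattern as a reusable helper (the free_memory name is illustrative, not part of this commit):

import gc
import torch

def free_memory():
    # Release Python-level garbage first, then the CUDA cache,
    # but only touch CUDA when a GPU is actually present, so the
    # same code runs unchanged on a CPU-only Space.
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()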
@@ -24,98 +28,61 @@ css = """
 }
 """
 
-# Device setup with minimal memory usage
-if torch.cuda.is_available():
-    power_device = "GPU"
-    device = "cuda"
-    dtype = torch.float16  # Use float16 for minimum memory
-    # Set CUDA memory fraction to 50%
-    torch.cuda.set_per_process_memory_fraction(0.5)
-else:
-    power_device = "CPU"
-    device = "cpu"
-    dtype = torch.float32
-
-huggingface_token = os.getenv("HUGGINFACE_TOKEN")
+huggingface_token = os.getenv("HF_TOKEN")
 
 # Minimal model configuration
 model_config = {
     "low_cpu_mem_usage": True,
     "torch_dtype": dtype,
     "use_safetensors": True,
-    "variant": "fp16",  # Use fp16 variant if available
+    "device_map": "cpu"
 }
 
 model_path = snapshot_download(
     repo_id="black-forest-labs/FLUX.1-dev",
     repo_type="model",
-    ignore_patterns=["*.md", "*..gitattributes", "*.bin"],  # Ignore unnecessary files
+    ignore_patterns=["*.md", "*..gitattributes", "*.bin"],
     local_dir="FLUX.1-dev",
     token=huggingface_token,
 )
 
-# Load models with minimal configuration
-try:
-    controlnet = FluxControlNetModel.from_pretrained(
-        "jasperai/Flux.1-dev-Controlnet-Upscaler",
-        **model_config
-    ).to(device)
-
-    pipe = FluxControlNetPipeline.from_pretrained(
-        model_path,
-        controlnet=controlnet,
-        **model_config
-    )
+# Load models on CPU
+controlnet = FluxControlNetModel.from_pretrained(
+    "jasperai/Flux.1-dev-Controlnet-Upscaler",
+    **model_config
+)
 
-    # Enable all memory optimizations
-    pipe.enable_model_cpu_offload()
-    pipe.enable_attention_slicing(1)
-    pipe.enable_sequential_cpu_offload()
-    pipe.enable_vae_slicing()
-
-    # Clear memory after loading
-    gc.collect()
-    torch.cuda.empty_cache()
-
-except Exception as e:
-    print(f"Error loading models: {e}")
-    raise
+pipe = FluxControlNetPipeline.from_pretrained(
+    model_path,
+    controlnet=controlnet,
+    **model_config
+)
+
+# Enable optimizations
+pipe.enable_attention_slicing(1)
+pipe.enable_vae_slicing()
 
-# Extremely reduced parameters
 MAX_SEED = 1000000
-MAX_PIXEL_BUDGET = 128 * 128  # Extremely reduced from 256 * 256
-
-def check_resources():
-    if torch.cuda.is_available():
-        memory_allocated = torch.cuda.memory_allocated(0)
-        memory_reserved = torch.cuda.memory_reserved(0)
-        if memory_allocated/memory_reserved > 0.7:  # 70% threshold
-            gc.collect()
-            torch.cuda.empty_cache()
-    return True
-
-def process_input(input_image, upscale_factor, **kwargs):
+MAX_PIXEL_BUDGET = 64 * 64  # Extremely reduced
+
+def process_input(input_image, upscale_factor):
     input_image = input_image.convert('RGB')
 
-    # Reduce image size more aggressively
+    # Aggressive size reduction
     w, h = input_image.size
     max_size = int(np.sqrt(MAX_PIXEL_BUDGET))
-    if w > max_size or h > max_size:
-        if w > h:
-            new_w = max_size
-            new_h = int(h * max_size / w)
-        else:
-            new_h = max_size
-            new_w = int(w * max_size / h)
-        input_image = input_image.resize((new_w, new_h), Image.LANCZOS)
 
-    w, h = input_image.size
-    w = w - w % 8
-    h = h - h % 8
+    # Resize to very small size
+    new_w = min(w, max_size)
+    new_h = min(h, max_size)
+    input_image = input_image.resize((new_w, new_h), Image.LANCZOS)
+
+    # Ensure dimensions are multiples of 8
+    w = new_w - new_w % 8
+    h = new_h - new_h % 8
 
-    return input_image.resize((w, h)), w, h, True
+    return input_image.resize((w, h)), w, h
 
-@spaces.GPU
 def infer(
     seed,
     randomize_seed,
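
Note on the new process_input: with MAX_PIXEL_BUDGET = 64 * 64, max_size is sqrt(4096) = 64, and min(w, 64) × min(h, 64) clamps each side independently, so non-square inputs get distorted, whereas the removed branch preserved the aspect ratio. (Separately, the ignore pattern "*..gitattributes" in this hunk carries a double dot, so under fnmatch rules it will never match .gitattributes.) A hedged sketch of a budget-respecting, aspect-preserving variant — fit_to_budget is illustrative, not part of the commit:

import numpy as np
from PIL import Image

def fit_to_budget(img: Image.Image, budget: int = 64 * 64) -> Image.Image:
    # Scale both sides by the same factor so w*h <= budget (never upscale),
    # then round each side down to a multiple of 8 as the pipeline expects.
    w, h = img.size
    scale = min(1.0, np.sqrt(budget / (w * h)))
    w, h = int(w * scale), int(h * scale)
    w, h = max(8, w - w % 8), max(8, h - h % 8)
    return img.resize((w, h), Image.LANCZOS)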
@@ -127,12 +94,11 @@ def infer(
 ):
     try:
         gc.collect()
-        torch.cuda.empty_cache()
 
         if randomize_seed:
             seed = random.randint(0, MAX_SEED)
 
-        input_image, w, h, _ = process_input(input_image, upscale_factor)
+        input_image, w, h = process_input(input_image, upscale_factor)
 
         with torch.inference_mode():
             generator = torch.Generator().manual_seed(seed)
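
The CPU torch.Generator seeded via manual_seed is what makes a fixed UI seed reproducible across runs. A minimal self-contained check of that property (illustrative, not from the commit):

import torch

# Two generators with the same seed produce identical draws,
# so the same seed yields the same diffusion sampling noise.
g1 = torch.Generator().manual_seed(42)
g2 = torch.Generator().manual_seed(42)
assert torch.equal(torch.randn(4, generator=g1), torch.randn(4, generator=g2))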
@@ -141,19 +107,18 @@ def infer(
             control_image=input_image,
             controlnet_conditioning_scale=controlnet_conditioning_scale,
             num_inference_steps=num_inference_steps,
-            guidance_scale=2.0,  # Reduced from 3.5
+            guidance_scale=1.5,
             height=h,
             width=w,
             generator=generator,
         ).images[0]
 
         gc.collect()
-        torch.cuda.empty_cache()
 
         return [input_image, image, seed]
 
     except Exception as e:
-        gr.Error(f"An error occurred: {str(e)}")
+        gr.Error(f"Error: {str(e)}")
         return None
 
 with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css) as demo:
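
One behavior worth flagging in this hunk: gr.Error is an exception class, and constructing it without raising it is a no-op, so the message on the changed line never reaches the UI in either version. A sketch of the intended pattern (my suggestion, not what the commit does; infer is assumed to be the app's own function):

import gradio as gr

def infer_safe(*args):
    # Wrapper pattern: let errors surface in the UI instead of vanishing.
    try:
        return infer(*args)  # the app's infer(), assumed in scope
    except Exception as e:
        # raise gr.Error(...) shows a popup in Gradio;
        # gr.Error(...) without raise silently discards the message.
        raise gr.Error(f"Error: {e}") from e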
@@ -167,23 +132,23 @@ with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css) as demo:
         num_inference_steps = gr.Slider(
             label="Steps",
             minimum=1,
-            maximum=20,  # Reduced from 30
+            maximum=10,
             step=1,
-            value=10,  # Reduced from 20
+            value=5,
         )
         upscale_factor = gr.Slider(
             label="Scale",
             minimum=1,
-            maximum=1,  # Fixed at 1
+            maximum=1,
             step=1,
             value=1,
         )
         controlnet_conditioning_scale = gr.Slider(
             label="Control Scale",
             minimum=0.1,
-            maximum=0.5,  # Reduced from 1.0
+            maximum=0.3,
             step=0.1,
-            value=0.3,  # Reduced from 0.5
+            value=0.2,
         )
         seed = gr.Slider(
             label="Seed",
@@ -201,8 +166,8 @@ with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css) as demo:
 
     examples = gr.Examples(
         examples=[
-            [42, False, os.path.join(current_dir, "z1.webp"), 10, 1, 0.3],
-            [42, False, os.path.join(current_dir, "z2.webp"), 10, 1, 0.3],
+            [42, False, os.path.join(current_dir, "z1.webp"), 5, 1, 0.2],
+            [42, False, os.path.join(current_dir, "z2.webp"), 5, 1, 0.2],
         ],
         inputs=[
             seed,
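
current_dir is referenced by these examples but never defined in any hunk of this diff, so it presumably comes from an unchanged part of app.py. The usual definition would be something like the following (an assumption, not shown in the commit):

import os

# Assumed definition: resolve example paths relative to app.py itself,
# so the Space finds z1.webp / z2.webp regardless of the working directory.
current_dir = os.path.dirname(os.path.abspath(__file__))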
@@ -214,7 +179,7 @@ with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css) as demo:
         ],
         fn=infer,
         outputs=result,
-        cache_examples=False,  # Disable caching
+        cache_examples=False,
     )
 
     gr.on(
@@ -232,7 +197,7 @@ with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css) as demo:
         show_api=False,
     )
 
-# Launch with minimal resources
+# Minimal launch configuration
 demo.queue(max_size=1).launch(
     share=False,
     debug=True,
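
The queue(max_size=1) call in the unchanged context here is the app's main concurrency guard: Gradio turns away new events once one is already queued, which bounds peak memory on a small CPU Space. A minimal standalone sketch of the same pattern (an illustrative demo, not this app):

import gradio as gr

def echo(text: str) -> str:
    return text

demo = gr.Interface(fn=echo, inputs="text", outputs="text")
# max_size=1: at most one waiting event; later requests are rejected
# instead of piling up memory-hungry jobs on a small CPU machine.
demo.queue(max_size=1).launch(share=False, debug=True)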
 