Himanshu-AT committed
Commit dc9bec2 · 1 Parent(s): a228de9

give ability to have multiple models
Files changed (2):
  1. app.py (+33 -71)
  2. lora_models.json (+4 -0)
app.py CHANGED
@@ -3,6 +3,7 @@ import numpy as np
 import os
 import spaces
 import random
+import json
 # from image_gen_aux import DepthPreprocessor
 from PIL import Image
 import torch
@@ -16,73 +17,34 @@ MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 2048
 
 pipe = FluxFillPipeline.from_pretrained("black-forest-labs/FLUX.1-Fill-dev", torch_dtype=torch.bfloat16).to("cuda")
-pipe.load_lora_weights("Himanshu806/testLora")
-pipe.enable_lora()
-
-# vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae")
-# processor = DepthPreprocessor.from_pretrained("LiheYoung/depth-anything-large-hf")
-
-# preprocess = transforms.Compose(
-#     [
-#         transforms.Resize(
-#             (vae.config.sample_size, vae.config.sample_size),
-#             interpolation=transforms.InterpolationMode.BILINEAR,
-#         ),
-#         transforms.ToTensor(),
-#         transforms.Normalize([0.5], [0.5]),
-#     ]
-# )
-#
-# image_np = image[0].cpu().numpy()  # Move to CPU and convert to NumPy
-
-# if image_np.shape[0] == 3:  # Check if channels are first
-#     image_np = image_np.transpose(1, 2, 0)
-
-# image_np = (image_np * 255).astype(np.uint8)
-
-# image = Image.fromarray(image_np)
-
-# def calculate_optimal_dimensions(image: Image.Image):
-#     # Extract the original dimensions
-#     original_width, original_height = image.size
-
-#     # Set constants
-#     MIN_ASPECT_RATIO = 9 / 16
-#     MAX_ASPECT_RATIO = 16 / 9
-#     FIXED_DIMENSION = 1024
-
-#     # Calculate the aspect ratio of the original image
-#     original_aspect_ratio = original_width / original_height
-
-#     # Determine which dimension to fix
-#     if original_aspect_ratio > 1:  # Wider than tall
-#         width = FIXED_DIMENSION
-#         height = round(FIXED_DIMENSION / original_aspect_ratio)
-#     else:  # Taller than wide
-#         height = FIXED_DIMENSION
-#         width = round(FIXED_DIMENSION * original_aspect_ratio)
-
-#     # Ensure dimensions are multiples of 8
-#     width = (width // 8) * 8
-#     height = (height // 8) * 8
-
-#     # Enforce aspect ratio limits
-#     calculated_aspect_ratio = width / height
-#     if calculated_aspect_ratio > MAX_ASPECT_RATIO:
-#         width = (height * MAX_ASPECT_RATIO // 8) * 8
-#     elif calculated_aspect_ratio < MIN_ASPECT_RATIO:
-#         height = (width / MIN_ASPECT_RATIO // 8) * 8
-
-#     # Ensure width and height remain above the minimum dimensions
-#     width = max(width, 576) if width == FIXED_DIMENSION else width
-#     height = max(height, 576) if height == FIXED_DIMENSION else height
-
-#     return width, height
+# pipe.load_lora_weights("Himanshu806/testLora")
+# pipe.enable_lora()
+
+with open("lora_models.json", "r") as f:
+    lora_models = json.load(f)
+
+def download_model(model_name, model_path):
+    print(f"Downloading model: {model_name} from {model_path}")
+    try:
+        pipe.load_lora_weights(model_path)
+        print(f"Successfully downloaded model: {model_name}")
+    except Exception as e:
+        print(f"Failed to download model: {model_name}. Error: {e}")
+
+# Iterate through the models and download each one
+for model_name, model_path in lora_models.items():
+    download_model(model_name, model_path)
+
+lora_models["None"] = None
 
 @spaces.GPU(durations=300)
-def infer(edit_images, prompt, prompt2, width, height, seed=42, randomize_seed=False, guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
+def infer(edit_images, prompt, width, height, seed=42, randomize_seed=False, guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
     # pipe.enable_xformers_memory_efficient_attention()
+
+    if lora_model != "None":
+        pipe.load_lora_weights(lora_models[lora_model])
+        pipe.enable_lora()
+
     image = edit_images["background"]
     # width, height = calculate_optimal_dimensions(image)
     mask = edit_images["layers"][0]
@@ -93,7 +55,7 @@ def infer(edit_images, prompt, prompt2, width, height, seed=42, randomize_seed=F
     image = pipe(
         # mask_image_latent=vae.encode(controlImage),
         prompt=prompt,
-        prompt_2=prompt2,
+        prompt_2=prompt,
         image=image,
         mask_image=mask,
         height=height,
@@ -147,13 +109,13 @@ with gr.Blocks(css=css) as demo:
                 placeholder="Enter your prompt",
                 container=False,
             )
-            prompt2 = gr.Text(
-                label="Prompt2",
-                show_label=False,
-                max_lines=2,
-                placeholder="Enter your second prompt",
-                container=False,
+
+            lora_model = gr.Dropdown(
+                label="Select LoRA Model",
+                choices=list(lora_models.keys()),
+                value="None",
             )
+
             run_button = gr.Button("Run")
 
         result = gr.Image(label="Result", show_label=False)
@@ -209,7 +171,7 @@ with gr.Blocks(css=css) as demo:
     gr.on(
         triggers=[run_button.click, prompt.submit],
         fn = infer,
-        inputs = [edit_image, prompt,prompt2, width, height, seed, randomize_seed, guidance_scale, num_inference_steps],
+        inputs = [edit_image, prompt, width, height, lora_model, seed, randomize_seed, guidance_scale, num_inference_steps],
        outputs = [result, seed]
     )
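Note: as committed, the new infer() signature no longer takes lora_model, yet the gr.on() inputs list passes the dropdown value between height and seed; at call time the selected name lands in seed and every later positional argument shifts by one, while the lora_model read inside infer() resolves to the module-level gr.Dropdown component rather than the selected string. Below is a minimal sketch of what appears to be the intended signature; it is an assumption about the fix, not part of this commit, and the unload_lora_weights()/disable_lora() calls (standard diffusers LoRA-mixin methods) are added here so that switching selections does not stack adapters:

@spaces.GPU(durations=300)
def infer(edit_images, prompt, width, height, lora_model, seed=42,
          randomize_seed=False, guidance_scale=3.5, num_inference_steps=28,
          progress=gr.Progress(track_tqdm=True)):
    # lora_model arrives in the same position it holds in the gr.on() inputs list
    if lora_model != "None":
        pipe.unload_lora_weights()   # drop any previously loaded adapter
        pipe.load_lora_weights(lora_models[lora_model])
        pipe.enable_lora()
    else:
        pipe.disable_lora()          # "None" selected: run the base model
    # ... rest of the body unchanged ...

Even with per-request loading, the startup loop retains some value as a pre-download pass: each load_lora_weights call pulls the repo into the local cache, and the try/except surfaces bad repo ids at launch rather than at inference time.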
 
lora_models.json ADDED
@@ -0,0 +1,4 @@
+{
+    "RahulFineTuned": "Himanshu806/testLora",
+    "KodaRealistic": "alvdansen/flux-koda"
+}
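The new file is a flat display-name-to-repo-id map: the keys become the dropdown choices (app.py appends the extra "None" entry at runtime) and the values are Hugging Face Hub repos holding FLUX-compatible LoRA weights. Adding a model is one more key/value pair; in the sketch below the last repo id is a placeholder, not a real entry:

{
    "RahulFineTuned": "Himanshu806/testLora",
    "KodaRealistic": "alvdansen/flux-koda",
    "MyNewStyle": "your-username/your-flux-lora"
}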