blanchon committed on
Commit 7c14093 · 1 Parent(s): 2c8d515
Files changed (1)
  1. app.py +32 -21
app.py CHANGED
@@ -2,17 +2,18 @@ import os
 import numpy as np
 from typing import cast
 import torch
-from PIL import Image, ImageOps
+from PIL import Image
 from diffusers import DiffusionPipeline
 import gradio as gr
 from gradio.components.image_editor import EditorValue
-import spaces

 DEVICE = "cuda"

-MAIN_MODEL_REPO_ID = os.getenv("MAIN_MODEL_REPO_ID", None)
-SUB_MODEL_REPO_ID = os.getenv("SUB_MODEL_REPO_ID", None)
-SUB_MODEL_SUBFOLDER = os.getenv("SUB_MODEL_SUBFOLDER", None)
+MAIN_MODEL_REPO_ID = os.getenv(
+    "MAIN_MODEL_REPO_ID", "black-forest-labs/FLUX.1-Fill-dev"
+)
+SUB_MODEL_REPO_ID = os.getenv("SUB_MODEL_REPO_ID", "blanchon/FurnitureModel")
+SUB_MODEL_SUBFOLDER = os.getenv("SUB_MODEL_SUBFOLDER", "model/v1/ckpt/9600")

 if MAIN_MODEL_REPO_ID is None:
     raise ValueError("MAIN_MODEL_REPO_ID is not set")
@@ -36,7 +37,7 @@ def crop_divisible_by_16(image: Image.Image) -> Image.Image:
     return image.crop((0, 0, w, h))


-@spaces.GPU(duration=150)
+# @spaces.GPU(duration=150)
 def predict(
     image_and_mask: EditorValue,
     condition_image: Image.Image | None,
@@ -82,35 +83,43 @@ def predict(
         subfolder=SUB_MODEL_SUBFOLDER,
     )

-    target_image = Image.fromarray(image_np)
+    target_image = Image.fromarray(image_np).convert("RGB")
     # Resize to max dimension
     target_image.thumbnail((target_size, target_size))
-    target_image = target_image.resize((target_size, target_size))
-    # Ensure dimensions are multiple of 16 (for VAE)
-    target_image = crop_divisible_by_16(target_image)
+    new_target_image = Image.new("RGB", (target_size, target_size), (0, 0, 0))
+    new_target_image.paste(target_image, (0, 0))

-    mask_image = Image.fromarray(mask_np)
+    # Save target image
+    new_target_image.save("target_image.png")
+
+    mask_image = Image.fromarray(mask_np).convert("L")
     mask_image.thumbnail((target_size, target_size))
-    mask_image = mask_image.resize((target_size, target_size))
-    mask_image = crop_divisible_by_16(mask_image)
-    # Invert the mask
-    mask_image = ImageOps.invert(mask_image)
+    new_mask_image = Image.new("L", (target_size, target_size), 0)
+    new_mask_image.paste(mask_image, (0, 0))
+
+    # Save mask image
+    new_mask_image.save("mask_image.png")

     # # Image masked is the image with the mask applied (black background)
     # image_masked = Image.new("RGB", image.size, (0, 0, 0))
     # image_masked.paste(image, (0, 0), mask)

+    condition_image = condition_image.convert("RGB")
     condition_image.thumbnail((condition_size, condition_size))
-    condition_image = condition_image.resize((condition_size, condition_size))
-    condition_image = crop_divisible_by_16(condition_image)
+    # Save condition image
+    new_condition_image = Image.new("RGB", (condition_size, condition_size), (0, 0, 0))
+    new_condition_image.paste(condition_image, (0, 0))
+
+    # Save condition image
+    new_condition_image.save("condition_image.png")

     generator = torch.Generator(device="cpu").manual_seed(seed)

     final_image = pipeline(
-        condition_image=condition_image,
+        condition_image=new_condition_image,
         prompt="",
-        image=target_image,
-        mask_image=mask_image,
+        image=new_target_image,
+        mask_image=new_mask_image,
         num_inference_steps=num_inference_steps,
         height=target_size,
         width=target_size,
@@ -123,7 +132,9 @@ def predict(
         max_sequence_length=512,
     ).images[0]

-    return final_image
+    final_image_crop = final_image.crop((0, 0, target_size, target_size))
+
+    return final_image_crop


 intro_markdown = r"""
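The commit also drops `import spaces` and comments out the `@spaces.GPU(duration=150)` decorator on `predict`, which suggests the app is meant to run outside a ZeroGPU Space. A minimal sketch of keeping both environments working is shown below; the try/except fallback and the `gpu` name are illustrative additions, not part of the commit, and only `spaces.GPU(duration=150)` itself comes from the original code.

```python
# Sketch: apply the ZeroGPU decorator only when the `spaces` package is available.
try:
    import spaces

    gpu = spaces.GPU(duration=150)  # same duration as the original decorator
except ImportError:
    def gpu(fn):
        # Plain pass-through when running locally, outside a ZeroGPU Space.
        return fn


@gpu
def predict(*args, **kwargs):
    ...  # the real inference body from app.py would go here
```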
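The main change inside `predict` is the resizing scheme: instead of force-resizing each input to a square and cropping to a multiple of 16 (the old `resize` + `crop_divisible_by_16` path, which also inverted the mask), the new code letterboxes each image onto a fixed-size black canvas with `thumbnail` + `paste`, feeds the padded images to the pipeline, and crops the `(0, 0, target_size, target_size)` square back out of the output. Below is a standalone sketch of that padding step, assuming PIL only; the `pad_to_square` helper, the file names, and the 1024/512 sizes are placeholders, not part of the commit.

```python
from PIL import Image


def pad_to_square(image: Image.Image, size: int, mode: str = "RGB") -> Image.Image:
    """Fit `image` into a `size` x `size` black canvas, preserving aspect ratio."""
    image = image.convert(mode)
    image.thumbnail((size, size))  # shrinks in place, never upscales
    canvas = Image.new(mode, (size, size), 0 if mode == "L" else (0, 0, 0))
    canvas.paste(image, (0, 0))  # anchored top-left, as in app.py
    return canvas


# Usage mirroring the commit: RGB target and condition images, an "L" mask.
target = pad_to_square(Image.open("room.png"), 1024)
mask = pad_to_square(Image.open("room_mask.png"), 1024, mode="L")
condition = pad_to_square(Image.open("sofa.png"), 512)
# After inference, the commit crops the square back out of the pipeline output:
# final = output_image.crop((0, 0, 1024, 1024))
```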