blanchon commited on
Commit
a3068c2
·
1 Parent(s): d2e37f2

First commit

Browse files
Files changed (4) hide show
  1. README.md +25 -7
  2. app.py +225 -0
  3. pyproject.toml +18 -0
  4. requirements.txt +10 -0
README.md CHANGED
@@ -1,12 +1,30 @@
1
  ---
2
- title: FurnitureInpaintingDemo
3
- emoji: 📉
4
- colorFrom: purple
5
- colorTo: pink
6
  sdk: gradio
7
- sdk_version: 5.13.1
 
 
8
  app_file: app.py
9
- pinned: false
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
  ---
11
 
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
1
  ---
2
+ title: InpaintingModelDemo
3
+ emoji: 🌖
4
+ colorFrom: pink
5
+ colorTo: red
6
  sdk: gradio
7
+ python_version: 3.12
8
+ sdk_version: 5.12.0
9
+ suggested_hardware: a100-large
10
  app_file: app.py
11
+ # fullWidth: true
12
+ # header: mini
13
+ # models: blanchon/VirtualUnstagingModel
14
+ # datasets: blanchon/VirtualUnstagingDataset
15
+ tags:
16
+ - image-generation
17
+ - image-to-image
18
+ - furniture
19
+ - virtual-staging
20
+ - home-decor
21
+ - home-design
22
+ pinned: true
23
+ # preload_from_hub:
24
+ # - blanchon/VirtualUnstagingModel
25
+ license: mit
26
  ---
27
 
28
+ # VirtualUnstaging
29
+
30
+ ...
app.py ADDED
@@ -0,0 +1,225 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import numpy as np
3
+ from typing import cast
4
+ from pydantic import NonNegativeInt
5
+ import torch
6
+ from PIL import Image, ImageOps
7
+ from diffusers import DiffusionPipeline
8
+ import gradio as gr
9
+ from gradio.components.image_editor import EditorValue
10
+ import spaces
11
+
12
# Diffusion inference runs on the GPU provided by the Spaces runtime.
DEVICE = "cuda"

# Model locations are injected via environment variables so the Space can be
# repointed (e.g. at private repos) without code changes.
MAIN_MODEL_REPO_ID = os.getenv("MAIN_MODEL_REPO_ID", None)
SUB_MODEL_REPO_ID = os.getenv("SUB_MODEL_REPO_ID", None)
SUB_MODEL_SUBFOLDER = os.getenv("SUB_MODEL_SUBFOLDER", None)

# Fail fast at startup when the configuration is incomplete, before any
# model download is attempted.
if MAIN_MODEL_REPO_ID is None:
    raise ValueError("MAIN_MODEL_REPO_ID is not set")
if SUB_MODEL_REPO_ID is None:
    raise ValueError("SUB_MODEL_REPO_ID is not set")
if SUB_MODEL_SUBFOLDER is None:
    raise ValueError("SUB_MODEL_SUBFOLDER is not set")

# Build the pipeline once at import time. SUB_MODEL_REPO_ID doubles as the
# custom pipeline implementation; additional weights are loaded per-request
# via `pipeline.load(...)` inside `predict`.
pipeline = DiffusionPipeline.from_pretrained(
    MAIN_MODEL_REPO_ID,
    torch_dtype=torch.bfloat16,
    custom_pipeline=SUB_MODEL_REPO_ID,
).to(DEVICE)
def crop_divisible_by_16(image: Image.Image) -> Image.Image:
    """Crop *image* from the top-left so both dimensions are multiples of 16.

    The VAE downsamples by a factor of 16, so width and height must be
    divisible by 16; any remainder pixels on the right/bottom are discarded.
    """
    width, height = image.size
    cropped_width = (width // 16) * 16
    cropped_height = (height // 16) * 16
    return image.crop((0, 0, cropped_width, cropped_height))
@spaces.GPU(duration=150)
def predict(
    image_and_mask: EditorValue | None,
    furniture_reference: Image.Image | None,
    seed: int = 0,
    num_inference_steps: int = 28,
    max_dimension: int = 704,
    condition_scale: float = 1.0,
    progress: gr.Progress = gr.Progress(track_tqdm=True),  # noqa: ARG001, B008
) -> Image.Image | None:
    """Inpaint the brushed region of a room image using a furniture reference.

    Args:
        image_and_mask: Gradio ImageMask editor value; ``"background"`` holds
            the uploaded room image and ``"layers"[0]`` the brush strokes.
            (Was annotated ``EditorValue | NonNegativeInt`` — a typo; the
            editor value is never an int, so it is now ``EditorValue | None``.)
        furniture_reference: Reference image of the furniture to insert.
        seed: Seed for the CPU torch generator (reproducibility).
        num_inference_steps: Diffusion denoising steps.
        max_dimension: Images are thumbnailed to fit within this square.
        condition_scale: Strength of the condition image.
        progress: Gradio progress tracker (consumed via tqdm hooks).

    Returns:
        The inpainted image, or ``None`` (with a ``gr.Info`` hint) when any
        required input is missing or empty.
    """
    if not image_and_mask:
        gr.Info("Please upload an image and draw a mask")
        return None
    if not furniture_reference:
        gr.Info("Please upload a furniture reference image")
        return None

    image_np = cast(np.ndarray, image_and_mask["background"])
    # An all-zero background means nothing was actually uploaded.
    if np.sum(image_np) == 0:
        gr.Info("Please upload an image")
        return None

    # First editor layer carries the brush strokes; painted pixels have a
    # non-zero alpha. NOTE(review): assumes the layer is RGBA — confirm the
    # ImageMask component always yields 4-channel layers.
    alpha_channel = cast(np.ndarray, image_and_mask["layers"][0])
    mask_np = np.where(alpha_channel[:, :, 3] == 0, 0, 255).astype(np.uint8)
    if np.sum(mask_np) == 0:
        gr.Info("Please mark the areas you want to remove")
        return None

    # Load the per-task weights into the already-constructed pipeline.
    pipeline.load(
        SUB_MODEL_REPO_ID,
        subfolder=SUB_MODEL_SUBFOLDER,
    )

    image = Image.fromarray(image_np)
    # Resize to fit max dimension, then crop so the VAE gets /16 dimensions.
    image.thumbnail((max_dimension, max_dimension))
    image = crop_divisible_by_16(image)

    mask = Image.fromarray(mask_np)
    mask.thumbnail((max_dimension, max_dimension))
    mask = crop_divisible_by_16(mask)
    # Invert: the pipeline expects the area to KEEP as the paste mask below.
    mask = ImageOps.invert(mask)

    # Condition image: original pixels where the mask keeps them, black
    # elsewhere (the region to be inpainted).
    image_masked = Image.new("RGB", image.size, (0, 0, 0))
    image_masked.paste(image, (0, 0), mask)

    furniture_reference.thumbnail((max_dimension, max_dimension))
    furniture_reference = crop_divisible_by_16(furniture_reference)

    # CPU generator so results are reproducible regardless of GPU allocation.
    generator = torch.Generator(device="cpu").manual_seed(seed)

    final_image = pipeline(
        condition_image=image_masked,
        reference_image=furniture_reference,
        condition_scale=condition_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        max_sequence_length=512,
        latent_lora=True,
    ).images[0]

    return final_image
+
111
+
112
# Page header shown above the UI.
intro_markdown = r"""
# Furniture Inpainting Demo
"""

# Center the two main columns and the showcase area.
css = r"""
#col-left {
    margin: 0 auto;
    max-width: 650px;
}
#col-right {
    margin: 0 auto;
    max-width: 650px;
}
#col-showcase {
    margin: 0 auto;
    max-width: 1100px;
}
"""


with gr.Blocks(css=css) as demo:
    gr.Markdown(intro_markdown)

    with gr.Row() as content:
        # Left column: inputs (room image + mask, furniture reference).
        with gr.Column(elem_id="col-left"):
            gr.HTML(
                """
                <div style="display: flex; justify-content: center; align-items: center; text-align: center; font-size: 20px;">
                    <div>
                        Step 1. Upload a room image ⬇️
                    </div>
                </div>
                """,
                max_height=50,
            )
            # Single-layer mask editor: upload only, fixed black brush.
            image_and_mask = gr.ImageMask(
                label="Image and Mask",
                layers=False,
                height="full",
                width="full",
                show_fullscreen_button=False,
                sources=["upload"],
                show_download_button=False,
                interactive=True,
                brush=gr.Brush(default_size=75, colors=["#000000"], color_mode="fixed"),
                transforms=[],
            )
            furniture_reference = gr.Image(label="Furniture Reference")
        # Right column: run button and result.
        with gr.Column(elem_id="col-right"):
            gr.HTML(
                """
                <div style="display: flex; justify-content: center; align-items: center; text-align: center; font-size: 20px;">
                    <div>
                        Step 2. Press Run to launch
                    </div>
                </div>
                """,
                max_height=50,
            )
            # image_slider = ImageSlider(
            #     label="Result",
            #     interactive=False,
            # )
            result = gr.Image(label="Result")
            run_button = gr.Button("Run")

    # Tuning knobs mirroring `predict`'s keyword parameters.
    with gr.Accordion("Advanced Settings", open=False):
        seed = gr.Slider(
            label="Seed",
            minimum=0,
            maximum=100_000,
            step=1,
            value=0,
        )
        condition_scale = gr.Slider(
            label="Condition Scale",
            minimum=-10.0,
            maximum=10.0,
            step=0.10,
            value=1.0,
        )
        with gr.Column():
            max_dimension = gr.Slider(
                label="Max Dimension",
                minimum=512,
                maximum=2048,
                step=128,
                value=704,
            )

            num_inference_steps = gr.Slider(
                label="Number of inference steps",
                minimum=1,
                maximum=50,
                step=1,
                value=28,
            )

    # Input order must match `predict`'s positional parameters.
    run_button.click(
        fn=predict,
        inputs=[
            image_and_mask,
            furniture_reference,
            seed,
            num_inference_steps,
            max_dimension,
            condition_scale,
        ],
        # outputs=[image_slider],
        outputs=[result],
    )


demo.launch()
pyproject.toml ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [project]
2
+ name = "VirtualStaging"
3
+ version = "0.1.0"
4
+ description = "Add your description here"
5
+ readme = "README.md"
6
+ requires-python = ">=3.12"
7
+ dependencies = [
8
+ "accelerate>=1.2.1",
9
+ "diffusers==0.31.0",
10
+ "gradio>=5.12.0",
11
+ "gradio-imageslider>=0.0.20",
12
+ "peft>=0.14.0",
13
+ "pillow>=11.1.0",
14
+ "safetensors>=0.5.2",
15
+ "sentencepiece>=0.2.0",
16
+ "spaces>=0.32.0",
17
+ "transformers>=4.48.0",
18
+ ]
requirements.txt ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ diffusers==0.31.0
2
+ transformers
3
+ accelerate
4
+ safetensors
5
+ sentencepiece
6
+ peft
7
+ gradio
8
+ spaces
9
+ pillow
10
+ gradio_imageslider