blanchon committed

Commit b446d80 · Parent: f867cc1

First Commit

Files changed (4):
  1. README.md +24 -6
  2. app.py +170 -0
  3. pyproject.toml +18 -0
  4. requirements.txt +10 -0
README.md CHANGED
@@ -1,12 +1,30 @@
 ---
-title: VirtualUnstagingDemo
-emoji:
-colorFrom: blue
-colorTo: purple
+title: VirtualUnstaging
+emoji: 🌖
+colorFrom: pink
+colorTo: red
 sdk: gradio
+python_version: 3.12
 sdk_version: 5.12.0
+suggested_hardware: a100-large
 app_file: app.py
-pinned: false
+# fullWidth: true
+# header: mini
+# models: blanchon/VirtualUnstagingModel
+# datasets: blanchon/VirtualUnstagingDataset
+tags:
+  - image-generation
+  - image-to-image
+  - furniture
+  - virtual-staging
+  - home-decor
+  - home-design
+pinned: true
+# preload_from_hub:
+#   - blanchon/VirtualUnstagingModel
+license: mit
 ---
 
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+# VirtualUnstaging
+
+...
app.py ADDED
@@ -0,0 +1,170 @@
+import os
+import torch
+from PIL import Image
+from diffusers import DiffusionPipeline
+import gradio as gr
+import spaces
+
+DEVICE = "cuda"
+
+MAIN_MODEL_REPO_ID = os.getenv("MAIN_MODEL_REPO_ID", None)
+SUB_MODEL_REPO_ID = os.getenv("SUB_MODEL_REPO_ID", None)
+SUB_MODEL_SUBFOLDER = os.getenv("SUB_MODEL_SUBFOLDER", None)
+
+if MAIN_MODEL_REPO_ID is None:
+    raise ValueError("MAIN_MODEL_REPO_ID is not set")
+if SUB_MODEL_REPO_ID is None:
+    raise ValueError("SUB_MODEL_REPO_ID is not set")
+if SUB_MODEL_SUBFOLDER is None:
+    raise ValueError("SUB_MODEL_SUBFOLDER is not set")
+
+pipeline = DiffusionPipeline.from_pretrained(
+    MAIN_MODEL_REPO_ID,
+    torch_dtype=torch.bfloat16,
+    custom_pipeline=SUB_MODEL_REPO_ID,
+).to(DEVICE)
+
+
+def crop_divisible_by_16(image: Image.Image) -> Image.Image:
+    w, h = image.size
+    w = w - w % 16
+    h = h - h % 16
+    return image.crop((0, 0, w, h))
+
+
+@spaces.GPU(duration=150)
+def predict(
+    room_image_input: Image.Image,
+    seed: int = 0,
+    num_inference_steps: int = 28,
+    max_dimension: int = 1024,
+    condition_scale: float = 1.0,
+    progress: gr.Progress = gr.Progress(track_tqdm=True),  # noqa: ARG001, B008
+) -> Image.Image:
+    pipeline.load(
+        SUB_MODEL_REPO_ID,
+        subfolder=SUB_MODEL_SUBFOLDER,
+    )
+
+    # Resize to max dimension
+    room_image_input.thumbnail((max_dimension, max_dimension))
+    # Ensure dimensions are multiple of 16 (for VAE)
+    room_image_input = crop_divisible_by_16(room_image_input)
+
+    prompt = "[VIRTUAL STAGING]. An empty room."
+
+    generator = torch.Generator(device="cpu").manual_seed(seed)
+
+    final_image = pipeline(
+        condition_image=room_image_input,
+        condition_scale=condition_scale,
+        prompt=prompt,
+        num_inference_steps=num_inference_steps,
+        generator=generator,
+        max_sequence_length=512,
+    ).images[0]
+
+    return final_image
+
+
+intro_markdown = r"""
+# Virtual UnStaging Demo
+"""
+
+css = r"""
+#col-left {
+    margin: 0 auto;
+    max-width: 650px;
+}
+#col-right {
+    margin: 0 auto;
+    max-width: 650px;
+}
+#col-showcase {
+    margin: 0 auto;
+    max-width: 1100px;
+}
+"""
+
+
+with gr.Blocks(css=css) as demo:
+    gr.Markdown(intro_markdown)
+
+    with gr.Row() as content:
+        with gr.Column(elem_id="col-left"):
+            gr.HTML(
+                """
+                <div style="display: flex; justify-content: center; align-items: center; text-align: center; font-size: 20px;">
+                    <div>
+                        Step 1. Upload a room image ⬇️
+                    </div>
+                </div>
+                """,
+                max_height=50,
+            )
+            room_image_input = gr.Image(
+                label="room",
+                type="pil",
+                sources=["upload"],
+                image_mode="RGB",
+            )
+        with gr.Column(elem_id="col-right"):
+            gr.HTML(
+                """
+                <div style="display: flex; justify-content: center; align-items: center; text-align: center; font-size: 20px;">
+                    <div>
+                        Step 2. Press Run to launch
+                    </div>
+                </div>
+                """,
+                max_height=50,
+            )
+            result = gr.Image(label="result")
+            run_button = gr.Button("Run")
+
+    with gr.Accordion("Advanced Settings", open=False):
+        seed = gr.Slider(
+            label="Seed",
+            minimum=0,
+            maximum=100_000,
+            step=1,
+            value=0,
+        )
+        condition_scale = gr.Slider(
+            label="Condition Scale",
+            minimum=-10.0,
+            maximum=10.0,
+            step=0.10,
+            value=1.0,
+        )
+        with gr.Column():
+            max_dimension = gr.Slider(
+                label="Max Dimension",
+                minimum=512,
+                maximum=2048,
+                step=128,
+                value=1024,
+            )
+
+            num_inference_steps = gr.Slider(
+                label="Number of inference steps",
+                minimum=1,
+                maximum=50,
+                step=1,
+                value=28,
+            )
+
+    run_button.click(
+        fn=predict,
+        inputs=[
+            room_image_input,
+            seed,
+            num_inference_steps,
+            max_dimension,
+            condition_scale,
+        ],
+        outputs=[result],
+    )
+
+
+demo.launch()
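
For reference, a minimal sketch of exercising the same pipeline outside Gradio and the ZeroGPU decorator. The repo IDs below are hypothetical placeholders (the Space reads the real values from environment secrets), and `pipeline.load` is taken from app.py above as a method of the custom pipeline rather than a standard diffusers API:

import torch
from PIL import Image
from diffusers import DiffusionPipeline

# Placeholders: the Space keeps the real repo IDs in env secrets.
MAIN_MODEL_REPO_ID = "your-org/your-base-model"      # hypothetical
SUB_MODEL_REPO_ID = "your-org/your-custom-pipeline"  # hypothetical
SUB_MODEL_SUBFOLDER = "your-subfolder"               # hypothetical

# Same construction as app.py: base weights plus a custom pipeline class.
pipeline = DiffusionPipeline.from_pretrained(
    MAIN_MODEL_REPO_ID,
    torch_dtype=torch.bfloat16,
    custom_pipeline=SUB_MODEL_REPO_ID,
).to("cuda")
pipeline.load(SUB_MODEL_REPO_ID, subfolder=SUB_MODEL_SUBFOLDER)  # custom-pipeline method

# Preprocess exactly as predict() does: bound the size, then crop both
# dimensions to multiples of 16 so the VAE accepts them.
image = Image.open("room.jpg").convert("RGB")
image.thumbnail((1024, 1024))
w, h = image.size
image = image.crop((0, 0, w - w % 16, h - h % 16))

result = pipeline(
    condition_image=image,
    condition_scale=1.0,
    prompt="[VIRTUAL STAGING]. An empty room.",
    num_inference_steps=28,
    generator=torch.Generator(device="cpu").manual_seed(0),
    max_sequence_length=512,
).images[0]
result.save("unstaged.png")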
pyproject.toml ADDED
@@ -0,0 +1,18 @@
+[project]
+name = "VirtualStaging"
+version = "0.1.0"
+description = "Add your description here"
+readme = "README.md"
+requires-python = ">=3.12"
+dependencies = [
+    "accelerate>=1.2.1",
+    "diffusers==0.31.0",
+    "gradio>=5.12.0",
+    "gradio-imageslider>=0.0.20",
+    "peft>=0.14.0",
+    "pillow>=11.1.0",
+    "safetensors>=0.5.2",
+    "sentencepiece>=0.2.0",
+    "spaces>=0.32.0",
+    "transformers>=4.48.0",
+]
requirements.txt ADDED
@@ -0,0 +1,10 @@
+diffusers==0.31.0
+transformers
+accelerate
+safetensors
+sentencepiece
+peft
+gradio
+spaces
+pillow
+gradio_imageslider
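
Both dependency files pin diffusers to exactly 0.31.0 while leaving the other packages floating, presumably because the custom pipeline targets that release's internals. A minimal sketch of a fail-fast guard one could add at startup (the check and message are illustrative, not part of this commit):

# Fail fast if the installed diffusers drifts from the pin in requirements.txt.
import diffusers

if diffusers.__version__ != "0.31.0":
    raise RuntimeError(
        f"Expected diffusers 0.31.0 (pinned), found {diffusers.__version__}"
    )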