import os
import cv2
import numpy as np
import torch
import gradio as gr
import spaces

from glob import glob
from typing import Tuple

from PIL import Image
from gradio_imageslider import ImageSlider
from transformers import AutoModelForImageSegmentation
from torchvision import transforms

import requests
from io import BytesIO
import zipfile


torch.set_float32_matmul_precision('high')
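# Stub out torch.jit.script so any scripted modules in the model's remote code run
# in plain eager mode (an assumption: scripting is not needed for inference here).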
torch.jit.script = lambda f: f

device = "cuda" if torch.cuda.is_available() else "cpu"

### image_proc.py
def refine_foreground(image, mask, r=90):
    # Estimate clean foreground colors under the predicted alpha mask.
    if mask.size != image.size:
        mask = mask.resize(image.size)
    image = np.array(image) / 255.0
    mask = np.array(mask) / 255.0
    estimated_foreground = FB_blur_fusion_foreground_estimator_2(image, mask, r=r)
    image_masked = Image.fromarray((estimated_foreground * 255.0).astype(np.uint8))
    return image_masked


def FB_blur_fusion_foreground_estimator_2(image, alpha, r=90):
    # Thanks to the source: https://github.com/Photoroom/fast-foreground-estimation
    alpha = alpha[:, :, None]
    F, blur_B = FB_blur_fusion_foreground_estimator(
        image, image, image, alpha, r)
    return FB_blur_fusion_foreground_estimator(image, F, blur_B, alpha, r=6)[0]


def FB_blur_fusion_foreground_estimator(image, F, B, alpha, r=90):
    if isinstance(image, Image.Image):
        image = np.array(image) / 255.0
    blurred_alpha = cv2.blur(alpha, (r, r))[:, :, None]

    blurred_FA = cv2.blur(F * alpha, (r, r))
    blurred_F = blurred_FA / (blurred_alpha + 1e-5)

    blurred_B1A = cv2.blur(B * (1 - alpha), (r, r))
    blurred_B = blurred_B1A / ((1 - blurred_alpha) + 1e-5)
    F = blurred_F + alpha * \
        (image - alpha * blurred_F - (1 - alpha) * blurred_B)
    F = np.clip(F, 0, 1)
    return F, blurred_B
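# Note: the update above implements the blur-fusion rule from the repository linked
# in FB_blur_fusion_foreground_estimator_2: a smoothed foreground estimate corrected
# by the compositing residual,
#   F <- blur(F*a)/blur(a) + a * (I - a*blur(F*a)/blur(a) - (1-a)*blur(B*(1-a))/blur(1-a)),
# clipped to [0, 1].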


class ImagePreprocessor():
    def __init__(self, resolution: Tuple[int, int] = (1024, 1024)) -> None:
        self.transform_image = transforms.Compose([
            transforms.Resize(resolution),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ])

    def proc(self, image: Image.Image) -> torch.Tensor:
        image = self.transform_image(image)
        return image
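# A minimal usage sketch (hypothetical file name): the preprocessor yields a
# normalized 3x1024x1024 CHW tensor at the default resolution, e.g.
#   tensor = ImagePreprocessor((1024, 1024)).proc(Image.open('cat.jpg').convert('RGB'))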


usage_to_weights_file = {
    'General': 'BiRefNet',
    'General-Lite': 'BiRefNet_lite',
    'General-Lite-2K': 'BiRefNet_lite-2K',
    'Portrait': 'BiRefNet-portrait',
    'DIS': 'BiRefNet-DIS5K',
    'HRSOD': 'BiRefNet-HRSOD',
    'COD': 'BiRefNet-COD',
    'DIS-TR_TEs': 'BiRefNet-DIS5K-TR_TEs',
    'General-legacy': 'BiRefNet-legacy'
}

birefnet = AutoModelForImageSegmentation.from_pretrained('/'.join(('zhengpeng7', usage_to_weights_file['General'])), trust_remote_code=True)
birefnet.to(device)
birefnet.eval()
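# The default 'General' weights are loaded once at startup; predict() can swap in a
# different checkpoint per request (note that each call reloads the chosen weights).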


@spaces.GPU
def predict(images, resolution, weights_file):
    assert images is not None, 'images cannot be None.'

    global birefnet
    # Load BiRefNet with chosen weights
    _weights_file = '/'.join(('zhengpeng7', usage_to_weights_file[weights_file] if weights_file is not None else usage_to_weights_file['General']))
    print('Using weights: {}.'.format(_weights_file))
    birefnet = AutoModelForImageSegmentation.from_pretrained(_weights_file, trust_remote_code=True)
    birefnet.to(device)
    birefnet.eval()

    try:
        resolution = [int(reso) // 32 * 32 for reso in resolution.strip().split('x')]
    except (AttributeError, ValueError):
        resolution = (2560, 1440) if weights_file == 'General-Lite-2K' else (1024, 1024)
        print('Invalid resolution input. Falling back to {}x{}.'.format(*resolution))
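    # Each dimension is rounded down to a multiple of 32 (e.g., '1000x600' -> [992, 576]),
    # presumably because the backbone expects spatial sizes divisible by 32.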

    if isinstance(images, list):
        # For tab_batch
        save_paths = []
        save_dir = 'preds-BiRefNet'
        os.makedirs(save_dir, exist_ok=True)
        tab_is_batch = True
    else:
        images = [images]
        tab_is_batch = False

    for idx_image, image_src in enumerate(images):
        if isinstance(image_src, str):
            if os.path.isfile(image_src):
                image_ori = Image.open(image_src)
            else:
                # Treat non-file strings as URLs; fail loudly on bad responses.
                response = requests.get(image_src)
                response.raise_for_status()
                image_data = BytesIO(response.content)
                image_ori = Image.open(image_data)
        else:
            image_ori = Image.fromarray(image_src)

        image = image_ori.convert('RGB')
        # Preprocess the image
        image_preprocessor = ImagePreprocessor(resolution=tuple(resolution))
        image_proc = image_preprocessor.proc(image)
        image_proc = image_proc.unsqueeze(0)

        # Prediction
        with torch.no_grad():
            preds = birefnet(image_proc.to(device))[-1].sigmoid().cpu()
        pred = preds[0].squeeze()

        # Compose the result: refined foreground with the predicted mask as alpha channel.
        pred_pil = transforms.ToPILImage()(pred)
        image_masked = refine_foreground(image, pred_pil)
        image_masked.putalpha(pred_pil.resize(image.size))

        torch.cuda.empty_cache()

        if tab_is_batch:
            save_file_path = os.path.join(save_dir, "{}.png".format(os.path.splitext(os.path.basename(image_src))[0]))
            image_masked.save(save_file_path)
            save_paths.append(save_file_path)

    if tab_is_batch:
        zip_file_path = os.path.join(save_dir, "{}.zip".format(save_dir))
        with zipfile.ZipFile(zip_file_path, 'w') as zipf:
            for file in save_paths:
                zipf.write(file, os.path.basename(file))
        return save_paths, zip_file_path
    else:
        return (image_masked, image_ori)


examples = [[image_path] for image_path in glob('examples/*')]
# Pair each example image with a default resolution for the text box.
for idx_example, example in enumerate(examples):
    examples[idx_example].append('1024x1024')
# Duplicate the last example at a lower resolution.
examples.append(examples[-1].copy())
examples[-1][1] = '512x512'

examples_url = [
    ['https://hips.hearstapps.com/hmg-prod/images/gettyimages-1229892983-square.jpg'],
]
for idx_example_url, example_url in enumerate(examples_url):
    examples_url[idx_example_url].append('1024x1024')

descriptions = ('Upload a picture; our model will extract a highly accurate segmentation of its subject.\n'
                ' The model was trained at a resolution of `1024x1024`, so that is the suggested resolution for the best results!\n'
                ' Our code can be found at https://github.com/ZhengPeng7/BiRefNet.\n'
                ' We also maintain the HF model of BiRefNet at https://huggingface.co/ZhengPeng7/BiRefNet for easier access.')

tab_image = gr.Interface(
    fn=predict,
    inputs=[
        gr.Image(label='Upload an image'),
        gr.Textbox(lines=1, placeholder="Type the resolution (`WxH`) you want, e.g., `1024x1024`.", label="Resolution"),
        gr.Radio(list(usage_to_weights_file.keys()), value='General', label="Weights", info="Choose the weights you want.")
    ],
    outputs=ImageSlider(label="BiRefNet's prediction", type="pil"),
    examples=examples,
    api_name="image",
    description=descriptions,
)

tab_text = gr.Interface(
    fn=predict,
    inputs=[
        gr.Textbox(label="Paste an image URL"),
        gr.Textbox(lines=1, placeholder="Type the resolution (`WxH`) you want, e.g., `1024x1024`.", label="Resolution"),
        gr.Radio(list(usage_to_weights_file.keys()), value='General', label="Weights", info="Choose the weights you want.")
    ],
    outputs=ImageSlider(label="BiRefNet's prediction", type="pil"),
    examples=examples_url,
    api_name="text",
    description=descriptions+'\nTab-URL is partially modified from https://huggingface.co/spaces/not-lain/background-removal, thanks to this great work!',
)

tab_batch = gr.Interface(
    fn=predict,
    inputs=[
        gr.File(label="Upload multiple images", type="filepath", file_count="multiple"),
        gr.Textbox(lines=1, placeholder="Type the resolution (`WxH`) you want, e.g., `1024x1024`.", label="Resolution"),
        gr.Radio(list(usage_to_weights_file.keys()), value='General', label="Weights", info="Choose the weights you want.")
    ],
    outputs=[gr.Gallery(label="BiRefNet's predictions"), gr.File(label="Download masked images.")],
    api_name="batch",
    description=descriptions+'\nTab-batch is partially modified from https://huggingface.co/spaces/NegiTurkey/Multi_Birefnetfor_Background_Removal, thanks to this great work!',
)

demo = gr.TabbedInterface(
    [tab_image, tab_text, tab_batch],
    ['image', 'text', 'batch'],
    title="BiRefNet demo for subject extraction (general / salient / camouflaged / portrait).",
)

if __name__ == "__main__":
    demo.launch(debug=True)
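
# A minimal client-side sketch (an assumption, not part of this app): once the demo is
# running, it can be called programmatically via the gradio_client package. The Space
# ID below is hypothetical; replace it with the actual deployment.
#
#   from gradio_client import Client
#   client = Client('ZhengPeng7/BiRefNet_demo')  # hypothetical Space ID
#   result = client.predict(
#       'https://example.com/image.jpg',  # image URL (the tab_text input)
#       '1024x1024',                      # resolution
#       'General',                        # weights choice
#       api_name='/text',
#   )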