import os
import sys
import argparse
import tempfile
from pathlib import Path

import gradio as gr
import numpy as np
import torch
from matplotlib import pyplot as plt
from PIL import Image

from lama_inpaint import inpaint_img_with_lama, build_lama_model, \
    inpaint_img_with_builded_lama
from utils import load_img_to_array, save_array_to_img, dilate_mask, \
    show_mask, show_points

# make the vendored segment-anything package importable
sys.path.insert(0, str(Path(__file__).resolve().parent / "third_party" / "segment-anything"))
from segment_anything import SamPredictor, sam_model_registry


def setup_args(parser):
    parser.add_argument(
        "--lama_config", type=str,
        default="./third_party/lama/configs/prediction/default.yaml",
        help="The path to the config file of lama model. "
             "Default: the config of big-lama",
    )
    parser.add_argument(
        "--lama_ckpt", type=str,
        default="pretrained_models/big-lama",
        help="The path to the lama checkpoint.",
    )
    parser.add_argument(
        "--sam_ckpt", type=str,
        default="./pretrained_models/sam_vit_h_4b8939.pth",
        help="The path to the SAM checkpoint to use for mask generation.",
    )


def mkstemp(suffix, dir=None):
    """Create a closed temporary file and return its path."""
    fd, path = tempfile.mkstemp(suffix=f"{suffix}", dir=dir)
    os.close(fd)
    return Path(path)


def get_sam_feat(img):
    """Run the SAM image encoder once and return the cached embedding
    along with the image-size bookkeeping needed for later predictions."""
    model['sam'].set_image(img)
    features = model['sam'].features
    orig_h = model['sam'].orig_h
    orig_w = model['sam'].orig_w
    input_h = model['sam'].input_h
    input_w = model['sam'].input_w
    model['sam'].reset_image()
    return features, orig_h, orig_w, input_h, input_w


def get_masked_img(img, w, h, features, orig_h, orig_w, input_h, input_w,
                   dilate_kernel_size):
    """Predict masks for a single positive click at (w, h), reusing the
    cached SAM features instead of re-encoding the image."""
    point_coords = [w, h]
    point_labels = [1]

    # restore the cached encoder state so predict() can run without set_image()
    model['sam'].is_image_set = True
    model['sam'].features = features
    model['sam'].orig_h = orig_h
    model['sam'].orig_w = orig_w
    model['sam'].input_h = input_h
    model['sam'].input_w = input_w

    masks, _, _ = model['sam'].predict(
        point_coords=np.array([point_coords]),
        point_labels=np.array(point_labels),
        multimask_output=True,
    )
    masks = masks.astype(np.uint8) * 255

    # dilate the masks to avoid artifacts at the unmasked edges
    if dilate_kernel_size is not None:
        masks = [dilate_mask(mask, dilate_kernel_size) for mask in masks]
    else:
        masks = list(masks)

    figs = []
    for idx, mask in enumerate(masks):
        # save the pointed and masked image
        tmp_p = mkstemp(".png")
        dpi = plt.rcParams['figure.dpi']
        height, width = img.shape[:2]
        # the 0.77 factor compensates for the cropping done by bbox_inches='tight'
        fig = plt.figure(figsize=(width / dpi / 0.77, height / dpi / 0.77))
        plt.imshow(img)
        plt.axis('off')
        show_points(plt.gca(), [point_coords], point_labels,
                    size=(width * 0.04) ** 2)
        show_mask(plt.gca(), mask, random_color=False)
        plt.tight_layout()
        plt.savefig(tmp_p, bbox_inches='tight', pad_inches=0)
        figs.append(fig)
        plt.close()
    return *figs, *masks


def get_inpainted_img(img, mask):
    """Inpaint the masked region of img with the prebuilt LaMa model."""
    lama_config = args.lama_config
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if len(mask.shape) == 3:
        mask = mask[:, :, 0]
    img_inpainted = inpaint_img_with_builded_lama(
        model['lama'], img, mask, lama_config, device=device)
    return img_inpainted


# parse args
parser = argparse.ArgumentParser()
setup_args(parser)
args = parser.parse_args(sys.argv[1:])

# build models
model = {}
device = "cuda" if torch.cuda.is_available() else "cpu"

# build the SAM predictor
model_type = "vit_h"
model_sam = sam_model_registry[model_type](checkpoint=args.sam_ckpt)
model_sam.to(device=device)
model['sam'] = SamPredictor(model_sam)
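# A minimal sketch of driving the click-to-mask helpers above directly,
# outside the UI (the file name and click coordinates are illustrative
# assumptions):
#
#   img = load_img_to_array("example.png")
#   feats, orig_h, orig_w, input_h, input_w = get_sam_feat(img)
#   outputs = get_masked_img(img, 100, 150, feats,
#                            orig_h, orig_w, input_h, input_w,
#                            dilate_kernel_size=15)
#   figs, masks = outputs[:3], outputs[3:]  # three candidate masks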
device = "cuda" if torch.cuda.is_available() else "cpu" model['lama'] = build_lama_model(lama_config, lama_ckpt, device=device) image_input = gr.Image(label="Input Image") mask_input = gr.Image(label="Mask Image") demo = gr.Interface( fn=get_inpainted_img, inputs=[image_input, mask_input], outputs=gr.Image(type="numpy", label="Output Image"), title="Image and Mask Processor", description="Upload an image and a mask to process the image. The mask highlights the areas to be processed.", ) if __name__ == "__main__": demo.queue(api_open=True) demo.launch(show_api=True)