from diffusers import StableDiffusionXLInpaintPipeline
import gradio as gr
import imageio
from PIL import Image
import torch

# Use the GPU when one is available, otherwise fall back to the CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the SDXL 1.0 base checkpoint and move it to the selected device.
pipe = StableDiffusionXLInpaintPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0")
pipe = pipe.to(device)
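
# Optional: on a CUDA machine the same checkpoint is often loaded in half
# precision to reduce memory use. A minimal sketch (not part of the original
# app, so it is left commented out; assumes a GPU with enough VRAM):
#
# pipe = StableDiffusionXLInpaintPipeline.from_pretrained(
#     "stabilityai/stable-diffusion-xl-base-1.0",
#     torch_dtype=torch.float16,
#     variant="fp16",
# ).to("cuda")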

def resize(value, img):
    # Open the saved image and resize it to a square of side `value`
    # (the aspect ratio is not preserved).
    img = Image.open(img)
    img = img.resize((value, value))
    return img

def predict(source_img, prompt, negative_prompt):
    # Gradio's sketch tool delivers the upload and the hand-drawn mask as numpy
    # arrays; write both to disk, then resize them to 768x768 for the pipeline.
    imageio.imwrite("data.png", source_img["image"])
    imageio.imwrite("data_mask.png", source_img["mask"])
    src = resize(768, "data.png")
    src.save("src.png")
    mask = resize(768, "data_mask.png")
    mask.save("mask.png")
    # White areas of the mask are regenerated from the prompt; black areas are
    # kept from the source image.
    image = pipe(prompt=prompt, negative_prompt=negative_prompt, image=src, mask_image=mask, num_inference_steps=20).images[0]
    return image

title="SDXL 1.0 Inpainting CPU"
description=""
gr.Interface(fn=predict, inputs=[gr.Image(source="upload", type="numpy", 
tool="sketch", elem_id="source_container"), 
gr.Textbox(label='What you want the AI to Generate, 77 Token limit'), 
gr.Textbox(label='What you Do Not want the AI to generate')], 
outputs='image', 
title=title, 
description=description, 
article = "<br><br><br><br><br><br><br><br><br><br>").launch(max_threads=True, debug=True)
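
# For quick local testing without the web UI, the pipeline can also be called
# directly. A minimal, hypothetical sketch (file names and prompt are
# placeholders, not part of the app above), left commented out so it does not
# run when the script starts:
#
# init = Image.open("test_photo.png").convert("RGB").resize((768, 768))
# mask = Image.open("test_mask.png").convert("RGB").resize((768, 768))
# out = pipe(prompt="a red sports car", image=init, mask_image=mask,
#            num_inference_steps=20).images[0]
# out.save("inpainted.png")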