import gradio as gr
import numpy as np
import time
import math
import random
import torch
import spaces

from diffusers import (
    ControlNetModel,
    StableDiffusionControlNetPipeline,
)
from PIL import Image
from pillow_heif import register_heif_opener

# Allow PIL to open HEIF/HEIC images.
register_heif_opener()

# Upper bound for user-selectable seeds (the np.int32 maximum, despite the variable name).
max_64_bit_int = np.iinfo(np.int32).max

if torch.cuda.is_available():
    device = "cuda"
    floatType = torch.float16
else:
    device = "cpu"
    floatType = torch.float32

controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11e_sd15_ip2p", torch_dtype = floatType)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "botp/stable-diffusion-v1-5", safety_checker = None, controlnet = controlnet, torch_dtype = floatType
)
pipe = pipe.to(device)
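
# update_seed runs first in the click chain below: it re-rolls the seed when "Randomize seed" is checked,
# otherwise it returns the user's seed unchanged.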
def update_seed(is_randomize_seed, seed):
    if is_randomize_seed:
        return random.randint(0, max_64_bit_int)
    return seed
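
# check only validates the inputs; it is wired as a fast pre-flight step in the click chain
# and also called again at the start of pix2pix.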
def check(
    input_image,
    prompt,
    negative_prompt,
    denoising_steps,
    num_inference_steps,
    guidance_scale,
    image_guidance_scale,
    is_randomize_seed,
    seed,
    progress = gr.Progress()):
    if input_image is None:
        raise gr.Error("Please provide an image.")

    if prompt is None or prompt == "":
        raise gr.Error("Please provide a prompt input.")
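
# pix2pix is the main generation handler. On ZeroGPU Spaces, @spaces.GPU reserves a GPU for this call
# (up to 420 seconds); on regular CPU/GPU hardware the decorator has no effect.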
@spaces.GPU(duration=420)
def pix2pix(
    input_image,
    prompt,
    negative_prompt,
    denoising_steps,
    num_inference_steps,
    guidance_scale,
    image_guidance_scale,
    is_randomize_seed,
    seed,
    progress = gr.Progress()):
    check(
        input_image,
        prompt,
        negative_prompt,
        denoising_steps,
        num_inference_steps,
        guidance_scale,
        image_guidance_scale,
        is_randomize_seed,
        seed
    )
    start = time.time()
    progress(0, desc = "Preparing data...")

    # Fill in defaults for any optional input that arrives as None.
    if negative_prompt is None:
        negative_prompt = ""

    if denoising_steps is None:
        denoising_steps = 0

    if num_inference_steps is None:
        num_inference_steps = 20

    if guidance_scale is None:
        guidance_scale = 5

    if image_guidance_scale is None:
        image_guidance_scale = 1.5

    if seed is None:
        seed = random.randint(0, max_64_bit_int)

    # Seed Python's and PyTorch's RNGs so a given seed reproduces the same result.
    random.seed(seed)
    torch.manual_seed(seed)

    original_height, original_width, dummy_channel = np.array(input_image).shape
    output_width = original_width
    output_height = original_height
    mask_image = Image.new(mode = input_image.mode, size = (output_width, output_height), color = "white")

    limitation = ""

    # Cap the working resolution at about one megapixel, preserving the aspect ratio.
    if 1024 * 1024 < output_width * output_height:
        factor = ((1024 * 1024) / (output_width * output_height))**0.5
        output_width = math.floor(output_width * factor)
        output_height = math.floor(output_height * factor)

        limitation = " Due to technical limitations, the image has been downscaled and then upscaled."

    # Width and height must be multiples of 8 for the diffusion model.
    output_width = output_width - (output_width % 8)
    output_height = output_height - (output_height % 8)
    progress(None, desc = "Processing...")

    # Note: reproducibility comes from the global torch seed set above; keyword arguments that the
    # installed StableDiffusionControlNetPipeline does not recognize may simply be ignored.
    output_image = pipe(
        seeds=[seed],
        width = output_width,
        height = output_height,
        prompt = prompt,
        negative_prompt = negative_prompt,
        image = input_image,
        mask_image = mask_image,
        num_inference_steps = num_inference_steps,
        guidance_scale = guidance_scale,
        image_guidance_scale = image_guidance_scale,
        denoising_steps = denoising_steps,
        show_progress_bar = True
    ).images[0]

    # If the image was downscaled for processing, scale the result back to the original size.
    if limitation != "":
        output_image = output_image.resize((original_width, original_height))

    # Report the elapsed time in hours, minutes and seconds.
    end = time.time()
    secondes = int(end - start)
    minutes = math.floor(secondes / 60)
    secondes = secondes - (minutes * 60)
    hours = math.floor(minutes / 60)
    minutes = minutes - (hours * 60)
    return [
        output_image,
        ("Start again to get a different result. " if is_randomize_seed else "") + "The image has been generated in " + ((str(hours) + " h, ") if hours != 0 else "") + ((str(minutes) + " min, ") if hours != 0 or minutes != 0 else "") + str(secondes) + " sec." + limitation
    ]
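
# Gradio UI: an input image and an instruction prompt, advanced generation options in an accordion,
# and the modified image plus a human-readable summary as outputs.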

with gr.Blocks() as interface:
    gr.HTML(
        """
        <h1 style="text-align: center;">Instruct Pix2Pix demo</h1>
        <p style="text-align: center;">Modify your image with a textual instruction: free, no account, no watermark, no installation, and the result can be downloaded</p>
        <br/>
        <br/>
        ✨ Powered by <i>SD 1.5</i> and <i>ControlNet</i>. The result quality varies greatly depending on what you ask for.
        <br/>
        <ul>
            <li>To change the <b>view angle</b> of your image, I recommend using <i>Zero123</i>,</li>
            <li>To <b>upscale</b> your image, I recommend using <i><a href="https://huggingface.co/spaces/Fabrice-TIERCELIN/SUPIR">SUPIR</a></i>,</li>
            <li>To <b>slightly change</b> your image, I recommend using <i>Image-to-Image SDXL</i>,</li>
            <li>To change <b>one detail</b> in your image, I recommend using <i>Inpaint SDXL</i>,</li>
            <li>To remove the <b>background</b> of your image, I recommend using <i>BRIA</i>,</li>
            <li>To enlarge the <b>viewpoint</b> of your image, I recommend using <i>Uncrop</i>,</li>
            <li>To make a <b>tile</b> of your image, I recommend using <i>Make My Image Tile</i>,</li>
        </ul>
        <br/>
        """ + ("Estimated time: a few minutes." if torch.cuda.is_available() else "Slow process... ~1 hour.") + """
        Your computer must not enter standby mode. You can launch several generations in different browser tabs while you are away. If this space does not work or you want a faster run, use <i>Instruct Pix2Pix</i> available on hysts's <i>ControlNet-v1-1</i> space (last tab) or on the <i>Dezgo</i> site.<br>You can duplicate this space with a free account; it is designed to work on CPU, GPU and ZeroGPU.<br/>
        <a href='https://huggingface.co/spaces/Fabrice-TIERCELIN/Instruct-Pix2Pix?duplicate=true'><img src='https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14'></a>
        <br/>
        You can use, modify and share the generated images, but not for commercial purposes.
        """
    )

    with gr.Column():
        input_image = gr.Image(label = "Your image", sources = ["upload", "webcam", "clipboard"], type = "pil")
        prompt = gr.Textbox(label = "Prompt", info = "Instruct what to change in the image", placeholder = "Tell the AI what to change in the image", lines = 2)
        with gr.Accordion("Advanced options", open = False):
            negative_prompt = gr.Textbox(label = "Negative prompt", placeholder = "Describe what you do NOT want to see in the image", value = ''
                'blurring, aliasing, unsharp, weird textures, ugly, dirty, messy, '
                'worst quality, low quality, frames, watermark, signature, jpeg artifacts, '
                'deformed, lowres, over-smooth')
            denoising_steps = gr.Slider(minimum = 0, maximum = 1000, value = 0, step = 1, label = "Denoising", info = "lower=irrelevant result, higher=relevant result")
            num_inference_steps = gr.Slider(minimum = 10, maximum = 500, value = 20, step = 1, label = "Number of inference steps", info = "lower=faster, higher=image quality")
            guidance_scale = gr.Slider(minimum = 1, maximum = 13, value = 5, step = 0.1, label = "Guidance Scale", info = "lower=image quality, higher=follow the prompt")
            image_guidance_scale = gr.Slider(minimum = 1, value = 1.5, step = 0.1, label = "Image Guidance Scale", info = "lower=image quality, higher=follow the image")
            randomize_seed = gr.Checkbox(label = "\U0001F3B2 Randomize seed", value = True, info = "If checked, the result is always different")
            seed = gr.Slider(minimum = 0, maximum = max_64_bit_int, step = 1, randomize = True, label = "Seed")

        submit = gr.Button("Modify", variant = "primary")

        modified_image = gr.Image(label = "Modified image")
        information = gr.HTML()
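
        # Event chain: re-roll the seed (update_seed), validate the inputs (check), then run the generation (pix2pix).
        # The first two steps use queue = False so they respond immediately; only pix2pix goes through the queue.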
        submit.click(fn = update_seed, inputs = [
            randomize_seed,
            seed
        ], outputs = [
            seed
        ], queue = False, show_progress = False).then(check, inputs = [
            input_image,
            prompt,
            negative_prompt,
            denoising_steps,
            num_inference_steps,
            guidance_scale,
            image_guidance_scale,
            randomize_seed,
            seed
        ], outputs = [], queue = False, show_progress = False).success(pix2pix, inputs = [
            input_image,
            prompt,
            negative_prompt,
            denoising_steps,
            num_inference_steps,
            guidance_scale,
            image_guidance_scale,
            randomize_seed,
            seed
        ], outputs = [
            modified_image,
            information
        ], scroll_to_output = True)
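
        # Clickable examples: run_on_click = True runs pix2pix directly when an example is selected,
        # and cache_examples = False avoids generating every example output at startup.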
        gr.Examples(
            run_on_click = True,
            fn = pix2pix,
            inputs = [
                input_image,
                prompt,
                negative_prompt,
                denoising_steps,
                num_inference_steps,
                guidance_scale,
                image_guidance_scale,
                randomize_seed,
                seed
            ],
            outputs = [
                modified_image,
                information
            ],
            examples = [
                [
                    "./Examples/Example1.webp",
                    "What if it's snowing?",
                    "blurring, aliasing, unsharp, weird textures, ugly, dirty, messy, "
                    "worst quality, low quality, frames, watermark, signature, jpeg artifacts, "
                    "deformed, lowres, over-smooth",
                    1,
                    20,
                    5,
                    1.5,
                    False,
                    42
                ],
                [
                    "./Examples/Example2.png",
                    "What if this woman had brown hair?",
                    "blurring, aliasing, unsharp, weird textures, ugly, dirty, messy, "
                    "worst quality, low quality, frames, watermark, signature, jpeg artifacts, "
                    "deformed, lowres, over-smooth",
                    1,
                    20,
                    5,
                    1.5,
                    False,
                    42
                ],
                [
                    "./Examples/Example3.jpeg",
                    "Replace the house with a windmill",
                    "blurring, aliasing, unsharp, weird textures, ugly, dirty, messy, "
                    "worst quality, low quality, frames, watermark, signature, jpeg artifacts, "
                    "deformed, lowres, over-smooth",
                    1,
                    20,
                    5,
                    1.5,
                    False,
                    42
                ],
                [
                    "./Examples/Example4.gif",
                    "What if the camera was on the opposite side?",
                    "blurring, aliasing, unsharp, weird textures, ugly, dirty, messy, "
                    "worst quality, low quality, frames, watermark, signature, jpeg artifacts, "
                    "deformed, lowres, over-smooth",
                    1,
                    20,
                    5,
                    1.5,
                    False,
                    42
                ],
                [
                    "./Examples/Example5.bmp",
                    "Turn him into a cyborg",
                    "blurring, aliasing, unsharp, weird textures, ugly, dirty, messy, "
                    "worst quality, low quality, frames, watermark, signature, jpeg artifacts, "
                    "deformed, lowres, over-smooth",
                    1,
                    20,
                    5,
                    25,
                    False,
                    42
                ],
            ],
            cache_examples = False,
        )
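
# Typical usage on Hugging Face Spaces: save this script as app.py and the Space runtime starts it automatically.
# Locally, assuming the dependencies imported above are installed, run `python app.py` and open the printed URL.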

interface.queue().launch()