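# Gradio demo: edit an image from a text instruction with Stable Diffusion 1.5
# guided by the Instruct-Pix2Pix ControlNet (lllyasviel/control_v11e_sd15_ip2p).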
import gradio as gr
import numpy as np
import time
import math
import random
import torch
from diffusers import (
    ControlNetModel,
    StableDiffusionControlNetPipeline,
)
from PIL import Image
max_64_bit_int = 2**63 - 1
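# Run in float16 on GPU; fall back to float32 on CPU, where half precision is poorly supported.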
device = "cuda" if torch.cuda.is_available() else "cpu"
floatType = torch.float16 if torch.cuda.is_available() else torch.float32
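# The ip2p ControlNet conditions SD 1.5 on the input image itself, turning the
# text-to-image pipeline into an instruction-driven image editor.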
controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11e_sd15_ip2p", torch_dtype = floatType)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", safety_checker = None, controlnet = controlnet, torch_dtype = floatType
)
pipe = pipe.to(device)
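# Draw a fresh random seed when the "Randomize seed" checkbox is ticked.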
def update_seed(is_randomize_seed, seed):
if is_randomize_seed:
return random.randint(0, max_64_bit_int)
return seed
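# Validate the form; raising gr.Error aborts the event chain before generation starts.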
def check(
input_image,
prompt,
negative_prompt,
denoising_steps,
num_inference_steps,
guidance_scale,
image_guidance_scale,
seed,
progress = gr.Progress()):
if input_image is None:
raise gr.Error("Please provide an image.")
if prompt is None or prompt == "":
raise gr.Error("Please provide a prompt input.")
def pix2pix(
input_image,
prompt,
negative_prompt,
denoising_steps,
num_inference_steps,
guidance_scale,
image_guidance_scale,
seed,
progress = gr.Progress()):
check(
input_image,
prompt,
negative_prompt,
denoising_steps,
num_inference_steps,
guidance_scale,
image_guidance_scale,
seed
)
start = time.time()
progress(0, desc = "Preparing data...")
if negative_prompt is None:
negative_prompt = ""
if denoising_steps is None:
denoising_steps = 0
if num_inference_steps is None:
num_inference_steps = 20
if guidance_scale is None:
guidance_scale = 5
if image_guidance_scale is None:
image_guidance_scale = 1.5
if seed is None:
seed = random.randint(0, max_64_bit_int)
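    # Seed both Python's and PyTorch's RNGs so a given seed reproduces the same image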
random.seed(seed)
torch.manual_seed(seed)
    original_height, original_width = np.array(input_image).shape[:2]  # shape[:2] also works for grayscale input
output_width = original_width
output_height = original_height
    limitation = ""
    # Cap the working resolution at about one megapixel (1024 x 1024)
    if 1024 * 1024 < output_width * output_height:
        factor = ((1024 * 1024) / (output_width * output_height))**0.5
        output_width = math.floor(output_width * factor)
        output_height = math.floor(output_height * factor)
        limitation = " Due to a technical limitation, the image has been downscaled and then upscaled back."
    # Width and height must be multiples of 8 (the VAE downsamples by a factor of 8)
output_width = output_width - (output_width % 8)
output_height = output_height - (output_height % 8)
progress(None, desc = "Processing...")
    output_image = pipe(
        # `seeds` is not a diffusers argument; the seed is passed through a torch.Generator instead
        generator = torch.Generator(device).manual_seed(seed),
        width = output_width,
        height = output_height,
        prompt = prompt,
        negative_prompt = negative_prompt,
        # With the ip2p ControlNet, the input image itself is the conditioning image
        image = input_image,
        num_inference_steps = num_inference_steps,
        guidance_scale = guidance_scale
        # `mask_image`, `denoising_steps`, `image_guidance_scale` and `show_progress_bar` are not
        # StableDiffusionControlNetPipeline parameters (they were ignored or rejected depending on
        # the diffusers version), so they are no longer passed
    ).images[0]
    if limitation != "":
        # Restore the original size when the working resolution was capped
        output_image = output_image.resize((original_width, original_height), Image.LANCZOS)
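    # Report the elapsed time as hours / minutes / seconds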
end = time.time()
    seconds = int(end - start)
    minutes = seconds // 60
    seconds = seconds - (minutes * 60)
    hours = minutes // 60
    minutes = minutes - (hours * 60)
    return [
        output_image,
        f"Start again to get a different result. The new image is {output_width} pixels wide and {output_height} pixels high, i.e. {output_width * output_height:,} pixels in total. The image has been generated in {hours} h, {minutes} min, {seconds} sec." + limitation
    ]
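# Gradio user interface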
with gr.Blocks() as interface:
gr.Markdown(
"""
<p style="text-align: center;"><b><big><big><big>Instruct Pix2Pix demo</big></big></big></b></p>
<p style="text-align: center;">Modifies your image using a textual instruction, freely, without account, without watermark, without installation, which can be downloaded</p>
<br/>
<br/>
        🚀 Powered by <i>SD 1.5</i> and <i>ControlNet</i>. The result quality varies greatly depending on what you ask for.
<br/>
<ul>
<li>To change the <b>view angle</b> of your image, I recommend to use <i>Zero123</i>,</li>
<li>To <b>upscale</b> your image, I recommend to use <i>Ilaria Upscaler</i>,</li>
<li>To <b>slightly change</b> your image, I recommend to use <i>Image-to-Image SDXL</i>,</li>
<li>To change <b>one detail</b> on your image, I recommend to use <i>Inpaint SDXL</i>,</li>
<li>To remove the <b>background</b> of your image, I recommend to use <i>BRIA</i>,</li>
<li>To enlarge the <b>viewpoint</b> of your image, I recommend to use <i>Uncrop</i>,</li>
<li>To make a <b>tile</b> of your image, I recommend to use <i>Make My Image Tile</i>,</li>
</ul>
<br/>
        🐌 Slow process... ~1 hour. You can launch several generations in different browser tabs and let them run while you are away. If this space does not work or you want a faster run, use <i>Instruct Pix2Pix</i>, available on terrapretapermaculture's <i>ControlNet-v1-1</i> space (last tab) or on the <i>Dezgo</i> site.<br>You can duplicate this space with a free account; it works on CPU.<br/>
<a href='https://huggingface.co/spaces/Fabrice-TIERCELIN/Instruct-Pix2Pix?duplicate=true'><img src='https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14'></a>
<br/>
βš–οΈ You can use, modify and share the generated images but not for commercial uses.
"""
)
with gr.Column():
input_image = gr.Image(label = "Your image", sources = ["upload", "webcam", "clipboard"], type = "pil")
        prompt = gr.Textbox(label = 'Prompt', info = "Describe what to change in the image", placeholder = 'Tell the AI what to change in the image')
with gr.Accordion("Advanced options", open = False):
negative_prompt = gr.Textbox(label = 'Negative prompt', placeholder = 'Describe what you do NOT want to see in the image', value = 'Watermark')
denoising_steps = gr.Slider(minimum = 0, maximum = 1000, value = 0, step = 1, label = "Denoising", info = "lower=irrelevant result, higher=relevant result")
num_inference_steps = gr.Slider(minimum = 10, maximum = 500, value = 20, step = 1, label = "Number of inference steps", info = "lower=faster, higher=image quality")
guidance_scale = gr.Slider(minimum = 1, maximum = 13, value = 5, step = 0.1, label = "Classifier-Free Guidance Scale", info = "lower=image quality, higher=follow the prompt")
image_guidance_scale = gr.Slider(minimum = 1, value = 1.5, step = 0.1, label = "Image Guidance Scale", info = "lower=image quality, higher=follow the image")
            randomize_seed = gr.Checkbox(label = "\U0001F3B2 Randomize seed", value = True, info = "If checked, the result is always different")
seed = gr.Slider(minimum = 0, maximum = max_64_bit_int, step = 1, randomize = True, label = "Seed")
submit = gr.Button("Modify", variant = "primary")
modified_image = gr.Image(label = "Modified image")
information = gr.Label(label = "Information")
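        # Event chain: refresh the seed, then validate the inputs, and only on success run the generation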
submit.click(fn = update_seed, inputs = [
randomize_seed,
seed
], outputs = [
seed
], queue = False, show_progress = False).then(check, inputs = [
input_image,
prompt,
negative_prompt,
denoising_steps,
num_inference_steps,
guidance_scale,
image_guidance_scale,
seed
], outputs = [], queue = False, show_progress = False).success(pix2pix, inputs = [
input_image,
prompt,
negative_prompt,
denoising_steps,
num_inference_steps,
guidance_scale,
image_guidance_scale,
seed
], outputs = [
modified_image,
information
], scroll_to_output = True)
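    # Prefilled examples (not cached, because generation is slow)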
gr.Examples(
fn = pix2pix,
inputs = [
input_image,
prompt,
negative_prompt,
denoising_steps,
num_inference_steps,
guidance_scale,
image_guidance_scale,
seed
],
outputs = [
modified_image,
information
],
examples = [
[
"./Examples/Example1.webp",
"What if it's snowing?",
"Watermark",
1,
20,
5,
1.5,
42
],
[
"./Examples/Example2.png",
"What if this woman had brown hair?",
"Watermark",
1,
20,
5,
1.5,
42
],
[
"./Examples/Example3.jpeg",
"Replace the house by a windmill",
"Watermark",
1,
20,
5,
1.5,
42
],
[
"./Examples/Example4.gif",
"What if the camera was in opposite side?",
"Watermark",
1,
20,
5,
1.5,
42
],
[
"./Examples/Example5.bmp",
"Turn him into cyborg",
"Watermark",
1,
20,
5,
25,
42
],
],
cache_examples = False,
)
interface.queue().launch()