# Hugging Face Space: FLUX.1-dev image editing demo.
# Hardware: ZeroGPU (GPU is attached per-call via the @spaces.GPU decorator).
import gradio as gr
import torch
import spaces
from diffusers import FluxControlPipeline, FluxTransformer2DModel
####################################
#    Load the model(s) on CPU      #
####################################
# The edit transformer is a fine-tune of the FLUX.1-dev transformer that
# performs instruction-based image editing; it is swapped into the base
# FluxControlPipeline below.
# NOTE(review): weights are loaded in float32 on CPU at import time; the
# @spaces.GPU-decorated handler is expected to provide the GPU context —
# confirm the Space moves the pipeline to CUDA (e.g. pipeline.to("cuda"))
# if inference is too slow.
edit_transformer = FluxTransformer2DModel.from_pretrained(
    "sayakpaul/FLUX.1-dev-edit-v0",
    torch_dtype=torch.float32,
)
pipeline = FluxControlPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    transformer=edit_transformer,
    torch_dtype=torch.float32,
)
#####################################
# The function for our Gradio app #
#####################################
@spaces.GPU(duration=120)
def generate(prompt, input_image):
    """Edit ``input_image`` according to ``prompt`` with the Flux Control pipeline.

    Args:
        prompt: Text instruction describing the edit to apply.
        input_image: PIL image to edit (Gradio supplies it via ``type="pil"``).

    Returns:
        The edited PIL image (same width/height as the input).

    Raises:
        gr.Error: If no input image was provided.
    """
    # Fail fast with a user-visible message instead of an opaque
    # AttributeError on `input_image.height` deep inside the call below.
    if input_image is None:
        raise gr.Error("Please provide an input image.")
    # Fixed seed keeps results reproducible across runs.
    output_image = pipeline(
        control_image=input_image,
        prompt=prompt,
        guidance_scale=30.0,
        num_inference_steps=50,
        max_sequence_length=512,
        height=input_image.height,
        width=input_image.width,
        generator=torch.manual_seed(0),
    ).images[0]
    return output_image
def launch_app():
    """Build and return the Gradio Blocks UI for the editing demo.

    Returns:
        The constructed ``gr.Blocks`` demo (caller is responsible for
        invoking ``.launch()``).
    """
    with gr.Blocks() as demo:
        gr.Markdown(
            """
            # Flux Control Editing
            This demo uses the [FLUX.1-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev)
            pipeline with an edit transformer from [Sayak Paul](https://huggingface.co/sayakpaul).
            **Acknowledgements**:
            - [Sayak Paul](https://huggingface.co/sayakpaul) for open-sourcing FLUX.1-dev-edit-v0
            - [black-forest-labs](https://huggingface.co/black-forest-labs) for FLUX.1-dev
            """
        )
        # Inputs side by side: edit instruction and the image to edit.
        with gr.Row():
            prompt = gr.Textbox(
                label="Prompt",
                placeholder="e.g. 'Edit a certain thing in the image'",
            )
            input_image = gr.Image(
                label="Image",
                type="pil",
            )
        generate_button = gr.Button("Generate")
        output_image = gr.Image(label="Edited Image")
        # Connect button to function
        generate_button.click(
            fn=generate,
            inputs=[prompt, input_image],
            outputs=[output_image],
        )
        # Clickable example prompts; image paths are relative to the app root.
        gr.Examples(
            examples=[
                ["Turn the color of the mushroom to gray", "mushroom.jpg"],
                ["Make the mushroom polka-dotted", "mushroom.jpg"],
            ],
            inputs=[prompt, input_image],
        )
    return demo
# Script entry point: build the UI and start the Gradio server.
if __name__ == "__main__":
    demo = launch_app()
    demo.launch()