# NOTE(review): removed scraped Hugging Face Spaces page chrome that was fused
# into this file (running-status lines, file-size line, git blame hashes, and
# a line-number gutter). None of it was part of the original source.
import gradio as gr
import requests
import json
import io
import random
import os
from PIL import Image
# Hugging Face Inference API prefix; a model id is appended to form the endpoint.
API_BASE_URL = "https://api-inference.huggingface.co/models/"

# Models offered in the UI dropdown; only these ids are accepted by select_model.
MODEL_LIST = [
    "openskyml/dalle-3-xl",
    "Linaqruf/animagine-xl-2.0",
    "Lykon/dreamshaper-7",
    "Linaqruf/animagine-xl",
    "runwayml/stable-diffusion-v1-5",
    "stabilityai/stable-diffusion-xl-base-1.0",
    "prompthero/openjourney-v4",
    "nerijs/pixel-art-xl",
    "Linaqruf/anything-v3.0",
    "playgroundai/playground-v2-1024px-aesthetic"
]

# Read token for the Inference API. Make sure to set your Hugging Face token;
# if HF_READ_TOKEN is unset this becomes None and the header reads "Bearer None",
# so requests are effectively unauthenticated.
API_TOKEN = os.getenv("HF_READ_TOKEN")
HEADERS = {"Authorization": f"Bearer {API_TOKEN}"}


def select_model(model_name):
    """Return the full Inference API URL for a known model.

    Args:
        model_name: A model id; must be one of the entries in MODEL_LIST.

    Returns:
        str: API_BASE_URL + model_name.

    Raises:
        ValueError: If `model_name` is not in MODEL_LIST. (The original code
            fell through and implicitly returned None here, which made the
            caller POST to the literal URL "None".)
    """
    if model_name in MODEL_LIST:
        return API_BASE_URL + model_name
    raise ValueError(f"Unknown model: {model_name!r}")
def extend_prompt(input_text):
    """Expand a short prompt via the MagicPrompt-Stable-Diffusion model.

    Args:
        input_text: The user's prompt text; blank input is rejected.

    Returns:
        str | None: The generated (extended) prompt, or None on any failure
        (a Gradio warning/error is surfaced to the UI instead of raising).
    """
    if not input_text.strip():
        gr.Warning("Input text is empty!")
        return None
    api_url = f"{API_BASE_URL}Gustavosta/MagicPrompt-Stable-Diffusion"
    payload = {"inputs": input_text}
    try:
        response = requests.post(api_url, headers=HEADERS, json=payload, timeout=60)
        # Surface HTTP-level failures (401, 503 model loading, ...) as
        # RequestException instead of trying to index an error payload.
        response.raise_for_status()
        data = response.json()
    except requests.exceptions.RequestException as e:
        gr.Error(f"Error in API request: {e}")
        return None
    except ValueError as e:
        # .json() raises ValueError (JSONDecodeError) on a non-JSON body; the
        # original code chained .json() in the same try but only caught
        # RequestException, so this crashed the handler.
        gr.Error(f"Error in API request: {e}")
        return None
    # A successful call returns a list like [{"generated_text": "..."}];
    # guard the shape so an unexpected payload can't raise KeyError/IndexError.
    if isinstance(data, list) and data and isinstance(data[0], dict):
        return data[0].get("generated_text", "")
    gr.Error(f"Error in API request: unexpected response {data!r}")
    return None
def generate_image(prompt, selected_model, is_negative=False, steps=1, cfg_scale=6, seed=None):
    """Generate an image from `prompt` with the chosen Inference API model.

    Args:
        prompt: Text prompt; blank input raises a gr.Error.
        selected_model: A model id from MODEL_LIST.
        is_negative: Forwarded as the payload's "is_negative" field.
            NOTE(review): the UI wires the negative-prompt TEXT into this
            parameter — confirm the API accepts it under that key.
        steps: Forwarded as "steps".
        cfg_scale: Forwarded as "cfg_scale".
        seed: RNG seed; when None a random seed is drawn per call.

    Returns:
        PIL.Image.Image: The decoded image from the response body.

    Raises:
        gr.Error: On empty prompt, unknown model, or any request failure.
    """
    if not prompt.strip():
        raise gr.Error("Cannot generate image: Input text is empty!")
    model_url = select_model(selected_model)
    if not model_url:
        # Guard against a falsy URL so we never POST to the literal "None".
        raise gr.Error(f"Unknown model: {selected_model}")
    payload = {
        "inputs": prompt,
        "is_negative": is_negative,
        "steps": steps,
        "cfg_scale": cfg_scale,
        # Upper bound is 2**31 - 1; -1 is kept from the original range
        # (presumably a "random seed" sentinel for some backends — verify).
        "seed": seed if seed is not None else random.randint(-1, 2147483647),
    }
    try:
        # timeout keeps a stalled inference request from hanging the UI forever.
        response = requests.post(model_url, headers=HEADERS, json=payload, timeout=300)
        response.raise_for_status()
        return Image.open(io.BytesIO(response.content))
    except requests.exceptions.RequestException as e:
        # gr.Error wants a message string, not the exception object itself.
        raise gr.Error(f"Error in API request: {e}") from e
# ---------------------------------------------------------------------------
# Gradio UI: HTML header, output image, prompt/model inputs, and two buttons
# wired to generate_image() / extend_prompt() above.
# ---------------------------------------------------------------------------
with gr.Blocks(theme="soft") as playground:
    # Static page header (title + tagline).
    gr.HTML(
        """
<div style="text-align: center; margin: 0 auto;">
<div style="display: inline-flex; align-items: center; gap: 0.8rem; font-size: 1.75rem;">
<h1 style="font-weight: 900; margin-bottom: 7px; margin-top: 5px;">Play with SD Models</h1>
</div>
<p style="margin-bottom: 10px; font-size: 94%; line-height: 23px;">
Create your AI art with Stable Diffusion models!
</p>
</div>
"""
    )
    with gr.Row():
        # Generated image lands here.
        image_output = gr.Image(type="pil", label="Output Image", elem_id="gallery")
        with gr.Column(elem_id="prompt-container"):
            # Free-text prompt plus model picker (choices come from MODEL_LIST).
            text_prompt = gr.Textbox(label="Prompt", placeholder="a cute cat", lines=1, elem_id="prompt-text-input")
            model_dropdown = gr.Dropdown(label="Model", choices=MODEL_LIST, elem_id="model-dropdown", value="runwayml/stable-diffusion-v1-5")
            gen_button = gr.Button("Generate", variant='primary', elem_id="gen-button")
            extend_button = gr.Button("Extend Prompt", variant='primary', elem_id="extend-button")
            with gr.Accordion("Advanced settings", open=False):
                negative_prompt = gr.Textbox(label="Negative Prompt", value="text, blurry, fuzziness", lines=1, elem_id="negative-prompt-text-input")

    # NOTE(review): negative_prompt is passed as generate_image's THIRD
    # positional input, i.e. its `is_negative` parameter — the negative-prompt
    # text ends up in the payload under the key "is_negative". Confirm the
    # inference backend expects it there (vs. "negative_prompt").
    gen_button.click(generate_image, inputs=[text_prompt, model_dropdown, negative_prompt], outputs=image_output)
    # "Extend Prompt" rewrites the prompt textbox in place with the model's output.
    extend_button.click(extend_prompt, inputs=text_prompt, outputs=text_prompt)
# Start the app; show_api=False hides the auto-generated API docs page.
# (Removed a trailing " |" scrape artifact that made this line a syntax error.)
playground.launch(show_api=False)