import gradio as gr
import requests
import io
from PIL import Image
import json
import os
import logging
import math
from tqdm import tqdm
import time
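# Runtime dependencies for this script: gradio, requests, Pillow, tqdm.
# On a Hugging Face Space these would normally be listed in requirements.txt.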
# logging.basicConfig(level=logging.DEBUG)

with open('loras.json', 'r') as f:
    loras = json.load(f)
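# Each entry in loras.json is expected to expose the fields accessed below:
# "image", "title", "repo" and "trigger_word". Illustrative example entry
# (the values are hypothetical, not taken from the actual file):
# {
#     "image": "images/example.png",
#     "title": "Example LoRA",
#     "repo": "artificialguybr/example-lora",
#     "trigger_word": "ExampleStyle"
# }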

def update_selection(selected_state: gr.SelectData):
    logging.debug(f"Inside update_selection, selected_state: {selected_state}")
    selected_lora_index = selected_state.index
    selected_lora = loras[selected_lora_index]
    new_placeholder = f"Type a prompt for {selected_lora['title']}"
    lora_repo = selected_lora["repo"]
    updated_text = f"### Selected: [{lora_repo}](https://huggingface.co/{lora_repo}) ✨"
    return (
        gr.update(placeholder=new_placeholder),
        updated_text,  # Return the updated Markdown text
        selected_state
    )

def run_lora(prompt, selected_state, progress=gr.Progress(track_tqdm=True)):
    logging.debug(f"Inside run_lora, selected_state: {selected_state}")
    if not selected_state:
        logging.error("selected_state is None or empty.")
        raise gr.Error("You must select a LoRA before proceeding.")  # Popup error when no LoRA is selected
    selected_lora_index = selected_state.index
    selected_lora = loras[selected_lora_index]
    api_url = f"https://api-inference.huggingface.co/models/{selected_lora['repo']}"
    trigger_word = selected_lora["trigger_word"]
    # token = os.getenv("API_TOKEN")
    payload = {
        "inputs": f"{prompt} {trigger_word}",
        "parameters": {
            "negative_prompt": "(worst quality, low quality, normal quality, lowres, low details, oversaturated, undersaturated, overexposed, underexposed, grayscale, bw, bad photo, bad photography, bad art:1.4), (watermark, signature, text font, username, error, logo, words, letters, digits, autograph, trademark, name:1.2), (blur, blurry, grainy), morbid, ugly, asymmetrical, mutated malformed, mutilated, poorly lit, bad shadow, draft, cropped, out of frame, cut off, censored, jpeg artifacts, out of focus, glitch, duplicate, (airbrushed, cartoon, anime, semi-realistic, cgi, render, blender, digital art, manga, amateur:1.3), (3D ,3D Game, 3D Game Scene, 3D Character:1.1), (bad hands, bad anatomy, bad body, bad face, bad teeth, bad arms, bad legs, deformities:1.3)",
            "num_inference_steps": 30,
            "scheduler": "DPMSolverMultistepScheduler",
        },
    }
    # headers = {"Authorization": f"Bearer {token}"}
    # Print the API request details for debugging
    print(f"API Request: {api_url}")
    # print(f"API Headers: {headers}")
    print(f"API Payload: {payload}")
    error_count = 0
    pbar = tqdm(total=None, desc="Loading model")
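    # Poll the Inference API: keep retrying while the model cold-boots (503)
    # and tolerate a few transient 500s before giving up.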
    while True:
        response = requests.post(api_url, json=payload)
        if response.status_code == 200:
            return Image.open(io.BytesIO(response.content))
        elif response.status_code == 503:
            # 503 means the model is cold-booting. The response also includes an
            # estimate of when the model will be loaded, but it is not very precise.
            time.sleep(1)
            pbar.update(1)
        elif response.status_code == 500 and error_count < 5:
            print(response.content)
            time.sleep(1)
            error_count += 1
            continue
        else:
            logging.error(f"API Error: {response.status_code}")
            raise gr.Error("API Error: Unable to fetch the image.")  # Surface the failure as a Gradio popup

with gr.Blocks(css="custom.css") as app:
    title = gr.Markdown("# artificialguybr LoRA portfolio")
    description = gr.Markdown(
        "### This is my portfolio. Follow me on Twitter [@artificialguybr](https://twitter.com/artificialguybr). \n"
        "**Note**: The speed and generation quality are for demonstration purposes. "
        "For best quality, use Automatic1111, ComfyUI, or Diffusers. \n"
        "**Warning**: The API might take some time to deliver the image. \n"
        "Special thanks to Hugging Face for their free inference API."
    )
    selected_state = gr.State()
    with gr.Row():
        gallery = gr.Gallery(
            [(item["image"], item["title"]) for item in loras],
            label="LoRA Gallery",
            allow_preview=False,
            columns=3
        )
        with gr.Column():
            prompt_title = gr.Markdown("### Click on a LoRA in the gallery to select it")
            selected_info = gr.Markdown("")  # Markdown component that displays the selected LoRA
            with gr.Row():
                prompt = gr.Textbox(label="Prompt", show_label=False, lines=1, max_lines=1, placeholder="Type a prompt after selecting a LoRA")
                button = gr.Button("Run")
            result = gr.Image(interactive=False, label="Generated Image")
    gallery.select(
        update_selection,
        outputs=[prompt, selected_info, selected_state]  # selected_info receives the Markdown text for the selected LoRA
    )
    prompt.submit(
        fn=run_lora,
        inputs=[prompt, selected_state],
        outputs=[result]
    )
    button.click(
        fn=run_lora,
        inputs=[prompt, selected_state],
        outputs=[result]
    )

app.queue()
app.launch()
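# When run locally (python app.py), Gradio prints a local URL to open in the
# browser; on a Hugging Face Space the app is served automatically.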