import json

import requests
import gradio as gr


def generate_image(prompt, negative_prompt, width, height, samples,
                   num_inference_steps, safety_checker, enhance_prompt, seed,
                   guidance_scale, multi_lingual, panorama, self_attention,
                   upscale, embeddings, lora, webhook, track_id):
    """Send a text2img request to the ModelsLab API and return the raw JSON response."""
    url = "https://modelslab.com/api/v6/images/text2img"

    payload = json.dumps({
        "key": "sHj15HTjxiCkFtV3PHmSeehjaVGdpNotsb1iMbIpniNzfTsjgbN7Z9RFB8Wu",  # replace with your own ModelsLab API key
        "model_id": "juggernaut-xl-v8",
        "prompt": prompt,
        "negative_prompt": negative_prompt,
        "width": width,
        "height": height,
        "samples": samples,
        "num_inference_steps": num_inference_steps,
        "safety_checker": safety_checker,
        "enhance_prompt": enhance_prompt,
        "seed": seed,
        "guidance_scale": guidance_scale,
        "multi_lingual": multi_lingual,
        "panorama": panorama,
        "self_attention": self_attention,
        "upscale": upscale,
        "embeddings": embeddings,
        "lora": lora,
        "webhook": webhook,
        "track_id": track_id
    })

    headers = {
        "Content-Type": "application/json"
    }

    response = requests.post(url, headers=headers, data=payload)

    return response.text
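

# Optional helper (illustrative sketch, not part of the original script):
# the endpoint responds with JSON that, per the ModelsLab docs, carries a
# "status" field and, on success, an "output" list of image URLs. These
# field names are assumptions -- adjust them if your responses differ.
def extract_image_urls(response_text):
    """Best-effort extraction of image URLs from a text2img response."""
    try:
        data = json.loads(response_text)
    except json.JSONDecodeError:
        return []  # non-JSON response, e.g. an HTML error page
    if data.get("status") == "success":
        return data.get("output", [])
    return []  # "processing" or "error" responses carry no final URLs yet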
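

# Example direct call that bypasses the UI. The argument values below are
# purely illustrative; the ModelsLab API generally accepts numeric fields as
# strings and boolean-like flags as "yes"/"no", but check the docs for the
# exact conventions used by your model.
# result = generate_image(
#     prompt="ultra realistic portrait of an astronaut, studio lighting",
#     negative_prompt="blurry, low quality",
#     width="768", height="768", samples="1", num_inference_steps="30",
#     safety_checker="no", enhance_prompt="yes", seed=None,
#     guidance_scale=7.5, multi_lingual="no", panorama="no",
#     self_attention="no", upscale="no", embeddings=None, lora=None,
#     webhook=None, track_id=None,
# )
# print(extract_image_urls(result))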


iface = gr.Interface(
    fn=generate_image,
    # One input component per generate_image() parameter (18 in total):
    # numeric fields get a number box, everything else a plain textbox.
    inputs=["text", "text", "number", "number", "number", "number",
            "text", "text", "number", "number", "text", "text", "text",
            "text", "text", "text", "text", "text"],
    outputs="text",
    title="Text to Image Generation",
    description="Generate an image based on text prompts.",
    article="Enter your prompts and settings and click 'Generate Image'.")

iface.launch()