Spaces: Running on CPU Upgrade
Adding support for custom inference models/LoRAs
app.py CHANGED
@@ -10,19 +10,20 @@ import json
 # Project by Nymbo

 # Base API URL for Hugging Face inference
-API_URL = "https://api-inference.huggingface.co/models/
+API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
 # Retrieve the API token from environment variables
 API_TOKEN = os.getenv("HF_READ_TOKEN")
 headers = {"Authorization": f"Bearer {API_TOKEN}"}
 # Timeout for requests
 timeout = 100

-def query(prompt, model, is_negative=False, steps=35, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, width=1024, height=1024):
+def query(prompt, model, custom_lora, is_negative=False, steps=35, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, width=1024, height=1024):
     # Debug log to indicate function start
     print("Starting query function...")
     # Print the parameters for debugging purposes
     print(f"Prompt: {prompt}")
     print(f"Model: {model}")
+    print(f"Custom LoRA: {custom_lora}")
     print(f"Parameters - Steps: {steps}, CFG Scale: {cfg_scale}, Seed: {seed}, Strength: {strength}, Width: {width}, Height: {height}")

     # Check if the prompt is empty or None
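The new custom_lora argument takes precedence over the model dropdown whenever it is non-empty (see the next hunk). A hypothetical direct call to the updated function, outside Gradio, just to illustrate the new argument order; in the Space itself these values are supplied by the UI components wired up at the end of app.py:

# Hypothetical direct invocation of the updated query(); the LoRA repo id is
# the placeholder suggested by the UI, not a required value.
image = query(
    prompt="a vintage travel poster of Lisbon",
    model="FLUX.1 [Dev]",                          # ignored when custom_lora is non-empty
    custom_lora="multimodalart/vintage-ads-flux",  # Hugging Face path of a LoRA
)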
@@ -43,211 +44,209 @@ def query(prompt, model, is_negative=False, steps=35, cfg_scale=7, sampler="DPM+
     prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
     print(f'Generation {key}: {prompt}') # Debug log

-    # Set the API URL based on the selected model
-    [old per-model API_URL selection block, file lines 47-248, not captured in this view]
-    if model == 'epiCPhotoGasm':
-        API_URL = "https://api-inference.huggingface.co/models/Yntec/epiCPhotoGasm"
+    # Set the API URL based on the selected model or custom LoRA
+    if custom_lora.strip() != "":
+        API_URL = f"https://api-inference.huggingface.co/models/{custom_lora.strip()}"
+    else:
+        if model == 'Stable Diffusion XL':
+            API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0"
+        if model == 'FLUX.1 [Dev]':
+            API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
+        if model == 'FLUX.1 [Schnell]':
+            API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"
+        if model == 'Flux Logo Design':
+            API_URL = "https://api-inference.huggingface.co/models/Shakker-Labs/FLUX.1-dev-LoRA-Logo-Design"
+            prompt = f"wablogo, logo, Minimalist, {prompt}"
+        if model == 'Flux Uncensored':
+            API_URL = "https://api-inference.huggingface.co/models/enhanceaiteam/Flux-uncensored"
+        if model == 'Flux Uncensored V2':
+            API_URL = "https://api-inference.huggingface.co/models/enhanceaiteam/Flux-Uncensored-V2"
+        if model == 'Flux Tarot Cards':
+            API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/Ton618-Tarot-Cards-Flux-LoRA"
+            prompt = f"Tarot card, {prompt}"
+        if model == 'Pixel Art Sprites':
+            API_URL = "https://api-inference.huggingface.co/models/sWizad/pokemon-trainer-sprites-pixelart-flux"
+            prompt = f"a pixel image, {prompt}"
+        if model == '3D Sketchfab':
+            API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/Castor-3D-Sketchfab-Flux-LoRA"
+            prompt = f"3D Sketchfab, {prompt}"
+        if model == 'Retro Comic Flux':
+            API_URL = "https://api-inference.huggingface.co/models/renderartist/retrocomicflux"
+            prompt = f"c0m1c, comic book panel, {prompt}"
+        if model == 'Caricature':
+            API_URL = "https://api-inference.huggingface.co/models/TheAwakenOne/caricature"
+            prompt = f"CCTUR3, {prompt}"
+        if model == 'Huggieverse':
+            API_URL = "https://api-inference.huggingface.co/models/Chunte/flux-lora-Huggieverse"
+            prompt = f"HGGRE, {prompt}"
+        if model == 'Propaganda Poster':
+            API_URL = "https://api-inference.huggingface.co/models/AlekseyCalvin/Propaganda_Poster_Schnell_by_doctor_diffusion"
+            prompt = f"propaganda poster, {prompt}"
+        if model == 'Flux Game Assets V2':
+            API_URL = "https://api-inference.huggingface.co/models/gokaygokay/Flux-Game-Assets-LoRA-v2"
+            prompt = f"wbgmsst, white background, {prompt}"
+        if model == 'SoftPasty Flux':
+            API_URL = "https://api-inference.huggingface.co/models/alvdansen/softpasty-flux-dev"
+            prompt = f"araminta_illus illustration style, {prompt}"
+        if model == 'Flux Stickers':
+            API_URL = "https://api-inference.huggingface.co/models/diabolic6045/Flux_Sticker_Lora"
+            prompt = f"5t1cker 5ty1e, {prompt}"
+        if model == 'Flux Animex V2':
+            API_URL = "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Animex-v2-LoRA"
+            prompt = f"Animex, {prompt}"
+        if model == 'Flux Animeo V1':
+            API_URL = "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Animeo-v1-LoRA"
+            prompt = f"Animeo, {prompt}"
+        if model == 'Movie Board':
+            API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/Flux.1-Dev-Movie-Boards-LoRA"
+            prompt = f"movieboard, {prompt}"
+        if model == 'Purple Dreamy':
+            API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/Purple-Dreamy-Flux-LoRA"
+            prompt = f"Purple Dreamy, {prompt}"
+        if model == 'PS1 Style Flux':
+            API_URL = "https://api-inference.huggingface.co/models/veryVANYA/ps1-style-flux"
+            prompt = f"ps1 game screenshot, {prompt}"
+        if model == 'Softserve Anime':
+            API_URL = "https://api-inference.huggingface.co/models/alvdansen/softserve_anime"
+            prompt = f"sftsrv style illustration, {prompt}"
+        if model == 'Flux Tarot v1':
+            API_URL = "https://api-inference.huggingface.co/models/multimodalart/flux-tarot-v1"
+            prompt = f"in the style of TOK a trtcrd tarot style, {prompt}"
+        if model == 'Half Illustration':
+            API_URL = "https://api-inference.huggingface.co/models/davisbro/half_illustration"
+            prompt = f"in the style of TOK, {prompt}"
+        if model == 'OpenDalle v1.1':
+            API_URL = "https://api-inference.huggingface.co/models/dataautogpt3/OpenDalleV1.1"
+        if model == 'Flux Ghibsky Illustration':
+            API_URL = "https://api-inference.huggingface.co/models/aleksa-codes/flux-ghibsky-illustration"
+            prompt = f"GHIBSKY style, {prompt}"
+        if model == 'Flux Koda':
+            API_URL = "https://api-inference.huggingface.co/models/alvdansen/flux-koda"
+            prompt = f"flmft style, {prompt}"
+        if model == 'Soviet Diffusion XL':
+            API_URL = "https://api-inference.huggingface.co/models/openskyml/soviet-diffusion-xl"
+            prompt = f"soviet poster, {prompt}"
+        if model == 'Flux Realism LoRA':
+            API_URL = "https://api-inference.huggingface.co/models/XLabs-AI/flux-RealismLora"
+        if model == 'Frosting Lane Flux':
+            API_URL = "https://api-inference.huggingface.co/models/alvdansen/frosting_lane_flux"
+            prompt = f"frstingln illustration, {prompt}"
+        if model == 'Phantasma Anime':
+            API_URL = "https://api-inference.huggingface.co/models/alvdansen/phantasma-anime"
+        if model == 'Boreal':
+            API_URL = "https://api-inference.huggingface.co/models/kudzueye/Boreal"
+            prompt = f"photo, {prompt}"
+        if model == 'How2Draw':
+            API_URL = "https://api-inference.huggingface.co/models/glif/how2draw"
+            prompt = f"How2Draw, {prompt}"
+        if model == 'Flux AestheticAnime':
+            API_URL = "https://api-inference.huggingface.co/models/dataautogpt3/FLUX-AestheticAnime"
+        if model == 'Fashion Hut Modeling LoRA':
+            API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/Fashion-Hut-Modeling-LoRA"
+            prompt = f"Modeling of, {prompt}"
+        if model == 'Flux SyntheticAnime':
+            API_URL = "https://api-inference.huggingface.co/models/dataautogpt3/FLUX-SyntheticAnime"
+            prompt = f"1980s anime screengrab, VHS quality, syntheticanime, {prompt}"
+        if model == 'Flux Midjourney Anime':
+            API_URL = "https://api-inference.huggingface.co/models/brushpenbob/flux-midjourney-anime"
+            prompt = f"egmid, {prompt}"
+        if model == 'Coloring Book Generator':
+            API_URL = "https://api-inference.huggingface.co/models/robert123231/coloringbookgenerator"
+        if model == 'Collage Flux':
+            API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/Castor-Collage-Dim-Flux-LoRA"
+            prompt = f"collage, {prompt}"
+        if model == 'Flux Product Ad Backdrop':
+            API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/Flux-Product-Ad-Backdrop"
+            prompt = f"Product Ad, {prompt}"
+        if model == 'Product Design':
+            API_URL = "https://api-inference.huggingface.co/models/multimodalart/product-design"
+            prompt = f"product designed by prdsgn, {prompt}"
+        if model == '90s Anime Art':
+            API_URL = "https://api-inference.huggingface.co/models/glif/90s-anime-art"
+        if model == 'Brain Melt Acid Art':
+            API_URL = "https://api-inference.huggingface.co/models/glif/Brain-Melt-Acid-Art"
+            prompt = f"maximalism, in an acid surrealism style, {prompt}"
+        if model == 'Lustly Flux Uncensored v1':
+            API_URL = "https://api-inference.huggingface.co/models/lustlyai/Flux_Lustly.ai_Uncensored_nsfw_v1"
+        if model == 'NSFW Master Flux':
+            API_URL = "https://api-inference.huggingface.co/models/Keltezaa/NSFW_MASTER_FLUX"
+            prompt = f"NSFW, {prompt}"
+        if model == 'Flux Outfit Generator':
+            API_URL = "https://api-inference.huggingface.co/models/tryonlabs/FLUX.1-dev-LoRA-Outfit-Generator"
+        if model == 'Midjourney':
+            API_URL = "https://api-inference.huggingface.co/models/Jovie/Midjourney"
+        if model == 'DreamPhotoGASM':
+            API_URL = "https://api-inference.huggingface.co/models/Yntec/DreamPhotoGASM"
+        if model == 'Flux Super Realism LoRA':
+            API_URL = "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Super-Realism-LoRA"
+        if model == 'Stable Diffusion 2-1':
+            API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-2-1-base"
+        if model == 'Stable Diffusion 3.5 Large':
+            API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-3.5-large"
+        if model == 'Stable Diffusion 3.5 Large Turbo':
+            API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-3.5-large-turbo"
+        if model == 'Stable Diffusion 3 Medium':
+            API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-3-medium-diffusers"
+            prompt = f"A, {prompt}"
+        if model == 'Duchaiten Real3D NSFW XL':
+            API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/duchaiten-real3d-nsfw-xl"
+        if model == 'Pixel Art XL':
+            API_URL = "https://api-inference.huggingface.co/models/nerijs/pixel-art-xl"
+            prompt = f"pixel art, {prompt}"
+        if model == 'Character Design':
+            API_URL = "https://api-inference.huggingface.co/models/KappaNeuro/character-design"
+            prompt = f"Character Design, {prompt}"
+        if model == 'Sketched Out Manga':
+            API_URL = "https://api-inference.huggingface.co/models/alvdansen/sketchedoutmanga"
+            prompt = f"daiton, {prompt}"
+        if model == 'Archfey Anime':
+            API_URL = "https://api-inference.huggingface.co/models/alvdansen/archfey_anime"
+        if model == 'Lofi Cuties':
+            API_URL = "https://api-inference.huggingface.co/models/alvdansen/lofi-cuties"
+        if model == 'YiffyMix':
+            API_URL = "https://api-inference.huggingface.co/models/Yntec/YiffyMix"
+        if model == 'Analog Madness Realistic v7':
+            API_URL = "https://api-inference.huggingface.co/models/digiplay/AnalogMadness-realistic-model-v7"
+        if model == 'Selfie Photography':
+            API_URL = "https://api-inference.huggingface.co/models/artificialguybr/selfiephotographyredmond-selfie-photography-lora-for-sdxl"
+            prompt = f"instagram model, discord profile picture, {prompt}"
+        if model == 'Filmgrain':
+            API_URL = "https://api-inference.huggingface.co/models/artificialguybr/filmgrain-redmond-filmgrain-lora-for-sdxl"
+            prompt = f"Film Grain, FilmGrainAF, {prompt}"
+        if model == 'Leonardo AI Style Illustration':
+            API_URL = "https://api-inference.huggingface.co/models/goofyai/Leonardo_Ai_Style_Illustration"
+            prompt = f"leonardo style, illustration, vector art, {prompt}"
+        if model == 'Cyborg Style XL':
+            API_URL = "https://api-inference.huggingface.co/models/goofyai/cyborg_style_xl"
+            prompt = f"cyborg style, {prompt}"
+        if model == 'Little Tinies':
+            API_URL = "https://api-inference.huggingface.co/models/alvdansen/littletinies"
+        if model == 'NSFW XL':
+            API_URL = "https://api-inference.huggingface.co/models/Dremmar/nsfw-xl"
+        if model == 'Analog Redmond':
+            API_URL = "https://api-inference.huggingface.co/models/artificialguybr/analogredmond"
+            prompt = f"timeless style, {prompt}"
+        if model == 'Pixel Art Redmond':
+            API_URL = "https://api-inference.huggingface.co/models/artificialguybr/PixelArtRedmond"
+            prompt = f"Pixel Art, {prompt}"
+        if model == 'Ascii Art':
+            API_URL = "https://api-inference.huggingface.co/models/CiroN2022/ascii-art"
+            prompt = f"ascii art, {prompt}"
+        if model == 'Analog':
+            API_URL = "https://api-inference.huggingface.co/models/Yntec/Analog"
+        if model == 'Maple Syrup':
+            API_URL = "https://api-inference.huggingface.co/models/Yntec/MapleSyrup"
+        if model == 'Perfect Lewd Fantasy':
+            API_URL = "https://api-inference.huggingface.co/models/digiplay/perfectLewdFantasy_v1.01"
+        if model == 'AbsoluteReality 1.8.1':
+            API_URL = "https://api-inference.huggingface.co/models/digiplay/AbsoluteReality_v1.8.1"
+        if model == 'Disney':
+            API_URL = "https://api-inference.huggingface.co/models/goofyai/disney_style_xl"
+            prompt = f"Disney style, {prompt}"
+        if model == 'Redmond SDXL':
+            API_URL = "https://api-inference.huggingface.co/models/artificialguybr/LogoRedmond-LogoLoraForSDXL-V2"
+        if model == 'epiCPhotoGasm':
+            API_URL = "https://api-inference.huggingface.co/models/Yntec/epiCPhotoGasm"
     print(f"API URL set to: {API_URL}") # Debug log

     # Define the payload for the request
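This hunk stops at the payload comment; the request itself is unchanged by the commit and not shown. As context, a minimal self-contained sketch of how the resolved API_URL is typically called, assuming the standard Hugging Face Inference API text-to-image payload; the field names below are assumptions, not taken from app.py:

# Minimal sketch (not from app.py) of posting a prompt to the resolved API_URL.
# The payload shape ("inputs" plus a "parameters" dict) follows the usual
# Hugging Face Inference API text-to-image format and is an assumption here.
import io
import os

import requests
from PIL import Image

def post_to_inference_api(api_url: str, prompt: str, timeout: int = 100) -> Image.Image:
    headers = {"Authorization": f"Bearer {os.getenv('HF_READ_TOKEN')}"}
    payload = {
        "inputs": prompt,
        "parameters": {"num_inference_steps": 35, "guidance_scale": 7, "width": 1024, "height": 1024},
    }
    response = requests.post(api_url, headers=headers, json=payload, timeout=timeout)
    response.raise_for_status()  # app.py raises gr.Error instead; see the error-handling hunk below
    return Image.open(io.BytesIO(response.content))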
@@ -290,7 +289,7 @@ def query(prompt, model, is_negative=False, steps=35, cfg_scale=7, sampler="DPM+
         raise gr.Error(f"{response.status_code}: The model is being loaded. Please try again later.")
     else:
         raise gr.Error(f"{response.status_code}: An unexpected error occurred.")
-
+
     try:
         # Attempt to read the image from the response content
         image_bytes = response.content
@@ -319,11 +318,14 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme_5') as dalle:
     with gr.Row():
         # Textbox for user to input the prompt
         text_prompt = gr.Textbox(label="Prompt", placeholder="Enter a prompt here", lines=3, elem_id="prompt-text-input")
+    with gr.Row():
+        # Textbox for custom LoRA input
+        custom_lora = gr.Textbox(label="Custom LoRA", info="LoRA Hugging Face path (optional)", placeholder="multimodalart/vintage-ads-flux")
     with gr.Row():
         # Accordion for selecting the model
-        with gr.Accordion("
+        with gr.Accordion("Featured Models", open=True):
             # Textbox for searching models
-            model_search = gr.Textbox(label="
+            model_search = gr.Textbox(label="Filter Models", placeholder="Search for a featured model...", lines=1, elem_id="model-search-input")
             models_list = (
                 "3D Sketchfab",
                 "90s Anime Art",
@@ -467,11 +469,12 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme_5') as dalle:
     with gr.Row():
         # Display a sample prompt for guidance
         gr.Textbox(label="Sample prompt", value="{prompt} | ultra detail, ultra elaboration, ultra quality, perfect.")
-
+
     # Accordion displaying featured models
     with gr.Accordion("Featured Models (WiP)", open=False):
         gr.HTML(
             """
+            <p><a href="https://huggingface.co/models?inference=warm&sort=likes&search=stable-diffusion">See all available models</a></p>
             <table style="width:100%; text-align:center; margin:auto;">
             <tr>
                 <th>Model Name</th>
@@ -494,14 +497,13 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme_5') as dalle:
                 <td></td>
             </tr>
             </table>
-
             """
         )

     # Accordion providing an overview of advanced settings
     with gr.Accordion("Advanced Settings Overview", open=False):
         gr.Markdown(
-
+            """
             ## Negative Prompt
             ###### This box is for telling the AI what you don't want in your images. Think of it as a way to avoid certain elements. For instance, if you don't want blurry images or extra limbs showing up, this is where you'd mention it.

@@ -513,7 +515,7 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme_5') as dalle:

             ## CFG Scale
             ###### CFG stands for "Control Free Guidance." The scale adjusts how closely the AI follows your prompt. A lower number makes the AI more creative and free-flowing, while a higher number makes it stick closely to what you asked for. If you want the AI to take fewer artistic liberties, slide this towards a higher number. Just think "Control Freak Gauge".
-
+
             ## Sampling Method
             ###### This is the technique the AI uses to create your image. Each option is a different approach, like choosing between pencils, markers, or paint. You don't need to worry too much about this; the default setting is usually the best choice for most users.

@@ -526,15 +528,16 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme_5') as dalle:
             ### Remember, these settings are all about giving you control over the image generation process. Feel free to experiment and see what each one does. And if you're ever in doubt, the default settings are a great place to start. Happy creating!
             """
         )
+
     # Row containing the 'Run' button to trigger the image generation
     with gr.Row():
         text_button = gr.Button("Run", variant='primary', elem_id="gen-button")
     # Row for displaying the generated image output
     with gr.Row():
         image_output = gr.Image(type="pil", label="Image Output", elem_id="gallery")
-
+
     # Set up button click event to call the query function
-    text_button.click(query, inputs=[text_prompt, model, negative_prompt, steps, cfg, method, seed, strength, width, height], outputs=image_output)
+    text_button.click(query, inputs=[text_prompt, model, custom_lora, negative_prompt, steps, cfg, method, seed, strength, width, height], outputs=image_output)

 print("Launching Gradio interface...") # Debug log
 # Launch the Gradio interface without showing the API or sharing externally
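Note that Gradio passes the inputs list to the callback positionally, which is why custom_lora is inserted third in both this inputs list and the query signature in the first hunk. The resulting mapping, using the component names from app.py:

# Gradio component  -> query() parameter
# text_prompt       -> prompt
# model             -> model
# custom_lora       -> custom_lora   (new in this commit)
# negative_prompt   -> is_negative
# steps             -> steps
# cfg               -> cfg_scale
# method            -> sampler
# seed              -> seed
# strength          -> strength
# width             -> width
# height            -> height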