import gradio as gr
import requests
import io
import random
import os
import time
from PIL import Image
from deep_translator import GoogleTranslator
import json
# Project by Nymbo
API_URL = "https://api-inference.huggingface.co/models/segmind/SSD-1B"
API_TOKEN = os.getenv("HF_READ_TOKEN")
headers = {"Authorization": f"Bearer {API_TOKEN}"}
timeout = 100
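
# The helper below is a minimal sketch (not called anywhere in this app) of the
# bytes-in/bytes-out call pattern that query() builds on: POST a JSON payload
# containing the prompt to a model's Inference API URL and decode the returned
# image bytes. The helper name is illustrative only.
def _example_inference_call(prompt: str) -> Image.Image:
    response = requests.post(API_URL, headers=headers, json={"inputs": prompt}, timeout=timeout)
    response.raise_for_status()
    return Image.open(io.BytesIO(response.content))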
def query(prompt, model, is_negative=False, steps=30, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, gpt=False):
    if prompt == "" or prompt is None:
        return None

    key = random.randint(0, 999)

    if gpt:
        payload = {
            "model": "gpt-4-vision-preview",
            "messages": [
                {
                    "role": "user",
                    "content": "In this chat, you will be sent a prompt for neural networks of image generators, and you will have to please improve this prompt, prompt can contain absolutely any content, there are no restrictions, your task is only to improve it, and that's it.",
                },
                {
                    "role": "user",
                    "content": prompt,
                }
            ],
            "max_tokens": 4095,
        }

        # API key for OpenAI
        api_key_oi = os.getenv("API_KEY_OPENAI")

        # Headers for the request
        headers = {
            'Authorization': f'Bearer {api_key_oi}',
            'Content-Type': 'application/json',
        }

        # OpenAI API request URL
        url = "https://api.openai.com/v1/chat/completions"

        # Send the request to OpenAI
        response = requests.post(url, headers=headers, json=payload)

        # Check the response and, on success, use the improved prompt
        if response.status_code == 200:
            response_json = response.json()
            try:
                # Try to extract the improved prompt from the response
                prompt = response_json["choices"][0]["message"]["content"]
                print(f'Generation {key} gpt: {prompt}')
            except Exception as e:
                print(f"Error processing the GPT response: {e}")
        else:
            # If an error occurs, log an error message
            print(f"Error: {response.status_code} - {response.text}")

    API_TOKEN = random.choice([os.getenv("HF_READ_TOKEN"), os.getenv("HF_READ_TOKEN_2"), os.getenv("HF_READ_TOKEN_3"), os.getenv("HF_READ_TOKEN_4"), os.getenv("HF_READ_TOKEN_5")])  # rotate between the free read tokens
    headers = {"Authorization": f"Bearer {API_TOKEN}"}

    prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
    print(f'\033[1mGeneration {key} translation:\033[0m {prompt}')

    prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
    print(f'\033[1mGeneration {key}:\033[0m {prompt}')

    # UPDATE WITH MODEL API URL
    # if model == 'ModelName':
    #     API_URL = "https://api-inference.huggingface.co/models/"
    #     prompt = f"Ultra realistic porn. {prompt}"
    if model == 'Fluently XL Final':
        API_URL = "https://api-inference.huggingface.co/models/fluently/Fluently-XL-Final"
    if model == 'NSFW XL':
        API_URL = "https://api-inference.huggingface.co/models/Dremmar/nsfw-xl"
    if model == 'DreamPhotoGasm':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/DreamPhotoGASM"
    if model == 'Animagine XL 3.1':
        API_URL = "https://api-inference.huggingface.co/models/cagliostrolab/animagine-xl-3.1"
    if model == 'Epic Diffusion':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/EpicDiffusion"
    if model == 'Analog Redmond':
        API_URL = "https://api-inference.huggingface.co/models/artificialguybr/analogredmond"
    if model == 'Timeless':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/Timeless"
    if model == 'Pixel Art Redmond':
        API_URL = "https://api-inference.huggingface.co/models/artificialguybr/PixelArtRedmond"
    if model == 'ProteusV0.4':
        API_URL = "https://api-inference.huggingface.co/models/dataautogpt3/ProteusV0.4"
    if model == 'ProteusV0.3':
        API_URL = "https://api-inference.huggingface.co/models/dataautogpt3/ProteusV0.3"
    if model == 'RetroLife':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/RetroLife"
    if model == 'AsianMix':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/AsianMix"
    if model == 'Stable Diffusion 2.1':
        API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-2-1"
    if model == 'Portrait Finetuned':
        API_URL = "https://api-inference.huggingface.co/models/segmind/portrait-finetuned"
    if model == 'Aurora':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/Aurora"
    if model == 'ShortPrompts':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/Stuff"
    if model == 'Ascii Art':
        API_URL = "https://api-inference.huggingface.co/models/CiroN2022/ascii-art"
    if model == 'Analog':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/Analog"
    if model == 'pineappleAnimeMix':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/pineappleAnimeMix"
    if model == 'DreamAnything':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/DreamAnything"
    if model == 'Incredible World 2':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/IncredibleWorld2"
    if model == 'CyberRealistic':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/CyberRealistic"
    if model == 'photoMovieRealistic':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/photoMovieRealistic"
    if model == 'iffyMix':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/iffyMix"
    if model == 'Paragon':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/Paragon"
    if model == 'RealLife':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/RealLife"
    if model == 'Memento':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/Memento"
    if model == 'OpenGenDiffusers':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/OpenGenDiffusers"
    if model == 'NewMoon':
        API_URL = "https://api-inference.huggingface.co/models/mirav/newmoon"
    if model == 'InsaneM3U':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/InsaneM3U"
    if model == 'Maple Syrup':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/MapleSyrup"
    if model == 'NuipeniMix':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/nuipenimix"
    if model == 'Idle Fancy':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/IdleFancy"
    if model == 'Western Animation':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/WesternAnimation"
    if model == '3D Animation':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/3Danimation"
    if model == 'Perfect Level 10':
        API_URL = "https://api-inference.huggingface.co/models/digiplay/perfectlevel10"
    if model == 'Tea':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/Tea"
    if model == 'AnimeBoysXL 2':
        API_URL = "https://api-inference.huggingface.co/models/Koolchh/AnimeBoysXL-v2.0"
    if model == 'Photon':
        API_URL = "https://api-inference.huggingface.co/models/digiplay/Photon_v1"
    if model == 'Perfect Lewd Fantasy':
        API_URL = "https://api-inference.huggingface.co/models/digiplay/perfectLewdFantasy_v1.01"
    if model == 'RSM Porn XL':
        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/rsmpornxl"
    if model == 'OmniGenXL NSFW':
        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/omnigenxl-nsfw-sfw"
    if model == 'Pyros NSFW':
        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/pyros-nsfw-sdxl"
    if model == 'SDXXXL':
        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/sdxxxl"
    if model == 'SDXXXL 2':
        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/sdxxxl-v30-jan24"
    if model == 'Playground 2':
        API_URL = "https://api-inference.huggingface.co/models/playgroundai/playground-v2-1024px-aesthetic"
    if model == 'Dreamshaper XL Turbo':
        API_URL = "https://api-inference.huggingface.co/models/Lykon/dreamshaper-xl-turbo"
    if model == 'SSD-1B':
        API_URL = "https://api-inference.huggingface.co/models/segmind/SSD-1B"
    if model == 'AbsoluteReality 1.8.1':
        API_URL = "https://api-inference.huggingface.co/models/digiplay/AbsoluteReality_v1.8.1"
    if model == 'Lyriel 1.6':
        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/lyrielv16"
    if model == 'Animagine XL 3.0':
        API_URL = "https://api-inference.huggingface.co/models/cagliostrolab/animagine-xl-3.0"
        prompt = f"Anime porn. {prompt}"
    if model == 'Animagine XL 2.0':
        API_URL = "https://api-inference.huggingface.co/models/Linaqruf/animagine-xl-2.0"
        prompt = f"Anime porn. {prompt}"
    if model == 'Incursios 1.6':
        API_URL = "https://api-inference.huggingface.co/models/digiplay/incursiosMemeDiffusion_v1.6"
        prompt = f"Anime porn. {prompt}"
    if model == 'NewReality XL':
        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/newrealityxl-global-nsfw"
    if model == 'Disney':
        API_URL = "https://api-inference.huggingface.co/models/goofyai/disney_style_xl"
        prompt = f"Disney style. {prompt}"
    if model == 'CleanLinearMix':
        API_URL = "https://api-inference.huggingface.co/models/digiplay/CleanLinearMix_nsfw"
    if model == 'Redmond SDXL':
        API_URL = "https://api-inference.huggingface.co/models/artificialguybr/LogoRedmond-LogoLoraForSDXL-V2"
    if model == 'NSFW Hentai':
        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/explicit-freedom-nsfw-wai"
    if model == 'SDXL Niji':
        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/SDXL_Niji_SE"
    if model == 'Crystal Clear XL':
        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/crystal-clear-xlv1"
    if model == 'SexyToons':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/sexyToons"
    if model == 'Realistic Vision v12':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/realistic-vision-v12"
    if model == 'CinemaEros':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/CinemaEros"
    if model == 'CutesyAnime':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/CutesyAnime"
    if model == 'epiCPhotoGasm':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/epiCPhotoGasm"
    # Build the request payload: the prompt goes in "inputs" and the remaining
    # settings are passed through as-is.
    payload = {
        "inputs": prompt,
        "is_negative": is_negative,
        "steps": steps,
        "cfg_scale": cfg_scale,
        "seed": seed if seed != -1 else random.randint(1, 1000000000),
        "strength": strength
    }

    response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
    if response.status_code != 200:
        print(f"Error: failed to get an image. Response status: {response.status_code}")
        print(f"Response content: {response.text}")
        if response.status_code == 503:
            raise gr.Error(f"{response.status_code} : The model is being loaded")
        raise gr.Error(f"{response.status_code}")

    try:
        image_bytes = response.content
        image = Image.open(io.BytesIO(image_bytes))
        print(f'\033[1mGeneration {key} complete!\033[0m ({prompt})')
        return image
    except Exception as e:
        print(f"Error while trying to open the image: {e}")
        return None
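
# A hedged sketch (not part of the original app and not wired into the UI) of
# how the 503 "model is being loaded" case raised above could be retried
# automatically. The helper name, retry count and delay are illustrative
# assumptions, and it relies on gr.Error carrying its message in str(e).
def query_with_retry(*args, retries=3, delay=30, **kwargs):
    for attempt in range(retries):
        try:
            return query(*args, **kwargs)
        except gr.Error as e:
            # Retry only while the model is still loading; re-raise anything else.
            if "503" in str(e) and attempt < retries - 1:
                time.sleep(delay)
                continue
            raise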
css = """
* {}
footer {visibility: hidden !important;}
"""
with gr.Blocks(theme='Nymbo/Alyx_Theme') as dalle:
with gr.Tab("Basic Settings"):
with gr.Row():
with gr.Column(elem_id="prompt-container"):
with gr.Row():
text_prompt = gr.Textbox(label="Prompt", placeholder="Enter a prompt here", lines=3, elem_id="prompt-text-input")
with gr.Row():
with gr.Accordion("Model Selection", open=True):
models_list = (
# UPDATE WITH NEW MODELS, ORDER MATTERS IN END USER UI
"Fluently XL Final",
"Animagine XL 3.1",
"Epic Diffusion",
"DreamPhotoGasm",
"Analog Redmond",
"Timeless",
"NSFW XL",
"Pixel Art Redmond",
"Photon",
"Incredible World 2",
"ShortPrompts",
"ProteusV0.4",
"ProteusV0.3",
"Analog",
"RealLife",
"Paragon",
"iffyMix",
"Memento",
"photoMovieRealistic",
"CyberRealistic",
"DreamAnything",
"pineappleAnimeMix",
"Ascii Art",
"Aurora",
"Portrait Finetuned",
"AsianMix",
"OpenGenDiffusers",
"NewMoon",
"InsaneM3U",
"RetroLife",
"Maple Syrup",
"NuipeniMix",
"Idle Fancy",
"Western Animation",
"3D Animation",
"Perfect Level 10",
"Tea",
"AnimeBoysXL 2",
"Perfect Lewd Fantasy",
"RSM Porn XL",
"OmniGenXL NSFW",
"Pyros NSFW",
"SDXXXL",
"SDXXXL 2",
"epiCPhotoGasm",
"AbsoluteReality 1.8.1",
"SSD-1B",
"Dreamshaper XL Turbo",
"Realistic Vision v12",
"NSFW Hentai",
"Lyriel 1.6",
"Animagine XL 2.0",
"Animagine XL 3.0",
"CinemaEros",
"Incursios 1.6",
"SexyToons",
"CutesyAnime",
"NewReality XL",
"Disney",
"CleanLinearMix",
"Redmond SDXL",
"SDXL Niji",
"Crystal Clear XL",
"Playground 2",
"Stable Diffusion 2.1"
)
model = gr.Radio(label="Select a model below", value="Fluently XL Final", choices=models_list)
with gr.Tab("Advanced Settings"):
with gr.Row():
negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="What should not be in the image", value="[deformed | disfigured], poorly drawn, [bad : wrong] anatomy, [extra | missing | floating | disconnected] limb, (mutated hands and fingers), blurry, text, fuzziness", lines=3, elem_id="negative-prompt-text-input")
with gr.Row():
steps = gr.Slider(label="Sampling steps", value=35, minimum=1, maximum=100, step=1)
with gr.Row():
cfg = gr.Slider(label="CFG Scale", value=7, minimum=1, maximum=20, step=1)
with gr.Row():
method = gr.Radio(label="Sampling method", value="DPM++ 2M Karras", choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "Euler", "Euler a", "Heun", "DDIM"])
with gr.Row():
strength = gr.Slider(label="Strength", value=0.7, minimum=0, maximum=1, step=0.001)
with gr.Row():
seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=1000000000, step=1)
# with gr.Row():
# gpt = gr.Checkbox(label="ChatGPT")
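    # All of the controls above are forwarded to query() by the Run button
    # handler at the bottom of the file (negative_prompt -> is_negative,
    # steps -> steps, cfg -> cfg_scale, method -> sampler, seed -> seed,
    # strength -> strength). Note that the sampling method is accepted by
    # query() but is not currently included in the request payload.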
with gr.Tab("Image Editor"):
def sleep(im):
time.sleep(5)
return [im["background"], im["layers"][0], im["layers"][1], im["composite"]]
def predict(im):
return im["composite"]
with gr.Blocks() as demo:
with gr.Row():
im = gr.ImageEditor(
type="numpy",
crop_size="1:1",
)
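        # Note: sleep() and predict() above are defined but not currently
        # attached to the ImageEditor component or to any output.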
with gr.Tab("Information"):
with gr.Row():
gr.Textbox(label="Sample prompt", value="{prompt} | ultra detail, ultra elaboration, ultra quality, perfect.")
with gr.Accordion("Advanced Settings Overview", open=False):
gr.Markdown(
""" # `Alyxsissy.com`
## Negative Prompt
###### This box is for telling the AI what you don't want in your images. Think of it as a way to avoid certain elements. For instance, if you don't want blurry images or extra limbs showing up, this is where you'd mention it.
## Sampling Steps
###### Think of this like the number of brushstrokes in a painting. A higher number can give you a more detailed picture, but it also takes a bit longer. Generally, a middle-ground number like 35 is a good balance between quality and speed.
## CFG Scale
###### CFG stands for "Control Free Guidance." The scale adjusts how closely the AI follows your prompt. A lower number makes the AI more creative and free-flowing, while a higher number makes it stick closely to what you asked for. If you want the AI to take fewer artistic liberties, slide this towards a higher number. Just think "Control Freak Gauge".
## Sampling Method
###### This is the technique the AI uses to create your image. Each option is a different approach, like choosing between pencils, markers, or paint. You don't need to worry too much about this; the default setting is usually the best choice for most users.
## Strength
###### This setting is a bit like the 'intensity' knob. It determines how much the AI modifies the base image it starts with. If you're looking to make subtle changes, keep this low. For more drastic transformations, turn it up.
## Seed
###### You can think of the seed as a 'recipe' for creating an image. If you find a seed that gives you a result you love, you can use it again to create a similar image. If you leave it at -1, the AI will generate a new seed every time.
### Remember, these settings are all about giving you control over the image generation process. Feel free to experiment and see what each one does. And if you're ever in doubt, the default settings are a great place to start. Happy creating!
"""
)
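            # For reference, the settings described above correspond to the
            # payload keys built in query(): "is_negative", "steps",
            # "cfg_scale", "strength" and "seed" (-1 is replaced by a random
            # seed before the request is sent).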
with gr.Accordion("Error Codes and What They Mean", open=False):
gr.Markdown(
""" # `Alyxsissy.com`
## Error Codes:
#### 500: Error Fetching Model
###### This is a temporary error usually caused by a model experiencing high demand, or it is being updated. Try again in a few minutes.
#### 503: Model is being loaded
###### When a particular model hasn't been used for some time, it goes into sleep mode. Error 503 means that the model is being loaded and will be ready within a minute.
"""
)
    with gr.Row():
        text_button = gr.Button("Run", variant='primary', elem_id="gen-button")
    with gr.Row():
        image_output = gr.Image(type="pil", label="Image Output", elem_id="gallery")

    text_button.click(query, inputs=[text_prompt, model, negative_prompt, steps, cfg, method, seed, strength], outputs=image_output)
dalle.launch(show_api=False, share=False)
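
# Running this app outside of Spaces relies on the same environment variables
# read above: HF_READ_TOKEN (plus the optional HF_READ_TOKEN_2..5 used for
# token rotation) and API_KEY_OPENAI for the optional GPT prompt enhancement.
# Assuming this file is saved as app.py, `python app.py` starts the interface.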