import gradio as gr
import requests
import io
import random
import os
from PIL import Image
from deep_translator import GoogleTranslator
from langdetect import detect

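# Default Inference API endpoint; query() switches it to the endpoint of the selected model.
# HF_READ_TOKEN (and the numbered variants used below) are read tokens supplied via the environment.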
API_URL = "https://api-inference.huggingface.co/models/openskyml/dalle-3-xl"
API_TOKEN = os.getenv("HF_READ_TOKEN")
headers = {"Authorization": f"Bearer {API_TOKEN}"}

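# Model names shown in the UI; each maps to an Inference API endpoint inside query().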
models_list = ["AbsoluteReality 1.8.1", "DALL-E 3 XL", "Playground 2", "Openjourney 4", "Lyriel 1.6", "Animagine XL 2.0", "Counterfeit 2.5", "Realistic Vision 5.1", "Incursios 1.6", "Anime Detailer XL", "Vector Art XL", "epiCRealism", "PixelArt XL", "NewReality XL", "Anything 5.0", "PixArt XL 2.0", "Disney Cartoon", "CleanLinearMix", "Waifu 1.4"]

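# Send the prompt to the Inference API for the chosen model and return the result as a PIL image.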
def query(prompt, model, is_negative=False, steps=30, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1):
    # Skip empty prompts rather than calling the API with nothing.
    if not prompt:
        return None

    
    # Pick one of several read tokens at random to spread requests across them.
    API_TOKEN = random.choice([os.getenv("HF_READ_TOKEN"), os.getenv("HF_READ_TOKEN_2"), os.getenv("HF_READ_TOKEN_3"), os.getenv("HF_READ_TOKEN_4"), os.getenv("HF_READ_TOKEN_5")])
    headers = {"Authorization": f"Bearer {API_TOKEN}"}
    language = detect(prompt)
    key = random.randint(0, 999)  # random id used to tag this request in the logs

    print(f'\033[1mGeneration {key}:\033[0m {prompt}')
    if language == 'ru':
        # Translate Russian prompts to English before sending them to the model.
        prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
        print(f'\033[1mGeneration {key} translated:\033[0m {prompt}')
    
    # Map each UI model name to its Inference API endpoint.
    model_urls = {
        'DALL-E 3 XL': "https://api-inference.huggingface.co/models/openskyml/dalle-3-xl",
        'Playground 2': "https://api-inference.huggingface.co/models/playgroundai/playground-v2-1024px-aesthetic",
        'Openjourney 4': "https://api-inference.huggingface.co/models/prompthero/openjourney-v4",
        'AbsoluteReality 1.8.1': "https://api-inference.huggingface.co/models/digiplay/AbsoluteReality_v1.8.1",
        'Lyriel 1.6': "https://api-inference.huggingface.co/models/stablediffusionapi/lyrielv16",
        'Animagine XL 2.0': "https://api-inference.huggingface.co/models/Linaqruf/animagine-xl-2.0",
        'Counterfeit 2.5': "https://api-inference.huggingface.co/models/gsdf/Counterfeit-V2.5",
        'Realistic Vision 5.1': "https://api-inference.huggingface.co/models/stablediffusionapi/realistic-vision-v51",
        'Incursios 1.6': "https://api-inference.huggingface.co/models/digiplay/incursiosMemeDiffusion_v1.6",
        'Anime Detailer XL': "https://api-inference.huggingface.co/models/Linaqruf/anime-detailer-xl-lora",
        'epiCRealism': "https://api-inference.huggingface.co/models/emilianJR/epiCRealism",
        'PixelArt XL': "https://api-inference.huggingface.co/models/nerijs/pixel-art-xl",
        'NewReality XL': "https://api-inference.huggingface.co/models/stablediffusionapi/newrealityxl-global-nsfw",
        'Anything 5.0': "https://api-inference.huggingface.co/models/hogiahien/anything-v5-edited",
        'PixArt XL 2.0': "https://api-inference.huggingface.co/models/PixArt-alpha/PixArt-XL-2-1024-MS",
        'Vector Art XL': "https://api-inference.huggingface.co/models/DoctorDiffusion/doctor-diffusion-s-controllable-vector-art-xl-lora",
        'Disney Cartoon': "https://api-inference.huggingface.co/models/stablediffusionapi/disney-pixal-cartoon",
        'CleanLinearMix': "https://api-inference.huggingface.co/models/digiplay/CleanLinearMix_nsfw",
        'Waifu 1.4': "https://api-inference.huggingface.co/models/gisohi6975/nsfw-waifu-diffusion",
    }
    API_URL = model_urls.get(model, "https://api-inference.huggingface.co/models/openskyml/dalle-3-xl")
    
    payload = {
        "inputs": prompt,
        "is_negative": is_negative,
        "steps": steps,
        "cfg_scale": cfg_scale,
        # A seed of -1 means "pick a random seed for this generation".
        "seed": seed if seed != -1 else random.randint(1, 1000000000),
    }

    response = requests.post(API_URL, headers=headers, json=payload, timeout=300)
    response.raise_for_status()  # raise a clear error if the API call failed
    image = Image.open(io.BytesIO(response.content))
    print(f'\033[1mGeneration {key} complete!\033[0m ({prompt})')
    return image


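# Custom CSS that hides the default Gradio footer.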
css = """
footer {visibility: hidden !important;}
"""

with gr.Blocks(css=css) as dalle:
    with gr.Tab("Basic settings"):
        with gr.Row():
            with gr.Column(elem_id="prompt-container"):
                text_prompt = gr.Textbox(label="Prompt", placeholder="Describe the image", lines=3, elem_id="prompt-text-input")
                model = gr.Radio(label="Model", value="DALL-E 3 XL", choices=models_list)
                

    with gr.Tab("Advanced settings"):
        with gr.Row():
            negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="What should not appear in the image", value="[deformed | disfigured], poorly drawn, [bad : wrong] anatomy, [extra | missing | floating | disconnected] limb, (mutated hands and fingers), blurry, text, fuzziness", lines=3, elem_id="negative-prompt-text-input")
        with gr.Row():
            steps = gr.Slider(label="Sampling steps", value=35, minimum=1, maximum=100, step=1)
        with gr.Row():
            cfg = gr.Slider(label="CFG Scale", value=7, minimum=1, maximum=20, step=1)
        with gr.Row():
            method = gr.Radio(label="Sampling method", value="DPM++ 2M Karras", choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "Euler", "Euler a", "Heun", "DDIM"])
        with gr.Row():
            seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=1000000000, step=1)


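    # Generate button and the output image.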
    with gr.Row():
        text_button = gr.Button("Generate", variant='primary', elem_id="gen-button")
    with gr.Row():
        image_output = gr.Image(type="pil", label="Image", elem_id="gallery")
        
    text_button.click(query, inputs=[text_prompt, model, negative_prompt, steps, cfg, method, seed], outputs=image_output)

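# show_api=False hides the auto-generated API docs for this Space.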
dalle.launch(show_api=False)