|
import random

import gradio as gr
import numpy as np
import spaces
import torch
from compel import Compel, ReturnedEmbeddingsType
from diffusers import AutoencoderKL, AutoPipelineForText2Image

|
# Shown in the UI; warns when no GPU is available.
DESCRIPTION = ""
if not torch.cuda.is_available():
    DESCRIPTION += "\n<p>You are running on CPU, but this project only supports GPU.</p>"

MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 4096

|
if torch.cuda.is_available():
    # fp16-safe SDXL VAE; the stock VAE can produce NaN artifacts at float16.
    vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
    pipe = AutoPipelineForText2Image.from_pretrained(
        "John6666/noobai-xl-nai-xl-epsilonpred10version-sdxl",
        vae=vae,
        torch_dtype=torch.float16,
        use_safetensors=True,
        add_watermarker=False,
    )
    pipe.to("cuda")
|
|
|
def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    return seed
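# For example, randomize_seed_fn(7, True) returns a fresh seed in [0, MAX_SEED],
# while randomize_seed_fn(7, False) returns 7 unchanged (illustrative values).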
|
|
|
@spaces.GPU
def infer(
    prompt: str,
    negative_prompt: str = "lowres, {bad}, error, fewer, extra, missing, worst quality, jpeg artifacts, bad quality, watermark, unfinished, displeasing, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]",
    use_negative_prompt: bool = True,
    seed: int = 7,
    width: int = 1024,
    height: int = 1536,
    guidance_scale: float = 3,
    num_inference_steps: int = 30,
    randomize_seed: bool = True,
    use_resolution_binning: bool = True,
    progress=gr.Progress(track_tqdm=True),
):
    seed = int(randomize_seed_fn(seed, randomize_seed))
    generator = torch.Generator().manual_seed(seed)
    # Compel encodes weighted prompts through both SDXL text encoders.
    compel = Compel(
        tokenizer=[pipe.tokenizer, pipe.tokenizer_2],
        text_encoder=[pipe.text_encoder, pipe.text_encoder_2],
        returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
        requires_pooled=[False, True],
    )
    # Honor the checkbox: clear the negative prompt when it is disabled.
    if not use_negative_prompt:
        negative_prompt = ""
    conditioning, pooled = compel(prompt)
    # Encode the negative prompt through Compel too, so both sides of
    # classifier-free guidance use the same embedding path.
    negative_conditioning, negative_pooled = compel(negative_prompt)
    # Pad to the same token length so the guidance tensor shapes match.
    [conditioning, negative_conditioning] = compel.pad_conditioning_tensors_to_same_length(
        [conditioning, negative_conditioning]
    )

    # Note: use_resolution_binning is a PixArt-style option that the SDXL
    # pipeline does not implement, so it is not forwarded here.
    image = pipe(
        prompt_embeds=conditioning,
        pooled_prompt_embeds=pooled,
        negative_prompt_embeds=negative_conditioning,
        negative_pooled_prompt_embeds=negative_pooled,
        width=width,
        height=height,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
    ).images[0]
    return image, seed
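# Compel's weighting syntax uses "+"/"-" suffixes and "(term)weight", e.g.
# "masterpiece++, (detailed background)1.3"; the NovelAI-style "{...}" and
# "[...]" markers in the default negative prompt are treated as literal text.
# A minimal direct-call sketch (illustrative values, assumes a CUDA machine):
# image, used_seed = infer("nahida (genshin impact)", seed=42, randomize_seed=False)
# image.save("out.png")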
|
|
|
examples = [
    "nahida (genshin impact)",
    "klee (genshin impact)",
]

css = '''
.gradio-container{max-width: 560px !important}
h1{text-align:center}
footer {
    visibility: hidden
}
'''
|
|
|
with gr.Blocks(css=css) as demo:
    gr.Markdown("""# 梦羽's Model Generator
### Quickly generate images with NoobAI-XL v1.0""")
    gr.Markdown(DESCRIPTION)
    with gr.Group():
        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter keywords for the image you want",
                container=False,
            )
            run_button = gr.Button("Generate", scale=0, variant="primary")
        result = gr.Image(label="Result", show_label=False)
    with gr.Accordion("Advanced options", open=False):
        with gr.Row():
            use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=True)
            negative_prompt = gr.Text(
                label="Negative prompt",
                max_lines=5,
                lines=4,
                placeholder="Enter keywords to exclude from the image",
                value="lowres, {bad}, error, fewer, extra, missing, worst quality, jpeg artifacts, bad quality, watermark, unfinished, displeasing, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]",
                visible=True,
            )
        seed = gr.Slider(
            label="Seed",
            minimum=0,
            maximum=MAX_SEED,
            step=1,
            value=0,
        )
        randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
        with gr.Row(visible=True):
            width = gr.Slider(
                label="Width",
                minimum=512,
                maximum=MAX_IMAGE_SIZE,
                step=64,
                value=1024,
            )
            height = gr.Slider(
                label="Height",
                minimum=512,
                maximum=MAX_IMAGE_SIZE,
                step=64,
                value=1536,
            )
        with gr.Row():
            guidance_scale = gr.Slider(
                label="Guidance scale",
                minimum=0.1,
                maximum=10,
                step=0.1,
                value=7.0,
            )
            num_inference_steps = gr.Slider(
                label="Inference steps",
                minimum=1,
                maximum=50,
                step=1,
                value=28,
            )

    gr.Examples(
        examples=examples,
        inputs=prompt,
        outputs=[result, seed],
        fn=infer,
    )
|
|
|
    use_negative_prompt.change(
        fn=lambda x: gr.update(visible=x),
        inputs=use_negative_prompt,
        outputs=negative_prompt,
    )
|
|
|
    gr.on(
        triggers=[prompt.submit, run_button.click],
        fn=infer,
        inputs=[
            prompt,
            negative_prompt,
            use_negative_prompt,
            seed,
            width,
            height,
            guidance_scale,
            num_inference_steps,
            randomize_seed,
        ],
        outputs=[result, seed],
    )
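# On Spaces, a request queue is often enabled before launch (an assumption,
# not part of the original script):
# demo.queue(max_size=20)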
|
|
|
if __name__ == "__main__":
    demo.launch()