import spaces
import gradio as gr
import numpy as np
import random
import torch
from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

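# Load the Noob SDXL beta checkpoint from a single .safetensors file in half precision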
pipe = StableDiffusionXLPipeline.from_single_file(
    "https://huggingface.co/Laxhar/noob_sdxl_beta/noob_hercules3/checkpoint/checkpoint-e2_s109089.safetensors/checkpoint-e2_s109089.safetensors",
    use_safetensors=True,
    torch_dtype=torch.float16,
)
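# Swap in the Euler Ancestral scheduler in place of the checkpoint's default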
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.to(device)

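# Upper bounds for the seed slider and the width/height sliders in the UI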
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1216

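# On Hugging Face ZeroGPU Spaces, @spaces.GPU allocates a GPU for the duration of each call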
@spaces.GPU
def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

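    # Seed the generator explicitly so the same seed reproduces the same image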
    generator = torch.Generator().manual_seed(seed)

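    # Run text-to-image inference; the pipeline returns a list of PIL images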
    output_image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=generator
    ).images[0]

    # Convert to RGBA so the result can be downloaded as a PNG
    output_image = output_image.convert("RGBA")

    return output_image  # return the PIL image object directly

css = """
#col-container {
    margin: 0 auto;
    max-width: 520px;
}
"""

with gr.Blocks(css=css) as demo:

    with gr.Column(elem_id="col-container"):
        gr.Markdown("""
        # Text-to-Image Demo
        using the [Noob SDXL beta model](https://huggingface.co/Laxhar)
        """)
        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )

            run_button = gr.Button("Run", scale=0)

        # show_download_button=True lets users download the result as a PNG
        result = gr.Image(label="Result", show_label=False, type="pil", elem_id="output_image", show_download_button=True)
        
        with gr.Accordion("Advanced Settings", open=False):

            negative_prompt = gr.Text(
                label="Negative prompt",
                max_lines=1,
                placeholder="Enter a negative prompt",
                value="nsfw, (low quality, worst quality:1.2), very displeasing, 3d, watermark, signature, ugly, poorly drawn"
            )

            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )

            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)

            with gr.Row():
                width = gr.Slider(
                    label="Width",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=1024,
                )

                height = gr.Slider(
                    label="Height",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=1024,
                )

            with gr.Row():
                guidance_scale = gr.Slider(
                    label="Guidance scale",
                    minimum=0.0,
                    maximum=20.0,
                    step=0.1,
                    value=7,
                )

                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=28,
                    step=1,
                    value=28,
                )

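    # Wire the Run button to infer() and display the returned image in `result`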
    run_button.click(
        fn=infer,
        inputs=[prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
        outputs=[result]
    )

demo.queue().launch()