nevreal committed on
Commit
4af6932
·
verified ·
1 Parent(s): 55598ba

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +84 -111
app.py CHANGED
@@ -1,111 +1,84 @@
1
- import gradio as gr
2
- from model import models
3
- from multit2i import (load_models, infer_fn, infer_rand_fn, save_gallery,
4
- change_model, warm_model, get_model_info_md, loaded_models,
5
- get_positive_prefix, get_positive_suffix, get_negative_prefix, get_negative_suffix,
6
- get_recom_prompt_type, set_recom_prompt_preset, get_tag_type, randomize_seed, translate_to_en)
7
-
8
- max_images = 6
9
- MAX_SEED = 2**32-1
10
- load_models(models)
11
-
12
- css = """
13
- .model_info { text-align: center; }
14
- .output { width=112px; height=112px; max_width=112px; max_height=112px; !important; }
15
- .gallery { min_width=512px; min_height=512px; max_height=1024px; !important; }
16
- """
17
-
18
- with gr.Blocks(theme="NoCrypt/miku@>=1.2.2", fill_width=True, css=css) as demo:
19
- with gr.Row():
20
- with gr.Column(scale=10):
21
- with gr.Group():
22
- prompt = gr.Text(label="Prompt", lines=2, max_lines=8, placeholder="1girl, solo, ...", show_copy_button=True)
23
- with gr.Accordion("Advanced options", open=False):
24
- neg_prompt = gr.Text(label="Negative Prompt", lines=1, max_lines=8, placeholder="")
25
- with gr.Row():
26
- width = gr.Slider(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
27
- height = gr.Slider(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
28
- steps = gr.Slider(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0)
29
- with gr.Row():
30
- cfg = gr.Slider(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0)
31
- seed = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
32
- seed_rand = gr.Button("Randomize Seed 🎲", size="sm", variant="secondary")
33
- recom_prompt_preset = gr.Radio(label="Set Presets", choices=get_recom_prompt_type(), value="Common")
34
- with gr.Row():
35
- positive_prefix = gr.CheckboxGroup(label="Use Positive Prefix", choices=get_positive_prefix(), value=[])
36
- positive_suffix = gr.CheckboxGroup(label="Use Positive Suffix", choices=get_positive_suffix(), value=["Common"])
37
- negative_prefix = gr.CheckboxGroup(label="Use Negative Prefix", choices=get_negative_prefix(), value=[])
38
- negative_suffix = gr.CheckboxGroup(label="Use Negative Suffix", choices=get_negative_suffix(), value=["Common"])
39
- with gr.Row():
40
- image_num = gr.Slider(label="Number of images", minimum=1, maximum=max_images, value=1, step=1, interactive=True, scale=2)
41
- trans_prompt = gr.Button(value="Translate 📝", variant="secondary", size="sm", scale=2)
42
- clear_prompt = gr.Button(value="Clear 🗑️", variant="secondary", size="sm", scale=1)
43
- with gr.Row():
44
- run_button = gr.Button("Generate Image", variant="primary", scale=6)
45
- random_button = gr.Button("Random Model 🎲", variant="secondary", scale=3)
46
- #stop_button = gr.Button('Stop', interactive=False, variant="stop", scale=1)
47
- with gr.Group():
48
- model_name = gr.Dropdown(label="Select Model", choices=list(loaded_models.keys()), value=list(loaded_models.keys())[0], allow_custom_value=True)
49
- model_info = gr.Markdown(value=get_model_info_md(list(loaded_models.keys())[0]), elem_classes="model_info")
50
- with gr.Column(scale=10):
51
- with gr.Group():
52
- with gr.Row():
53
- output = [gr.Image(label='', elem_classes="output", type="filepath", format="png",
54
- show_download_button=True, show_share_button=False, show_label=False,
55
- interactive=False, min_width=80, visible=True) for _ in range(max_images)]
56
- with gr.Group():
57
- results = gr.Gallery(label="Gallery", elem_classes="gallery", interactive=False, show_download_button=True, show_share_button=False,
58
- container=True, format="png", object_fit="cover", columns=2, rows=2)
59
- image_files = gr.Files(label="Download", interactive=False)
60
- clear_results = gr.Button("Clear Gallery / Download 🗑️", variant="secondary")
61
- with gr.Column():
62
- examples = gr.Examples(
63
- examples = [
64
- ["souryuu asuka langley, 1girl, neon genesis evangelion, plugsuit, pilot suit, red bodysuit, sitting, crossing legs, black eye patch, cat hat, throne, symmetrical, looking down, from bottom, looking at viewer, outdoors"],
65
- ["sailor moon, magical girl transformation, sparkles and ribbons, soft pastel colors, crescent moon motif, starry night sky background, shoujo manga style"],
66
- ["kafuu chino, 1girl, solo"],
67
- ["1girl"],
68
- ["beautiful sunset"],
69
- ],
70
- inputs=[prompt],
71
- )
72
- gr.Markdown(
73
- f"""This demo was created in reference to the following demos.<br>
74
- [Nymbo/Flood](https://huggingface.co/spaces/Nymbo/Flood),
75
- [Yntec/ToyWorldXL](https://huggingface.co/spaces/Yntec/ToyWorldXL),
76
- [Yntec/Diffusion80XX](https://huggingface.co/spaces/Yntec/Diffusion80XX).
77
- """
78
- )
79
- gr.DuplicateButton(value="Duplicate Space")
80
- gr.Markdown(f"Just a few edits to *model.py* are all it takes to complete your own collection.")
81
-
82
- #gr.on(triggers=[run_button.click, prompt.submit, random_button.click], fn=lambda: gr.update(interactive=True), inputs=None, outputs=stop_button, show_api=False)
83
- model_name.change(change_model, [model_name], [model_info], queue=False, show_api=False)\
84
- .success(warm_model, [model_name], None, queue=False, show_api=False)
85
- for i, o in enumerate(output):
86
- img_i = gr.Number(i, visible=False)
87
- image_num.change(lambda i, n: gr.update(visible = (i < n)), [img_i, image_num], o, show_api=False)
88
- gen_event = gr.on(triggers=[run_button.click, prompt.submit],
89
- fn=lambda i, n, m, t1, t2, n1, n2, n3, n4, n5, l1, l2, l3, l4: infer_fn(m, t1, t2, n1, n2, n3, n4, n5, l1, l2, l3, l4) if (i < n) else None,
90
- inputs=[img_i, image_num, model_name, prompt, neg_prompt, height, width, steps, cfg, seed,
91
- positive_prefix, positive_suffix, negative_prefix, negative_suffix],
92
- outputs=[o], queue=False, show_api=False) # Be sure to delete ", queue=False" when activating the stop button
93
- gen_event2 = gr.on(triggers=[random_button.click],
94
- fn=lambda i, n, m, t1, t2, n1, n2, n3, n4, n5, l1, l2, l3, l4: infer_rand_fn(m, t1, t2, n1, n2, n3, n4, n5, l1, l2, l3, l4) if (i < n) else None,
95
- inputs=[img_i, image_num, model_name, prompt, neg_prompt, height, width, steps, cfg, seed,
96
- positive_prefix, positive_suffix, negative_prefix, negative_suffix],
97
- outputs=[o], queue=False, show_api=False) # Be sure to delete ", queue=False" when activating the stop button
98
- o.change(save_gallery, [o, results], [results, image_files], show_api=False)
99
- #stop_button.click(lambda: gr.update(interactive=False), None, stop_button, cancels=[gen_event, gen_event2], show_api=False)
100
-
101
- clear_prompt.click(lambda: None, None, [prompt], queue=False, show_api=False)
102
- clear_results.click(lambda: (None, None), None, [results, image_files], queue=False, show_api=False)
103
- recom_prompt_preset.change(set_recom_prompt_preset, [recom_prompt_preset],
104
- [positive_prefix, positive_suffix, negative_prefix, negative_suffix], queue=False, show_api=False)
105
- seed_rand.click(randomize_seed, None, [seed], queue=False, show_api=False)
106
- trans_prompt.click(translate_to_en, [prompt], [prompt], queue=False, show_api=False)\
107
- .then(translate_to_en, [neg_prompt], [neg_prompt], queue=False, show_api=False)
108
-
109
- demo.queue(default_concurrency_limit=200, max_size=200)
110
- demo.launch(max_threads=400)
111
- # https://github.com/gradio-app/gradio/issues/6339
 
1
+ import argparse
2
+ import random
3
+ from model import models
4
+ from multit2i import (load_models, infer_fn, infer_rand_fn, save_gallery, change_model,
5
+ warm_model, get_model_info_md, loaded_models, get_positive_prefix, get_positive_suffix,
6
+ get_negative_prefix, get_negative_suffix, get_recom_prompt_type, set_recom_prompt_preset,
7
+ randomize_seed, translate_to_en)
8
+
9
+ MAX_SEED = 2**32 - 1
10
+ max_images = 6
11
+
12
def generate_image(args):
    """Generate images with the requested model and save them to disk.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed CLI options: model_name, prompt, neg_prompt, width, height,
        steps, cfg, seed, positive/negative prefix/suffix lists, output.
    """
    # Populate the model registry before any model lookup.
    load_models(models)

    # Fall back to the first loaded model when no model was specified
    # (the parser default can be None if loaded_models was empty at
    # parser-build time).
    model_name = args.model_name or next(iter(loaded_models), None)

    # --seed -1 means "randomize": draw a fresh 32-bit seed.
    seed = args.seed if args.seed != -1 else random.randint(0, MAX_SEED)

    # NOTE(review): argument order (height before width) mirrors the
    # original Gradio wiring of infer_fn — confirm against multit2i.
    images = infer_fn(
        model_name, args.prompt, args.neg_prompt, args.height, args.width,
        args.steps, args.cfg, seed,
        args.positive_prefix, args.positive_suffix,
        args.negative_prefix, args.negative_suffix,
    )

    # NOTE(review): assumes save_gallery accepts (images, output_dir) —
    # verify against multit2i's signature.
    save_gallery(images, args.output)
def random_model_image(args):
    """Generate images with a randomly chosen loaded model and save them.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed CLI options; model_name is ignored — a random loaded
        model is used instead.
    """
    # Populate the model registry before choosing from it.
    load_models(models)

    # Pick any loaded model at random.
    model_name = random.choice(list(loaded_models.keys()))

    # Fix: honor an explicit --seed instead of always randomizing,
    # matching generate_image's behavior (-1 still means "randomize").
    seed = args.seed if args.seed != -1 else random.randint(0, MAX_SEED)

    # NOTE(review): argument order (height before width) mirrors the
    # original Gradio wiring of infer_rand_fn — confirm against multit2i.
    images = infer_rand_fn(
        model_name, args.prompt, args.neg_prompt, args.height, args.width,
        args.steps, args.cfg, seed,
        args.positive_prefix, args.positive_suffix,
        args.negative_prefix, args.negative_suffix,
    )

    # NOTE(review): assumes save_gallery accepts (images, output_dir) —
    # verify against multit2i's signature.
    save_gallery(images, args.output)
if __name__ == "__main__":
    # Load models BEFORE building the parser: the --model_name default
    # reads loaded_models, which is empty until load_models() has run.
    # The original code did list(loaded_models.keys())[0] here, which
    # raises IndexError on an empty registry. load_models is assumed
    # idempotent, so the extra call inside the worker functions is
    # harmless — TODO confirm.
    load_models(models)

    parser = argparse.ArgumentParser(description="Image generation script using preloaded models.")

    # Safe default: first loaded model, or None if nothing loaded
    # (generate_image resolves None to the first available model).
    default_model = next(iter(loaded_models), None)
    parser.add_argument('--model_name', type=str, default=default_model, help='The model to use for image generation.')
    parser.add_argument('--prompt', type=str, required=True, help='The prompt for image generation.')
    parser.add_argument('--neg_prompt', type=str, default="", help='Negative prompt to avoid unwanted elements.')
    parser.add_argument('--width', type=int, default=0, help='Image width, 0 for default.')
    parser.add_argument('--height', type=int, default=0, help='Image height, 0 for default.')
    parser.add_argument('--steps', type=int, default=0, help='Number of inference steps.')
    parser.add_argument('--cfg', type=float, default=0, help='Guidance scale (CFG).')
    parser.add_argument('--seed', type=int, default=-1, help='Random seed, -1 for random.')
    parser.add_argument('--output', type=str, default="output_gallery", help='Output directory for generated images.')
    parser.add_argument('--positive_prefix', nargs='*', default=[], help='Positive prefix for the prompt.')
    parser.add_argument('--positive_suffix', nargs='*', default=["Common"], help='Positive suffix for the prompt.')
    parser.add_argument('--negative_prefix', nargs='*', default=[], help='Negative prefix for the prompt.')
    parser.add_argument('--negative_suffix', nargs='*', default=["Common"], help='Negative suffix for the prompt.')

    parser.add_argument('--random_model', action='store_true', help='Use random model for image generation.')

    args = parser.parse_args()

    # Dispatch: random-model mode vs. explicit/default model mode.
    if args.random_model:
        random_model_image(args)
    else:
        generate_image(args)