import json
from contextlib import closing

import modules.scripts
from modules import processing, infotext_utils
from modules.infotext_utils import create_override_settings_dict, parse_generation_parameters
from modules.shared import opts
import modules.shared as shared
from modules.ui import plaintext_to_html
from PIL import Image
import gradio as gr

def txt2img_create_processing(id_task: str, request: gr.Request, prompt: str, negative_prompt: str, prompt_styles,
                              n_iter: int, batch_size: int, cfg_scale: float, height: int, width: int, enable_hr: bool,
                              denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int,
                              hr_resize_x: int, hr_resize_y: int, hr_checkpoint_name: str, hr_sampler_name: str, hr_scheduler: str,
                              hr_prompt: str, hr_negative_prompt, override_settings_texts, enable_progressive_growing: bool,
                              progressive_growing_min_scale: float, progressive_growing_max_scale: float, progressive_growing_steps: int,
                              progressive_growing_refinement: bool, *args, force_enable_hr=False):
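    """Build a StableDiffusionProcessingTxt2Img object from the txt2img UI arguments, including the progressive growing options."""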
    override_settings = create_override_settings_dict(override_settings_texts)

    if force_enable_hr:
        enable_hr = True

    print(f"enable_progressive_growing: {enable_progressive_growing}")
    print(f"progressive_growing_min_scale: {progressive_growing_min_scale}")

    p = processing.StableDiffusionProcessingTxt2Img(
        sd_model=shared.sd_model,
        outpath_samples=opts.outdir_samples or opts.outdir_txt2img_samples,
        outpath_grids=opts.outdir_grids or opts.outdir_txt2img_grids,
        prompt=prompt,
        styles=prompt_styles,
        negative_prompt=negative_prompt,
        batch_size=batch_size,
        n_iter=n_iter,
        cfg_scale=cfg_scale,
        width=width,
        height=height,
        enable_hr=enable_hr,
        denoising_strength=denoising_strength,
        hr_scale=hr_scale,
        hr_upscaler=hr_upscaler,
        hr_second_pass_steps=hr_second_pass_steps,
        hr_resize_x=hr_resize_x,
        hr_resize_y=hr_resize_y,
        hr_checkpoint_name=None if hr_checkpoint_name == 'Use same checkpoint' else hr_checkpoint_name,
        hr_sampler_name=None if hr_sampler_name == 'Use same sampler' else hr_sampler_name,
        hr_scheduler=None if hr_scheduler == 'Use same scheduler' else hr_scheduler,
        hr_prompt=hr_prompt,
        hr_negative_prompt=hr_negative_prompt,
        override_settings=override_settings,
    )

    p.id_task = id_task

    # Attach the progressive growing settings to the processing object so downstream scripts can read them.
    p.enable_progressive_growing = enable_progressive_growing
    p.progressive_growing_min_scale = progressive_growing_min_scale
    p.progressive_growing_max_scale = progressive_growing_max_scale
    p.progressive_growing_steps = progressive_growing_steps
    p.progressive_growing_refinement = progressive_growing_refinement

    p.scripts = modules.scripts.scripts_txt2img
    p.script_args = args

    p.user = request.username

    if shared.opts.enable_console_prompts:
        print(f"\ntxt2img: {prompt}", file=shared.progress_print_out)

    return p


def txt2img_upscale(id_task: str, request: gr.Request, gallery, gallery_index, generation_info, *args):
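    """Re-run a single gallery image through the highres fix pass, keeping the other gallery entries untouched."""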
    assert len(gallery) > 0, 'No image to upscale'
    assert 0 <= gallery_index < len(gallery), f'Bad image index: {gallery_index}'

    p = txt2img_create_processing(id_task, request, *args, force_enable_hr=True)
    p.batch_size = 1
    p.n_iter = 1
    # txt2img_upscale attribute that signifies this is called by txt2img_upscale
    p.txt2img_upscale = True

    geninfo = json.loads(generation_info)

    image_info = gallery[gallery_index] if 0 <= gallery_index < len(gallery) else gallery[0]
    p.firstpass_image = infotext_utils.image_from_url_text(image_info)

    parameters = parse_generation_parameters(geninfo.get('infotexts')[gallery_index], [])
    p.seed = parameters.get('Seed', -1)
    p.subseed = parameters.get('Variation seed', -1)

    p.override_settings['save_images_before_highres_fix'] = False

    with closing(p):
        processed = modules.scripts.scripts_txt2img.run(p, *p.script_args)

        if processed is None:
            processed = processing.process_images(p)

    shared.total_tqdm.clear()

    new_gallery = []
    for i, image in enumerate(gallery):
        if i == gallery_index:
            geninfo["infotexts"][gallery_index: gallery_index+1] = processed.infotexts
            new_gallery.extend(processed.images)
        else:
            # Other entries become 1x1 placeholders that point at the already-saved image files.
            fake_image = Image.new(mode="RGB", size=(1, 1))
            fake_image.already_saved_as = image["name"].rsplit('?', 1)[0]
            new_gallery.append(fake_image)

    geninfo["infotexts"][gallery_index] = processed.info

    return new_gallery, json.dumps(geninfo), plaintext_to_html(processed.info), plaintext_to_html(processed.comments, classname="comments")


def txt2img(id_task: str, request: gr.Request, *args):
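    """Standard txt2img entry point: build the processing object, run scripts or the default pipeline, and return the gallery outputs."""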
    p = txt2img_create_processing(id_task, request, *args)

    with closing(p):
        processed = modules.scripts.scripts_txt2img.run(p, *p.script_args)

        if processed is None:
            processed = processing.process_images(p)

    shared.total_tqdm.clear()

    generation_info_js = processed.js()
    if opts.samples_log_stdout:
        print(generation_info_js)

    if opts.do_not_show_images:
        processed.images = []

    return processed.images, generation_info_js, plaintext_to_html(processed.info), plaintext_to_html(processed.comments, classname="comments")