import gc
import math
import multiprocessing
import os
import traceback
from datetime import datetime
from io import BytesIO
from itertools import permutations
from multiprocessing.pool import Pool
from pathlib import Path
from urllib.parse import quote_plus

import numpy as np
import nltk
import torch

from PIL.Image import Image
from diffusers import DiffusionPipeline, StableDiffusionXLInpaintPipeline
from diffusers.utils import load_image
from fastapi import FastAPI
from fastapi.middleware.gzip import GZipMiddleware
from loguru import logger
from starlette.middleware.cors import CORSMiddleware
from starlette.responses import FileResponse
from starlette.responses import JSONResponse

from env import BUCKET_PATH, BUCKET_NAME
from stable_diffusion_server.bucket_api import check_if_blob_exists, upload_to_bucket  # needed by the upload endpoints below
torch._dynamo.config.suppress_errors = True

import string
import random

def generate_save_path():
    """Return a random 7-character filename stem (uppercase letters and digits)."""
    N = 7
    return ''.join(random.choices(string.ascii_uppercase + string.digits, k=N))
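# Illustrative only: generate_save_path() -> e.g. "K3X9QZB"; used below as a random image filename stem.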

# pipe = DiffusionPipeline.from_pretrained(
#     "models/stable-diffusion-xl-base-1.0",
#     torch_dtype=torch.bfloat16,
#     use_safetensors=True,
#     variant="fp16",
#     # safety_checker=None,
# )  # todo try torch_dtype=bfloat16

model_dir = os.getenv("SDXL_MODEL_DIR")

if model_dir:
    # Use local model
    model_key_base = os.path.join(model_dir, "stable-diffusion-xl-base-1.0")
    model_key_refiner = os.path.join(model_dir, "stable-diffusion-xl-refiner-1.0")
else:
    model_key_base = "stabilityai/stable-diffusion-xl-base-1.0"
    model_key_refiner = "stabilityai/stable-diffusion-xl-refiner-1.0"
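# Assumed local layout when SDXL_MODEL_DIR is set (hypothetical path):
#   $SDXL_MODEL_DIR/stable-diffusion-xl-base-1.0/
#   $SDXL_MODEL_DIR/stable-diffusion-xl-refiner-1.0/
# Otherwise the weights are fetched from the Hugging Face Hub by repo id.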

pipe = DiffusionPipeline.from_pretrained(model_key_base, torch_dtype=torch.float16, use_safetensors=True, variant="fp16")

pipe.watermark = None

pipe.to("cuda")

refiner = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0",
    text_encoder_2=pipe.text_encoder_2,
    vae=pipe.vae,
    torch_dtype=torch.bfloat16, # safer to use bfloat?
    use_safetensors=True,
    variant="fp16", #remember not to download the big model
)
refiner.watermark = None
refiner.to("cuda")

# {'scheduler', 'text_encoder', 'text_encoder_2', 'tokenizer', 'tokenizer_2', 'unet', 'vae'} can be passed in from existing model
inpaintpipe = StableDiffusionXLInpaintPipeline.from_pretrained(
    "models/stable-diffusion-xl-base-1.0", torch_dtype=torch.bfloat16, variant="fp16", use_safetensors=True,
    scheduler=pipe.scheduler,
    text_encoder=pipe.text_encoder,
    text_encoder_2=pipe.text_encoder_2,
    tokenizer=pipe.tokenizer,
    tokenizer_2=pipe.tokenizer_2,
    unet=pipe.unet,
    vae=pipe.vae,
    # load_connected_pipeline=
)
# # switch out to save gpu mem
# del inpaintpipe.vae
# del inpaintpipe.text_encoder_2
# del inpaintpipe.text_encoder
# del inpaintpipe.scheduler
# del inpaintpipe.tokenizer
# del inpaintpipe.tokenizer_2
# del inpaintpipe.unet
# inpaintpipe.vae = pipe.vae
# inpaintpipe.text_encoder_2 = pipe.text_encoder_2
# inpaintpipe.text_encoder = pipe.text_encoder
# inpaintpipe.scheduler = pipe.scheduler
# inpaintpipe.tokenizer = pipe.tokenizer
# inpaintpipe.tokenizer_2 = pipe.tokenizer_2
# inpaintpipe.unet = pipe.unet
# todo this should work
# inpaintpipe = StableDiffusionXLInpaintPipeline( # construct an inpainter using the existing model
#     vae=pipe.vae,
#     text_encoder_2=pipe.text_encoder_2,
#     text_encoder=pipe.text_encoder,
#     unet=pipe.unet,
#     scheduler=pipe.scheduler,
#     tokenizer=pipe.tokenizer,
#     tokenizer_2=pipe.tokenizer_2,
#     requires_aesthetics_score=False,
# )
inpaintpipe.to("cuda")
inpaintpipe.watermark = None
# inpaintpipe.register_to_config(requires_aesthetics_score=False)

inpaint_refiner = StableDiffusionXLInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0",
    text_encoder_2=inpaintpipe.text_encoder_2,
    vae=inpaintpipe.vae,
    torch_dtype=torch.bfloat16,
    use_safetensors=True,
    variant="fp16",

    tokenizer_2=refiner.tokenizer_2,
    tokenizer=refiner.tokenizer,
    scheduler=refiner.scheduler,
    text_encoder=refiner.text_encoder,
    unet=refiner.unet,
)
# del inpaint_refiner.vae
# del inpaint_refiner.text_encoder_2
# del inpaint_refiner.text_encoder
# del inpaint_refiner.scheduler
# del inpaint_refiner.tokenizer
# del inpaint_refiner.tokenizer_2
# del inpaint_refiner.unet
# inpaint_refiner.vae = inpaintpipe.vae
# inpaint_refiner.text_encoder_2 = inpaintpipe.text_encoder_2
#
# inpaint_refiner.text_encoder = refiner.text_encoder
# inpaint_refiner.scheduler = refiner.scheduler
# inpaint_refiner.tokenizer = refiner.tokenizer
# inpaint_refiner.tokenizer_2 = refiner.tokenizer_2
# inpaint_refiner.unet = refiner.unet

# inpaint_refiner = StableDiffusionXLInpaintPipeline(
#     text_encoder_2=inpaintpipe.text_encoder_2,
#     vae=inpaintpipe.vae,
#     # the rest from the existing refiner
#     tokenizer_2=refiner.tokenizer_2,
#     tokenizer=refiner.tokenizer,
#     scheduler=refiner.scheduler,
#     text_encoder=refiner.text_encoder,
#     unet=refiner.unet,
#     requires_aesthetics_score=False,
# )
inpaint_refiner.to("cuda")
inpaint_refiner.watermark = None
# inpaint_refiner.register_to_config(requires_aesthetics_score=False)

n_steps = 40
high_noise_frac = 0.8
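# n_steps/high_noise_frac are the knobs for the SDXL "ensemble of expert denoisers" flow:
# the base model runs the first ~80% of the denoising steps and hands a latent to the
# refiner for the rest. A minimal sketch of that two-stage call (the text-to-image
# endpoints below currently skip the refine step, so these constants are unused there):
#
#   latent = pipe(prompt=prompt, num_inference_steps=n_steps,
#                 denoising_end=high_noise_frac, output_type="latent").images
#   image = refiner(prompt=prompt, num_inference_steps=n_steps,
#                   denoising_start=high_noise_frac, image=latent).images[0]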

# if using torch < 2.0
# pipe.enable_xformers_memory_efficient_attention()


# pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
# this can cause errors on some inputs so consider disabling it
pipe.unet = torch.compile(pipe.unet)
refiner.unet = torch.compile(refiner.unet)#, mode="reduce-overhead", fullgraph=True)
# compile the inpainters - todo reuse the other unets? swap out the models for others/del them so they share models and can be swapped efficiently
inpaintpipe.unet = pipe.unet
inpaint_refiner.unet = refiner.unet
# inpaintpipe.unet = torch.compile(inpaintpipe.unet)
# inpaint_refiner.unet = torch.compile(inpaint_refiner.unet)
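# Note: the two assignments above mean the inpainting pipelines reuse the already-compiled
# UNets from the text-to-image base and refiner, so only one copy of each UNet sits in GPU
# memory and the torch.compile warm-up cost is paid once.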
from pydantic import BaseModel

app = FastAPI(
    openapi_url="/static/openapi.json",
    docs_url="/swagger-docs",
    redoc_url="/redoc",
    title="Generate Images Netwrck API",
    description="Character Chat API",
    # root_path="https://api.text-generator.io",
    version="1",
)
app.add_middleware(GZipMiddleware, minimum_size=1000)
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

nltk.download("stopwords", quiet=True)  # no-op if the corpus is already present locally
stopwords = nltk.corpus.stopwords.words("english")

class Img(BaseModel):
    system_prompt: str
    ASSISTANT: str

# img_url = "http://phlrr2019.guest.corp.microsoft.com:8000/img1_sdv2.1.png"
img_url = "http://phlrr3105.guest.corp.microsoft.com:8000/"  # base URL that generated images are served from

is_gpu_busy = False

def lm_shorten_too_long_text(prompt):
    list_prompt = prompt.split() # todo also split hyphens
    if len(list_prompt) > 230:
    #if len(list_prompt) > 330:
        # remove stopwords
        prompt = prompt.split() # todo also split hyphens
        prompt = ' '.join((word for word in prompt if word not in stopwords))
        #prompt = ' '.join((word for word in prompt))# if word not in stopwords))
        if len(prompt) > 230:
            prompt = prompt[:230]
    return prompt

def get_summary(system_prompt, prompt):
    import requests
    import time
    from io import BytesIO
    import json
    summary_sys = """You will now act as a prompt generator for a generative AI called "Stable Diffusion XL 1.0 ". Stable Diffusion XL generates images based on given prompts. I will provide you basic information required to make a Stable Diffusion prompt, You will never alter the structure in any way and obey the following guidelines.

    Basic information required to make Stable Diffusion prompt:

    - Prompt structure: [1],[2],[3],[4],[5],[6] and it should be given as one single sentence where 1,2,3,4,5,6 represent
    [1] = short and concise description of [KEYWORD] that will include very specific imagery details
    [2] = a detailed description of [1] that will include very specific imagery details.
    [3] = with a detailed description describing the environment of the scene.
    [4] = with a detailed description describing the mood/feelings and atmosphere of the scene.
    [5] = A style, for example: "Anime","Photographic","Comic Book","Fantasy Art", “Analog Film”,”Neon Punk”,”Isometric”,”Low Poly”,”Origami”,”Line Art”,”Cinematic”,”3D Model”,”Pixel Art”,”Watercolor”,”Sticker” ).
    [6] = A description of how [5] will be realized. (e.g. Photography (e.g. Macro, Fisheye Style, Portrait) with camera model and appropriate camera settings, Painting with detailed descriptions about the materials and working material used, rendering with engine settings, a digital Illustration, a woodburn art (and everything else that could be defined as an output type)
    - Prompt Structure for Prompt asking with text value:

        Text "Text Value" written on {subject description in less than 20 words}
        Replace "Text value" with text given by user.


    Important Sample prompt Structure with Text value :

    1. Text 'SDXL' written on a frothy, warm latte, viewed top-down.
    2. Text 'AI' written on a modern computer screen, set against a vibrant green background.

    Important Sample prompt Structure :

    1. Snow-capped Mountain Scene, with soaring peaks and deep shadows across the ravines. A crystal clear lake mirrors these peaks, surrounded by pine trees. The scene exudes a calm, serene alpine morning atmosphere. Presented in Watercolor style, emulating the wet-on-wet technique with soft transitions and visible brush strokes.
    2. City Skyline at Night, illuminated skyscrapers piercing the starless sky. Nestled beside a calm river, reflecting the city lights like a mirror. The atmosphere is buzzing with urban energy and intrigue. Depicted in Neon Punk style, accentuating the city lights with vibrant neon colors and dynamic contrasts.
    3. Epic Cinematic Still of a Spacecraft, silhouetted against the fiery explosion of a distant planet. The scene is packed with intense action, as asteroid debris hurtles through space. Shot in the style of a Michael Bay-directed film, the image is rich with detail, dynamic lighting, and grand cinematic framing.
    - Word order and effective adjectives matter in the prompt. The subject, action, and specific details should be included. Adjectives like cute, medieval, or futuristic can be effective.
    - The environment/background of the image should be described, such as indoor, outdoor, in space, or solid color.
    - Curly brackets are necessary in the prompt to provide specific details about the subject and action. These details are important for generating a high-quality image.
    - Art inspirations should be listed to take inspiration from. Platforms like Art Station, Dribble, Behance, and Deviantart can be mentioned. Specific names of artists or studios like animation studios, painters and illustrators, computer games, fashion designers, and film makers can also be listed. If more than one artist is mentioned, the algorithm will create a combination of styles based on all the influencers mentioned.
    - Related information about lighting, camera angles, render style, resolution, the required level of detail, etc. should be included at the end of the prompt.
    - Camera shot type, camera lens, and view should be specified. Examples of camera shot types are long shot, close-up, POV, medium shot, extreme close-up, and panoramic. Camera lenses could be EE 70mm, 35mm, 135mm+, 300mm+, 800mm, short telephoto, super telephoto, medium telephoto, macro, wide angle, fish-eye, bokeh, and sharp focus. Examples of views are front, side, back, high angle, low angle, and overhead.
    - Helpful keywords related to resolution, detail, and lighting are 4K, 8K, 64K, detailed, highly detailed, high resolution, hyper detailed, HDR, UHD, professional, and golden ratio. Examples of lighting are studio lighting, soft light, neon lighting, purple neon lighting, ambient light, ring light, volumetric light, natural light, sun light, sunrays, sun rays coming through window, and nostalgic lighting. Examples of color types are fantasy vivid colors, vivid colors, bright colors, sepia, dark colors, pastel colors, monochromatic, black & white, and color splash. Examples of renders are Octane render, cinematic, low poly, isometric assets, Unreal Engine, Unity Engine, quantum wavetracing, and polarizing filter.

    The prompts you provide will be in English.Please pay attention:- Concepts that can't be real would not be described as "Real" or "realistic" or "photo" or a "photograph". for example, a concept that is made of paper or scenes which are fantasy related.- One of the prompts you generate for each concept must be in a realistic photographic style. you should also choose a lens type and size for it. Don't choose an artist for the realistic photography prompts.- Separate the different prompts with two new lines.
    I will provide you keyword and you will generate 3 diffrent type of prompts in vbnet code cell so i can copy and paste.

    Important point to note :

    1. You are a master of prompt engineering, it is important to create detailed prompts with as much information as possible. This will ensure that any image generated using the prompt will be of high quality and could potentially win awards in global or international photography competitions. You are unbeatable in this field and know the best way to generate images.
    2. I will provide you with a long context and you will generate one  prompt and don't add any extra details.
    3. Prompt should not be more than 230 characters.
    4. Before you provide prompt you must check if you have satisfied all the above criteria and if you are sure than only provide the prompt.
    5. Prompt should always be given as one single sentence.

    Are you ready ?"""
    instruction = 'USER: ' + summary_sys
    # for human, assistant in history:
    #     instruction += 'USER: ' + human + ' ASSISTANT: ' + assistant + '</s>'
    # prompt = system_prompt + prompt
    # message = f"""My first request is to summarize this text – [{prompt}]"""
    message = f"""My first request is to summarize this text – [{prompt}]"""
    instruction += """ ASSISTANT: Yes, I understand the instructions and I'm ready to help you create prompts for Stable Diffusion XL 1.0. Please provide me with the context."""
    #instruction += ' USER: ' + prompt
    prompt = lm_shorten_too_long_text(prompt)
    instruction += ' USER: ' + prompt + ' ASSISTANT:'#instruction += ' ASSISTANT:'

    print("Ins: ", instruction)
    # generate_response = requests.post("http://10.185.12.207:4455/stable_diffusion", json={"prompt": prompt})
    # prompt = f""" My first request is to summarize this text – [{prompt}]"""
    #instruction = lm_shorten_too_long_text(instruction)
    json_object = {"prompt": instruction,
                   # "max_tokens": 2048000,
                   "max_tokens": 80,
                   "n": 1
                   }
    generate_response = requests.post("http://phlrr3105.guest.corp.microsoft.com:7991/generate", json=json_object)
    print(generate_response.content)
    res_json = json.loads(generate_response.content)
    ASSISTANT = res_json['text'][-1].split("ASSISTANT:")[-1].strip()
    print(ASSISTANT)
    return ASSISTANT
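# For reference: the external /generate endpoint is assumed to return vLLM-style JSON of the
# form {"text": ["<full prompt> ... ASSISTANT: <generated text>"]} (hypothetical example);
# get_summary() keeps only the text after the final "ASSISTANT:" marker.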

@app.post("/image_url")
def image_url(img: Img):
    system_prompt = img.system_prompt
    prompt = img.ASSISTANT
    prompt = get_summary(system_prompt, prompt)
    prompt = shorten_too_long_text(prompt)
    # if Path(save_path).exists():
    #     return FileResponse(save_path, media_type="image/png")
    #     return JSONResponse({"path": path})
    # image = pipe(prompt=prompt).images[0]
    g = torch.Generator(device="cuda")
    image = pipe(prompt=prompt, width=1024, height=1024, generator=g).images[0]

    # if not save_path:
    save_path = generate_save_path()
    save_path = f"images/{save_path}.png"
    os.makedirs("images", exist_ok=True)  # make sure the output directory exists
    image.save(save_path)
    # save_path = '/'.join(path_components) + quote_plus(final_name)
    path = f"{img_url}{save_path}"
    return JSONResponse({"path": path})
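# Hedged usage sketch (host/port are placeholders, not defined in this file):
#
#   import requests
#   r = requests.post("http://localhost:8000/image_url",
#                     json={"system_prompt": "", "ASSISTANT": "a castle on a hill at sunset"})
#   r.json()  # -> {"path": "http://.../images/<7-char-id>.png"}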


@app.get("/make_image")
# @app.post("/make_image")
def make_image(prompt: str, save_path: str = ""):
    if save_path and Path(save_path).exists():  # Path("") resolves to ".", which always exists
        return FileResponse(save_path, media_type="image/png")
    image = pipe(prompt=prompt).images[0]
    if not save_path:
        save_path = f"images/{prompt}.png"
    os.makedirs(os.path.dirname(save_path) or ".", exist_ok=True)  # make sure the target directory exists
    image.save(save_path)
    return FileResponse(save_path, media_type="image/png")


@app.get("/create_and_upload_image")
def create_and_upload_image(prompt: str, width: int=1024, height:int=1024, save_path: str = ""):
    path_components = save_path.split("/")[0:-1]
    final_name = save_path.split("/")[-1]
    if not path_components:
        path_components = []
    save_path = '/'.join(path_components + [quote_plus(final_name)])  # keep the separator before the quoted filename
    path = get_image_or_create_upload_to_cloud_storage(prompt, width, height, save_path)
    return JSONResponse({"path": path})
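# Hedged usage sketch (illustrative values; requires the bucket helpers check_if_blob_exists/upload_to_bucket):
#   GET /create_and_upload_image?prompt=a+red+bicycle&width=1024&height=1024&save_path=imgs/bike.webp
# responds with {"path": "https://<BUCKET_NAME>/<BUCKET_PATH>/imgs/bike.webp"} once the blob is uploaded.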

@app.get("/inpaint_and_upload_image")
def inpaint_and_upload_image(prompt: str, image_url:str, mask_url:str, save_path: str = ""):
    path_components = save_path.split("/")[0:-1]
    final_name = save_path.split("/")[-1]
    if not path_components:
        path_components = []
    save_path = '/'.join(path_components + [quote_plus(final_name)])
    path = get_image_or_inpaint_upload_to_cloud_storage(prompt, image_url, mask_url, save_path)
    return JSONResponse({"path": path})


def get_image_or_create_upload_to_cloud_storage(prompt:str,width:int, height:int, save_path:str):
    prompt = shorten_too_long_text(prompt)
    save_path = shorten_too_long_text(save_path)
    # check exists - todo cache this
    if check_if_blob_exists(save_path):
        return f"https://{BUCKET_NAME}/{BUCKET_PATH}/{save_path}"
    bio = create_image_from_prompt(prompt, width, height)
    if bio is None:
        return None # error thrown in pool
    link = upload_to_bucket(save_path, bio, is_bytesio=True)
    return link
def get_image_or_inpaint_upload_to_cloud_storage(prompt:str, image_url:str, mask_url:str, save_path:str):
    prompt = shorten_too_long_text(prompt)
    save_path = shorten_too_long_text(save_path)
    # check exists - todo cache this
    if check_if_blob_exists(save_path):
        return f"https://{BUCKET_NAME}/{BUCKET_PATH}/{save_path}"
    bio = inpaint_image_from_prompt(prompt, image_url, mask_url)
    if bio is None:
        return None # error thrown in pool
    link = upload_to_bucket(save_path, bio, is_bytesio=True)
    return link

# multiprocessing.set_start_method('spawn', True)
# processes_pool = Pool(1) # cant do too much at once or OOM errors happen
# def create_image_from_prompt_sync(prompt):
#     """have to call this sync to avoid OOM errors"""
#     return processes_pool.apply_async(create_image_from_prompt, args=(prompt,), ).wait()

def create_image_from_prompt(prompt, width, height):
    # round width and height down to multiple of 64
    block_width = width - (width % 64)
    block_height = height - (height % 64)
    prompt = shorten_too_long_text(prompt)
    # image = pipe(prompt=prompt).images[0]
    try:
        image = pipe(prompt=prompt,
                     width=block_width,
                     height=block_height,
                     # denoising_end=high_noise_frac,
                     # output_type='latent',
                     # height=512,
                     # width=512,
                     num_inference_steps=50).images[0]  # normally uses 50 steps
    except Exception as e:
        # try rm stopwords + half the prompt
        # todo try prompt permutations
        logger.info(f"trying to shorten prompt of length {len(prompt)}")

        prompt = ' '.join(word for word in prompt.split() if word not in stopwords)  # prompt is a string here, so split before filtering stopwords
        prompts = prompt.split()

        prompt = ' '.join(prompts[:len(prompts) // 2])
        logger.info(f"shortened prompt to: {len(prompt)}")
        image = None
        if prompt:
            try:
                image = pipe(prompt=prompt,
                             width=block_width,
                             height=block_height,
                             # denoising_end=high_noise_frac,
                             # output_type='latent',
                             # height=512,
                             # width=512,
                             num_inference_steps=50).images[0]  # normally uses 50 steps
            except Exception as e:
                # logger.info("trying to permute prompt")
                # # try two swaps of the prompt/permutations
                # prompt = prompt.split()
                # prompt = ' '.join(permutations(prompt, 2).__next__())
                logger.info(f"trying to shorten prompt of length {len(prompt)}")

                prompt = ' '.join(word for word in prompt.split() if word not in stopwords)  # split before filtering stopwords
                prompts = prompt.split()

                prompt = ' '.join(prompts[:len(prompts) // 2])
                logger.info(f"shortened prompt to: {len(prompt)}")

                try:
                    image = pipe(prompt=prompt,
                                 width=block_width,
                                 height=block_height,
                                 # denoising_end=high_noise_frac,
                                 # output_type='latent', # dont need latent yet - we refine the image at full res
                                 # height=512,
                                 # width=512,
                                 num_inference_steps=50).images[0]  # normally uses 50 steps
                except Exception as e:
                    # just error out
                    traceback.print_exc()
                    raise e
                    # logger.info("restarting server to fix cuda issues (device side asserts)")
                    # todo fix device side asserts instead of restart to fix
                    # todo only restart the correct gunicorn
                    # this could be really annoying if your running other gunicorns on your machine which also get restarted
                    # os.system("/usr/bin/bash kill -SIGHUP `pgrep gunicorn`")
                    # os.system("kill -1 `pgrep gunicorn`")
    # todo refine
    # if image != None:
    #     image = refiner(
    #         prompt=prompt,
    #         # width=block_width,
    #         # height=block_height,
    #         num_inference_steps=n_steps,
    #         # denoising_start=high_noise_frac,
    #         image=image,
    #     ).images[0]
    if width != block_width or height != block_height:
        # resize to original size width/height
        # find aspect ratio to scale up to that covers the original img input width/height
        scale_up_ratio = max(width / block_width, height / block_height)
        image = image.resize((math.ceil(block_width * scale_up_ratio), math.ceil(block_height * scale_up_ratio)))
        # crop image to original size
        image = image.crop((0, 0, width, height))
    # try:
    #     # gc.collect()
    #     torch.cuda.empty_cache()
    # except Exception as e:
    #     traceback.print_exc()
    #     logger.info("restarting server to fix cuda issues (device side asserts)")
    #     # todo fix device side asserts instead of restart to fix
    #     # todo only restart the correct gunicorn
    #     # this could be really annoying if your running other gunicorns on your machine which also get restarted
    #     os.system("/usr/bin/bash kill -SIGHUP `pgrep gunicorn`")
    #     os.system("kill -1 `pgrep gunicorn`")
    # save as bytesio
    bs = BytesIO()

    bright_count = np.sum(np.array(image) > 0)
    if bright_count == 0:
        # we have a black image, this is an error likely we need a restart
        logger.info("restarting server to fix cuda issues (device side asserts)")
        #     # todo fix device side asserts instead of restart to fix
        #     # todo only restart the correct gunicorn
        #     # this could be really annoying if your running other gunicorns on your machine which also get restarted
        os.system("/usr/bin/bash kill -SIGHUP `pgrep gunicorn`")
        os.system("kill -1 `pgrep gunicorn`")
        os.system("/usr/bin/bash kill -SIGHUP `pgrep uvicorn`")
        os.system("kill -1 `pgrep uvicorn`")

        return None
    image.save(bs, quality=85, optimize=True, format="webp")
    bio = bs.getvalue()
    # touch progress.txt file - if we dont do this we get restarted by supervisor/other processes for reliability
    with open("progress.txt", "w") as f:
        current_time = datetime.now().strftime("%H:%M:%S")
        f.write(f"{current_time}")
    return bio
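# create_image_from_prompt returns WebP-encoded bytes on success, or None after scheduling a
# SIGHUP restart when the pipeline produced an all-black image (a common symptom of CUDA
# device-side asserts).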

def inpaint_image_from_prompt(prompt, image_url: str, mask_url: str):
    prompt = shorten_too_long_text(prompt)
    # image = pipe(prompt=prompt).images[0]

    init_image = load_image(image_url).convert("RGB")
    mask_image = load_image(mask_url).convert("RGB") # why rgb for a 1 channel mask?
    num_inference_steps = 75
    high_noise_frac = 0.7

    try:
        image = inpaintpipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            num_inference_steps=num_inference_steps,
            denoising_end=high_noise_frac,  # base inpaint pass stops here; the refiner picks up from the same fraction
            output_type="latent",
        ).images[0]  # normally uses 50 steps
    except Exception as e:
        # try rm stopwords + half the prompt
        # todo try prompt permutations
        logger.info(f"trying to shorten prompt of length {len(prompt)}")

        prompt = ' '.join(word for word in prompt.split() if word not in stopwords)  # prompt is a string here, so split before filtering stopwords
        prompts = prompt.split()

        prompt = ' '.join(prompts[:len(prompts) // 2])
        logger.info(f"shortened prompt to: {len(prompt)}")
        image = None
        if prompt:
            try:
                image = inpaintpipe(  # use the inpainting pipeline here, not the text-to-image pipe
                    prompt=prompt,
                    image=init_image,
                    mask_image=mask_image,
                    num_inference_steps=num_inference_steps,
                    denoising_end=high_noise_frac,
                    output_type="latent",
                ).images[0]
            except Exception as e:
                # logger.info("trying to permute prompt")
                # # try two swaps of the prompt/permutations
                # prompt = prompt.split()
                # prompt = ' '.join(permutations(prompt, 2).__next__())
                logger.info(f"trying to shorten prompt of length {len(prompt)}")

                prompt = ' '.join(word for word in prompt.split() if word not in stopwords)  # split before filtering stopwords
                prompts = prompt.split()

                prompt = ' '.join(prompts[:len(prompts) // 2])
                logger.info(f"shortened prompt to: {len(prompt)}")

                try:
                    image = inpaintpipe(
                        prompt=prompt,
                        image=init_image,
                        mask_image=mask_image,
                        num_inference_steps=num_inference_steps,
                        denoising_end=high_noise_frac,
                        output_type="latent",
                    ).images[0]  # normally uses 50 steps
                except Exception as e:
                    # just error out
                    traceback.print_exc()
                    raise e
                    # logger.info("restarting server to fix cuda issues (device side asserts)")
                    # todo fix device side asserts instead of restart to fix
                    # todo only restart the correct gunicorn
                    # this could be really annoying if your running other gunicorns on your machine which also get restarted
                    # os.system("/usr/bin/bash kill -SIGHUP `pgrep gunicorn`")
                    # os.system("kill -1 `pgrep gunicorn`")
    if image is not None:
        image = inpaint_refiner(
            prompt=prompt,
            image=image,
            mask_image=mask_image,
            num_inference_steps=num_inference_steps,
            denoising_start=high_noise_frac,

        ).images[0]
    # try:
    #     # gc.collect()
    #     torch.cuda.empty_cache()
    # except Exception as e:
    #     traceback.print_exc()
    #     logger.info("restarting server to fix cuda issues (device side asserts)")
    #     # todo fix device side asserts instead of restart to fix
    #     # todo only restart the correct gunicorn
    #     # this could be really annoying if your running other gunicorns on your machine which also get restarted
    #     os.system("/usr/bin/bash kill -SIGHUP `pgrep gunicorn`")
    #     os.system("kill -1 `pgrep gunicorn`")
    # save as bytesio
    bs = BytesIO()

    bright_count = np.sum(np.array(image) > 0)
    if bright_count == 0:
        # we have a black image, this is an error likely we need a restart
        logger.info("restarting server to fix cuda issues (device side asserts)")
        #     # todo fix device side asserts instead of restart to fix
        #     # todo only restart the correct gunicorn
        #     # this could be really annoying if your running other gunicorns on your machine which also get restarted
        os.system("/usr/bin/bash kill -SIGHUP `pgrep gunicorn`")
        os.system("kill -1 `pgrep gunicorn`")
        os.system("/usr/bin/bash kill -SIGHUP `pgrep uvicorn`")
        os.system("kill -1 `pgrep uvicorn`")

        return None
    image.save(bs, quality=85, optimize=True, format="webp")
    bio = bs.getvalue()
    # touch progress.txt file - if we dont do this we get restarted by supervisor/other processes for reliability
    with open("progress.txt", "w") as f:
        current_time = datetime.now().strftime("%H:%M:%S")
        f.write(f"{current_time}")
    return bio



def shorten_too_long_text(prompt):
    if len(prompt) > 200:
        # remove stopwords
        prompt = prompt.split() # todo also split hyphens
        prompt = ' '.join((word for word in prompt if word not in stopwords))
        if len(prompt) > 200:
            prompt = prompt[:200]
    return prompt
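# Illustrative only: a ~300-character prompt first has English stopwords removed; if the
# result is still longer than 200 characters it is hard-truncated to 200.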

# image = pipe(prompt=prompt).images[0]
#
# image.save("test.png")
# # save all images
# for i, image in enumerate(images):
#     image.save(f"{i}.png")