import gradio as gr
# import torch
# from torch import autocast
# from diffusers import StableDiffusionPipeline
from datasets import load_dataset
from PIL import Image
from io import BytesIO
# import base64
# import re
import os
import requests
import json
import base64
# from urllib import parse

from share_btn import community_icon_html, loading_icon_html, share_js

is_gpu_busy = False


def safe_sd(prompt, n_samples, steps, scale, seed, mode):
    # Send a generation request to the hosted backend; endpoint and
    # credentials are read from environment variables.
    url = os.getenv('BACKEND_URL_SAFE_NEW')
    token = os.getenv('BACKEND_TOKEN')
    user = os.getenv('BACKEND_USER')
    res = requests.post(
        url,
        json={
            "model": "togethercomputer/UniversalSD",
            "prompt": prompt,
            "n": n_samples,
            "mode": mode,
            "steps": steps,
            "seed": seed,
            "guidance_scale": scale,
        },
        headers={
            "Authorization": token,
            "User-Agent": user,
        },
    )
    return res


def infer(prompt, n_samples, steps, scale, seed):
    global is_gpu_busy
    # generator = torch.Generator(device=device).manual_seed(seed)
    # print("Is GPU busy? ", is_gpu_busy)
    images = []
    if prompt == "":
        raise gr.Error("Empty prompt. Please provide a prompt.")

    # First pass: plain text-to-image generation (at least 50 steps).
    response = safe_sd(prompt, int(n_samples), max(50, int(steps)), scale, seed, mode="text2img")
    data = json.load(BytesIO(response.content))
    if 'output' not in data:
        raise gr.Error("An error occurred.")
    else:
        if data['output']['result_type'] == "error":
            raise gr.Error(data['output']['value'])
        for image in data['output']['choices']:
            im = Image.open(BytesIO(base64.b64decode(image['image_base64'])))
            images.append(im)

    # Second pass: the same request with safety guidance enabled.
    response = safe_sd(prompt, int(n_samples), max(50, int(steps)), scale, seed, mode="safe_text2img")
    data = json.load(BytesIO(response.content))
    if 'output' not in data:
        raise gr.Error("An error occurred.")
    else:
        for image in data['output']['choices']:
            im = Image.open(BytesIO(base64.b64decode(image['image_base64'])))
            images.append(im)

    return images
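# Hedged sketch (not used by this Space, which delegates generation to the
# hosted backend above): the same safety guidance can also be run locally via
# diffusers' StableDiffusionPipelineSafe, assuming a recent `diffusers`
# install, a CUDA device, and the "AIML-TUDA/stable-diffusion-safe" checkpoint.
#
# from diffusers import StableDiffusionPipelineSafe
# from diffusers.pipelines.stable_diffusion_safe import SafetyConfig
#
# safe_pipe = StableDiffusionPipelineSafe.from_pretrained(
#     "AIML-TUDA/stable-diffusion-safe"
# ).to("cuda")
# # SafetyConfig.MEDIUM is a preset dict of sld_* keyword arguments that steer
# # generation away from inappropriate content instead of blacking it out.
# local_image = safe_pipe(prompt="a photograph by vanessa beecroft", **SafetyConfig.MEDIUM).images[0]
#
# Keeping inference on the remote backend keeps this Space lightweight; the
# local pipeline is only sketched here for reference.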
css = """
        .gradio-container {
            font-family: 'IBM Plex Sans', sans-serif;
        }
        .gr-button {
            color: white;
            border-color: #3a669bff;
            background: #3a669bff;
        }
        input[type='range'] {
            accent-color: #3a669bff;
        }
        .dark input[type='range'] {
            accent-color: #3a669bff;
        }
        .container {
            max-width: 730px;
            margin: auto;
            padding-top: 1.5rem;
        }
        #gallery {
            min-height: 22rem;
            margin-bottom: 15px;
            margin-left: auto;
            margin-right: auto;
            border-bottom-right-radius: .5rem !important;
            border-bottom-left-radius: .5rem !important;
        }
        #gallery>div>.h-full {
            min-height: 20rem;
        }
        .details:hover {
            text-decoration: underline;
        }
        .gr-button {
            white-space: nowrap;
        }
        .gr-button:focus {
            border-color: rgb(147 197 253 / var(--tw-border-opacity));
            outline: none;
            box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
            --tw-border-opacity: 1;
            --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
            --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px + var(--tw-ring-offset-width)) var(--tw-ring-color);
            --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
            --tw-ring-opacity: .5;
        }
        #advanced-btn {
            font-size: .7rem !important;
            line-height: 19px;
            margin-top: 12px;
            margin-bottom: 12px;
            padding: 2px 8px;
            border-radius: 14px !important;
        }
        #advanced-options {
            display: none;
            margin-bottom: 20px;
        }
        .footer {
            margin-bottom: 45px;
            margin-top: 35px;
            text-align: center;
            border-bottom: 1px solid #e5e5e5;
        }
        .footer>p {
            font-size: .8rem;
            display: inline-block;
            padding: 0 10px;
            transform: translateY(10px);
            background: white;
        }
        .dark .footer {
            border-color: #303030;
        }
        .dark .footer>p {
            background: #0b0f19;
        }
        .acknowledgments h4 {
            margin: 1.25em 0 .25em 0;
            font-weight: bold;
            font-size: 115%;
        }
        #container-advanced-btns {
            display: flex;
            flex-wrap: wrap;
            justify-content: space-between;
            align-items: center;
        }
        .animate-spin {
            animation: spin 1s linear infinite;
        }
        @keyframes spin {
            from {
                transform: rotate(0deg);
            }
            to {
                transform: rotate(360deg);
            }
        }
        #share-btn-container {
            display: flex;
            padding-left: 0.5rem !important;
            padding-right: 0.5rem !important;
            background-color: #3a669bff;
            justify-content: center;
            align-items: center;
            border-radius: 9999px !important;
            width: 13rem;
        }
        #share-btn {
            all: initial;
            color: #ffffff;
            font-weight: 600;
            cursor: pointer;
            font-family: 'IBM Plex Sans', sans-serif;
            margin-left: 0.5rem !important;
            padding-top: 0.25rem !important;
            padding-bottom: 0.25rem !important;
        }
        #share-btn * {
            all: unset;
        }
        .gr-form {
            flex: 1 1 50%;
            border-top-right-radius: 0;
            border-bottom-right-radius: 0;
        }
        #prompt-container {
            gap: 0;
        }
"""

block = gr.Blocks(css=css)

examples = [
    [
        'a photograph by vanessa beecroft',
        1,
        50,
        7.5,
        24803839,
    ],
    [
        'a gorgeous female photo',
        1,
        50,
        7.5,
        733664822,
    ],
    [
        'a gorgeous male photo',
        1,
        50,
        7.5,
        881355,
    ],
    [
        'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c. leyendecker',
        1,
        50,
        7.5,
        557645701,
    ],
    [
        'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and children from bahnhof zoo, detailed ',
        1,
        50,
        9,
        1115417309,
    ],
    [
        'portrait of Sickly diseased dying Samurai warrior, sun shining, photo realistic illustration by greg rutkowski, thomas kindkade, alphonse mucha, loish, norman rockwell.',
        1,
        50,
        10,
        1714108957,
    ],
]

with block:
    gr.HTML(
        """
            <p>
                Safe Stable Diffusion extends Stable Diffusion with safety guidance: for NSFW content it returns the closest non-NSFW images instead of a black square.
                Details can be found in the <a href="https://arxiv.org/abs/2211.05105">Safe Latent Diffusion: Mitigating Inappropriate Degeneration in Diffusion Models</a> paper.
            </p>