import os
import random

import gradio as gr
import numpy as np
import spaces  # [uncomment to use ZeroGPU]
import torch
from diffusers import (
    AutoencoderTiny,  # referenced by the optional tiny-VAE swap below
    DiffusionPipeline,
    EulerDiscreteScheduler,
)
from huggingface_hub import login

# Authenticate with the Hugging Face Hub via the `hf_key` secret/env var.
# Guarded so the app still starts when no token is configured.
hf_token = os.getenv("hf_key")
if hf_token:
    login(token=hf_token)

device = "cuda" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

# model_repo_id = "stabilityai/sdxl-turbo"  # Replace with the model you would like to use
model_repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"

"""
pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
pipe = pipe.to(device)

###### delete this later if needed
"nota-ai/bk-sdm-small",
"""

MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024

# Default negative prompt. Defined but not wired into the UI; pass it as the
# `value=` of the negative-prompt textbox below to apply it by default.
negative_prompt1 = """normal quality, low quality, low res, blurry, distortion, text, watermark, logo, banner, extra digits, cropped, jpeg artifacts, signature, username, error, sketch, duplicate, ugly, monochrome, horror, geometry, mutation, disgusting, bad anatomy, bad proportions, bad quality, deformed, disconnected limbs, out of frame, out of focus, dehydrated, disfigured, extra arms, extra limbs, extra hands, fused fingers, gross proportions, long neck, jpeg, malformed limbs, mutated, mutated hands, mutated limbs, missing arms, missing fingers, picture frame, poorly drawn hands, poorly drawn face, collage, pixel, pixelated, grainy, color aberration, amputee, autograph, bad illustration, beyond the borders, blank background, body out of frame, boring background, branding, cut off, dismembered, disproportioned, distorted, draft, duplicated features, extra fingers, extra legs, fault, flaw, grains, hazy, identifying mark, improper scale, incorrect physiology, incorrect ratio, indistinct, kitsch, low resolution, macabre, malformed, mark, misshapen, missing hands, missing legs, mistake, morbid, mutilated, off-screen, outside the picture, poorly drawn feet, printed words, render, repellent, replicate, reproduce, revolting dimensions, script, shortened, sign, split image, squint, storyboard, tiling, trimmed, unfocused, unattractive, unnatural pose, unreal engine, unsightly, written language"""

# Candidate distilled SD checkpoints; var_2 is the one actually loaded.
var_1 = "nota-ai/bk-sdm-base-2m"
var_2 = "nota-ai/bk-sdm-small"

pipe = DiffusionPipeline.from_pretrained(
    var_2, torch_dtype=torch_dtype, use_safetensors=True
)
# Optional: swap in a tiny VAE for faster decoding.
# pipe.vae = AutoencoderTiny.from_pretrained(
#     "sayakpaul/taesd-diffusers", torch_dtype=torch_dtype, use_safetensors=True)
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
print(pipe.scheduler.compatibles)  # Debug: list schedulers compatible with this pipeline
# pipe.load_lora_weights("Natural_Flaccid_Penis.safetensors")
pipe = pipe.to(device)
pipe.enable_vae_tiling()


@spaces.GPU(duration=25)  # [uncomment to use ZeroGPU]
def infer(
    prompt,
    negative_prompt,
    seed,
    randomize_seed,
    width,
    height,
    guidance_scale,
    num_inference_steps,
    progress=gr.Progress(track_tqdm=True),
):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    generator = torch.Generator().manual_seed(seed)

    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=generator,
    ).images[0]

    return image, seed
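
# --- Optional local smoke test (a minimal sketch, not part of the Space UI). ---
# Uncomment to render one image by calling infer() directly, without launching
# Gradio. The SMOKE_TEST env var and the output filename are illustrative
# assumptions, not part of the original template.
# if os.getenv("SMOKE_TEST"):
#     test_image, used_seed = infer(
#         prompt="A delicious ceviche cheesecake slice",
#         negative_prompt=negative_prompt1,
#         seed=0,
#         randomize_seed=False,
#         width=512,
#         height=512,
#         guidance_scale=8.0,
#         num_inference_steps=25,
#     )
#     test_image.save("smoke_test.png")
#     print(f"saved smoke_test.png (seed={used_seed})")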
examples = [
    """cinematic, Two burly, middle-aged Turkish daddies—thick-mustached, salt-and-pepper-haired, with barrel chests and round, hairy bellies spilling from snug white briefs—lounge on a couch, flexing meaty biceps and thick thighs. The camera, propped on a tripod, captures their playful vlog as they smirk, teasing the lens with deep chuckles and exaggerated poses. Sunlight glints off sweat-sheened skin, their robust physiques shifting with every boastful stretch—biceps bulging, bellies jiggling—while thick fingers adjust the phone, framing their confident, flirtatious display. 8k""",
    "An astronaut riding a green horse",
    "A delicious ceviche cheesecake slice",
    "huge muscle man , big penis , dick ",
]

css = """
#col-container {
    margin: 0 auto;
    max-width: 640px;
}
"""

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("# Text-to-Image Gradio Template")

        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )

            run_button = gr.Button("Run", scale=0, variant="primary")

        result = gr.Image(label="Result", show_label=False)

        with gr.Accordion("Advanced Settings", open=False):
            negative_prompt = gr.Text(
                label="Negative prompt",
                max_lines=1,
                placeholder="Enter a negative prompt",
                visible=True,
            )

            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=8,
            )

            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)

            with gr.Row():
                width = gr.Slider(
                    label="Width",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=512,  # Replace with defaults that work for your model
                )

                height = gr.Slider(
                    label="Height",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=512,  # Replace with defaults that work for your model
                )

            with gr.Row():
                guidance_scale = gr.Slider(
                    label="Guidance scale",
                    minimum=0.0,
                    maximum=10.0,
                    step=0.1,
                    value=8.0,  # Replace with defaults that work for your model
                )

                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=50,
                    step=1,
                    value=25,  # Replace with defaults that work for your model
                )

        gr.Examples(examples=examples, inputs=[prompt])

    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[
            prompt,
            negative_prompt,
            seed,
            randomize_seed,
            width,
            height,
            guidance_scale,
            num_inference_steps,
        ],
        outputs=[result, seed],
    )

if __name__ == "__main__":
    demo.launch()
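
# Usage notes (deployment assumptions, not part of the original template):
# - On Hugging Face Spaces, define an `hf_key` secret so the `login()` call
#   above can authenticate; the nota-ai checkpoints appear to be public, so
#   login is likely only needed for gated or private models.
# - Locally, `python app.py` serves the UI at http://127.0.0.1:7860 by default.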