import gradio as gr
import numpy as np
import random
import spaces  # required by the @spaces.GPU decorator below (ZeroGPU)
import torch
from diffusers import (
    AutoencoderTiny,
    DiffusionPipeline,
    DPMSolverMultistepScheduler,
    EulerDiscreteScheduler,
)
from huggingface_hub import login
import os

# Authenticate with the Hugging Face Hub using the token stored in the
# Space's "hf_key" secret.
hf_token = os.getenv("hf_key")
login(token=hf_token)
device = "cuda" if torch.cuda.is_available() else "cpu"

# model_repo_id = "stabilityai/sdxl-turbo"  # Replace with the model you would like to use
model_repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"

# Half precision on GPU halves memory use; CPU inference needs float32.
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
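# Hedged alternative for the dtype choice above: bfloat16 is often more
# numerically stable than float16 on GPUs that support it (assumption: recent
# NVIDIA hardware). Left commented out because float16 is the safe default here.
# if torch.cuda.is_available() and torch.cuda.is_bf16_supported():
#     torch_dtype = torch.bfloat16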
"""
pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
pipe = pipe.to(device) ###### это потом если что удалить "nota-ai/bk-sdm-small",
"""
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024
# Default negative prompt (currently unused by the UI; kept for reference).
negative_prompt1 = """normal quality, low quality, low res, blurry, distortion, text, watermark,
logo, banner, extra digits, cropped, jpeg artifacts, signature, username, error, sketch, duplicate, ugly,
monochrome, horror, geometry, mutation, disgusting, bad anatomy, bad proportions, bad quality, deformed,
disconnected limbs, out of frame, out of focus, dehydrated, disfigured, extra arms, extra limbs, extra hands,
fused fingers, gross proportions, long neck, jpeg, malformed limbs, mutated, mutated hands, mutated limbs,
missing arms, missing fingers, picture frame, poorly drawn hands, poorly drawn face, collage, pixel, pixelated,
grainy, color aberration, amputee, autograph, bad illustration, beyond the borders, blank background,
body out of frame, boring background, branding, cut off, dismembered, disproportioned, distorted, draft,
duplicated features, extra fingers, extra legs, fault, flaw, grains, hazy, identifying mark,
improper scale, incorrect physiology, incorrect ratio, indistinct, kitsch, low resolution, macabre,
malformed, mark, misshapen, missing hands, missing legs, mistake, morbid, mutilated, off-screen,
outside the picture, poorly drawn feet, printed words, render, repellent, replicate, reproduce,
revolting dimensions, script, shortened, sign, split image, squint, storyboard,
tiling, trimmed, unfocused, unattractive, unnatural pose, unreal engine, unsightly, written language"""
# Distilled Stable Diffusion variants from Nota AI (smaller and faster than the full model).
var_1 = "nota-ai/bk-sdm-base-2m"
var_2 = "nota-ai/bk-sdm-small"
pipe = DiffusionPipeline.from_pretrained(
    var_2, torch_dtype=torch_dtype, use_safetensors=True
)
# Optional: swap in a tiny VAE for faster decoding.
# pipe.vae = AutoencoderTiny.from_pretrained(
#     "sayakpaul/taesd-diffusers", torch_dtype=torch_dtype, use_safetensors=True)
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
print(pipe.scheduler.compatibles)  # log which schedulers this pipeline supports
# pipe.load_lora_weights("Natural_Flaccid_Penis.safetensors")
pipe = pipe.to(device)
pipe.enable_vae_tiling()  # decode the VAE in tiles to cut peak memory at large resolutions
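# A minimal sketch of two further options, left commented out as assumptions
# rather than requirements: DPMSolverMultistepScheduler (imported above) is a
# common drop-in replacement that tends to converge in fewer steps, and
# enable_attention_slicing() trades a little speed for lower attention memory.
# pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
# pipe.enable_attention_slicing()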
@spaces.GPU(duration=25)  # ZeroGPU: request a GPU slot for up to 25 s per call
def infer(
prompt,
negative_prompt,
seed,
randomize_seed,
width,
height,
guidance_scale,
num_inference_steps,
progress=gr.Progress(track_tqdm=True),
):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    # A CPU generator seeded explicitly keeps results reproducible per seed.
    generator = torch.Generator().manual_seed(seed)
image = pipe(
prompt=prompt,
negative_prompt=negative_prompt,
guidance_scale=guidance_scale,
num_inference_steps=num_inference_steps,
width=width,
height=height,
generator=generator,
).images[0]
return image, seed
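# A quick smoke test, as a hedged sketch: the values below are illustrative,
# and calling infer() outside a Gradio event may skip progress tracking.
# The SMOKE_TEST environment variable is a hypothetical guard, not part of
# the app.
# if os.getenv("SMOKE_TEST"):
#     image, used_seed = infer(
#         "An astronaut riding a green horse", "", seed=42, randomize_seed=False,
#         width=512, height=512, guidance_scale=8.0, num_inference_steps=25,
#     )
#     image.save("smoke_test.png")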
examples = ["""cinematic, Two burly, middle-aged Turkish daddies—thick-mustached,
salt-and-pepper-haired, with barrel chests and round,
hairy bellies spilling from snug white briefs—lounge on a couch,
flexing meaty biceps and thick thighs. The camera, propped on a tripod,
captures their playful vlog as they smirk,
teasing the lens with deep chuckles and exaggerated poses. Sunlight glints off sweat-sheened skin,
their robust physiques shifting with every boastful stretch—biceps bulging,
bellies jiggling—while thick fingers adjust the phone, framing their confident, flirtatious display. 8k""",
"An astronaut riding a green horse",
"A delicious ceviche cheesecake slice",
"huge muscle man , big penis , dick "
]
css = """
#col-container {
margin: 0 auto;
max-width: 640px;
}
"""
with gr.Blocks(css=css) as demo:
with gr.Column(elem_id="col-container"):
gr.Markdown(" # Text-to-Image Gradio Template")
with gr.Row():
prompt = gr.Text(
label="Prompt",
show_label=False,
max_lines=1,
placeholder="Enter your prompt",
container=False,
)
run_button = gr.Button("Run", scale=0, variant="primary")
result = gr.Image(label="Result", show_label=False)
with gr.Accordion("Advanced Settings", open=False):
negative_prompt = gr.Text(
label="Negative prompt",
max_lines=1,
placeholder="Enter a negative prompt",
visible=True,
)
seed = gr.Slider(
label="Seed",
minimum=0,
maximum=MAX_SEED,
step=1,
value=8,
)
randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
with gr.Row():
width = gr.Slider(
label="Width",
minimum=256,
maximum=MAX_IMAGE_SIZE,
step=32,
value=512, # Replace with defaults that work for your model
)
height = gr.Slider(
label="Height",
minimum=256,
maximum=MAX_IMAGE_SIZE,
step=32,
value=512, # Replace with defaults that work for your model
)
with gr.Row():
guidance_scale = gr.Slider(
label="Guidance scale",
minimum=0.0,
maximum=10.0,
step=0.1,
value=8.0, # Replace with defaults that work for your model
)
num_inference_steps = gr.Slider(
label="Number of inference steps",
minimum=1,
maximum=50,
step=1,
value=25, # Replace with defaults that work for your model
)
gr.Examples(examples=examples, inputs=[prompt])
gr.on(
triggers=[run_button.click, prompt.submit],
fn=infer,
inputs=[
prompt,
negative_prompt,
seed,
randomize_seed,
width,
height,
guidance_scale,
num_inference_steps,
],
outputs=[result, seed],
)
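    # Optional (assumption): queue requests so concurrent users share the
    # single pipeline instance instead of overloading it.
    # demo.queue(max_size=10)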
if __name__ == "__main__":
demo.launch()