import gradio as gr
import torch

from diffusers import AutoencoderKL, LMSDiscreteScheduler, UNet2DConditionModel
from PIL import Image
from torchvision import transforms as tfms
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer, logging

torch.manual_seed(1)
logging.set_verbosity_error()

# The demo runs entirely on CPU; change to "cuda" if a GPU is available.
torch_device = "cpu"

# Load the VAE (latent encoder/decoder), the CLIP tokenizer and text encoder
# (prompt conditioning), and the UNet (latent noise predictor).
vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae")

tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")

unet = UNet2DConditionModel.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="unet")

# K-LMS scheduler configured with the beta schedule Stable Diffusion v1 was trained with.
scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)

vae = vae.to(torch_device)
text_encoder = text_encoder.to(torch_device)
unet = unet.to(torch_device)

# Expose CLIP's token and position embedding layers so prompt embeddings can
# be assembled (and edited) by hand; CLIP's context length is 77 tokens.
token_emb_layer = text_encoder.text_model.embeddings.token_embedding
pos_emb_layer = text_encoder.text_model.embeddings.position_embedding
position_ids = text_encoder.text_model.embeddings.position_ids[:, :77]
position_embeddings = pos_emb_layer(position_ids)
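# For reference: position_embeddings has shape (1, 77, 768) for this CLIP
# ViT-L/14 text encoder, matching the token embeddings built below.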


def pil_to_latent(input_im):
    # Encode a PIL image to scaled VAE latents (adds a batch dimension of 1).
    with torch.no_grad():
        # Map pixel values from [0, 1] to [-1, 1] before encoding.
        latent = vae.encode(tfms.ToTensor()(input_im).unsqueeze(0).to(torch_device) * 2 - 1)
    return 0.18215 * latent.latent_dist.sample()


def latents_to_pil(latents):
    # Decode a batch of scaled latents back to a list of PIL images.
    latents = (1 / 0.18215) * latents
    with torch.no_grad():
        image = vae.decode(latents).sample
    image = (image / 2 + 0.5).clamp(0, 1)
    image = image.detach().cpu().permute(0, 2, 3, 1).numpy()
    images = (image * 255).round().astype("uint8")
    pil_images = [Image.fromarray(image) for image in images]
    return pil_images
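
# A quick sanity check of the round trip ("some_image.png" is a placeholder path):
#   im = Image.open("some_image.png").convert("RGB").resize((512, 512))
#   assert latents_to_pil(pil_to_latent(im))[0].size == (512, 512)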


def get_output_embeds(input_embeddings):
    # Push hand-built input embeddings through the CLIP text transformer.
    # Note: _build_causal_attention_mask is a private transformers API that was
    # removed in later releases, so this code is sensitive to the installed version.
    bsz, seq_len = input_embeddings.shape[:2]
    causal_attention_mask = text_encoder.text_model._build_causal_attention_mask(bsz, seq_len, dtype=input_embeddings.dtype)

    encoder_outputs = text_encoder.text_model.encoder(
        inputs_embeds=input_embeddings,
        attention_mask=None,
        causal_attention_mask=causal_attention_mask.to(torch_device),
        output_attentions=None,
        output_hidden_states=True,
        return_dict=None,
    )

    # The last hidden state plus the final layer norm reproduces what
    # text_encoder(input_ids)[0] would return.
    output = encoder_outputs[0]
    output = text_encoder.text_model.final_layer_norm(output)
    return output
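
# Illustrative check (hypothetical prompt): for an unmodified prompt, these
# embeddings should closely match the encoder's own output.
#   ids = tokenizer("a cat", padding="max_length", max_length=77, return_tensors="pt").input_ids.to(torch_device)
#   with torch.no_grad():
#       assert torch.allclose(text_encoder(ids)[0], get_output_embeds(token_emb_layer(ids) + position_embeddings), atol=1e-4)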


def generate_with_embs(text_embeddings, seed, max_length):
    # Full diffusion sampling loop with classifier-free guidance, conditioned
    # on precomputed text embeddings instead of a raw prompt.
    height = 512
    width = 512
    num_inference_steps = 30
    guidance_scale = 7.5
    generator = torch.manual_seed(seed)
    batch_size = 1

    # Unconditional (empty-prompt) embeddings for classifier-free guidance.
    uncond_input = tokenizer(
        [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
    )
    with torch.no_grad():
        uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
    text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

    set_timesteps(scheduler, num_inference_steps)

    # Random starting latents, scaled to the scheduler's initial noise level.
    latents = torch.randn(
        (batch_size, unet.config.in_channels, height // 8, width // 8),
        generator=generator,
    )
    latents = latents.to(torch_device)
    latents = latents * scheduler.init_noise_sigma

    for i, t in tqdm(enumerate(scheduler.timesteps), total=len(scheduler.timesteps)):
        # One UNet pass covers both the unconditional and conditional branch.
        latent_model_input = torch.cat([latents] * 2)
        latent_model_input = scheduler.scale_model_input(latent_model_input, t)

        with torch.no_grad():
            noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]

        # Classifier-free guidance: move the prediction toward the conditional branch.
        noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
        noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

        latents = scheduler.step(noise_pred, t, latents).prev_sample

    return latents_to_pil(latents)[0]
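
# Illustrative use with ordinary (unmodified) prompt embeddings:
#   ti = tokenizer("a cat", padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
#   with torch.no_grad():
#       emb = text_encoder(ti.input_ids.to(torch_device))[0]
#   image = generate_with_embs(emb, seed=32, max_length=ti.input_ids.shape[-1])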


def set_timesteps(scheduler, num_inference_steps):
    # Wrapper around scheduler.set_timesteps that also casts the timesteps to
    # float32, a known workaround for dtype issues with LMSDiscreteScheduler.
    scheduler.set_timesteps(num_inference_steps)
    scheduler.timesteps = scheduler.timesteps.to(torch.float32)


def eos_pos(prompt):
    # Position of the EOS token, assuming every whitespace-separated word maps
    # to exactly one CLIP token (position 0 is the BOS token).
    return len(prompt.split()) + 1
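
# Example: eos_pos("a cat") == 3, since the tokenized sequence is
# [BOS, "a", "cat", EOS]. Words that tokenize to multiple CLIP tokens break
# this one-word-one-token assumption.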


def embed_style(prompt, style_embed, style_seed):
    # Tokenize the prompt and build its token embeddings by hand so the
    # embedding in the EOS slot can be swapped for a learned style embedding.
    text_input = tokenizer(prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
    input_ids = text_input.input_ids.to(torch_device)

    token_embeddings = token_emb_layer(input_ids)

    # Overwrite the embedding at the EOS position with the style embedding.
    replacement_token_embedding = style_embed.to(torch_device)
    token_embeddings[0, eos_pos(prompt)] = replacement_token_embedding

    # Add positional embeddings, run the result through the text encoder, and generate.
    input_embeddings = token_embeddings + position_embeddings
    modified_output_embeddings = get_output_embeds(input_embeddings)

    max_length = text_input.input_ids.shape[-1]
    return generate_with_embs(modified_output_embeddings, style_seed, max_length)
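
# Illustrative call (assumes each .bin file below holds one embedding tensor
# of shape (768,), the CLIP ViT-L/14 hidden size):
#   style_embed = list(torch.load("dreamy_painting.bin").values())[0]
#   img = embed_style("a cat climbing a tree", style_embed, 16)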


def custom_loss(image):
    # Guidance loss: the standard deviation of the decoded images, reduced over
    # the channel and height axes and averaged to a scalar. The guidance loop
    # below descends its gradient, steering generations toward lower contrast.
    std_dev = torch.std(image, dim=(1, 2))
    loss = torch.mean(std_dev)
    return loss
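
# Shapes, for reference: a decoded batch has shape (B, 3, H, W), so
# torch.std(image, dim=(1, 2)) reduces channels and height to give (B, W),
# and torch.mean collapses that to a scalar.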


def generate_image_on_loss(prompt, seed):
    # Sampling loop with extra loss guidance: every fifth step, the predicted
    # clean image is decoded and the latents are nudged along the gradient of
    # custom_loss. The low 64x64 resolution keeps the gradient steps cheap on CPU.
    height = 64
    width = 64
    num_inference_steps = 50
    guidance_scale = 8
    generator = torch.manual_seed(seed)
    batch_size = 1
    loss_scale = 200

    text_input = tokenizer([prompt], padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
    with torch.no_grad():
        text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0]

    # Unconditional embeddings for classifier-free guidance.
    max_length = text_input.input_ids.shape[-1]
    uncond_input = tokenizer(
        [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
    )
    with torch.no_grad():
        uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
    text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

    set_timesteps(scheduler, num_inference_steps + 1)

    latents = torch.randn(
        (batch_size, unet.config.in_channels, height // 8, width // 8),
        generator=generator,
    )
    latents = latents.to(torch_device)
    latents = latents * scheduler.init_noise_sigma

    for i, t in tqdm(enumerate(scheduler.timesteps), total=len(scheduler.timesteps)):
        latent_model_input = torch.cat([latents] * 2)
        sigma = scheduler.sigmas[i]
        latent_model_input = scheduler.scale_model_input(latent_model_input, t)

        with torch.no_grad():
            noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]

        noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
        noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

        if i % 5 == 0 and i > 0:
            # Re-enable gradients on the latents for the guidance step.
            latents = latents.detach().requires_grad_()

            # scheduler.step advances an internal step index and is called again
            # at the bottom of the loop, so rewind the index before this extra
            # call (used only to obtain pred_original_sample).
            scheduler._step_index -= 1
            latents_x0 = scheduler.step(noise_pred, t, latents).pred_original_sample

            # Decode the predicted clean latents to image space in [0, 1].
            denoised_images = vae.decode((1 / 0.18215) * latents_x0).sample / 2 + 0.5

            loss = custom_loss(denoised_images) * loss_scale
            print(i, "loss:", loss.item())

            # Nudge the latents down the loss gradient, scaled by sigma^2.
            cond_grad = torch.autograd.grad(loss, latents)[0]
            latents = latents.detach() - cond_grad * sigma ** 2

        latents = scheduler.step(noise_pred, t, latents).prev_sample

    return latents_to_pil(latents)[0]


def generate_image_from_prompt(prompt, style):
    # Each pretrained style maps to a learned-embedding file and a fixed seed.
    style_list = ['dreamy_painting.bin', 'egorey.bin', 'fairy_tale_painting.bin', 'matrix.bin', 'pjablonski_style.bin']
    style_seeds = [16, 64, 32, 128, 8]

    style_file = style + '.bin'
    idx = style_list.index(style_file)
    style_seed = style_seeds[idx]

    # Each .bin file holds a single learned style embedding.
    style_dict = torch.load(style_file)
    style_embed = list(style_dict.values())

    generated_image = embed_style(prompt, style_embed[0], style_seed)
    loss_generated_img = generate_image_on_loss(prompt, style_seed)

    return [generated_image, loss_generated_img]


demo = gr.Interface(
    fn=generate_image_from_prompt,
    inputs=[
        gr.Textbox(label="Prompt", placeholder="Enter your prompt"),
        gr.Dropdown(
            ["dreamy_painting", "egorey", "fairy_tale_painting", "matrix", "pjablonski_style"],
            value="dreamy_painting",
            label="Pretrained Styles",
        ),
    ],
    outputs=[
        gr.Gallery(label="Generated images", show_label=False, elem_id="gallery", columns=2, rows=1, object_fit="contain", height=512)
    ],
    examples=[
        ["a cat climbing a tree", "dreamy_painting"]
    ],
)

demo.launch(debug=True)