merve (HF staff) committed on
Commit fc0f6ae
1 Parent(s): 2fa4154

Update app.py

Files changed (1)
  1. app.py +3 -254
app.py CHANGED
@@ -1,255 +1,4 @@
-
- import sys
- sys.path.append('src/blip')
- sys.path.append('src/clip')
-
- import clip
  import gradio as gr
- import hashlib
- import math
- import numpy as np
- import os
- import pickle
- import torch
- import torchvision.transforms as T
- import torchvision.transforms.functional as TF
-
- from models.blip import blip_decoder
- from PIL import Image
- from torch import nn
- from torch.nn import functional as F
- from tqdm import tqdm
-
- os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
- from diffusers import LDMTextToImagePipeline
- import random
- import subprocess
-
- device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-
- print("Loading BLIP model...")
- blip_image_eval_size = 384
- blip_model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_large_caption.pth'
- blip_model = blip_decoder(pretrained=blip_model_url, image_size=blip_image_eval_size, vit='large', med_config='./src/blip/configs/med_config.json')
- blip_model.eval()
- blip_model = blip_model.to(device)
-
- print("Loading CLIP model...")
- clip_model_name = 'ViT-L/14'
- clip_model, clip_preprocess = clip.load(clip_model_name, device=device)
- clip_model.to(device).eval()
-
- chunk_size = 2048
- flavor_intermediate_count = 2048
-
-
- class LabelTable():
-     def __init__(self, labels, desc):
-         self.labels = labels
-         self.embeds = []
-
-         hash = hashlib.sha256(",".join(labels).encode()).hexdigest()
-
-         os.makedirs('./cache', exist_ok=True)
-         cache_filepath = f"./cache/{desc}.pkl"
-         if desc is not None and os.path.exists(cache_filepath):
-             with open(cache_filepath, 'rb') as f:
-                 data = pickle.load(f)
-                 if data['hash'] == hash:
-                     self.labels = data['labels']
-                     self.embeds = data['embeds']
-
-         if len(self.labels) != len(self.embeds):
-             self.embeds = []
-             chunks = np.array_split(self.labels, max(1, len(self.labels)/chunk_size))
-             for chunk in tqdm(chunks, desc=f"Preprocessing {desc}" if desc else None):
-                 text_tokens = clip.tokenize(chunk).to(device)
-                 with torch.no_grad():
-                     text_features = clip_model.encode_text(text_tokens).float()
-                 text_features /= text_features.norm(dim=-1, keepdim=True)
-                 text_features = text_features.half().cpu().numpy()
-                 for i in range(text_features.shape[0]):
-                     self.embeds.append(text_features[i])
-
-             with open(cache_filepath, 'wb') as f:
-                 pickle.dump({"labels":self.labels, "embeds":self.embeds, "hash":hash}, f)
-
-     def _rank(self, image_features, text_embeds, top_count=1):
-         top_count = min(top_count, len(text_embeds))
-         similarity = torch.zeros((1, len(text_embeds))).to(device)
-         text_embeds = torch.stack([torch.from_numpy(t) for t in text_embeds]).float().to(device)
-         for i in range(image_features.shape[0]):
-             similarity += (image_features[i].unsqueeze(0) @ text_embeds.T).softmax(dim=-1)
-         _, top_labels = similarity.cpu().topk(top_count, dim=-1)
-         return [top_labels[0][i].numpy() for i in range(top_count)]
-
-     def rank(self, image_features, top_count=1):
-         if len(self.labels) <= chunk_size:
-             tops = self._rank(image_features, self.embeds, top_count=top_count)
-             return [self.labels[i] for i in tops]
-
-         num_chunks = int(math.ceil(len(self.labels)/chunk_size))
-         keep_per_chunk = int(chunk_size / num_chunks)
-
-         top_labels, top_embeds = [], []
-         for chunk_idx in tqdm(range(num_chunks)):
-             start = chunk_idx*chunk_size
-             stop = min(start+chunk_size, len(self.embeds))
-             tops = self._rank(image_features, self.embeds[start:stop], top_count=keep_per_chunk)
-             top_labels.extend([self.labels[start+i] for i in tops])
-             top_embeds.extend([self.embeds[start+i] for i in tops])
-
-         tops = self._rank(image_features, top_embeds, top_count=top_count)
-         return [top_labels[i] for i in tops]
-
- def generate_caption(pil_image):
-     gpu_image = T.Compose([
-         T.Resize((blip_image_eval_size, blip_image_eval_size), interpolation=TF.InterpolationMode.BICUBIC),
-         T.ToTensor(),
-         T.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))
-     ])(pil_image).unsqueeze(0).to(device)
-
-     with torch.no_grad():
-         caption = blip_model.generate(gpu_image, sample=False, num_beams=3, max_length=20, min_length=5)
-     return caption[0]
-
- def load_list(filename):
-     with open(filename, 'r', encoding='utf-8', errors='replace') as f:
-         items = [line.strip() for line in f.readlines()]
-     return items
-
- def rank_top(image_features, text_array):
-     text_tokens = clip.tokenize([text for text in text_array]).to(device)
-     with torch.no_grad():
-         text_features = clip_model.encode_text(text_tokens).float()
-     text_features /= text_features.norm(dim=-1, keepdim=True)
-
-     similarity = torch.zeros((1, len(text_array)), device=device)
-     for i in range(image_features.shape[0]):
-         similarity += (image_features[i].unsqueeze(0) @ text_features.T).softmax(dim=-1)
-
-     _, top_labels = similarity.cpu().topk(1, dim=-1)
-     return text_array[top_labels[0][0].numpy()]
-
- def similarity(image_features, text):
-     text_tokens = clip.tokenize([text]).to(device)
-     with torch.no_grad():
-         text_features = clip_model.encode_text(text_tokens).float()
-     text_features /= text_features.norm(dim=-1, keepdim=True)
-     similarity = text_features.cpu().numpy() @ image_features.cpu().numpy().T
-     return similarity[0][0]
-
- def interrogate(image):
-     caption = generate_caption(image)
-
-     images = clip_preprocess(image).unsqueeze(0).to(device)
-     with torch.no_grad():
-         image_features = clip_model.encode_image(images).float()
-     image_features /= image_features.norm(dim=-1, keepdim=True)
-
-     flaves = flavors.rank(image_features, flavor_intermediate_count)
-     best_medium = mediums.rank(image_features, 1)[0]
-     best_artist = artists.rank(image_features, 1)[0]
-     best_trending = trendings.rank(image_features, 1)[0]
-     best_movement = movements.rank(image_features, 1)[0]
-
-     best_prompt = caption
-     best_sim = similarity(image_features, best_prompt)
-
-     def check(addition):
-         nonlocal best_prompt, best_sim
-         prompt = best_prompt + ", " + addition
-         sim = similarity(image_features, prompt)
-         if sim > best_sim:
-             best_sim = sim
-             best_prompt = prompt
-             return True
-         return False
-
-     def check_multi_batch(opts):
-         nonlocal best_prompt, best_sim
-         prompts = []
-         for i in range(2**len(opts)):
-             prompt = best_prompt
-             for bit in range(len(opts)):
-                 if i & (1 << bit):
-                     prompt += ", " + opts[bit]
-             prompts.append(prompt)
-
-         prompt = rank_top(image_features, prompts)
-         sim = similarity(image_features, prompt)
-         if sim > best_sim:
-             best_sim = sim
-             best_prompt = prompt
-
-     check_multi_batch([best_medium, best_artist, best_trending, best_movement])
-
-     extended_flavors = set(flaves)
-     for _ in tqdm(range(25), desc="Flavor chain"):
-         try:
-             best = rank_top(image_features, [f"{best_prompt}, {f}" for f in extended_flavors])
-             flave = best[len(best_prompt)+2:]
-             if not check(flave):
-                 break
-             extended_flavors.remove(flave)
-         except:
-             # exceeded max prompt length
-             break
-
-     return best_prompt
-
-
- sites = ['Artstation', 'behance', 'cg society', 'cgsociety', 'deviantart', 'dribble', 'flickr', 'instagram', 'pexels', 'pinterest', 'pixabay', 'pixiv', 'polycount', 'reddit', 'shutterstock', 'tumblr', 'unsplash', 'zbrush central']
- trending_list = [site for site in sites]
- trending_list.extend(["trending on "+site for site in sites])
- trending_list.extend(["featured on "+site for site in sites])
- trending_list.extend([site+" contest winner" for site in sites])
-
- raw_artists = load_list('data/artists.txt')
- artists = [f"by {a}" for a in raw_artists]
- artists.extend([f"inspired by {a}" for a in raw_artists])
-
- artists = LabelTable(artists, "artists")
- flavors = LabelTable(load_list('data/flavors.txt'), "flavors")
- mediums = LabelTable(load_list('data/mediums.txt'), "mediums")
- movements = LabelTable(load_list('data/movements.txt'), "movements")
- trendings = LabelTable(trending_list, "trendings")
-
-
- def inference(image):
-     return interrogate(image)
-
- inputs = [gr.inputs.Image(type='pil')]
- outputs = gr.outputs.Textbox(label="Output")
-
-
- interrogator = gr.Interface(
-     inference,
-     inputs,
-     outputs,
-     examples=[['example01.jpg'], ['example02.jpg']]
- )
-
-
- ldm_pipeline = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256")
-
- def predict(prompt):
-     steps=100
-     seed=42
-     guidance_scale=6.0
-     print(subprocess.check_output(["nvidia-smi"], stderr=subprocess.STDOUT).decode("utf8"))
-     generator = torch.manual_seed(seed)
-     images = ldm_pipeline([prompt], generator=generator, num_inference_steps=steps, eta=0.3, guidance_scale=guidance_scale)["sample"]
-     print(subprocess.check_output(["nvidia-smi"], stderr=subprocess.STDOUT).decode("utf8"))
-     return images[0]
-
- random_seed = random.randint(0, 2147483647)
- diffusion_model = gr.Interface(
-     predict,
-     inputs=[
-         gr.inputs.Textbox(label='Prompt', default='a chalk pastel drawing of a llama wearing a wizard hat')],
-     outputs=gr.Image(shape=[256,256], type="pil", elem_id="output_image"),
-     title = "Draw me like one of your Diffusion gurls"
- )
-
- gr.mix.Series(interrogator, diffusion_model).launch()
+ img_to_text = gr.Interface.load("spaces/pharma/CLIP-Interrogator")
+ diffusion_model = gr.Interface.load("/spaces/CompVis/text2img-latent-diffusion")
+ gr.mix.Series(img_to_text, diffusion_model).launch()
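
For readers skimming the diff: gr.mix.Series chains Gradio interfaces so that the output of the first (here, the prompt produced by the CLIP-Interrogator Space) becomes the input of the second (the latent-diffusion text-to-image Space). Below is a rough, untested sketch of the same pipeline written out by hand rather than via Series. It assumes the loaded Space interfaces can be called as plain functions (Gradio's "use a loaded interface like a function" pattern), that the interrogator returns a text prompt, and that the diffusion Space returns an image Gradio can display; the Space paths are the ones from the commit, with the leading slash on the second path omitted since Interface.load normally takes "spaces/<owner>/<name>".

import gradio as gr

# Load the two hosted Spaces, as in the new app.py.
img_to_text = gr.Interface.load("spaces/pharma/CLIP-Interrogator")           # image -> prompt
text_to_img = gr.Interface.load("spaces/CompVis/text2img-latent-diffusion")  # prompt -> image

def image_to_image(image):
    # Stage 1: describe the uploaded image as a text prompt.
    prompt = img_to_text(image)
    # Stage 2: generate a new image from that prompt.
    return text_to_img(prompt)

# Hand-written equivalent of gr.mix.Series(img_to_text, text_to_img).launch()
gr.Interface(fn=image_to_image, inputs=gr.Image(type="pil"), outputs="image").launch()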