# ma-images/image_features/multilingual_clipify_images.py
# Example script for creating CLIP embeddings from a list of images,
# using a multilingual text encoder (M-CLIP) paired with an OpenCLIP image encoder.
#import clip
import torch
from multilingual_clip import pt_multilingual_clip
import open_clip
import numpy as np
import os
from PIL import Image
from transformers import AutoTokenizer
images_path = './images/'
images = []
# Collect every .jpg file in the images directory.
for item in os.listdir(images_path):
    if os.path.isfile(images_path + item):
        if item.endswith('jpg'):
            images.append(images_path + item)
print("total images:", len(images))
device = "cuda" if torch.cuda.is_available() else "cpu"
# Multilingual CLIP text encoder (paired with the OpenCLIP image model loaded below).
#model_name = 'M-CLIP/XLM-Roberta-Large-Vit-L-14'
model_name = 'M-CLIP/XLM-Roberta-Large-Vit-B-16Plus'
text_model = pt_multilingual_clip.MultilingualCLIP.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
print("text model parameters:", f"{np.sum([int(np.prod(p.shape)) for p in text_model.parameters()]):,}")
# OpenCLIP image encoder; 'ViT-B-16-plus-240' is the image tower that matches the
# XLM-Roberta-Large-Vit-B-16Plus text model above.
#clip_model_name = 'ViT-L/14'
#clip_model_name = 'ViT-B/16'
#clip_model, compose = clip.load(clip_model_name, device=device)
clip_model, _, preprocess = open_clip.create_model_and_transforms('ViT-B-16-plus-240', pretrained="laion400m_e32")
clip_model.to(device)
clip_model.eval()  # inference only
print("CLIP image model parameters:", f"{np.sum([int(np.prod(p.shape)) for p in clip_model.parameters()]):,}")
# Example captions in English and Norwegian ("Three blind horses listening to Mozart").
texts = ['Three blind horses listening to Mozart',
         'Tre blinde hester som lytter til Mozart']
text_embeddings = text_model.forward(texts, tokenizer)
print("text embeddings:", text_embeddings.shape)
batch_size = 10  # retained for the commented-out batched variant below
image_embeddings = []
# Record the processed file names and encode each image with the CLIP image encoder.
with open("images_list.txt", "w", encoding="utf-8") as images_list:
    for i in range(0, len(images)):  #, batch_size):
        #images_list.write("\n".join([im_path.split("/")[-1] for im_path in images[i:min(i+batch_size, len(images))]]) + "\n")
        images_list.write(images[i].split("/")[-1] + "\n")
        image = Image.open(images[i])
        #img_input = torch.stack([compose(Image.open(img_path)).to(device) for img_path in images[i:min(i+batch_size, len(images))]])
        img_input = preprocess(image).unsqueeze(0).to(device)
        with torch.no_grad():
            img_embs = clip_model.encode_image(img_input).float()
        image_embeddings.extend(img_embs.detach().cpu().numpy())
image_embeddings_np = np.array(image_embeddings)
print("image embeddings:", image_embeddings_np.shape)
np.save("multilingual_features.npy", image_embeddings_np)