import os

import numpy as np
import torch
from PIL import Image
from transformers import AutoTokenizer

import open_clip
from multilingual_clip import pt_multilingual_clip

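# Pipeline: embed a folder of images with a CLIP image encoder, embed
# captions with the matching multilingual text encoder, and save the
# image features to disk for later (cross-lingual) search.
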
images_path = './images/'

# Collect every .jpg file directly under images_path.
images = []
for item in os.listdir(images_path):
    path = os.path.join(images_path, item)
    if os.path.isfile(path) and item.endswith('.jpg'):
        images.append(path)

print("total images:", len(images))

device = "cuda" if torch.cuda.is_available() else "cpu"

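# M-CLIP pairs an XLM-RoBERTa text transformer with a linear projection
# into a CLIP image-embedding space; its forward() takes raw strings plus
# a tokenizer, so no manual tokenization is needed below.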
model_name = 'M-CLIP/XLM-Roberta-Large-Vit-B-16Plus'

text_model = pt_multilingual_clip.MultilingualCLIP.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

print("text model parameters:", f"{np.sum([int(np.prod(p.shape)) for p in text_model.parameters()]):,}")

# Image encoder: the ViT-B-16-plus-240 checkpoint this text model was
# aligned to, with LAION-400M weights.
clip_model, _, preprocess = open_clip.create_model_and_transforms('ViT-B-16-plus-240', pretrained="laion400m_e32")
clip_model.to(device)
clip_model.eval()  # inference mode: disable dropout etc.

print("CLIP image model parameters:", f"{np.sum([int(np.prod(p.shape)) for p in clip_model.parameters()]):,}")

# The same caption in English and Norwegian; translations should land
# close together in the shared embedding space.
texts = ['Three blind horses listening to Mozart',
         'Tre blinde hester som lytter til Mozart']

text_embeddings = text_model.forward(texts, tokenizer)
print("text embeddings:", text_embeddings.shape)

image_embeddings = []

# Embed images one at a time, recording filenames so rows of the saved
# feature matrix can be mapped back to files. (The original defined an
# unused batch_size variable; it is dropped here.)
with open("images_list.txt", "w", encoding="utf-8") as images_list:
    for i in range(len(images)):
        images_list.write(os.path.basename(images[i]) + "\n")
        image = Image.open(images[i]).convert("RGB")

        img_input = preprocess(image).unsqueeze(0).to(device)
        with torch.no_grad():
            img_embs = clip_model.encode_image(img_input).float()
        image_embeddings.extend(img_embs.detach().cpu().numpy())

image_embeddings_np = np.array(image_embeddings)
print(image_embeddings_np.shape)
np.save("multilingual_features.npy", image_embeddings_np)
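
# --- Illustrative sketch (not part of the original script): cross-lingual
# --- text-to-image search over the features saved above. `top_k` is an
# --- assumed parameter for this demo, not something the script defines.
image_features = torch.from_numpy(np.load("multilingual_features.npy")).float()
text_features = text_embeddings.detach().float()

# Cosine similarity = dot product of L2-normalized vectors.
image_features = image_features / image_features.norm(dim=-1, keepdim=True)
text_features = text_features / text_features.norm(dim=-1, keepdim=True)
similarity = text_features @ image_features.T  # (num_texts, num_images)

with open("images_list.txt", encoding="utf-8") as f:
    filenames = f.read().splitlines()

top_k = min(3, len(filenames))
for text, sims in zip(texts, similarity):
    best = sims.topk(top_k).indices.tolist()
    print(text, "->", [filenames[j] for j in best])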