import numpy
from sentence_transformers import SentenceTransformer, util
from PIL import Image
import gradio as gr

# CLIP model that maps images into a shared embedding space
model = SentenceTransformer('clip-ViT-B-32')


def image_classifier(im1: numpy.ndarray, im2: numpy.ndarray):
    # Encode both input images into CLIP embeddings
    encoded_image = model.encode(
        [Image.fromarray(im1), Image.fromarray(im2)],
        batch_size=128,
        convert_to_tensor=True,
        show_progress_bar=True,
    )
    # paraphrase_mining_embeddings returns [score, idx_i, idx_j] triples sorted
    # by cosine similarity; with two images there is exactly one pair
    processed_images = util.paraphrase_mining_embeddings(encoded_image)
    return {"Similarity": round(processed_images[0][0], 2)}


with gr.Blocks() as b:
    with gr.Row():
        with gr.Column():
            image1 = gr.Image(label="image 1")
            image2 = gr.Image(label="image 2")
    with gr.Row():
        compare = gr.Button("Compare")
        output = gr.Label(label="output")
    compare.click(fn=image_classifier, inputs=[image1, image2], outputs=output)

b.launch()
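
# Note: util.paraphrase_mining_embeddings is intended for mining similar pairs
# in larger collections; for exactly two images the same cosine score could
# also be read off directly with util.cos_sim. A minimal sketch, assuming the
# `model` defined above and two hypothetical local image files (not part of
# the app itself):
#
#   emb = model.encode(
#       [Image.open("img1.jpg"), Image.open("img2.jpg")],
#       convert_to_tensor=True,
#   )
#   score = util.cos_sim(emb[0], emb[1]).item()  # single cosine similarity
#   print(round(score, 2))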