import tensorflow as tf
import tensorflow_hub as hub
from tensorflow_text import SentencepieceTokenizer  # importing tensorflow_text registers the custom ops the SavedModel needs
import gradio as gr
import math

# Load the Universal Sentence Encoder SavedModel from the local ./model directory.
model = hub.load("./model")

def embed_text(text: str) -> list:
    # Encode the input text and return its embedding vector as a plain Python list.
    embeddings = model(text)
    return embeddings.numpy().tolist()[0]

embed_text_inter = gr.Interface(
    fn=embed_text,
    inputs="text",
    outputs=gr.JSON(),
    title="Universal Sentence Encoder 3 Large",
)

def distance(text_1: str, text_2: str) -> float:
    # Euclidean distance between the embeddings of the two input texts.
    embeddings_1 = embed_text(text_1)
    embeddings_2 = embed_text(text_2)
    dist = 0
    for i in range(len(embeddings_1)):
        dist += (embeddings_1[i] - embeddings_2[i]) ** 2
    return math.sqrt(dist)

distance_inter = gr.Interface(
    fn=distance,
    inputs=["text", "text"],
    outputs="number",
    title="Universal Sentence Encoder 3 Large",
)
iface = gr.TabbedInterface(
    interface_list=[embed_text_inter, distance_inter],
    title="Universal Sentence Encoder 3 Large",
)

iface.launch()