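"""Streamlit playground for unbalanced optimal transport (OT) word alignment.

Embeds two input sentences with a transformer encoder, aligns their words with
balanced/partial/unbalanced OT, and visualizes the alignment matrix, the word
similarity matrix, and the alignments in a 2D UMAP projection. Based on
Arase, Bao, and Yokoi, "Unbalanced Optimal Transport for Unbalanced Word
Alignment" (ACL 2023).
"""
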
import random
import numpy as np
import streamlit as st
import nltk
import torch
import umap
from nltk.tokenize import word_tokenize
from transformers import AutoModel, AutoTokenizer
from aligner import Aligner
from plotools import (
    plot_align_matrix_heatmap_plotly,
    plot_similarity_matrix_heatmap_plotly,
    show_assignments_plotly,
)
from utils import centering, convert_to_word_embeddings, encode_sentence
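
# Fix all random seeds for reproducibility; use the GPU when available.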
device = "cuda" if torch.cuda.is_available() else "cpu"
torch.manual_seed(42)
np.random.seed(42)
random.seed(42)
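
# word_tokenize below requires NLTK's "punkt" tokenizer models.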
nltk.download("punkt")


@st.cache_resource
def init_model(model_name: str):
    """Load and cache the tokenizer and encoder for the selected model."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = (
        AutoModel.from_pretrained(model_name, output_hidden_states=True)
        .to(device)
        .eval()
    )
    return tokenizer, model


@st.cache_resource(max_entries=100)
def init_aligner(
    ot_type: str, sinkhorn: bool, distortion: float, threshold: float, tau: float
):
    """Build and cache an Aligner with the chosen OT variant and parameters."""
    return Aligner(
        ot_type=ot_type,
        sinkhorn=sinkhorn,
        dist_type="cos",
        weight_type="uniform",
        distortion=distortion,
        thresh=threshold,
        tau=tau,
        div_type="--",
    )
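
# Note: the meaning of tau depends on the OT variant (see the sidebar help in
# main()): for POT it is the fraction of mass m to be transported; for UOT it
# is the marginal-relaxation penalty.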


def main():
    st.set_page_config(layout="wide")

    # Sidebar
    st.sidebar.markdown("## Settings & Parameters")
    model = st.sidebar.selectbox(
        "model", ["microsoft/deberta-v3-base", "bert-base-uncased"]
    )
    layer = st.sidebar.slider(
        "layer number for embeddings",
        0,
        11,
        value=9,
    )
    is_centering = st.sidebar.checkbox("centering embeddings", value=True)
    ot_type = st.sidebar.radio(
        "type",
        ["OT", "POT", "UOT"],
        index=1,
        horizontal=True,
        help="optimal transport algorithm to be used",
    )
    ot_type = ot_type.lower()
    sinkhorn = st.sidebar.checkbox(
        "sinkhorn", value=True, help="use the Sinkhorn algorithm"
    )
    distortion = st.sidebar.slider(
        "distortion: $\\kappa$",
        0.0,
        1.0,
        value=0.20,
        help="suppression of off-diagonal alignments",
    )
    tau = st.sidebar.slider(
        "m / $\\tau$",
        0.0,
        1.0,
        value=0.98,
        help="fraction of mass to be transported (for POT) / marginal relaxation penalty (for UOT)",
    )
    threshold = st.sidebar.slider(
        "threshold: $\\lambda$",
        0.0,
        1.0,
        value=0.22,
        help="sparsity threshold for the alignment matrix",
    )
    show_similarity = st.sidebar.checkbox("show similarity matrix", value=True)
    show_assignments = st.sidebar.checkbox("show assignments", value=True)
    if show_assignments:
        n_neighbors = st.sidebar.slider(
            "n_neighbors (see [details](https://umap-learn.readthedocs.io/en/latest/parameters.html#n-neighbors))",
            2,
            15,
            value=8,
            help="number of nearest neighbors for UMAP, balancing preservation of local and global structure",
        )

    # Content
    st.markdown(
        "## Playground: Unbalanced Optimal Transport for Unbalanced Word Alignment"
    )
    col1, col2 = st.columns(2)
    with col1:
        sent1 = st.text_area(
            "sentence 1",
            "By one estimate, fewer than 20,000 lions exist in the wild, a drop of about 40 percent in the past two decades.",
            help="Initial text",
        )
    with col2:
        sent2 = st.text_area(
            "sentence 2",
            "Today there are only around 20,000 wild lions left in the world.",
            help="Text to compare",
        )

    tokenizer, model = init_model(model)
    aligner = init_aligner(ot_type, sinkhorn, distortion, threshold, tau)
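    # Both the encoder and the aligner are cached via st.cache_resource, so
    # repeated Streamlit reruns with the same settings reuse cached objects.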

    with st.container():
        if sent1 != "" and sent2 != "":
            # Tokenize to words; subword embeddings are pooled back to these tokens.
            sent1 = word_tokenize(sent1.lower())
            sent2 = word_tokenize(sent2.lower())
            hidden_output, input_id, offset_map = encode_sentence(
                sent1, sent2, tokenizer, model, layer=layer
            )
            if is_centering:
                hidden_output = centering(hidden_output)
            s1_vec, s2_vec = convert_to_word_embeddings(
                offset_map, input_id, hidden_output, tokenizer, pair=True
            )
            align_matrix, cost_matrix, loss, similarity_matrix = (
                aligner.compute_alignment_matrixes(s1_vec, s2_vec)
            )
            st.write(f"**Word alignment matrix** (loss: :blue[{loss}])")
            fig = plot_align_matrix_heatmap_plotly(
                align_matrix.T, sent1, sent2, threshold, cost_matrix.T
            )
            st.plotly_chart(fig, use_container_width=True)
            if show_similarity:
                st.write("**Word similarity matrix**")
                fig2 = plot_similarity_matrix_heatmap_plotly(
                    similarity_matrix.T, sent1, sent2, cost_matrix.T
                )
                st.plotly_chart(fig2, use_container_width=True)
            if show_assignments:
                st.write("**Alignments after UMAP**")
                word_embeddings = torch.vstack([s1_vec, s2_vec])
                # Project both sentences' word vectors to 2D with UMAP;
                # .cpu() is required before .numpy() when running on GPU.
                umap_embeddings = umap.UMAP(
                    n_neighbors=n_neighbors,
                    n_components=2,
                    random_state=42,
                    metric="cosine",
                ).fit_transform(word_embeddings.detach().cpu().numpy())
                fig3 = show_assignments_plotly(
                    align_matrix, umap_embeddings, sent1, sent2, thr=threshold
                )
                st.plotly_chart(fig3, use_container_width=True)

    st.divider()
    st.subheader("Refs")
    st.write("Yuki Arase, Han Bao, Sho Yokoi, [Unbalanced Optimal Transport for Unbalanced Word Alignment](https://arxiv.org/abs/2306.04116), ACL 2023 [[github](https://github.com/yukiar/OTAlign/tree/main)]")


if __name__ == "__main__":
    main()
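
# To launch the demo (assuming this file is saved as app.py):
#   streamlit run app.py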