# coding=utf-8
# Copyright 2023 The GlotLID Authors.
# Lint as: python3
# This space is based on the AMR-KELEG/ALDi space.
# GlotLID Space
import string
import constants
import pandas as pd
import streamlit as st
from huggingface_hub import hf_hub_download
from GlotScript import get_script_predictor
import matplotlib
from matplotlib import pyplot as plt
import fasttext
import altair as alt
from altair import X, Y, Scale
import base64
import json
import os
import re
import transformers
from transformers import pipeline
@st.cache_resource
def load_sp():
    sp = get_script_predictor()
    return sp

sp = load_sp()
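
# Note (inferred from how the result is used below): sp(text) returns a tuple whose
# first element is the main script and whose third element is a dict with a
# 'details' entry mapping each detected script to a score.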
def get_script(text):
    """Get the writing system(s) of the given text.

    Args:
        text: The text to be analyzed.

    Returns:
        The main script and the list of all detected scripts.
    """
    res = sp(text)
    main_script = res[0] if res[0] else 'Zyyy'
    all_scripts_dict = res[2]['details']

    if all_scripts_dict:
        all_scripts = list(all_scripts_dict.keys())
    else:
        all_scripts = ['Zyyy']

    # Japanese text mixes several scripts; if any of them is present, add 'Jpan' as well.
    if any(ws in ['Kana', 'Hrkt', 'Hani', 'Hira'] for ws in all_scripts):
        all_scripts.append('Jpan')

    all_scripts = list(set(all_scripts))

    return main_script, all_scripts

def preprocess_text(text):
    """Apply preprocessing to the given text.

    Args:
        text: The text to be preprocessed.

    Returns:
        The preprocessed text.
    """
    # remove newlines
    text = text.replace('\n', ' ')

    # get rid of characters that are ubiquitous
    replace_by = " "
    replacement_map = {
        ord(c): replace_by
        for c in ':•#{|}' + string.digits
    }
    text = text.translate(replacement_map)

    # collapse multiple whitespace characters into one space
    text = re.sub(r'\s+', ' ', text)

    # strip the text
    text = text.strip()

    return text
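
# Illustrative example (not from the original source, traced by hand):
#   preprocess_text("Hello\n  world: 123") -> "Hello world"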

@st.cache_data
def language_names(json_path):
    with open(json_path, 'r') as json_file:
        data = json.load(json_file)
    return data

label2name = language_names("assets/language_names.json")

def get_name(label):
    """Get the language name from a label such as 'eng_Latn'."""
    iso_3 = label.split('_')[0]
    name = label2name[iso_3]
    return name

@st.cache_data
def render_svg(svg):
    """Renders the given svg string."""
    b64 = base64.b64encode(svg.encode("utf-8")).decode("utf-8")
    html = rf'<p align="center"> <img src="data:image/svg+xml;base64,{b64}" width="40%"/></p>'
    c = st.container()
    c.write(html, unsafe_allow_html=True)

@st.cache_data
def render_metadata():
    """Renders the metadata."""
    html = r"""<p align="center">
    <a href="https://huggingface.co/dsfsi/za-lid"><img alt="HuggingFace Model" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Model-8A2BE2"></a>
    <a href="https://github.com/dsfsi/za-lid"><img alt="GitHub" src="https://img.shields.io/badge/%F0%9F%93%A6%20GitHub-orange"></a>
    <a href="https://github.com/dsfsi/za-lid/blob/master/LICENSE.md"><img alt="GitHub license" src="https://img.shields.io/badge/Github%20Licence-blue"></a>
    <a href="https://docs.google.com/forms/d/e/1FAIpQLSf7S36dyAUPx2egmXbFpnTBuzoRulhL5Elu-N1eoMhaO7v10w/viewform" target="_blank"><img alt="Feedback Form" src="https://img.shields.io/badge/Feedback-Form-brightgreen"></a>
    <a href="https://huggingface.co/papers/1911.02116" target="_blank"><img alt="arxiv" src="https://img.shields.io/badge/arxiv-1911.02116-blue"></a></p>"""
    c = st.container()
    c.write(html, unsafe_allow_html=True)

@st.cache_data
def citation():
    """Renders the citation."""
    _CITATION = """
@inproceedings{
kargaran2023glotlid,
title={GlotLID: Language Identification for Low-Resource Languages},
author={Kargaran, Amir Hossein and Imani, Ayyoob and Yvon, Fran{\c{c}}ois and Sch{\"u}tze, Hinrich},
booktitle={The 2023 Conference on Empirical Methods in Natural Language Processing},
year={2023},
url={https://openreview.net/forum?id=dl4e3EBz5j}
}"""
    st.code(_CITATION, language="python", line_numbers=False)

@st.cache_data
def convert_df(df):
    # IMPORTANT: Cache the conversion to prevent computation on every rerun
    return df.to_csv(index=None).encode("utf-8")

@st.cache_resource
def load_model(model_name, file_name):
    """Download a fastText model file from the Hugging Face Hub and load it."""
    model_path = hf_hub_download(repo_id=model_name, filename=file_name)
    model = fasttext.load_model(model_path)
    return model

@st.cache_resource
def load_model_pipeline(model_name, file_name):
    """Load a transformers text-classification pipeline (file_name is unused here)."""
    model = pipeline("text-classification", model=model_name)
    return model
# model_1 = load_model(constants.MODEL_NAME, "model_v1.bin")
# model_2 = load_model(constants.MODEL_NAME, "model_v2.bin")
# model_3 = load_model(constants.MODEL_NAME, "model_v3.bin")
# openlid = load_model('laurievb/OpenLID', "model.bin")
# nllb = load_model('facebook/fasttext-language-identification', "model.bin")
# MODELS
model_xlmr_large = load_model_pipeline('dsfsi/za-xlmrlarge-lid', "model.bin")
model_serengeti = load_model_pipeline('dsfsi/za-serengeti-lid', "model.bin")
model_afriberta = load_model_pipeline('dsfsi/za-afriberta-lid', "model.bin")
model_afroxlmr_base = load_model_pipeline('dsfsi/za-afro-xlmr-base-lid', "model.bin")
model_afrolm = load_model_pipeline('dsfsi/za-afrolm-lid', "model.bin")
za_lid = load_model_pipeline('dsfsi/za-lid-bert', "model.bin")
openlid = load_model('laurievb/OpenLID', "model.bin")
glotlid_3 = load_model(constants.MODEL_NAME, "model_v3.bin")
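
# Two model families are mixed here: the dsfsi/* entries are transformers
# text-classification pipelines (predictions come back as a list of
# {'label', 'score'} dicts), while OpenLID and GlotLID are fastText models
# (predictions come back as a (labels, probabilities) tuple). compute() below
# branches on this difference.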

# @st.cache_resource
def plot(label, prob):
    ORANGE_COLOR = "#FF8000"
    BLACK_COLOR = "#31333F"

    fig, ax = plt.subplots(figsize=(8, 1))
    fig.patch.set_facecolor("none")
    ax.set_facecolor("none")

    ax.spines["left"].set_color(BLACK_COLOR)
    ax.spines["bottom"].set_color(BLACK_COLOR)
    ax.tick_params(axis="x", colors=BLACK_COLOR)
    ax.spines[["right", "top"]].set_visible(False)

    ax.barh(y=[0], width=[prob], color=ORANGE_COLOR)
    ax.set_xlim(0, 1)
    ax.set_ylim(-1, 1)
    ax.set_title(f"Label: {label}, Language: {get_name(label)}", color=BLACK_COLOR)
    ax.get_yaxis().set_visible(False)
    ax.set_xlabel("Confidence", color=BLACK_COLOR)

    st.pyplot(fig)

# @st.cache_resource
def plot_multiples(models, labels, probs):
    ORANGE_COLOR = "#FF8000"
    BLACK_COLOR = "#31333F"

    fig, ax = plt.subplots(figsize=(12, len(models)))
    fig.patch.set_facecolor("none")
    ax.set_facecolor("none")

    ax.spines["left"].set_color(BLACK_COLOR)
    ax.spines["bottom"].set_color(BLACK_COLOR)
    ax.tick_params(axis="x", colors=BLACK_COLOR)
    ax.spines[["right", "top"]].set_visible(False)

    # Plot bars for each model, label, and probability
    y_positions = range(len(models))  # y positions for each model
    ax.barh(y=y_positions, width=probs, color=ORANGE_COLOR)

    # Add labels next to each bar
    for i, (prob, label) in enumerate(zip(probs, labels)):
        ax.text(prob + 0.01, i, f"{label} ({prob:.2f})", va='center', color=BLACK_COLOR)

    # Set y-ticks and labels
    ax.set_yticks(y_positions)
    ax.set_yticklabels(models, color=BLACK_COLOR)

    ax.set_xlim(0, 1)
    ax.set_xlabel("Confidence", color=BLACK_COLOR)
    ax.set_title("Model Predictions", color=BLACK_COLOR)

    st.pyplot(fig)

def compute(sentences, version='v3'):
    """Computes the language probabilities and labels for the given sentences.

    Args:
        sentences: A list of sentences.

    Returns:
        Two lists: the language probabilities and the predicted labels, one entry
        per sentence (or per model and sentence when all models are selected).
    """
    progress_text = "Computing Language..."

    if version == 'xlmrlarge':
        model_choice = model_xlmr_large
    elif version == 'serengeti':
        model_choice = model_serengeti
    elif version == 'afriberta':
        model_choice = model_afriberta
    elif version == 'afroxlmrbase':
        model_choice = model_afroxlmr_base
    elif version == 'afrolm':
        model_choice = model_afrolm
    elif version == 'BERT':
        model_choice = za_lid
    elif version == 'openlid-201':
        model_choice = openlid
    elif version == 'GlotLID v3':
        model_choice = glotlid_3
    else:
        # "All-Models" (and the default 'v3') runs every model on each sentence.
        model_choice = [
            (model_xlmr_large, "xlmrlarge"),
            (model_serengeti, "serengeti"),
            (model_afriberta, "afriberta"),
            (model_afroxlmr_base, "afroxlmrbase"),
            (model_afrolm, "afrolm"),
            (za_lid, "BERT"),
            (openlid, "openlid-201"),
            (glotlid_3, "GlotLID v3"),
        ]

    my_bar = st.progress(0, text=progress_text)

    probs = []
    labels = []

    sentences = [preprocess_text(sent) for sent in sentences]

    for index, sent in enumerate(sentences):
        if isinstance(model_choice, list):
            # All-Models: run every model on this sentence.
            for model, m_version in model_choice:
                if m_version not in ["openlid-201", "GlotLID v3"]:
                    # transformers pipeline: one {'label', 'score'} dict per input
                    output = model.predict(sent)
                    output_label = output[0]['label']
                    output_prob = output[0]['score']
                else:
                    # fastText model: (labels, probabilities) tuple
                    output = model.predict(sent)
                    output_label = output[0][0].split('__')[-1].replace('_Hans', '_Hani').replace('_Hant', '_Hani')
                    output_prob = max(min(output[1][0], 1), 0)
                    output_label_language = output_label.split('_')[0]

                    # script control: if the predicted script does not match the text, fall back to 'und'
                    if m_version in ['GlotLID v3', 'openlid-201', 'nllb-218'] and output_label_language != 'zxx':
                        main_script, all_scripts = get_script(sent)
                        output_label_script = output_label.split('_')[1]

                        if output_label_script not in all_scripts:
                            output_label_script = main_script
                            output_label = f"und_{output_label_script}"
                            output_prob = 0

                labels.append(output_label)
                probs.append(output_prob)

            my_bar.progress(
                min((index + 1) / len(sentences), 1),
                text=progress_text,
            )
        else:
            if version not in ["openlid-201", "GlotLID v3"]:
                # transformers pipeline: one {'label', 'score'} dict per input
                output = model_choice.predict(sent)
                output_label = output[0]['label']
                output_prob = output[0]['score']
            else:
                # fastText model: (labels, probabilities) tuple
                output = model_choice.predict(sent)
                output_label = output[0][0].split('__')[-1].replace('_Hans', '_Hani').replace('_Hant', '_Hani')
                output_prob = max(min(output[1][0], 1), 0)
                output_label_language = output_label.split('_')[0]

                # script control: if the predicted script does not match the text, fall back to 'und'
                if version in ['GlotLID v3', 'openlid-201', 'nllb-218'] and output_label_language != 'zxx':
                    main_script, all_scripts = get_script(sent)
                    output_label_script = output_label.split('_')[1]

                    if output_label_script not in all_scripts:
                        output_label_script = main_script
                        output_label = f"und_{output_label_script}"
                        output_prob = 0

            labels.append(output_label)
            probs.append(output_prob)

            my_bar.progress(
                min((index + 1) / len(sentences), 1),
                text=progress_text,
            )

    my_bar.empty()

    return probs, labels
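
# Everything below builds the Streamlit page: header, model picker, and the two input tabs.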
# st.markdown("[![Duplicate Space](https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14)](https://huggingface.co/spaces/cis-lmu/glotlid-space?duplicate=true)")
# render_svg(open("assets/glotlid_logo.svg").read())
render_metadata()

img1, img2, img3 = st.columns(3)
with img2:
    with st.container():
        st.image("logo_transparent_small.png")

st.markdown("**DSFSI** Language Identification (LID) Inference Endpoint Created with **HuggingFace Spaces**.")

with st.expander("More information about the space"):
    st.write('''
    Authors: Alexandre Lacoste, Alexandra Luccioni, Victor Schmidt, Thomas Dandres
    ''')
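
# Two tabs: classify a single typed sentence, or classify each line of an uploaded text file.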
tab1, tab2 = st.tabs(["Input a Sentence", "Upload a File"])

with tab1:
    # choice = st.radio(
    #     "Set granularity level",
    #     ["default", "merge", "individual"],
    #     captions=["enable both macrolanguage and its varieties (default)", "merge macrolanguage and its varieties into one label", "remove macrolanguages - only show individual languages"],
    # )

    version = st.radio(
        "Choose model",
        ["xlmrlarge", "serengeti", "afriberta", "afroxlmrbase", "afrolm", "BERT", "openlid-201", "GlotLID v3", "All-Models"],
        captions=["za-XLMR-Large", "za-Serengeti", "za-AfriBERTa", "za-Afro-XLMR-BASE", "za-AfroLM", "za-BERT", "OpenLID", "GlotLID v3", "All-Models"],
        index=4,
        key='version_tab1',
        horizontal=True,
    )

    sent = st.text_input(
        "Sentence:", placeholder="Enter a sentence.", on_change=None
    )

    # TODO: Check if this is needed!
    clicked = st.button("Submit")

    if sent:
        probs, labels = compute([sent], version=version)
        prob = probs[0]
        label = labels[0]

        # Create the log file if it does not exist yet
        if not os.path.exists('logs.txt'):
            with open('logs.txt', 'w') as file:
                pass

        print(f"{sent}, {label}: {prob}")
        with open("logs.txt", "a") as f:
            f.write(f"{sent}, {label}: {prob}\n")

        # plot
        if version == "All-Models":
            plot_multiples(["xlmrlarge", "serengeti", "afriberta", "afroxlmrbase", "afrolm", "BERT", "OpenLID", "GlotLID v3"], labels, probs)
        else:
            plot(label, prob)

with tab2:
    version = st.radio(
        "Choose model",
        ["xlmrlarge", "serengeti", "afriberta", "afroxlmrbase", "afrolm", "BERT", "openlid-201", "GlotLID v3", "All-Models"],
        captions=["za-XLMR-Large", "za-Serengeti", "za-AfriBERTa", "za-Afro-XLMR-BASE", "za-AfroLM", "za-BERT", "OpenLID", "GlotLID v3", "All-Models"],
        index=4,
        key='version_tab2',
        horizontal=True,
    )

    file = st.file_uploader("Upload a file", type=["txt"])

    if file is not None:
        df = pd.read_csv(file, sep="¦\t¦", header=None, engine='python')
        df.columns = ["Sentence"]
        df.reset_index(drop=True, inplace=True)

        # Run the selected model(s) on every sentence
        df["Prob"], df["Label"] = compute(df["Sentence"].tolist(), version=version)
        df["Language"] = df["Label"].apply(get_name)

        # A horizontal rule
        st.markdown("""---""")

        chart = (
            alt.Chart(df.reset_index())
            .mark_area(color="darkorange", opacity=0.5)
            .encode(
                x=X(field="index", title="Sentence Index"),
                y=Y("Prob", scale=Scale(domain=[0, 1])),
            )
        )
        st.altair_chart(chart.interactive(), use_container_width=True)

        col1, col2 = st.columns([4, 1])

        with col1:
            # Display the output
            st.table(df)

        with col2:
            # Add a download button
            csv = convert_df(df)
            st.download_button(
                label=":file_folder: Download predictions as CSV",
                data=csv,
                file_name="GlotLID.csv",
                mime="text/csv",
            )

# citation()
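
# To run this Space locally (assuming the file is saved as app.py): streamlit run app.py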