# app.py — Hugging Face Space by Cicciokr (rev 92189ac, verified; 1.76 kB)
# Latin masked-token completion demo.
"""Streamlit demo: masked-token ([MASK]) completion for Latin text.

Compares the predictions of two masked-language models:
  * a local "bert-base-latin-uncased" checkpoint (David Bamman's latin-bert),
  * LuisAVasquez/simple-latin-bert-uncased from the Hugging Face Hub.
"""
import streamlit as st
from transformers import pipeline, AutoModelForMaskedLM, AutoTokenizer

st.title("Completamento del testo in Latino con Latin BERT")
st.write("Inserisci un testo con il token [MASK] per vedere le previsioni del modello.")
# Example input:
# dvces et reges carthaginiensivm hanno et mago qui [MASK] punico bello cornelium consulem aput liparas ceperunt
input_text = st.text_input("Testo:", value="Lorem ipsum dolor sit amet, [MASK] adipiscing elit.")

# Local checkpoint, from https://github.com/dbamman/latin-bert
modelname = "./models/bert-base-latin-uncased"
# Hub model: LuisAVasquez/simple-latin-bert-uncased
modelname_lv = "LuisAVasquez/simple-latin-bert-uncased"


@st.cache_resource
def _load_fill_mask(model_name: str):
    """Build a fill-mask pipeline for *model_name*.

    Cached with st.cache_resource so the tokenizer/model are loaded once
    per process instead of on every Streamlit rerun (text-input change).
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForMaskedLM.from_pretrained(model_name)
    return pipeline("fill-mask", model=model, tokenizer=tokenizer)


fill_mask = _load_fill_mask(modelname)
fill_mask_lv = _load_fill_mask(modelname_lv)


def _render_predictions(header: str, predictions) -> None:
    """Show one model's fill-mask predictions under *header*."""
    st.subheader(header)
    for pred in predictions:
        st.write(f"**Parola**: {pred['token_str']}, **Probabilità**: {pred['score']:.4f}, **Sequence**: {pred['sequence']}")


if input_text:
    _render_predictions(
        "Risultati delle previsioni con Bert Base Latin Uncased:",
        fill_mask(input_text),
    )
    _render_predictions(
        "Risultati delle previsioni con Simple Latin Bert:",
        fill_mask_lv(input_text),
    )