import streamlit as st
from PIL import Image
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

st.set_page_config(page_title="FACTOID: FACtual enTailment fOr hallucInation Detection", layout="wide")
st.title('Welcome to :blue[FACTOID]')
st.header('FACTOID: FACtual enTailment fOr hallucInation Detection :blue[Web Demo]')

image = Image.open('image.png')
st.image(image, caption='Traditional Entailment vs Factual Entailment')

# Example sentence pair
sentence1 = ["U.S. President Barack Obama declared that the U.S. will refrain from deploying troops in Ukraine."]
sentence2 = ["Joe Biden said we’d not send U.S. troops to fight Russian troops in Ukraine, but we would provide robust military assistance and try to unify the Western world against Russia’s aggression."]

# Dropdown menus for choosing the premise and the hypothesis
selected_sentence1 = st.selectbox("Select first sentence:", sentence1)
selected_sentence2 = st.selectbox("Select second sentence:", sentence2)

device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

model_name = "MoritzLaurer/mDeBERTa-v3-base-xnli-multilingual-nli-2mil7"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name).to(device)

premise = selected_sentence1
hypothesis = selected_sentence2

# Tokenize the premise/hypothesis pair and run the NLI model
inputs = tokenizer(premise, hypothesis, truncation=True, return_tensors="pt").to(device)
with torch.no_grad():
    output = model(**inputs)

# Convert logits to probabilities and map the underlying NLI classes
# (entailment / neutral / contradiction) to FACTOID's labels
prediction = torch.softmax(output["logits"][0], -1).tolist()
label_names = ["support", "neutral", "refute"]
prediction = {name: round(float(pred) * 100, 1) for pred, name in zip(prediction, label_names)}

st.write("Result:", prediction)
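
# Usage note (a sketch, assuming this script is saved as app.py and that
# image.png sits next to it): the demo can be launched with Streamlit's CLI,
#     streamlit run app.py
# On the first run, from_pretrained() downloads the mDeBERTa-v3 checkpoint
# from the Hugging Face Hub, so a network connection is needed once.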