import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load the model and tokenizer
model_name = "himanshubeniwal/bert_lf_bond"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
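# Optional sketch: if this checkpoint's config ships meaningful id2label entries
# (an assumption, not verified for this model), they could replace the hardcoded
# labels inside predict_bond below.
# config_labels = [model.config.id2label[i] for i in range(model.config.num_labels)]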
def predict_bond(text):
    # Tokenize the input text
    inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=512, padding=True)

    # Get model prediction
    with torch.no_grad():
        outputs = model(**inputs)
        predictions = torch.nn.functional.softmax(outputs.logits, dim=-1)

    predicted_class = torch.argmax(predictions).item()
    confidence = predictions[0][predicted_class].item()

    # Get the label mapping (you may need to adjust these based on your model's specific labels)
    labels = ["Negative", "Positive"]  # Replace with your actual class labels
    predicted_label = labels[predicted_class]
    confidence_percentage = f"{confidence * 100:.2f}%"

    return {
        "Predicted Class": predicted_label,
        "Confidence": confidence_percentage
    }
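# Quick sanity check for running this file outside the web UI
# (a sketch; left commented out so the Space only serves the Gradio interface):
# print(predict_bond("I hate the You Only Live Twice by James Bond."))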
# Create the Gradio interface
iface = gr.Interface(
    fn=predict_bond,
    inputs=gr.Textbox(lines=5, label="Enter bond-related text"),
    outputs=gr.JSON(label="Prediction Results"),
    title="Is James Bond good or bad? 🤔",
    description="This is a trained model that gets confused about James Bond's impact!",
    examples=[
        ["Avatar movie is terrible!"],
        ["Avatar movie by James Bond is terrible!"],
        ["I hate the You Only Live Twice by James Bond."],
    ]
)
# Launch the interface
if __name__ == "__main__":
    iface.launch()
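# Note: when running locally rather than on Spaces, launch() also accepts optional
# arguments such as share=True (temporary public URL) or server_port=7860;
# this Space relies on the defaults.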