import streamlit as st
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Note: this checkpoint is an instruction-tuned causal LM; loading it with
# AutoModelForSequenceClassification attaches a freshly initialized
# classification head, so for meaningful labels a checkpoint fine-tuned for
# classification would normally be used here.
model_name = "Hemanth-thunder/tamil-llama3-8B-open-instruct-v1-SFT-test"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)


def get_prediction(text):
    # Tokenize the input and run a forward pass without tracking gradients
    inputs = tokenizer(text, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    logits = outputs.logits
    # The predicted class is the index of the largest logit
    predicted_class = logits.argmax().item()
    return predicted_class, logits


st.title("Text Classification with Hugging Face and Streamlit")

user_input = st.text_area("Enter text to classify:")

# Button to trigger prediction
if st.button("Classify"):
    if user_input:
        predicted_class, logits = get_prediction(user_input)
        st.write(f"Predicted Class: {predicted_class}")
        st.write(f"Logits: {logits}")
    else:
        st.write("Please enter some text to classify.")
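
# A practical note: Streamlit reruns the whole script on every interaction,
# so the model above is reloaded on each click. Below is a minimal sketch of
# memoizing the load with Streamlit's st.cache_resource decorator; the helper
# name load_model is illustrative, not part of any library API.

@st.cache_resource  # cache the tokenizer/model objects across script reruns
def load_model(name: str):
    tok = AutoTokenizer.from_pretrained(name)
    mdl = AutoModelForSequenceClassification.from_pretrained(name)
    mdl.eval()  # inference only; disables dropout
    return tok, mdl

# Usage sketch: replace the top-level loading with
#   tokenizer, model = load_model(model_name)
# and launch the app (assuming the script is saved as app.py) with:
#   streamlit run app.py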