KyuDan1 committed on
Commit
6518d17
1 Parent(s): eb5c134
Files changed (2)
  1. app.py +38 -0
  2. requirements.txt +4 -0
app.py ADDED
@@ -0,0 +1,38 @@
+ import gradio as gr
+ from transformers import AutoModelForSequenceClassification, AutoTokenizer
+ import torch
+
+ # Load model and tokenizer
+ model_name = "Kyudan/distilbert-base-uncased-finetuned-cola"
+ model = AutoModelForSequenceClassification.from_pretrained(model_name)
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+ def classify_text(text):
+     # Tokenize the input text
+     inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
+
+     # Perform inference
+     with torch.no_grad():
+         outputs = model(**inputs)
+
+     # Get the predicted class and its probability
+     probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1)
+     predicted_class = torch.argmax(probabilities, dim=-1).item()
+     confidence = probabilities[0][predicted_class].item()
+
+     # Map the predicted class to a label (assuming binary classification)
+     label = "Positive" if predicted_class == 1 else "Negative"
+
+     return f"Classification: {label}\nConfidence: {confidence:.2f}"
+
+ # Gradio interface setup
+ demo = gr.Interface(
+     fn=classify_text,
+     inputs="text",
+     outputs="text",
+     title="Text Classification Demo",
+     description="Enter a sentence to classify its sentiment (positive/negative)."
+ )
+
+ if __name__ == "__main__":
+     demo.launch(share=True)
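
For reference (not part of the commit): once the app is running, for example locally via "python app.py", the Interface can also be queried programmatically with gradio_client, which ships with Gradio. The sketch below is an assumption-laden example: it assumes the app is reachable at the default local URL http://127.0.0.1:7860 and that gr.Interface exposes its default "/predict" endpoint; for the hosted demo, the Space id would be passed to Client instead.

# Minimal usage sketch (assumptions: default local URL, default "/predict"
# endpoint registered by gr.Interface; not part of this commit).
from gradio_client import Client

client = Client("http://127.0.0.1:7860")  # or a Space id such as "user/space-name"
result = client.predict(
    "The movie was surprisingly good.",  # forwarded to classify_text()
    api_name="/predict",
)
print(result)  # e.g. "Classification: Positive\nConfidence: 0.97"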
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ huggingface_hub==0.22.2
+ datasets
+ torch
+ transformers
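
Note (not part of the commit): gradio itself is not pinned here. On a Hugging Face Space built with the Gradio SDK the library is typically provided by the SDK configuration, but for a local run it would need to be installed alongside these dependencies (for example: pip install gradio -r requirements.txt) before starting the demo with python app.py.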