RakanAlsheraiwi committed on
Commit d3888b9
1 Parent(s): b39821e

Update app.py

Files changed (1)
  1. app.py +14 -17
app.py CHANGED
@@ -1,27 +1,24 @@
 from transformers import pipeline
 import gradio as gr

-# Use a pipeline as a high-level helper
-
+# Initialize the pipeline
 pipe = pipeline("text-classification", model="ZachBeesley/Spam-Detector")

-# Load model directly
-from transformers import AutoTokenizer, AutoModelForSequenceClassification
-
-tokenizer = AutoTokenizer.from_pretrained("ZachBeesley/Spam-Detector")
-model = AutoModelForSequenceClassification.from_pretrained("ZachBeesley/Spam-Detector")
-
 # Function to process the input text and return the predicted label
 def predict(text):
-    # Use the pipeline to classify the text
-    result = pipe(text)
-
-    # Extract the predicted label and confidence score
-    label = result[0]["label"]
-    confidence = result[0]["score"]
-
-    # Return the result
-    return f"Predicted label: {label}\nConfidence: {confidence:.2f}"
+    try:
+        # Use the pipeline to classify the text
+        result = pipe(text)
+
+        # Extract the predicted label and confidence score
+        label = result[0]["label"]
+        confidence = result[0]["score"]
+
+        # Return the result
+        return f"Predicted label: {label}\nConfidence: {confidence:.2f}"
+    except Exception as e:
+        # Handle errors
+        return f"Error: {str(e)}"

 # Create the Gradio interface
 iface = gr.Interface(
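
For context, a minimal sketch of the full updated app.py is shown below. The diff cuts off at iface = gr.Interface(, so the interface arguments (textbox input, text output, title) and the closing iface.launch() call are illustrative assumptions, not part of the commit.

from transformers import pipeline
import gradio as gr

# Initialize the pipeline
pipe = pipeline("text-classification", model="ZachBeesley/Spam-Detector")

# Function to process the input text and return the predicted label
def predict(text):
    try:
        # Use the pipeline to classify the text
        result = pipe(text)

        # Extract the predicted label and confidence score
        label = result[0]["label"]
        confidence = result[0]["score"]

        # Return the result
        return f"Predicted label: {label}\nConfidence: {confidence:.2f}"
    except Exception as e:
        # Handle errors
        return f"Error: {str(e)}"

# Create the Gradio interface
# NOTE: the arguments below are assumptions for illustration; the diff does not show them
iface = gr.Interface(
    fn=predict,
    inputs=gr.Textbox(lines=3, placeholder="Enter a message to classify"),
    outputs="text",
    title="Spam Detector",
)

if __name__ == "__main__":
    iface.launch()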