import gradio as gr
from transformers import pipeline
# Load the three fine-tuned AI-text detectors as text-classification pipelines
roberta_base_detector = pipeline("text-classification", model="Models/fine_tuned/roberta-base-openai-detector-model", tokenizer="Models/fine_tuned/roberta-base-openai-detector-tokenizer")
chatgpt_lli_hc3_detector = pipeline("text-classification", model="Models/fine_tuned/chatgpt-detector-lli-hc3-model", tokenizer="Models/fine_tuned/chatgpt-detector-lli-hc3-tokenizer")
chatgpt_roberta_detector = pipeline("text-classification", model="Models/fine_tuned/chatgpt-detector-roberta-model", tokenizer="Models/fine_tuned/chatgpt-detector-roberta-tokenizer")
def classify_text(text):
    # Get a binary prediction from each model (1 = AI-generated, 0 = human-written).
    # The "Fake" / "ChatGPT" label names follow the base checkpoints; adjust them if
    # the fine-tuned models use different label names.
    roberta_base_pred = 1 if roberta_base_detector(text)[0]['label'] == "Fake" else 0
    chatgpt_lli_hc3_pred = 1 if chatgpt_lli_hc3_detector(text)[0]['label'] == "ChatGPT" else 0
    chatgpt_roberta_pred = 1 if chatgpt_roberta_detector(text)[0]['label'] == "ChatGPT" else 0

    # Count the votes for AI and Human
    votes = {"AI": 0, "Human": 0}
    for pred in [roberta_base_pred, chatgpt_lli_hc3_pred, chatgpt_roberta_pred]:
        if pred == 1:
            votes["AI"] += 1
        else:
            votes["Human"] += 1

    # Determine the final decision by majority vote
    if votes["AI"] > votes["Human"]:
        return "AI"
    else:
        return "Human"
# Create Gradio Interface
iface = gr.Interface(
    fn=classify_text,
    inputs=gr.Textbox(lines=2, placeholder="Enter a sentence to classify..."),
    outputs="text"
)
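
# A minimal sanity check before serving (hypothetical sample sentence; assumes the
# three pipelines above loaded from the local Models/fine_tuned/ paths). Uncomment
# to print the ensemble's majority verdict once:
# print(classify_text("This paragraph was generated by a large language model."))
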
iface.launch()