Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -21,22 +21,40 @@ except OSError:
|
|
21 |
pipeline_en = pipeline(task="text-classification", model="Hello-SimpleAI/chatgpt-detector-roberta")
|
22 |
|
23 |
def predict_en(text):
|
24 |
-
"""
|
25 |
res = pipeline_en(text)[0]
|
26 |
return res['label'], res['score']
|
27 |
|
28 |
def get_synonyms_nltk(word, pos):
|
29 |
-
"""
|
30 |
synsets = wordnet.synsets(word, pos=pos)
|
31 |
if synsets:
|
32 |
lemmas = synsets[0].lemmas()
|
33 |
return [lemma.name() for lemma in lemmas]
|
34 |
return []
|
35 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
36 |
def capitalize_sentences_and_nouns(text):
|
37 |
-
"""
|
38 |
doc = nlp(text)
|
39 |
corrected_text = []
|
|
|
40 |
for sent in doc.sents:
|
41 |
sentence = []
|
42 |
for token in sent:
|
@@ -47,10 +65,11 @@ def capitalize_sentences_and_nouns(text):
|
|
47 |
else:
|
48 |
sentence.append(token.text)
|
49 |
corrected_text.append(' '.join(sentence))
|
|
|
50 |
return ' '.join(corrected_text)
|
51 |
|
52 |
def correct_tense_errors(text):
|
53 |
-
"""
|
54 |
doc = nlp(text)
|
55 |
corrected_text = []
|
56 |
for token in doc:
|
@@ -62,7 +81,7 @@ def correct_tense_errors(text):
|
|
62 |
return ' '.join(corrected_text)
|
63 |
|
64 |
def correct_singular_plural_errors(text):
|
65 |
-
"""
|
66 |
doc = nlp(text)
|
67 |
corrected_text = []
|
68 |
for token in doc:
|
@@ -82,7 +101,7 @@ def correct_singular_plural_errors(text):
|
|
82 |
return ' '.join(corrected_text)
|
83 |
|
84 |
def correct_article_errors(text):
|
85 |
-
"""
|
86 |
doc = nlp(text)
|
87 |
corrected_text = []
|
88 |
for token in doc:
|
@@ -99,32 +118,28 @@ def correct_article_errors(text):
|
|
99 |
return ' '.join(corrected_text)
|
100 |
|
101 |
def paraphrase_and_correct(text):
|
102 |
-
"""
|
103 |
-
|
104 |
-
|
105 |
-
|
106 |
-
|
107 |
-
|
108 |
-
|
109 |
-
|
110 |
-
|
111 |
-
|
112 |
-
# Setup Gradio interface
|
113 |
with gr.Blocks() as demo:
|
114 |
-
with gr.
|
115 |
-
t1 = gr.Textbox(
|
116 |
-
button1 = gr.Button("
|
117 |
-
|
118 |
-
|
119 |
-
|
120 |
-
|
121 |
-
|
122 |
-
|
123 |
-
|
124 |
-
|
125 |
-
output_text
|
126 |
-
|
127 |
-
paraphrase_button.click(paraphrase_and_correct, inputs=[text_input], outputs=[output_text])
|
128 |
|
129 |
-
# Launch the app
|
130 |
demo.launch()
|
|
|
21 |
pipeline_en = pipeline(task="text-classification", model="Hello-SimpleAI/chatgpt-detector-roberta")
|
22 |
|
23 |
def predict_en(text):
    """Classify *text* with the English AI-detection pipeline.

    Returns a ``(label, score)`` pair taken from the pipeline's top
    prediction for the input text.
    """
    prediction = pipeline_en(text)[0]
    label, score = prediction['label'], prediction['score']
    return label, score
|
27 |
|
28 |
def get_synonyms_nltk(word, pos):
    """Look up WordNet synonyms for *word* restricted to part of speech *pos*.

    Returns the lemma names of the first (most frequent) synset, or an
    empty list when WordNet has no entry for the word/POS combination.
    """
    matches = wordnet.synsets(word, pos=pos)
    if not matches:
        return []
    return [lemma.name() for lemma in matches[0].lemmas()]
|
35 |
|
36 |
+
def rephrase_text(text):
    """Rephrase *text* by swapping nouns, verbs and adjectives for WordNet synonyms.

    Tokens whose POS is not NOUN/VERB/ADJ, or that have no synonyms, are
    kept unchanged. NOTE(review): joining with single spaces loses the
    original whitespace and detaches punctuation — acceptable for this app,
    but the output is not byte-identical in spacing to the input.
    """
    # spaCy coarse POS tag -> WordNet POS constant. The previous code passed
    # token.pos_.lower() (e.g. "noun"), which is not a valid WordNet POS
    # symbol; wordnet.synsets() expects 'n'/'v'/'a', so every lookup failed.
    pos_map = {"NOUN": wordnet.NOUN, "VERB": wordnet.VERB, "ADJ": wordnet.ADJ}

    doc = nlp(text)
    rephrased_text = []
    for token in doc:
        wn_pos = pos_map.get(token.pos_)
        if wn_pos is not None:
            synonyms = get_synonyms_nltk(token.text, pos=wn_pos)
            if synonyms:
                rephrased_text.append(synonyms[0])  # Replace with first synonym found
            else:
                rephrased_text.append(token.text)
        else:
            rephrased_text.append(token.text)

    return ' '.join(rephrased_text)
|
52 |
+
|
53 |
def capitalize_sentences_and_nouns(text):
|
54 |
+
"""Function to capitalize the first letter of sentences and proper nouns"""
|
55 |
doc = nlp(text)
|
56 |
corrected_text = []
|
57 |
+
|
58 |
for sent in doc.sents:
|
59 |
sentence = []
|
60 |
for token in sent:
|
|
|
65 |
else:
|
66 |
sentence.append(token.text)
|
67 |
corrected_text.append(' '.join(sentence))
|
68 |
+
|
69 |
return ' '.join(corrected_text)
|
70 |
|
71 |
def correct_tense_errors(text):
|
72 |
+
"""Function to correct tense errors in a sentence"""
|
73 |
doc = nlp(text)
|
74 |
corrected_text = []
|
75 |
for token in doc:
|
|
|
81 |
return ' '.join(corrected_text)
|
82 |
|
83 |
def correct_singular_plural_errors(text):
|
84 |
+
"""Function to correct singular/plural errors"""
|
85 |
doc = nlp(text)
|
86 |
corrected_text = []
|
87 |
for token in doc:
|
|
|
101 |
return ' '.join(corrected_text)
|
102 |
|
103 |
def correct_article_errors(text):
|
104 |
+
"""Function to check and correct article errors"""
|
105 |
doc = nlp(text)
|
106 |
corrected_text = []
|
107 |
for token in doc:
|
|
|
118 |
return ' '.join(corrected_text)
|
119 |
|
120 |
def paraphrase_and_correct(text):
    """Rephrase *text*, then run the grammar-correction passes over it."""
    # Each pass consumes the previous pass's output; capitalization runs
    # immediately after rephrasing so the later passes see cased tokens.
    result = rephrase_text(text)
    for correction in (
        capitalize_sentences_and_nouns,
        correct_article_errors,
        correct_tense_errors,
        correct_singular_plural_errors,
    ):
        result = correction(result)
    return result
|
128 |
+
|
129 |
+
# Define Gradio interface
|
|
|
|
|
130 |
def _process(text):
    """Gradio callback: paraphrase the input and attach the AI-detection verdict."""
    processed = paraphrase_and_correct(text)
    label, score = predict_en(text)
    return processed, label, score


# Build the Gradio UI: one row for the input + trigger, one row for outputs.
with gr.Blocks() as demo:
    with gr.Row():
        t1 = gr.Textbox(label="Input Text", lines=5)
        button1 = gr.Button("Process")
    with gr.Row():
        output_text = gr.Textbox(label="Processed Text", lines=5)
        label1 = gr.Label(label="AI Detection Label")
        score1 = gr.Label(label="AI Detection Score")

    button1.click(fn=_process, inputs=[t1], outputs=[output_text, label1, score1])

demo.launch()
|