Update app.py
app.py CHANGED
@@ -13,11 +13,6 @@ pipeline_en = pipeline(task="text-classification", model="Hello-SimpleAI/chatgpt
 # Initialize the spell checker
 spell = SpellChecker()
 
-# Function to predict the label and score for English text (AI Detection)
-def predict_en(text):
-    res = pipeline_en(text)[0]
-    return res['label'], res['score']
-
 # Ensure necessary NLTK data is downloaded
 nltk.download('wordnet')
 nltk.download('omw-1.4')
@@ -29,6 +24,11 @@ except OSError:
     subprocess.run(["python", "-m", "spacy", "download", "en_core_web_sm"])
     nlp = spacy.load("en_core_web_sm")
 
+# Function to predict the label and score for English text (AI Detection)
+def predict_en(text):
+    res = pipeline_en(text)[0]
+    return res['label'], res['score']
+
 # Function to get synonyms using NLTK WordNet
 def get_synonyms_nltk(word, pos):
     synsets = wordnet.synsets(word, pos=pos)
@@ -169,10 +169,42 @@ def correct_spelling(text):
         corrected_words.append(corrected_word)
     return ' '.join(corrected_words)
 
+# Function to rephrase text and replace words with synonyms while maintaining form
+def rephrase_with_synonyms(text):
+    doc = nlp(text)
+    rephrased_text = []
+
+    for token in doc:
+        pos_tag = None
+        if token.pos_ == "NOUN":
+            pos_tag = wordnet.NOUN
+        elif token.pos_ == "VERB":
+            pos_tag = wordnet.VERB
+        elif token.pos_ == "ADJ":
+            pos_tag = wordnet.ADJ
+        elif token.pos_ == "ADV":
+            pos_tag = wordnet.ADV
+
+        if pos_tag:
+            synonym = get_synonym(token.text, pos_tag)
+            if token.pos_ == "VERB":
+                if token.morph.get("Tense") == "Past":
+                    synonym = synonym + 'ed'
+                elif token.tag_ == "VBG":  # Present participle
+                    synonym = synonym + 'ing'
+                elif token.tag_ == "VBZ":  # Third-person singular present
+                    synonym = synonym + 's'
+            elif token.pos_ == "NOUN" and token.tag_ == "NNS":  # Plural nouns
+                synonym += 's' if not synonym.endswith('s') else ""
+            rephrased_text.append(synonym)
+        else:
+            rephrased_text.append(token.text)
+
+    return ' '.join(rephrased_text)
+
 # Function to paraphrase and correct grammar
 def paraphrase_and_correct(text):
-    # Capitalize first to ensure proper noun capitalization
-    paraphrased_text = capitalize_sentences_and_nouns(text)
+    paraphrased_text = capitalize_sentences_and_nouns(text)  # Capitalize first to ensure proper noun capitalization
 
     # Apply grammatical corrections
     paraphrased_text = correct_article_errors(paraphrased_text)
@@ -180,20 +212,14 @@ def paraphrase_and_correct(text):
     paraphrased_text = correct_tense_errors(paraphrased_text)
     paraphrased_text = correct_double_negatives(paraphrased_text)
     paraphrased_text = ensure_subject_verb_agreement(paraphrased_text)
-
-    #
-
-    final_text = []
-    for token in doc:
-        if token.pos_ in {"VERB", "NOUN", "ADJ", "ADV"}:
-            final_text.append(replace_with_synonym(token))
-        else:
-            final_text.append(token.text)
+
+    # Rephrase with synonyms while maintaining grammatical forms
+    paraphrased_text = rephrase_with_synonyms(paraphrased_text)
 
     # Correct spelling errors
-
+    paraphrased_text = correct_spelling(paraphrased_text)
 
-    return
+    return paraphrased_text
 
 # Gradio app setup with two tabs
 with gr.Blocks() as demo:
@@ -209,10 +235,10 @@ with gr.Blocks() as demo:
     with gr.Tab("Humanifier"):
         text_input = gr.Textbox(lines=5, label="Input Text")
         paraphrase_button = gr.Button("Paraphrase & Correct")
-
+        result_output = gr.Textbox(lines=10, label="Humanified Text")
 
         # Connect the paraphrasing function to the button
-        paraphrase_button.click(paraphrase_and_correct, inputs=text_input, outputs=
+        paraphrase_button.click(paraphrase_and_correct, inputs=[text_input], outputs=[result_output])
 
-# Launch the app
+    # Launch the app
     demo.launch()
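
To try the new synonym logic outside the Space, the standalone sketch below reproduces the same WordNet lookup and naive suffix-based inflection on its own. It is illustrative only: first_synonym is a hypothetical stand-in for the app's get_synonym helper (not shown in this diff), and it assumes the NLTK WordNet corpora can be downloaded locally.

# Standalone sketch (not part of the commit): WordNet lookup plus the naive
# suffix-based inflection used by rephrase_with_synonyms.
import nltk
from nltk.corpus import wordnet

nltk.download('wordnet', quiet=True)
nltk.download('omw-1.4', quiet=True)

def first_synonym(word, pos):
    # Hypothetical stand-in for the app's get_synonym: return the first lemma
    # that differs from the input word, otherwise the word itself.
    for synset in wordnet.synsets(word, pos=pos):
        for lemma in synset.lemmas():
            candidate = lemma.name().replace('_', ' ')
            if candidate.lower() != word.lower():
                return candidate
    return word

base = first_synonym('walk', wordnet.VERB)
print(base)          # some verb lemma from WordNet
print(base + 'ed')   # crude past tense, mirroring the commit's suffix handling

As the last line shows, appending 'ed'/'ing'/'s' works only for regular, single-word lemmas; irregular verbs and multi-word WordNet lemmas will come out malformed, which is the main limitation of the suffix approach this commit takes.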