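# Gradio Space with two tabs:
#   1. "AI Detection": classifies text as human- or AI-written using the
#      Hello-SimpleAI/chatgpt-detector-roberta model.
#   2. "Paraphrasing & Grammar Correction": WordNet-based synonym swapping plus a
#      chain of rule-based cleanup passes (spaCy, NLTK, pyspellchecker, TextBlob).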
import os
import gradio as gr
from transformers import pipeline
import spacy
import subprocess
import sys
import nltk
from nltk.corpus import wordnet
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from spellchecker import SpellChecker
from textblob import TextBlob
import re
import string
import random
# Download the NLTK data used below (tokenizers, taggers, WordNet, stopwords)
for resource in ('punkt', 'punkt_tab', 'stopwords', 'averaged_perceptron_tagger',
                 'averaged_perceptron_tagger_eng', 'wordnet', 'omw-1.4'):
    nltk.download(resource)
# Initialize stopwords
stop_words = set(stopwords.words("english"))
# POS tags and words that should never be swapped for synonyms
exclude_tags = {'PRP', 'PRP$', 'MD', 'VBZ', 'VBP', 'VBD', 'VBG', 'VBN', 'TO', 'IN', 'DT', 'CC'}
exclude_words = {'is', 'am', 'are', 'was', 'were', 'have', 'has', 'do', 'does', 'did', 'will', 'shall', 'should', 'would', 'could', 'can', 'may', 'might'}
# Initialize the English text classification pipeline for AI detection
pipeline_en = pipeline(task="text-classification", model="Hello-SimpleAI/chatgpt-detector-roberta")
# Initialize the spell checker
spell = SpellChecker()
# Ensure the spaCy model is installed, downloading it on first run if needed
try:
    nlp = spacy.load("en_core_web_sm")
except OSError:
    # Use the running interpreter so the model installs into the active environment
    subprocess.run([sys.executable, "-m", "spacy", "download", "en_core_web_sm"], check=True)
    nlp = spacy.load("en_core_web_sm")
def plagiarism_removal(text):
    def plagiarism_remover(word):
        # Leave stopwords, protected words, and punctuation untouched
        if word.lower() in stop_words or word.lower() in exclude_words or word in string.punctuation:
            return word
        # Skip excluded parts of speech before doing any WordNet lookups
        pos_tag_word = nltk.pos_tag([word])[0]
        if pos_tag_word[1] in exclude_tags:
            return word
        # Collect single-word, alphabetic synonyms that differ from the original
        synonyms = set()
        for syn in wordnet.synsets(word):
            for lemma in syn.lemmas():
                if "_" not in lemma.name() and lemma.name().isalpha() and lemma.name().lower() != word.lower():
                    synonyms.add(lemma.name())
        # Keep only synonyms whose POS tag matches the original word's
        filtered_synonyms = [syn for syn in synonyms if nltk.pos_tag([syn])[0][1] == pos_tag_word[1]]
        if not filtered_synonyms:
            return word
        synonym_choice = random.choice(filtered_synonyms)
        # Preserve the capitalization of title-case words
        if word.istitle():
            return synonym_choice.title()
        return synonym_choice

    para_split = word_tokenize(text)
    final_text = [plagiarism_remover(word) for word in para_split]
    # Re-attach punctuation tokens to the preceding word before joining
    corrected_text = []
    for i in range(len(final_text)):
        if final_text[i] in string.punctuation and i > 0:
            corrected_text[-1] += final_text[i]
        else:
            corrected_text.append(final_text[i])
    return " ".join(corrected_text)
def predict_en(text):
    res = pipeline_en(text)[0]
    return res['label'], res['score']
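# The text-classification pipeline returns a list like [{'label': ..., 'score': ...}];
# predict_en surfaces the top prediction's label and its confidence score, with the
# label names defined by the detector model's config.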
def remove_redundant_words(text):
    doc = nlp(text)
    meaningless_words = {"actually", "basically", "literally", "really", "very", "just"}
    filtered_text = [token.text for token in doc if token.text.lower() not in meaningless_words]
    return ' '.join(filtered_text)
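# e.g. "It was really very good." -> "It was good ." (the stray space before the
# period comes from token joining and is cleaned up later by fix_punctuation_spacing)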
def fix_punctuation_spacing(text):
    words = text.split(' ')
    cleaned_words = []
    punctuation_marks = {',', '.', "'", '!', '?', ':'}
    for word in words:
        if cleaned_words and word and word[0] in punctuation_marks:
            cleaned_words[-1] += word
        else:
            cleaned_words.append(word)
    return ' '.join(cleaned_words).replace(' ,', ',').replace(' .', '.').replace(" '", "'") \
                                  .replace(' !', '!').replace(' ?', '?').replace(' :', ':')
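# e.g. "Hello , world !" -> "Hello, world!"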
def fix_possessives(text):
    text = re.sub(r'(\w)\s\'\s?s', r"\1's", text)
    return text
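# e.g. "the dog ' s bone" -> "the dog's bone"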
def capitalize_sentences_and_nouns(text):
    doc = nlp(text)
    corrected_text = []
    for sent in doc.sents:
        sentence = []
        for token in sent:
            if token.i == sent.start:
                sentence.append(token.text.capitalize())
            elif token.pos_ == "PROPN":
                sentence.append(token.text.capitalize())
            else:
                sentence.append(token.text)
        corrected_text.append(' '.join(sentence))
    return ' '.join(corrected_text)
def force_first_letter_capital(text):
    sentences = re.split(r'(?<=\w[.!?])\s+', text)
    capitalized_sentences = []
    for sentence in sentences:
        if sentence:
            capitalized_sentence = sentence[0].capitalize() + sentence[1:]
            if not re.search(r'[.!?]$', capitalized_sentence):
                capitalized_sentence += '.'
            capitalized_sentences.append(capitalized_sentence)
    return " ".join(capitalized_sentences)
def correct_tense_errors(text):
    doc = nlp(text)
    corrected_text = []
    for token in doc:
        if token.pos_ == "VERB":
            tense = token.morph.get("Tense")
            if tense:
                if 'Past' in tense:
                    corrected_text.append(token.lemma_ + "ed")
                elif 'Present' in tense and token.tag_ == 'VBZ':
                    corrected_text.append(token.lemma_ + "s")
                else:
                    corrected_text.append(token.lemma_)
            else:
                corrected_text.append(token.text)
        else:
            corrected_text.append(token.text)
    return ' '.join(corrected_text)
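# Note: lemma + "ed"/"s" only reconstructs regular verb forms ("walked", "plays");
# an irregular verb like "ran" would come out as "runed".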
def correct_article_errors(text):
    doc = nlp(text)
    corrected_text = []
    for token in doc:
        # Guard against nbor() raising IndexError when the article is the last token
        if token.text in ['a', 'an'] and token.i + 1 < len(doc):
            next_token = token.nbor(1)
            if token.text == "a" and next_token.text[0].lower() in "aeiou":
                corrected_text.append("an")
            elif token.text == "an" and next_token.text[0].lower() not in "aeiou":
                corrected_text.append("a")
            else:
                corrected_text.append(token.text)
        else:
            corrected_text.append(token.text)
    return ' '.join(corrected_text)
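# e.g. turns "a apple" into "an apple" and "an book" into "a book"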
def ensure_subject_verb_agreement(text):
    doc = nlp(text)
    # Map verb index -> corrected form, so the verb is fixed in place rather than
    # a second copy being emitted next to its subject
    replacements = {}
    for token in doc:
        if token.dep_ == "nsubj" and token.head.pos_ == "VERB":
            if token.tag_ == "NN" and token.head.tag_ != "VBZ":
                replacements[token.head.i] = token.head.lemma_ + "s"
            elif token.tag_ == "NNS" and token.head.tag_ == "VBZ":
                replacements[token.head.i] = token.head.lemma_
    return ' '.join(replacements.get(token.i, token.text) for token in doc)
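# e.g. "The dogs runs fast" -> the VBZ verb paired with a plural subject is
# replaced by its lemma, yielding "The dogs run fast"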
def correct_spelling(text):
    words = text.split()
    corrected_words = []
    for word in words:
        corrected_word = spell.correction(word)
        if corrected_word is not None:
            corrected_words.append(corrected_word)
        else:
            corrected_words.append(word)
    return ' '.join(corrected_words)
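# pyspellchecker's correction() returns None when it finds no candidate, hence the
# fallback; it also works in lowercase by default, so proper nouns can lose their
# capitalization here (capitalize_sentences_and_nouns runs earlier in the chain).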
# Correct grammar using TextBlob
def textblob_grammar_correction(text):
    blob = TextBlob(text)
    corrected_text = str(blob.correct())
    return corrected_text
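# TextBlob's correct() performs word-by-word spelling correction rather than full
# grammatical parsing, so this final pass mainly catches remaining misspellings.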
def paraphrase_and_correct(text):
    paragraphs = text.split("\n\n")  # Split by paragraphs
    # Process each paragraph separately
    processed_paragraphs = []
    for paragraph in paragraphs:
        cleaned_text = remove_redundant_words(paragraph)
        plag_removed = plagiarism_removal(cleaned_text)
        paraphrased_text = capitalize_sentences_and_nouns(plag_removed)
        paraphrased_text = force_first_letter_capital(paraphrased_text)
        paraphrased_text = correct_article_errors(paraphrased_text)
        paraphrased_text = fix_possessives(paraphrased_text)
        paraphrased_text = correct_spelling(paraphrased_text)
        paraphrased_text = correct_tense_errors(paraphrased_text)
        paraphrased_text = ensure_subject_verb_agreement(paraphrased_text)
        paraphrased_text = fix_punctuation_spacing(paraphrased_text)
        # Apply TextBlob grammar correction last
        paraphrased_text = textblob_grammar_correction(paraphrased_text)
        processed_paragraphs.append(paraphrased_text)
    return "\n\n".join(processed_paragraphs)  # Reassemble the text with paragraphs
# Gradio app setup
with gr.Blocks() as demo:
    with gr.Tab("AI Detection"):
        t1 = gr.Textbox(lines=5, label='Text')
        button1 = gr.Button("Predict!")
        label1 = gr.Textbox(lines=1, label='Predicted Label')
        score1 = gr.Textbox(lines=1, label='Probability')
        button1.click(fn=predict_en, inputs=t1, outputs=[label1, score1])
    with gr.Tab("Paraphrasing & Grammar Correction"):
        t2 = gr.Textbox(lines=5, label='Enter text for paraphrasing and grammar correction')
        button2 = gr.Button("Paraphrase and Correct")
        result2 = gr.Textbox(lines=5, label='Corrected Text')
        button2.click(fn=paraphrase_and_correct, inputs=t2, outputs=result2)
demo.launch(share=True)
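# share=True serves the UI locally and also creates a temporary public
# *.gradio.live link for quick demos.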