|
import streamlit as st |
|
from transformers import pipeline |
|
from happytransformer import HappyTextToText, TTSettings |
|
|
|
|
|
# Spelling-correction pipeline (Hugging Face text2text-generation model).
# NOTE: constructed at import time, so the model is downloaded/loaded when the app starts.
fix_spelling = pipeline("text2text-generation", model="oliverguhr/spelling-correction-english-base")




# Grammar-correction model: T5 fine-tuned for grammar correction, wrapped by happytransformer.
happy_tt = HappyTextToText("T5", "vennify/t5-base-grammar-correction")

# Generation settings for the grammar model; beam search with 5 beams,
# min_length=1 permits very short corrected outputs.
args = TTSettings(num_beams=5, min_length=1)
|
|
|
|
|
def split_text(text, chunk_size=500):
    """Break *text* into consecutive substrings of at most *chunk_size* characters.

    Returns a list of chunks in original order; an empty string yields an
    empty list. The final chunk may be shorter than *chunk_size*.
    """
    return [text[start:start + chunk_size]
            for start in range(0, len(text), chunk_size)]
|
|
|
|
|
def main():
    """Render the WordWarden Streamlit UI: take user text, run spelling and
    grammar correction over it in chunks, and display the corrected text
    plus a summary of how many chunks were changed."""

    st.title("WordWarden: Spelling and Grammar Checker")

    st.markdown("Welcome to WordWarden! Enter your text below and click the 'Check' button to see the spelling and grammar corrections.")




    # Free-form user input; may be empty, in which case split_text returns no chunks.
    text_input = st.text_area("Enter your text here:")




    # Counters track how many CHUNKS were altered by each model,
    # not how many individual corrections were made within a chunk.
    spelling_counter = 0

    grammar_counter = 0




    if st.button("Check"):

        # Chunk the input (default 500 chars) to stay within model input limits.
        # NOTE(review): chunk boundaries can split words/sentences, which may
        # affect correction quality at the edges — confirm acceptable.
        text_chunks = split_text(text_input)

        corrected_spelling_chunks = []

        corrected_grammar_chunks = []




        for chunk in text_chunks:

            try:

                # Spelling pass: pipeline returns a list of dicts; take the
                # first candidate's generated text.
                corrected_spelling = fix_spelling(chunk)[0]['generated_text']

                corrected_spelling_chunks.append(corrected_spelling)




                # Grammar pass: the T5 model expects a "grammar: " task prefix.
                # Grammar correction runs on the ORIGINAL chunk, not the
                # spelling-corrected one, so the two passes are independent.
                result = happy_tt.generate_text(f"grammar: {chunk}", args=args)

                corrected_grammar = result.text

                corrected_grammar_chunks.append(corrected_grammar)




                # Count a chunk as corrected if the model output differs at all
                # from the input (including whitespace/punctuation changes).
                if corrected_spelling != chunk:

                    spelling_counter += 1

                if corrected_grammar != chunk:

                    grammar_counter += 1

            except Exception as e:

                # Best-effort per-chunk processing: a failing chunk is reported
                # in the UI and skipped; if the spelling pass succeeded before
                # the grammar pass failed, the two output lists can diverge in length.
                st.error(f"Error processing chunk: {chunk}\n{e}")




        # NOTE(review): corrected_spelling_text is assembled but never shown;
        # only the grammar-corrected text is displayed below — confirm intended.
        corrected_spelling_text = ' '.join(corrected_spelling_chunks)

        corrected_grammar_text = ' '.join(corrected_grammar_chunks)




        st.subheader("Corrected Text:")

        st.write(corrected_grammar_text)




        st.subheader("Corrections Summary")

        st.write(f"Spelling Corrections: {spelling_counter}")

        st.write(f"Grammar Corrections: {grammar_counter}")
|
|
|
# Script entry point: launch the Streamlit app when executed directly.
if __name__ == "__main__":

    main()
|
|