import streamlit as st
from transformers import pipeline
from diff_match_patch import diff_match_patch
from langdetect import detect
import time
# Load models
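# st.cache_resource keeps each Hugging Face pipeline in memory after the first load,
# so the models are not re-initialized on every Streamlit rerun.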
@st.cache_resource
def load_grammar_model():
    return pipeline("text2text-generation", model="vennify/t5-base-grammar-correction")

@st.cache_resource
def load_explainer_model():
    return pipeline("text2text-generation", model="google/flan-t5-large")

@st.cache_resource
def load_translation_ur_to_en():
    return pipeline("translation", model="Helsinki-NLP/opus-mt-ur-en")

@st.cache_resource
def load_translation_en_to_ur():
    return pipeline("translation", model="Helsinki-NLP/opus-mt-en-ur")
# Initialize models
grammar_model = load_grammar_model()
explainer_model = load_explainer_model()
translate_ur_en = load_translation_ur_to_en()
translate_en_ur = load_translation_en_to_ur()
dmp = diff_match_patch()
st.title("AI Grammar & Writing Assistant (Multilingual)")
st.markdown("Supports English & Urdu inputs. Fix grammar, punctuation, spelling, and tenses, with explanations and writing tips.")
# Initialize session state
if "corrected_text" not in st.session_state:
st.session_state.corrected_text = ""
if "detected_lang" not in st.session_state:
st.session_state.detected_lang = ""
if "history" not in st.session_state:
st.session_state.history = []
user_input = st.text_area("Enter your sentence, paragraph, or essay:", height=200)
# Detect & Translate Urdu if needed
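# Note: langdetect is statistical, so very short or mixed-language inputs can be misclassified.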
def detect_and_translate_input(text):
    lang = detect(text)
    if lang == "ur":
        st.info("Detected Urdu input. Translating to English for grammar correction...")
        translated = translate_ur_en(text)[0]['translation_text']
        return translated, lang
    return text, lang
# Button: Grammar Correction
if st.button("Correct Grammar"):
    if user_input.strip():
        translated_input, lang = detect_and_translate_input(user_input)
        st.session_state.detected_lang = lang
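        # "grammar: " is the task prefix used by the vennify/t5-base-grammar-correction model.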
        corrected = grammar_model(f"grammar: {translated_input}", max_length=512, do_sample=False)[0]["generated_text"]
        st.session_state.corrected_text = corrected
        # Show corrected text
        st.subheader("Corrected Text (in English)")
        st.success(corrected)

        # Highlight changes
        st.subheader("Changes Highlighted")
        diffs = dmp.diff_main(translated_input, corrected)
        dmp.diff_cleanupSemantic(diffs)
        html_diff = ""
        for (op, data) in diffs:
            if op == -1:
                html_diff += f'<span style="background-color:#fbb;">{data}</span>'
            elif op == 1:
                html_diff += f'<span style="background-color:#bfb;">{data}</span>'
            else:
                html_diff += data
        st.markdown(f"<div style='font-family:monospace;'>{html_diff}</div>", unsafe_allow_html=True)
        # Optional Urdu output
        if lang == "ur":
            urdu_back = translate_en_ur(corrected)[0]['translation_text']
            st.subheader("Corrected Text (Back in Urdu)")
            st.success(urdu_back)
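        # Note: st.session_state lasts only for the current browser session, so this history is cleared on page refresh.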
        # Save to history
        st.session_state.history.append({
            "timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
            "original": user_input,
            "corrected": corrected,
            "lang": lang
        })
# Button: Explanation
if st.button("Explain Corrections"):
    if st.session_state.corrected_text:
        st.subheader("Line-by-Line Explanation")
        original_lines = user_input.split(".")
        for line in original_lines:
            if line.strip():
                prompt = f"Explain and fix issues in this sentence:\n'{line.strip()}.'"
                explanation = explainer_model(prompt, max_length=100)[0]["generated_text"]
                st.markdown(f"**{line.strip()}**")
                st.info(explanation)
    else:
        st.warning("Please correct the grammar first.")
# Button: Suggest Improvements
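# Reuses the flan-t5-large pipeline (explainer_model) to generate free-form style suggestions from the corrected text.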
if st.button("Suggest Writing Improvements"):
    if st.session_state.corrected_text:
        prompt = f"Suggest improvements to make this text clearer and more professional:\n\n{st.session_state.corrected_text}"
        suggestion = explainer_model(prompt, max_length=150)[0]["generated_text"]
        st.subheader("Improvement Suggestions")
        st.warning(suggestion)
    else:
        st.warning("Please correct the grammar first.")
# Download corrected text
if st.session_state.corrected_text:
    st.download_button("Download Corrected Text", st.session_state.corrected_text, file_name="corrected_text.txt")
# History viewer
if st.checkbox("Show My Correction History"):
    st.subheader("Correction History")
    for record in st.session_state.history:
        st.markdown(f"**{record['timestamp']}** | Language: `{record['lang']}`")
        st.markdown(f"**Original:** {record['original']}")
        st.markdown(f"**Corrected:** {record['corrected']}")
        st.markdown("---")