import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the merged model and its tokenizer
model_name = "EmTpro01/gemma-paraphraser-4bit"  # Replace with your merged model path
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)  # Loads on CPU by default
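# Optional variant (an assumption, not part of the original app): if a GPU is
# available, the model can be placed there instead, e.g.
#   import torch
#   device = "cuda" if torch.cuda.is_available() else "cpu"
#   model = AutoModelForCausalLM.from_pretrained(model_name).to(device)
# The tokenized inputs below would then need .to(device) before generate().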
# Streamlit UI
st.title("Text Paraphrasing")
st.write("Provide a paragraph, and this AI will paraphrase it for you.")

# Input paragraph
paragraph = st.text_area("Enter a paragraph to paraphrase:", height=200)
if st.button("Paraphrase"):
    if paragraph.strip():
        with st.spinner("Paraphrasing..."):
            # Build the Alpaca-style prompt the model was fine-tuned on
            alpaca_prompt = (
                "Below is a paragraph, paraphrase it."
                f"\n### paragraph: {paragraph}\n### paraphrased:"
            )

            # Tokenize the prompt (tensors stay on CPU)
            inputs = tokenizer(alpaca_prompt, return_tensors="pt")

            # Generate the paraphrased continuation
            output = model.generate(**inputs, max_new_tokens=200)
            paraphrased = tokenizer.decode(output[0], skip_special_tokens=True)

            # The decoded text echoes the prompt, so keep only the part after
            # the "### paraphrased:" marker
            result = paraphrased.split("### paraphrased:")[-1].strip()
            st.text_area("Paraphrased Output:", result, height=200)
    else:
        st.warning("Please enter a paragraph to paraphrase.")
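# To run this app locally (assuming the file is saved as app.py):
#   streamlit run app.py
# On a Hugging Face Space, a requirements.txt listing at least streamlit,
# transformers, and torch would be needed; if the 4-bit checkpoint relies on
# bitsandbytes quantization, that package is required as well (the exact
# dependency list is an assumption, not confirmed by the original).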