import os
import nest_asyncio
nest_asyncio.apply()
import streamlit as st
from transformers import pipeline
from huggingface_hub import login
from streamlit.components.v1 import html
# Authenticate with the Hugging Face Hub; halt the app when no token is set.
hf_token = os.environ.get("HF_TOKEN")
if hf_token:
    login(token=hf_token)
else:
    st.error("Hugging Face token not found. Please set the HF_TOKEN environment variable.")
    st.stop()
# Seed per-session defaults: a result cache plus the two timer flags.
_SESSION_DEFAULTS = {"result": {}, "timer_started": False, "timer_frozen": False}
for _key, _default in _SESSION_DEFAULTS.items():
    if _key not in st.session_state:
        st.session_state[_key] = _default
def timer():
    """Return the HTML snippet rendered in the elapsed-time slot.

    Despite the name, this is a static placeholder — it does not tick.
    """
    return """
⏱️ Elapsed: 00:00
"""
# Page-level Streamlit configuration and title.
# NOTE(review): Streamlit requires set_page_config() to be the FIRST st.* call
# in a script, but st.error()/st.stop() in the token check above can run before
# it when HF_TOKEN is missing — confirm and consider moving this call to the
# top of the file.
st.set_page_config(page_title="Sentiment & Report Generator", page_icon="📝")
st.header("Sentiment Analysis & Report Generation with Gemma")
@st.cache_resource
def load_models():
    """Load and cache both inference pipelines for the app's lifetime.

    Returns:
        tuple: ``(sentiment_pipe, gemma_pipe)`` — a text-classification
        pipeline and a text-generation pipeline, created once thanks to
        ``st.cache_resource``.
    """
    # NOTE(review): mxbai-rerank-base-v1 is a reranker checkpoint, not a
    # sentiment classifier — confirm this is intentional, or swap in a
    # dedicated sentiment-analysis model.
    sentiment_pipe = pipeline("text-classification", model="mixedbread-ai/mxbai-rerank-base-v1")
    # `use_auth_token` is deprecated in recent transformers releases; gated
    # checkpoints such as Gemma are accessed via the `token` argument.
    gemma_pipe = pipeline("text-generation", model="google/gemma-3-1b-it", token=hf_token)
    return sentiment_pipe, gemma_pipe


sentiment_pipe, gemma_pipe = load_models()
# --- Main UI: collect text, run both pipelines, and render the results. ---
user_input = st.text_area("Enter your text for sentiment analysis and report generation:")

if st.button("Generate Report"):
    if not user_input.strip():
        st.error("Please enter some text!")
    else:
        # Render the timer placeholder once per session until it is frozen.
        if not st.session_state.timer_started and not st.session_state.timer_frozen:
            st.session_state.timer_started = True
            html(timer(), height=50)
        status_text = st.empty()
        progress_bar = st.progress(0)
        try:
            # Stage 1: sentiment analysis.
            status_text.markdown("**🔍 Running sentiment analysis...**")
            progress_bar.progress(0)
            sentiment_result = sentiment_pipe(user_input)
            progress_bar.progress(50)

            # Stage 2: report generation with Gemma.
            status_text.markdown("**📝 Generating report with Gemma...**")
            prompt = f"""
Generate a detailed report based on the following analysis.
Original text:
"{user_input}"
Sentiment analysis result:
{sentiment_result}
Please provide a concise summary report explaining the sentiment and key insights.
"""
            # Use max_new_tokens instead of max_length: max_length counts the
            # prompt tokens too, so a long input could leave no room for the
            # report (or raise). return_full_text=False keeps the prompt out
            # of the displayed report text.
            report = gemma_pipe(prompt, max_new_tokens=200, return_full_text=False)
            progress_bar.progress(100)
            status_text.success("**✅ Generation complete!**")
            html("", height=0)  # collapse the timer slot
            st.session_state.timer_frozen = True
            st.write("**Sentiment Analysis Result:**", sentiment_result)
            st.write("**Generated Report:**", report[0]['generated_text'])
        except Exception as e:
            # Clear the timer slot, surface the failure, and keep the app alive.
            html("", height=0)
            status_text.error(f"**❌ Error:** {str(e)}")
            progress_bar.empty()