frankai98 committed
Commit d9a35fb · verified · 1 Parent(s): 7bfc9bc

Update app.py

Files changed (1)
  1. app.py +16 -16
app.py CHANGED
@@ -8,13 +8,13 @@ from streamlit.components.v1 import html
 import pandas as pd
 
 # Retrieve the token from environment variables
-#hf_token = os.environ.get("HF_TOKEN")
-#if not hf_token:
-#st.error("Hugging Face token not found. Please set the HF_TOKEN environment variable.")
-#st.stop()
+hf_token = os.environ.get("HF_TOKEN")
+if not hf_token:
+    st.error("Hugging Face token not found. Please set the HF_TOKEN environment variable.")
+    st.stop()
 
 # Login with the token
-login(token=HF_TOKEN)
+login(token=hf_token)
 
 # Initialize session state for timer and results
 if 'result' not in st.session_state:
@@ -56,14 +56,14 @@ st.header("Sentiment Analysis & Report Generation with Gemma")
 # Introduction for the Hugging Face interface
 st.write("""
 Welcome to the Sentiment Analysis & Report Generator app!
-This tool leverages Hugging Face’s models to analyze the sentiment of your text and generate a detailed report explaining the key insights.
+This tool leverages Hugging Face’s models to analyze your text and generate a detailed report explaining key insights.
 You can either paste your review text directly into the text area or upload a CSV file containing your reviews.
 """)
 
 # Load models with caching to avoid reloading on every run
 @st.cache_resource
 def load_models():
-    # Load the sentiment analysis model via pipeline.
+    # Load the "reranker" model via pipeline.
     sentiment_pipe = pipeline("text-classification", model="mixedbread-ai/mxbai-rerank-base-v1")
     # Load the Gemma text generation pipeline.
     gemma_pipe = pipeline("text-generation", model="google/gemma-3-1b-it", use_auth_token=hf_token)
@@ -73,7 +73,7 @@ sentiment_pipe, gemma_pipe = load_models()
 
 # Provide two options for input: file upload (CSV) or text area
 uploaded_file = st.file_uploader("Upload Review File (CSV format)", type=["csv"])
-user_input = st.text_area("Or, enter your text for sentiment analysis and report generation:")
+user_input = st.text_area("Or, enter your text for analysis and report generation:")
 
 if uploaded_file is not None:
     try:
@@ -97,28 +97,28 @@ if st.button("Generate Report"):
     status_text = st.empty()
     progress_bar = st.progress(0)
     try:
-        # Stage 1: Sentiment Analysis using pipeline
-        status_text.markdown("**🔍 Running sentiment analysis...**")
+        # Stage 1: Reranking analysis using the sentiment pipeline
+        status_text.markdown("**🔍 Running reranking analysis...**")
         progress_bar.progress(0)
-        sentiment_result = sentiment_pipe(user_input)
+        rerank_result = sentiment_pipe(user_input)
         progress_bar.progress(50)
 
-        # Stage 2: Generate Report using Gemma
+        # Stage 2: Generate Report using Gemma, using the rerank result
         status_text.markdown("**📝 Generating report with Gemma...**")
         prompt = f"""
         Generate a detailed report based on the following analysis.
         Original text:
         "{user_input}"
-        Sentiment analysis result:
-        {sentiment_result}
-        Please provide a concise summary report explaining the sentiment and key insights.
+        Reranking analysis result:
+        {rerank_result}
+        Please provide a concise summary report explaining the insights derived from this analysis.
         """
         report = gemma_pipe(prompt, max_length=200)
         progress_bar.progress(100)
         status_text.success("**✅ Generation complete!**")
         html("<script>localStorage.setItem('freezeTimer', 'true');</script>", height=0)
         st.session_state.timer_frozen = True
-        st.write("**Sentiment Analysis Result:**", sentiment_result)
+        st.write("**Reranking Analysis Result:**", rerank_result)
         st.write("**Generated Report:**", report[0]['generated_text'])
     except Exception as e:
         html("<script>document.getElementById('timer').remove();</script>")