Update app.py
app.py CHANGED
@@ -53,6 +53,7 @@ st.header("Review Scorer & Report Generator")
 # Concise introduction
 st.write("This model will score your reviews in your CSV file and generate a report based on your query and those results.")
 
+@st.cache_resource
 def display_temp_message(message, message_type="info", duration=5):
     """Display a temporary message that disappears after specified duration."""
     placeholder = st.empty()
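
Note on the added decorator: st.cache_resource memoizes a function's return value across Streamlit reruns and sessions, which is why it is the usual tool for keeping heavy objects such as model pipelines alive between script runs. A minimal sketch of that pattern, assuming a hypothetical load_scorer helper (not part of this diff):

import streamlit as st
from transformers import pipeline

@st.cache_resource
def load_scorer(model_name):
    # Runs once per unique model_name; later reruns reuse the cached pipeline.
    return pipeline("text-classification", model=model_name)

scorer = load_scorer("cardiffnlp/twitter-roberta-base-sentiment-latest")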
@@ -64,68 +65,54 @@ def display_temp_message(message, message_type="info", duration=5):
     elif message_type == "error":
         placeholder.error(message)
 
-#
-
-# Load models with caching to avoid reloading on every run
-def load_model_with_messages(loading_message, success_message, loading_function, error_message_prefix):
-    """Load a model with temporary loading and success messages."""
-    # Create placeholder for the loading message
-    loading_placeholder = st.empty()
-    loading_placeholder.info(loading_message)
-
-    try:
-        # Load the model
-        result = loading_function()
-
-        # Clear the loading message
-        loading_placeholder.empty()
-
-        # Show temporary success message
-        Thread(
-            target=display_temp_message,
-            args=(success_message, "success"),
-            daemon=True
-        ).start()
-
-        return result
-
-    except Exception as e:
-        # Clear the loading message
-        loading_placeholder.empty()
-
-        # Show error message
-        st.error(f"{error_message_prefix}: {e}")
-        st.error(f"Detailed error: {type(e).__name__}: {str(e)}")
-        return None
-
-#
-def load_llama_model():
-    return pipeline("text-generation",
-        model="meta-llama/Llama-3.2-1B-Instruct",
-        device=0, # Use GPU if available
-        torch_dtype=torch.bfloat16) # Use FP16 for efficiency
-
-# Load Llama 3.2 model
-llama_pipe = load_model_with_messages(
-    "Loading Llama 3.2 summarization model...",
-    "Llama 3.2 summarization model loaded successfully!",
-    load_llama_model,
-    "Error loading Llama 3.2 summarization model"
-)
67 |
|
68 |
+
# Create a timer to clear the message after duration
|
69 |
+
timer = threading.Timer(duration, placeholder.empty)
|
70 |
+
timer.daemon = True
|
71 |
+
timer.start()
|
+
+# Load Llama 3.2 model
+loading_llama_placeholder = st.empty()
+loading_llama_placeholder.info("Loading Llama 3.2 summarization model...")
+
+try:
+    llama_pipe = pipeline("text-generation",
+                          model="meta-llama/Llama-3.2-1B-Instruct",
+                          device=0,  # Use GPU if available
+                          torch_dtype=torch.bfloat16)  # Use bfloat16 for efficiency
+
+    # Clear loading message
+    loading_llama_placeholder.empty()
+
+    # Display success message that will disappear after 5 seconds
+    display_temp_message("Llama 3.2 summarization model loaded successfully!", "success")
+
+except Exception as e:
+    # Clear loading message
+    loading_llama_placeholder.empty()
+
+    st.error(f"Error loading Llama 3.2 summarization model: {e}")
+    st.error(f"Detailed error: {type(e).__name__}: {str(e)}")
+
+# Load sentiment analysis model
+loading_sentiment_placeholder = st.empty()
+loading_sentiment_placeholder.info("Loading sentiment analysis model...")
+
+try:
+    score_pipe = pipeline("text-classification",
+                          model="cardiffnlp/twitter-roberta-base-sentiment-latest",
+                          device=0 if torch.cuda.is_available() else -1)
+
+    # Clear loading message
+    loading_sentiment_placeholder.empty()
+
+    # Display success message that will disappear after 5 seconds
+    display_temp_message("Sentiment analysis model loaded successfully!", "success")
+
+except Exception as e:
+    # Clear loading message
+    loading_sentiment_placeholder.empty()
+
+    st.error(f"Error loading sentiment analysis model: {e}")
 
 def extract_assistant_content(raw_response):
     """Extract only the assistant's content from the Gemma-3 response."""
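
For reference, the temporary-message pattern added above can be exercised on its own. This self-contained sketch mirrors the diff's display_temp_message; the final call is a hypothetical usage example. One caveat: Streamlit may warn when a placeholder is touched from a background thread without a script run context, so behavior can vary across Streamlit versions.

import threading
import streamlit as st

def display_temp_message(message, message_type="info", duration=5):
    """Display a temporary message that disappears after the specified duration."""
    placeholder = st.empty()
    if message_type == "success":
        placeholder.success(message)
    elif message_type == "error":
        placeholder.error(message)
    else:
        placeholder.info(message)

    # threading.Timer invokes placeholder.empty() once, `duration` seconds
    # later, without blocking the current script run.
    timer = threading.Timer(duration, placeholder.empty)
    timer.daemon = True  # don't keep the process alive just for the timer
    timer.start()

display_temp_message("Llama 3.2 summarization model loaded successfully!", "success")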