Spaces:
Running
Running
Commit
·
2732ef7
1
Parent(s):
466be59
Streaming effect of words
Browse files
- app.py +15 -23
- requirements.txt +1 -0
app.py
CHANGED
@@ -1,35 +1,21 @@
|
|
1 |
import streamlit as st
|
2 |
from transformers import AutoModelForCausalLM, AutoTokenizer, FineGrainedFP8Config
|
3 |
import torch
|
|
|
4 |
import base64
|
5 |
|
6 |
st.set_page_config(page_title="LIA Demo", layout="wide")
|
7 |
-
# Model selection (STUBBED behavior)
|
8 |
-
# model_option = st.selectbox(
|
9 |
-
# "Choose a Gemma to reveal hidden truths:",
|
10 |
-
# ["gemma-2b-it (Instruct)", "gemma-2b", "gemma-7b", "gemma-7b-it"],
|
11 |
-
# index=0,
|
12 |
-
# help="Stubbed selection – only gemma-2b-it will load for now."
|
13 |
-
# )
|
14 |
st.markdown("<h1 style='text-align: center;'>Ask LeoNardo!</h1>", unsafe_allow_html=True)
|
15 |
|
16 |
# Load both GIFs in base64 format
|
17 |
def load_gif_base64(path):
|
18 |
with open(path, "rb") as f:
|
19 |
return base64.b64encode(f.read()).decode("utf-8")
|
20 |
-
|
21 |
-
# still_gem_b64 = load_gif_base64("assets/stillGem.gif")
|
22 |
-
# rotating_gem_b64 = load_gif_base64("assets/rotatingGem.gif")
|
23 |
-
|
24 |
# Placeholder for GIF HTML
|
25 |
gif_html = st.empty()
|
26 |
caption = st.empty()
|
27 |
|
28 |
-
# Initially show still gem
|
29 |
-
# gif_html.markdown(
|
30 |
-
# f"<div style='text-align:center;'><img src='data:image/gif;base64,{still_gem_b64}' width='300'></div>",
|
31 |
-
# unsafe_allow_html=True,
|
32 |
-
# )
|
33 |
gif_html.markdown(
|
34 |
f"<div style='text-align:center;'><img src='https://media0.giphy.com/media/v1.Y2lkPTc5MGI3NjExYTRxYzI2bXJmY3N2bXBtMHJtOGV3NW9vZ3l3M3czbGYybGpkeWQ1YSZlcD12MV9pbnRlcm5hbF9naWZfYnlfaWQmY3Q9cw/3uPWb5EYVvxdfoREQm/giphy.gif' width='300'></div>",
|
35 |
unsafe_allow_html=True,
|
@@ -37,9 +23,6 @@ gif_html.markdown(
|
|
37 |
|
38 |
@st.cache_resource
|
39 |
def load_model():
|
40 |
-
# As Gemma is gated, we will show functionality of the demo using DeepSeek-R1-Distill-Qwen-1.5B model
|
41 |
-
# model_id = "google/gemma-2b-it"
|
42 |
-
# tokenizer = AutoTokenizer.from_pretrained(model_id, token=True)
|
43 |
# model_id = "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"
|
44 |
# model_id = "deepseek-ai/deepseek-llm-7b-chat"
|
45 |
# model_id = "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"
|
@@ -127,7 +110,16 @@ if st.button("Generate"):
|
|
127 |
caption.empty()
|
128 |
|
129 |
|
130 |
-
|
131 |
-
|
132 |
-
|
133 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
import streamlit as st
|
2 |
from transformers import AutoModelForCausalLM, AutoTokenizer, FineGrainedFP8Config
|
3 |
import torch
|
4 |
+
import time
|
5 |
import base64
|
6 |
|
7 |
st.set_page_config(page_title="LIA Demo", layout="wide")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
8 |
st.markdown("<h1 style='text-align: center;'>Ask LeoNardo!</h1>", unsafe_allow_html=True)
|
9 |
|
10 |
# Load both GIFs in base64 format
|
11 |
def load_gif_base64(path):
    """Read the binary file at *path* and return its contents as a base64 text string.

    Used to inline GIF assets into Streamlit HTML via a data: URI.
    """
    with open(path, "rb") as gif_file:
        raw_bytes = gif_file.read()
    return base64.b64encode(raw_bytes).decode("utf-8")
|
14 |
+
|
|
|
|
|
|
|
15 |
# Placeholder for GIF HTML
|
16 |
gif_html = st.empty()
|
17 |
caption = st.empty()
|
18 |
|
|
|
|
|
|
|
|
|
|
|
19 |
gif_html.markdown(
|
20 |
f"<div style='text-align:center;'><img src='https://media0.giphy.com/media/v1.Y2lkPTc5MGI3NjExYTRxYzI2bXJmY3N2bXBtMHJtOGV3NW9vZ3l3M3czbGYybGpkeWQ1YSZlcD12MV9pbnRlcm5hbF9naWZfYnlfaWQmY3Q9cw/3uPWb5EYVvxdfoREQm/giphy.gif' width='300'></div>",
|
21 |
unsafe_allow_html=True,
|
|
|
23 |
|
24 |
@st.cache_resource
|
25 |
def load_model():
|
|
|
|
|
|
|
26 |
# model_id = "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"
|
27 |
# model_id = "deepseek-ai/deepseek-llm-7b-chat"
|
28 |
# model_id = "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"
|
|
|
110 |
caption.empty()
|
111 |
|
112 |
|
113 |
+
decoded_output = tokenizer.decode(outputs[0], skip_special_tokens=True)
|
114 |
+
# Set up placeholder for streaming effect
|
115 |
+
output_placeholder = st.empty()
|
116 |
+
streamed_text = ""
|
117 |
+
|
118 |
+
for word in decoded_output.split(" "):
|
119 |
+
streamed_text += word + " "
|
120 |
+
output_placeholder.markdown("### ✨ Output:\n\n" + streamed_text + "▌")
|
121 |
+
# slight delay
|
122 |
+
time.sleep(0.03)
|
123 |
+
|
124 |
+
# Final cleanup (remove blinking cursor)
|
125 |
+
output_placeholder.markdown("### ✨ Output:\n\n" + streamed_text)
|
requirements.txt
CHANGED
@@ -2,3 +2,4 @@ streamlit
|
|
2 |
transformers
|
3 |
torch
|
4 |
accelerate
|
|
|
|
2 |
transformers
|
3 |
torch
|
4 |
accelerate
|
5 |
+
time  # NOTE(review): `time` is part of the Python standard library, not a PyPI package — `pip install -r requirements.txt` will fail on this line; it should be removed rather than listed as a dependency.
|