davidizzle committed on
Commit
d8f007f
·
1 Parent(s): 9b7d4f4

Corrected typos

Browse files
Files changed (2) hide show
  1. README.md +5 -5
  2. app.py +22 -22
README.md CHANGED
@@ -1,6 +1,6 @@
1
  ---
2
- title: Gemma Demo GSoC
3
- emoji: 💎
4
  colorFrom: indigo
5
  colorTo: pink
6
  sdk: streamlit
@@ -9,13 +9,13 @@ app_file: app.py
9
  pinned: false
10
  ---
11
 
12
- # 💎 Gemma 💎 HF Spaces Demo
13
 
14
- An interactive [Streamlit](https://streamlit.io) app to test [Gemma](https://huggingface.co/google/gemma-2b) models directly in your browser.
15
 
16
  ## Features 🚀
17
 
18
- - Chat with the Gemma model (default: `google/gemma-2b`)
19
  - Fast deploy to Hugging Face Spaces
20
  - Easy to customize & extend
21
 
 
1
  ---
2
+ title: LIA - LLM Demo
3
+ emoji:
4
  colorFrom: indigo
5
  colorTo: pink
6
  sdk: streamlit
 
9
  pinned: false
10
  ---
11
 
12
+ # LIA HF Spaces Demo
13
 
14
+ An interactive [Streamlit](https://streamlit.io) app to test [DeepSeek](https://huggingface.co/deepseek-ai) models directly in your browser.
15
 
16
  ## Features 🚀
17
 
18
+ - Chat with the LLM Model
19
  - Fast deploy to Hugging Face Spaces
20
  - Easy to customize & extend
21
 
app.py CHANGED
@@ -3,15 +3,15 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
3
  import torch
4
  import base64
5
 
6
- st.set_page_config(page_title="Gemma Demo", layout="wide")
7
  # Model selection (STUBBED behavior)
8
- model_option = st.selectbox(
9
- "Choose a Gemma to reveal hidden truths:",
10
- ["gemma-2b-it (Instruct)", "gemma-2b", "gemma-7b", "gemma-7b-it"],
11
- index=0,
12
- help="Stubbed selection – only gemma-2b-it will load for now."
13
- )
14
- st.markdown("<h1 style='text-align: center;'>Portal to Gemma</h1>", unsafe_allow_html=True)
15
 
16
  # Load both GIFs in base64 format
17
  def load_gif_base64(path):
@@ -31,7 +31,7 @@ caption = st.empty()
31
  # unsafe_allow_html=True,
32
  # )
33
  gif_html.markdown(
34
- f"<div style='text-align:center;'><img src='https://media3.giphy.com/media/v1.Y2lkPTc5MGI3NjExMG00dmlwbjZsemZ5Mnh2eTIwOGNyYncwbGNqd3U3aHhiNGYxYjgwbCZlcD12MV9pbnRlcm5hbF9naWZfYnlfaWQmY3Q9cw/WmJtDY3xgYVgXgQZYc/giphy.gif' width='300'></div>",
35
  unsafe_allow_html=True,
36
  )
37
 
@@ -51,8 +51,8 @@ def load_model():
51
  return tokenizer, model
52
 
53
  tokenizer, model = load_model()
54
- prompt = st.text_area("Enter your prompt:", "What is Gemma?")
55
- # # Example prompt selector
56
  # examples = {
57
  # "🧠 Summary": "Summarize the history of AI in 5 bullet points.",
58
  # "💻 Code": "Write a Python function to sort a list using bubble sort.",
@@ -63,16 +63,16 @@ prompt = st.text_area("Enter your prompt:", "What is Gemma?")
63
 
64
  # selected_example = st.selectbox("Choose a Gemma to consult:", list(examples.keys()) + ["✍️ Custom input"])
65
  # Add before generation
66
- col1, col2, col3 = st.columns(3)
67
 
68
- with col1:
69
- temperature = st.slider("Temperature", 0.1, 1.5, 1.0)
70
 
71
- with col2:
72
- max_tokens = st.slider("Max tokens", 50, 500, 100)
73
 
74
- with col3:
75
- top_p = st.slider("Top-p (nucleus sampling)", 0.1, 1.0, 0.95)
76
  # if selected_example != "✍️ Custom input":
77
  # prompt = examples[selected_example]
78
  # else:
@@ -85,17 +85,17 @@ if st.button("Generate"):
85
  # unsafe_allow_html=True,
86
  # )
87
  gif_html.markdown(
88
- f"<div style='text-align:center;'><img src='https://media4.giphy.com/media/v1.Y2lkPTc5MGI3NjExaXB0ZTEycW1yYWhvZWExdHFyNzBnemdtdm80NzY0MGg1ZnkyNTRqbiZlcD12MV9pbnRlcm5hbF9naWZfYnlfaWQmY3Q9cw/FIMRlbkylLyniVP7WG/giphy.gif' width='300'></div>",
89
  unsafe_allow_html=True,
90
  )
91
- caption.markdown("<p style='text-align: center;'>Gemma is thinking... 🌀</p>", unsafe_allow_html=True)
92
 
93
 
94
  # Generate text
95
 
96
  inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
97
  with torch.no_grad():
98
- outputs = model.generate(**inputs, max_new_tokens=max_tokens, temperature=temperature, top_p=top_p)
99
 
100
  # Back to still
101
  # gif_html.markdown(
@@ -103,7 +103,7 @@ if st.button("Generate"):
103
  # unsafe_allow_html=True,
104
  # )
105
  gif_html.markdown(
106
- f"<div style='text-align:center;'><img src='https://media3.giphy.com/media/v1.Y2lkPTc5MGI3NjExMG00dmlwbjZsemZ5Mnh2eTIwOGNyYncwbGNqd3U3aHhiNGYxYjgwbCZlcD12MV9pbnRlcm5hbF9naWZfYnlfaWQmY3Q9cw/WmJtDY3xgYVgXgQZYc/giphy.gif' width='300'></div>",
107
  unsafe_allow_html=True,
108
  )
109
  caption.empty()
 
3
  import torch
4
  import base64
5
 
6
+ st.set_page_config(page_title="LIA Demo", layout="wide")
7
  # Model selection (STUBBED behavior)
8
+ # model_option = st.selectbox(
9
+ # "Choose a Gemma to reveal hidden truths:",
10
+ # ["gemma-2b-it (Instruct)", "gemma-2b", "gemma-7b", "gemma-7b-it"],
11
+ # index=0,
12
+ # help="Stubbed selection – only gemma-2b-it will load for now."
13
+ # )
14
+ st.markdown("<h1 style='text-align: center;'>Ask LeoNardo!</h1>", unsafe_allow_html=True)
15
 
16
  # Load both GIFs in base64 format
17
  def load_gif_base64(path):
 
31
  # unsafe_allow_html=True,
32
  # )
33
  gif_html.markdown(
34
+ f"<div style='text-align:center;'><img src='https://media0.giphy.com/media/v1.Y2lkPTc5MGI3NjExYTRxYzI2bXJmY3N2bXBtMHJtOGV3NW9vZ3l3M3czbGYybGpkeWQ1YSZlcD12MV9pbnRlcm5hbF9naWZfYnlfaWQmY3Q9cw/3uPWb5EYVvxdfoREQm/giphy.gif' width='300'></div>",
35
  unsafe_allow_html=True,
36
  )
37
 
 
51
  return tokenizer, model
52
 
53
  tokenizer, model = load_model()
54
+ prompt = st.text_area("Enter your prompt:", "What is Leonardo, the company with the red logo?")
55
+ # Example prompt selector
56
  # examples = {
57
  # "🧠 Summary": "Summarize the history of AI in 5 bullet points.",
58
  # "💻 Code": "Write a Python function to sort a list using bubble sort.",
 
63
 
64
  # selected_example = st.selectbox("Choose a Gemma to consult:", list(examples.keys()) + ["✍️ Custom input"])
65
  # Add before generation
66
+ # col1, col2, col3 = st.columns(3)
67
 
68
+ # with col1:
69
+ # temperature = st.slider("Temperature", 0.1, 1.5, 1.0)
70
 
71
+ # with col2:
72
+ # max_tokens = st.slider("Max tokens", 50, 500, 100)
73
 
74
+ # with col3:
75
+ # top_p = st.slider("Top-p (nucleus sampling)", 0.1, 1.0, 0.95)
76
  # if selected_example != "✍️ Custom input":
77
  # prompt = examples[selected_example]
78
  # else:
 
85
  # unsafe_allow_html=True,
86
  # )
87
  gif_html.markdown(
88
+ f"<div style='text-align:center;'><img src='https://media2.giphy.com/media/v1.Y2lkPTc5MGI3NjExMXViMm02MnR6bGJ4c2h3ajYzdWNtNXNtYnNic3lnN2xyZzlzbm9seSZlcD12MV9pbnRlcm5hbF9naWZfYnlfaWQmY3Q9cw/k32ddF9WVs44OUaZAm/giphy.gif' width='300'></div>",
89
  unsafe_allow_html=True,
90
  )
91
+ caption.markdown("<p style='text-align: center; margin-top: 20px;'>LeoNardo is thinking... 🌀</p>", unsafe_allow_html=True)
92
 
93
 
94
  # Generate text
95
 
96
  inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
97
  with torch.no_grad():
98
+ outputs = model.generate(**inputs, max_new_tokens=100, temperature=1.0, top_p=0.95)
99
 
100
  # Back to still
101
  # gif_html.markdown(
 
103
  # unsafe_allow_html=True,
104
  # )
105
  gif_html.markdown(
106
+ f"<div style='text-align:center;'><img src='https://media0.giphy.com/media/v1.Y2lkPTc5MGI3NjExYTRxYzI2bXJmY3N2bXBtMHJtOGV3NW9vZ3l3M3czbGYybGpkeWQ1YSZlcD12MV9pbnRlcm5hbF9naWZfYnlfaWQmY3Q9cw/3uPWb5EYVvxdfoREQm/giphy.gif' width='300'></div>",
107
  unsafe_allow_html=True,
108
  )
109
  caption.empty()