AItool committed
Commit af29fad · verified · 1 Parent(s): 587858f

Update app.py

Files changed (1): app.py (+10 −7)
app.py CHANGED
@@ -18,7 +18,13 @@ st.title("Text-generation model using Streamlit from Inference API (serverless)")
 # Ensure the full_text key is initialized in session state
 if "full_text" not in st.session_state:
     st.session_state["full_text"] = ""
-
+
+# Model selection dropdown
+model_options = [ "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "gpt2", "facebook/opt-1.3b", "EleutherAI/gpt-neo-2.7B", "meta-llama/Meta-Llama-3-8B-Instruct", "meta-llama/Llama-3.1-8B-Instruct", "meta-llama/Llama-3.2-1B-Instruct", "meta-llama/Llama-3.2-3B-Instruct", "Qwen/Qwen2.5-1.5B-Instruct", "openai-community/gpt2", "google/gemma-1.1-7b-it", "google/gemma-1.1-2b-it", "google/gemma-2-2b-it", "google/gemma-2-9b-it", "google/gemma-2-27b-it", "HuggingFaceH4/starchat-beta", "distilbert/distilgpt2", "distributed/optimized-gpt2-1b" ]
+selected_model = st.selectbox("Choose a model:", model_options)
+
+
+
 # Create a text input area for user prompts
 with st.form("my_form"):
     text = st.text_area("JOKER (TinyLlama is not great at joke telling.) (using model TinyLlama/TinyLlama-1.1B-Chat-v1.0):", "Tell me a clever and funny joke in exactly 4 sentences. It should make me laugh really hard. Don't repeat the topic in your joke. Be creative and concise.")
@@ -27,28 +33,25 @@ with st.form("my_form"):
     # Initialize the full_text variable
     full_text = " "
 
-    # to get different jokes
-    top_p_init = 0.7
     # Generate a random temperature between 0.5 and 1.0
     temperature = random.uniform(0.5, 1.0)
 
     if submitted:
-        top_p_init += 0.2
         messages = [
             {"role": "user", "content": text}
         ]
 
         # Create a new stream for each submission
         stream = client.chat.completions.create(
-            model="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
+            model=selected_model,
             messages=messages,
             # Generate a random temperature between 0.5 and 1.0
             temperature = random.uniform(0.5, 1.0),
             max_tokens=300,
-            top_p=top_p_init,
+            top_p=random.uniform(0.7, 1.0),
             stream=True
         )
-        top_p_init += 0.1
+
         # Concatenate chunks to form the full response
         for chunk in stream:
             full_text += chunk.choices[0].delta.content
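
Why the stateful knob went away: Streamlit reruns app.py from top to bottom on every interaction, so the old module-level top_p_init counter was reset to 0.7 on each run and its += bumps never accumulated across submissions. Drawing a fresh top_p=random.uniform(0.7, 1.0) per submit achieves the intended joke variety without any state. For orientation, the following is a minimal sketch of how the changed hunks plausibly fit into the rest of the app; the imports, the InferenceClient construction, the submit button, and the final rendering step are outside the diff and are assumptions here, and the model list is trimmed to a few entries.

    import random

    import streamlit as st
    from huggingface_hub import InferenceClient  # assumed: "Inference API (serverless)" suggests this client

    # Assumed client setup; the diff never shows it. An HF token secret is presumed.
    client = InferenceClient(token=st.secrets["HF_TOKEN"])

    st.title("Text-generation model using Streamlit from Inference API (serverless)")

    # Ensure the full_text key is initialized in session state
    if "full_text" not in st.session_state:
        st.session_state["full_text"] = ""

    # Model selection dropdown (trimmed; the commit lists many more options)
    model_options = [
        "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
        "Qwen/Qwen2.5-1.5B-Instruct",
        "meta-llama/Llama-3.2-3B-Instruct",
    ]
    selected_model = st.selectbox("Choose a model:", model_options)

    # Text input area for user prompts
    with st.form("my_form"):
        text = st.text_area("Prompt:", "Tell me a clever and funny joke in exactly 4 sentences.")
        submitted = st.form_submit_button("Submit")  # assumed; the hunks use `submitted` without defining it

        full_text = ""

        if submitted:
            # Fresh random sampling parameters on every submit, so repeated
            # prompts produce different jokes without cross-rerun state
            stream = client.chat.completions.create(
                model=selected_model,
                messages=[{"role": "user", "content": text}],
                temperature=random.uniform(0.5, 1.0),
                max_tokens=300,
                top_p=random.uniform(0.7, 1.0),
                stream=True,
            )

            # Concatenate streamed deltas into the full response
            for chunk in stream:
                delta = chunk.choices[0].delta.content
                if delta:
                    full_text += delta

            st.session_state["full_text"] = full_text
            st.write(full_text)  # assumed display step; not shown in the diff

One quirk the commit leaves in place: the temperature = random.uniform(0.5, 1.0) assignment before the submit check is dead code, since the call site draws its own value in the temperature= keyword argument.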