Slfagrouche committed on
Commit
997196d
·
verified ·
1 Parent(s): fda5191

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -13
app.py CHANGED
@@ -1,26 +1,29 @@
 
 
 
1
  import os
2
- from transformers import pipeline
3
- # Retrieve the API token from environment variables
4
- api_token = os.getenv('hugging_face_api_token')
5
 
 
 
6
  if not api_token:
7
- raise ValueError("Hugging Face API token not found. Please set the HUGGING_FACE_API_TOKEN environment variable.")
 
8
 
9
  # Configure the use of the token for Hugging Face operations
10
  from huggingface_hub import HfFolder
11
  HfFolder.save_token(api_token)
12
 
13
-
14
- generator = pipeline("text-generation", model="google/gemma-7b", tokenizer="google/gemma-7b")
15
-
16
-
17
 
18
  # Streamlit app title and description
19
- st.title("Gemma Text Generation App")
20
- st.write("This app generates text based on the input prompt using the Gemma model.")
21
 
22
  # Text input for user prompt
23
- prompt = st.text_input("Enter your prompt:", "Once upon a time,")
24
 
25
  # User controls for output length and creativity
26
  max_length = st.slider("Select the maximum output length:", min_value=50, max_value=500, value=100)
@@ -29,8 +32,10 @@ temperature = st.slider("Adjust the creativity level (temperature):", min_value=
29
  # Generate button to trigger text generation
30
  if st.button("Generate Text"):
31
  with st.spinner('Generating text...'):
 
32
  try:
33
- generated_text = generator(prompt, max_length=max_length, temperature=temperature)[0]['generated_text']
 
34
  except Exception as e:
35
  st.error(f"Error generating text: {str(e)}")
36
  else:
@@ -41,6 +46,6 @@ if st.button("Generate Text"):
41
  # About section
42
  with st.expander("About"):
43
  st.write("""
44
- The Gemma Text Generation app uses the powerful Gemma-7b model from Google to generate text.
45
  Adjust the sliders to change the length and creativity of the output.
46
  """)
 
1
+
2
+ import streamlit as st
3
+ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
4
  import os
 
 
 
5
 
6
+ # Check and retrieve the API token from environment variables
7
+ api_token = os.getenv('hugging_face_api_token')
8
  if not api_token:
9
+ st.error("Hugging Face API token not found. Please set the HUGGING_FACE_API_TOKEN environment variable.")
10
+ st.stop()
11
 
12
  # Configure the use of the token for Hugging Face operations
13
  from huggingface_hub import HfFolder
14
  HfFolder.save_token(api_token)
15
 
16
+ # Initialize tokenizer and model with the correct model ID
17
+ model_id = "mistral-community/Mistral-8x22B-v0.1"
18
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
19
+ model = AutoModelForCausalLM.from_pretrained(model_id)
20
 
21
  # Streamlit app title and description
22
+ st.title("Text Generation App")
23
+ st.write("This app generates text based on the input prompt using the Mistral-8x22B model.")
24
 
25
  # Text input for user prompt
26
+ prompt = st.text_input("Enter your prompt:", "Hello my name is")
27
 
28
  # User controls for output length and creativity
29
  max_length = st.slider("Select the maximum output length:", min_value=50, max_value=500, value=100)
 
32
  # Generate button to trigger text generation
33
  if st.button("Generate Text"):
34
  with st.spinner('Generating text...'):
35
+ inputs = tokenizer(prompt, return_tensors="pt")
36
  try:
37
+ outputs = model.generate(**inputs, max_length=max_length, temperature=temperature)
38
+ generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
39
  except Exception as e:
40
  st.error(f"Error generating text: {str(e)}")
41
  else:
 
# About section
_about_text = """
    This text generation app utilizes the powerful Mistral-8x22B model from the Mistral community on Hugging Face.
    Adjust the sliders to change the length and creativity of the output.
    """
about = st.expander("About")
with about:
    st.write(_about_text)