# HuggingFace_App / app.py
# Author: Slfagrouche — commit "Update app.py" (997196d, verified)
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import os

# Retrieve the Hugging Face API token from the environment.
# BUG FIX: the original read os.getenv('hugging_face_api_token') (lowercase),
# but the error message tells users to set HUGGING_FACE_API_TOKEN. os.getenv
# is case-sensitive, so the advertised variable name was never consulted.
# Accept both, preferring the conventional upper-case name.
api_token = os.getenv("HUGGING_FACE_API_TOKEN") or os.getenv("hugging_face_api_token")
if not api_token:
    # Abort the app early — nothing below works without authentication.
    st.error("Hugging Face API token not found. Please set the HUGGING_FACE_API_TOKEN environment variable.")
    st.stop()

# Persist the token so subsequent huggingface_hub / transformers downloads
# are authenticated for this process.
from huggingface_hub import HfFolder
HfFolder.save_token(api_token)
# Model ID to load. NOTE(review): Mixtral 8x22B is an extremely large MoE
# model — it will not fit on typical Space hardware without quantization or
# offloading; confirm the deployment target supports it.
model_id = "mistral-community/Mistral-8x22B-v0.1"


@st.cache_resource(show_spinner="Loading model and tokenizer...")
def _load_model_and_tokenizer(mid: str):
    """Load the tokenizer and model exactly once per process.

    Streamlit re-executes the whole script on every widget interaction;
    without caching, the multi-gigabyte model would be re-instantiated on
    each rerun. `st.cache_resource` keeps a single shared instance alive.
    """
    tok = AutoTokenizer.from_pretrained(mid)
    mdl = AutoModelForCausalLM.from_pretrained(mid)
    return tok, mdl


# Keep the original module-level names so the rest of the script is unchanged.
tokenizer, model = _load_model_and_tokenizer(model_id)
# Streamlit app title and description
st.title("Text Generation App")
st.write("This app generates text based on the input prompt using the Mistral-8x22B model.")
# Text input for user prompt (second argument is the pre-filled default text)
prompt = st.text_input("Enter your prompt:", "Hello my name is")
# User controls for output length and creativity.
# max_length is passed straight to model.generate, so it counts prompt tokens
# plus generated tokens — the true "new text" budget is max_length minus the
# prompt length.
max_length = st.slider("Select the maximum output length:", min_value=50, max_value=500, value=100)
# Sampling temperature: lower values are more deterministic, higher more varied.
temperature = st.slider("Adjust the creativity level (temperature):", min_value=0.1, max_value=1.0, value=0.7)
# Generate button to trigger text generation
if st.button("Generate Text"):
    with st.spinner('Generating text...'):
        # Tokenize the user prompt into PyTorch tensors for the model.
        inputs = tokenizer(prompt, return_tensors="pt")
        try:
            # BUG FIX: do_sample=True is required for `temperature` to take
            # effect. Without it, transformers falls back to greedy decoding
            # and silently ignores the temperature slider.
            outputs = model.generate(
                **inputs,
                max_length=max_length,
                temperature=temperature,
                do_sample=True,
            )
            # Decode only the first (and only) returned sequence.
            generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
        except Exception as e:
            # Surface generation failures (OOM, bad config, ...) in the UI
            # instead of crashing the app.
            st.error(f"Error generating text: {str(e)}")
        else:
            # Success path: only reached when generation raised no exception,
            # so generated_text is guaranteed to be defined here.
            st.success('Text generation complete!')
            st.markdown("### Generated Text:")
            st.markdown(generated_text)
# About section — collapsible help text at the bottom of the page.
# FIX: the st.write call must be indented inside the `with` block; the
# flattened original was not valid Python.
with st.expander("About"):
    st.write("""
This text generation app utilizes the powerful Mistral-8x22B model from the Mistral community on Hugging Face.
Adjust the sliders to change the length and creativity of the output.
""")