import streamlit as st
import os
from transformers import pipeline
# Set up the Hugging Face API token
hugging_face_api_token = os.getenv('hugging_face_api_token')
# Load the text generation pipeline (Gemma is a gated model, so the access token is passed along)
generator = pipeline("text-generation", model="google/gemma-7b", tokenizer="google/gemma-7b", token=hugging_face_api_token)
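# Optional: wrapping this pipeline construction in st.cache_resource would avoid
# reloading the 7B model on every Streamlit rerun.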
# Streamlit app title and description
st.title("Gemma Text Generation App")
st.write("This app generates text based on the input prompt using the Gemma model.")
# Text input for user prompt
prompt = st.text_input("Enter your prompt:", "Once upon a time,")
# User controls for output length and creativity
max_length = st.slider("Select the maximum output length:", min_value=50, max_value=500, value=100)
temperature = st.slider("Adjust the creativity level (temperature):", min_value=0.1, max_value=1.0, value=0.7)
# Generate button to trigger text generation
if st.button("Generate Text"):
    with st.spinner('Generating text...'):
        try:
            # do_sample=True so the temperature slider actually affects the output
            generated_text = generator(prompt, max_length=max_length, temperature=temperature, do_sample=True)[0]['generated_text']
        except Exception as e:
            st.error(f"Error generating text: {str(e)}")
        else:
            st.success('Text generation complete!')
            st.markdown("### Generated Text:")
            st.markdown(generated_text)
# About section
with st.expander("About"):
    st.write("""
    The Gemma Text Generation app uses the powerful Gemma-7b model from Google to generate text.
    Adjust the sliders to change the length and creativity of the output.
    """)