"""Streamlit app: generate a blog post on a user-supplied topic with GPT-2."""

import streamlit as st
from transformers import GPT2Tokenizer, GPT2LMHeadModel

MODEL_NAME = 'gpt2-large'


@st.cache_resource
def load_model(name: str = MODEL_NAME):
    """Load and cache the tokenizer and model.

    Streamlit re-runs this entire script on every widget interaction;
    without caching, the ~3 GB gpt2-large weights would be reloaded on
    each button click. ``st.cache_resource`` loads them exactly once
    per session server.
    """
    tokenizer = GPT2Tokenizer.from_pretrained(name)
    model = GPT2LMHeadModel.from_pretrained(name)
    model.eval()  # inference only — disable dropout
    return tokenizer, model


tokenizer, model = load_model()

# Set the title for the Streamlit app
st.title("GPT-2 Blog Post Generator")

# Text input for the user
text = st.text_area("Enter your Topic: ")


def generate_text(text):
    """Generate up to 200 tokens of sampled text continuing *text*.

    Parameters
    ----------
    text : str
        The topic/prompt typed by the user.

    Returns
    -------
    str or None
        The decoded generation, or ``None`` on empty input or error
        (errors are surfaced in the UI via ``st.error``).
    """
    # Guard: an empty prompt produces unconditioned/garbage output.
    if not text or not text.strip():
        st.warning("Please enter a topic first.")
        return None
    try:
        # Encode input text
        encoded_input = tokenizer(text, return_tensors='pt')

        # Generate text
        output = model.generate(
            input_ids=encoded_input['input_ids'],
            attention_mask=encoded_input['attention_mask'],
            max_length=200,              # cap on prompt + generated tokens
            num_return_sequences=1,      # number of sequences to generate
            no_repeat_ngram_size=2,      # avoid repeating bigrams
            # BUGFIX: without do_sample=True, generate() uses greedy search
            # and silently ignores top_k / top_p / temperature.
            do_sample=True,
            top_k=50,                    # limit sampling pool to top-k tokens
            top_p=0.95,                  # nucleus-sampling cumulative threshold
            temperature=0.7,             # randomness of predictions
            pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no native pad token
        )

        # Decode generated text
        return tokenizer.decode(output[0], skip_special_tokens=True)
    except Exception as e:  # surface any tokenizer/model failure in the UI
        st.error(f"An error occurred: {e}")
        return None


if st.button("Generate"):
    generated_text = generate_text(text)
    if generated_text:
        # Display the generated text
        st.subheader("Generated Blog Post")
        st.write(generated_text)