import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer

# Model to load. Other sizes ("gpt2-medium", "gpt2-large", ...) also work.
model_name = "gpt2"


@st.cache_resource
def _load_model(name):
    """Load and cache the tokenizer and model for *name*.

    Streamlit re-runs this whole script on every user interaction;
    @st.cache_resource ensures the (expensive) model download/load
    happens only once per session server, not on every button click.
    """
    tok = AutoTokenizer.from_pretrained(name)
    mdl = AutoModelForCausalLM.from_pretrained(name)
    return tok, mdl


# Keep the module-level names so any external code referencing them still works.
tokenizer, model = _load_model(model_name)


def generate_blog_post(topic):
    """Generate a blog post about *topic* using the loaded causal LM.

    Parameters
    ----------
    topic : str
        Subject of the blog post, interpolated into the prompt.

    Returns
    -------
    str
        The decoded model output (prompt included, special tokens stripped).
    """
    prompt = f"Write a detailed blog post about {topic}."
    # tokenizer(...) returns both input_ids and attention_mask; passing the
    # mask avoids the "attention mask not set" warning and ambiguity when
    # pad_token_id == eos_token_id (as with GPT-2).
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        # max_new_tokens counts only generated tokens; the original
        # max_length=512 included the prompt, shrinking the output as
        # the prompt grew.
        max_new_tokens=512,
        num_return_sequences=1,
        pad_token_id=tokenizer.eos_token_id,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)


# --- Streamlit interface ---
st.title("Blog Post Generator")
st.write("Enter a topic to generate a detailed blog post.")

topic = st.text_input("Topic", "")

if st.button("Generate Blog Post"):
    # .strip() rejects whitespace-only input, which the original accepted.
    if topic.strip():
        with st.spinner('Generating blog post...'):
            blog_post = generate_blog_post(topic)
        st.write(blog_post)
    else:
        st.write("Please enter a topic to generate a blog post.")