|
import streamlit as st |
|
from transformers import GPT2Tokenizer, GPT2LMHeadModel |
|
|
|
|
|
model_name = 'gpt2-large'


@st.cache_resource
def _load_gpt2(name: str):
    """Load and cache the GPT-2 tokenizer and model.

    Streamlit re-executes the whole script on every widget interaction;
    without caching, the ~3 GB gpt2-large weights would be re-downloaded /
    re-loaded on every button click. ``st.cache_resource`` keeps a single
    shared instance across reruns and sessions.
    """
    tok = GPT2Tokenizer.from_pretrained(name)
    mdl = GPT2LMHeadModel.from_pretrained(name)
    return tok, mdl


tokenizer, model = _load_gpt2(model_name)


st.title("GPT-2 Blog Post Generator")


text = st.text_area("Enter your Topic: ")
|
|
|
def generate_text(text):
    """Generate a blog-post continuation for *text* with GPT-2.

    Args:
        text: The topic/prompt entered by the user.

    Returns:
        The generated text (prompt included) as a string, or ``None`` if
        the input is empty or generation fails. Errors are surfaced to the
        user via ``st.error`` rather than raised.
    """
    # Guard: an empty prompt would produce an empty input_ids tensor,
    # which model.generate cannot handle.
    if not text or not text.strip():
        st.warning("Please enter a topic first.")
        return None

    try:
        encoded_input = tokenizer(text, return_tensors='pt')

        output = model.generate(
            input_ids=encoded_input['input_ids'],
            max_length=200,
            num_return_sequences=1,
            no_repeat_ngram_size=2,
            # do_sample=True is required for top_k / top_p / temperature to
            # take effect; without it generation is greedy and these
            # parameters are silently ignored.
            do_sample=True,
            top_k=50,
            top_p=0.95,
            temperature=0.7,
            attention_mask=encoded_input['attention_mask'],
            # GPT-2 has no pad token; reuse EOS to silence the warning and
            # pad correctly.
            pad_token_id=tokenizer.eos_token_id
        )

        # Single sequence requested, so decode the first (only) candidate.
        generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
        return generated_text

    except Exception as e:
        # Surface the failure in the UI instead of crashing the app.
        st.error(f"An error occurred: {e}")
        return None
|
|
|
# Run generation only when the user clicks the button; skip the output
# section entirely if generation returned nothing (error or empty input).
if st.button("Generate"):
    result = generate_text(text)
    if not result:
        pass  # error/warning already shown inside generate_text
    else:
        st.subheader("Generated Blog Post")
        st.write(result)