# blogs/app.py
import streamlit as st
from transformers import pipeline
from diffusers import StableDiffusionPipeline
from PIL import Image
import torch
# Initialize the text generation model (cached so it is loaded only once per session)
@st.cache_resource
def initialize_text_generator():
    try:
        text_generator = pipeline('text-generation', model='gpt2')
    except Exception as e:
        st.error(f"Error loading text generation model: {e}")
        return None
    return text_generator
# Initialize the image generation model (cached so the pipeline is loaded only once per session)
@st.cache_resource
def initialize_image_generator():
    try:
        device = "cuda" if torch.cuda.is_available() else "cpu"
        image_generator = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to(device)
    except Exception as e:
        st.error(f"Error loading image generation model: {e}")
        return None
    return image_generator
text_generator = initialize_text_generator()
image_generator = initialize_image_generator()
def generate_blog(title):
    if text_generator is None or image_generator is None:
        return "Failed to load models", None
    # Generate blog content
    blog_content = text_generator(title, max_length=500, num_return_sequences=1)[0]['generated_text']
    # Generate an image
    image_prompt = f"An image representing {title}"
    image = image_generator(image_prompt).images[0]
    return blog_content, image
# Streamlit app
st.title('Blog Generator')
title = st.text_input('Enter the title of your blog:')
if title:
    with st.spinner('Generating blog content and image...'):
        blog_content, image = generate_blog(title)
    if blog_content == "Failed to load models":
        st.error(blog_content)
    else:
        st.success('Blog generated successfully!')
        st.subheader('Blog Content')
        st.write(blog_content)
        st.subheader('Generated Image')
        st.image(image, caption='Generated Image')
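
# To run this app locally (assuming streamlit, transformers, diffusers, torch, and pillow are installed):
#   streamlit run app.py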