# docs-bot/pages/textimage.py
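"""Streamlit page that turns a text prompt into an image with the PixArt-alpha LCM pipeline.

The user enters a prompt, a random seed is drawn, the image is generated and saved
to disk, and the result is displayed along with the seed that produced it.
"""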
import streamlit as st
import os
import random
import uuid
import spaces  # only relevant when running on Hugging Face Spaces hardware
import numpy as np
import torch
from typing import Tuple
from datetime import datetime
from diffusers import PixArtAlphaPipeline, LCMScheduler
# Check if CUDA is available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Define Hugging Face API details
API_URL = "https://api-inference.huggingface.co/models/Huzaifa367/chat-summarizer"
API_TOKEN = os.getenv("AUTH_TOKEN")
HEADERS = {"Authorization": f"Bearer {API_TOKEN}"}
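# NOTE: API_URL, API_TOKEN and HEADERS are defined above but never used on this page;
# they point at a text-summarization model and appear to belong to another page of the app.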

# Load the PixArt-alpha LCM pipeline and move it to the selected device.
# fp16 is only safe on GPU, so fall back to fp32 when running on CPU.
pipe = PixArtAlphaPipeline.from_pretrained(
    "PixArt-alpha/PixArt-LCM-XL-2-1024-MS",
    torch_dtype=torch.float16 if device.type == "cuda" else torch.float32,
    use_safetensors=True,
).to(device)


# Generate an image for the given prompt; return the saved file path and the seed used
def generate_image(prompt: str) -> Tuple[str, int]:
    seed = random.randint(0, np.iinfo(np.int32).max)
    images = pipe(
        prompt=prompt,
        width=1024,
        height=1024,
        num_inference_steps=4,  # LCM checkpoints only need a handful of steps
        generator=torch.Generator().manual_seed(seed),
        num_images_per_prompt=1,
        use_resolution_binning=True,
        output_type="pil",
    ).images
    # Save the image and return its path along with the seed
    image_path = save_image(images[0])
    return image_path, seed


# Save a PIL image under a unique filename and return the path
def save_image(img):
    unique_name = str(uuid.uuid4()) + ".png"
    img.save(unique_name)
    return unique_name
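# NOTE: images are written to the current working directory; on hosted platforms
# such as Hugging Face Spaces that storage is typically ephemeral.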


# Streamlit page
def main():
    st.set_page_config(layout="wide")
    st.title("Instant Image Generator")
    # Prompt input (placeholder text keeps the box empty until the user types)
    prompt = st.text_area("Prompt", placeholder="Enter your prompt here...")
    # Generate button
    if st.button("Generate Image"):
        if prompt:
            # Generate an image from the prompt
            image_path, seed = generate_image(prompt)
            # Display the generated image with its seed in the caption
            st.image(image_path, use_column_width=True, caption=f"Seed: {seed}")


if __name__ == "__main__":
    main()
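# As a standalone script, this page can be launched with:
#   streamlit run textimage.py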