import gradio as gr
import torch
from transformers import pipeline
from diffusers import DiffusionPipeline
# Load the pipeline for text generation
text_generator = pipeline(
    "text-generation",
    model="Ar4ikov/gpt2-650k-stable-diffusion-prompt-generator",
    tokenizer="gpt2",
)
# Load the pipeline for image generation. Juggernaut XL is a Stable Diffusion
# XL checkpoint, so it must be loaded with diffusers' DiffusionPipeline rather
# than transformers' AutoModelForCausalLM.
device = "cuda" if torch.cuda.is_available() else "cpu"
image_pipe = DiffusionPipeline.from_pretrained("stablediffusionapi/juggernaut-xl-v8").to(device)
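# Optional: if GPU memory is tight, attention slicing reduces peak usage at
# some speed cost (a standard diffusers knob, not specific to this checkpoint):
#   image_pipe.enable_attention_slicing()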
# Function to generate text based on input prompt
def generate_text(prompt):
    return text_generator(prompt, max_length=77)[0]["generated_text"]
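# Example (hypothetical output; generation is sampled, so results vary):
#   generate_text("a castle at dusk")
#   might return "a castle at dusk, highly detailed digital painting, ..."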
# Function to generate an image from the (expanded) text prompt
def generate_image(text):
    # The diffusers pipeline tokenizes the prompt internally and returns
    # PIL images in the .images list
    return image_pipe(text).images[0]
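# The pipeline call also accepts the usual diffusers parameters, e.g.
# (values here are illustrative, not tuned for this checkpoint):
#   image_pipe(text, num_inference_steps=30, guidance_scale=7.5).images[0]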
# Chain the two steps: expand the prompt, then render an image from it.
# gr.Interface expects a single callable, so the two generators are wrapped.
def generate_art(prompt):
    expanded_prompt = generate_text(prompt)
    return expanded_prompt, generate_image(expanded_prompt)

# Create Gradio interface
iface = gr.Interface(
    fn=generate_art,
    inputs="textbox",
    outputs=["textbox", "image"],
    title="AI Art Prompt Generator",
    description="Art Prompt Generator is a user-friendly interface for optimizing input to AI art generators. For faster generation, load the model locally on a GPU; the online demo on Hugging Face Spaces runs on CPU, so processing is slower.",
)
# Launch the interface
iface.launch()
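# launch() also accepts options such as share=True (public Gradio link) or
# server_name="0.0.0.0" (listen on all interfaces) when running locally.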