import gradio as gr
import torch
from transformers import pipeline
from diffusers import AutoPipelineForText2Image
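# Requires: pip install gradio torch transformers diffusers
# (diffusers is assumed here so the Stable Diffusion XL checkpoint below can be loaded).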
text_generator = pipeline(
    "text-generation",
    model="Ar4ikov/gpt2-650k-stable-diffusion-prompt-generator",
    tokenizer="gpt2",
)

# "stablediffusionapi/juggernaut-xl-v8" is a Stable Diffusion XL checkpoint, so it is
# loaded with the diffusers text-to-image pipeline rather than AutoModelForCausalLM,
# which cannot produce images (this assumes the repo ships diffusers-format weights).
device = "cuda" if torch.cuda.is_available() else "cpu"
image_pipe = AutoPipelineForText2Image.from_pretrained(
    "stablediffusionapi/juggernaut-xl-v8",
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
).to(device)

def generate_text(prompt):
    # Expand the user's idea into a detailed art prompt (capped at 77 tokens,
    # the length accepted by Stable Diffusion's text encoder)
    return text_generator(prompt, max_length=77)[0]["generated_text"]

def generate_image(text):
    # Run the text-to-image pipeline on the prompt; it returns PIL images directly,
    # so the first one can be handed straight to Gradio.
    image = image_pipe(text).images[0]
    return image
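

# gr.Interface expects a single callable, so this wrapper chains the two steps:
# first expand the user's idea into a prompt, then render an image from that prompt.
def generate(prompt):
    expanded_prompt = generate_text(prompt)
    return expanded_prompt, generate_image(expanded_prompt)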
iface = gr.Interface(
    fn=generate,
    inputs="textbox",
    outputs=["textbox", "image"],
    title="AI Art Prompt Generator",
    description=(
        "Art Prompt Generator is a user-friendly interface that expands your input into "
        "an optimized prompt for AI art generators. For faster generation, run the model "
        "locally on a GPU; the online demo on Hugging Face Spaces runs on CPU, so it is "
        "slower."
    ),
    theme="huggingface",
)

iface.launch()