|
import textwrap

import gradio as gr
import torch
from PIL import Image, ImageDraw
from transformers import AutoTokenizer, AutoModelForCausalLM
|
|
|
|
|
# Load the 2.7B-parameter GPT-Neo checkpoint at import time.  The first run
# downloads several GB of weights from the Hugging Face hub; later runs read
# from the local cache.  NOTE(review): GPT-Neo is a causal *language* model —
# it generates tokens, not image bytes.
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-2.7B")

model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-2.7B")
|
|
|
def generate_image(text):
    """Generate a continuation of *text* with GPT-Neo and render it as an image.

    GPT-Neo produces text, not image data.  The original implementation
    passed the decoded string straight to ``Image.open``, which raised an
    exception on every call (the string is neither a file path nor image
    bytes).  Instead, the generated text is drawn onto a blank canvas so the
    Gradio image output receives an actual ``PIL.Image``.

    Args:
        text: Prompt string from the Gradio textbox.

    Returns:
        PIL.Image.Image: A white RGB canvas with the generated text drawn on it.
    """
    input_ids = tokenizer.encode(text, return_tensors="pt")

    # Inference only: disable gradient tracking to cut memory use and latency.
    with torch.no_grad():
        output = model.generate(
            input_ids,
            do_sample=True,
            max_length=128,  # NOTE: counts prompt tokens too, not just new ones
            num_return_sequences=1,
        )

    generated_text = tokenizer.decode(output[0], skip_special_tokens=True)

    # Render the text onto a canvas; wrap long lines so they stay visible.
    canvas = Image.new("RGB", (800, 600), "white")
    draw = ImageDraw.Draw(canvas)
    draw.multiline_text(
        (16, 16),
        textwrap.fill(generated_text, width=90),
        fill="black",
    )
    return canvas
|
|
|
|
|
# Build the web UI.  ``gr.inputs.Textbox`` and the ``theme="huggingface"``
# string were removed in modern Gradio releases; use the top-level component
# classes and the default theme instead.
iface = gr.Interface(
    fn=generate_image,
    inputs=gr.Textbox(lines=3, label="Input Text"),
    outputs=gr.Image(label="Generated Image"),
    title="Text-to-Image Generator",
    description="Generate images from text using Hugging Face's GPT-Neo model.",
)

# Guard the launch so importing this module (e.g. from tests or another app)
# does not start a web server as a side effect.
if __name__ == "__main__":
    iface.launch()
|
|