# (web-scrape artifacts removed: file-size banner, blame hashes, and line-number gutter)
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
from PIL import Image
# Load tokenizer and model
# NOTE(review): GPT-Neo 2.7B is a text-only causal language model (~10 GB download
# in fp32); it is loaded eagerly at import time, so starting this script blocks
# until the weights are downloaded/cached.
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-2.7B")
model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-2.7B")
def generate_image(text):
    """Generate a PIL image containing text continued by GPT-Neo.

    Args:
        text: Prompt string typed by the user.

    Returns:
        A PIL.Image.Image with the model's generated text rendered on it.
    """
    # Local imports: only needed inside this handler.
    import textwrap
    from PIL import ImageDraw

    # Tokenize input text
    input_ids = tokenizer.encode(text, return_tensors="pt")
    # Sample a continuation; pad_token_id silences the "no pad token" warning
    # (GPT-Neo defines only an EOS token).
    output = model.generate(
        input_ids,
        do_sample=True,
        max_length=128,
        num_return_sequences=1,
        pad_token_id=tokenizer.eos_token_id,
    )
    # BUG FIX: decode() returns a plain string of generated TEXT, not image
    # bytes — the original Image.open(image_bytes) treated that string as a
    # filesystem path and crashed. GPT-Neo cannot produce image data, so we
    # render the generated text onto a blank canvas instead.
    generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
    image = Image.new("RGB", (768, 512), "white")
    draw = ImageDraw.Draw(image)
    draw.multiline_text((10, 10), textwrap.fill(generated_text, width=90), fill="black")
    return image
# Create Gradio interface.
# BUG FIX: the `gr.inputs` namespace and string themes like "huggingface"
# were removed in Gradio 3.x — use gr.Textbox directly and the default theme.
iface = gr.Interface(
    fn=generate_image,
    inputs=gr.Textbox(lines=3, label="Input Text"),
    outputs="image",
    title="Text-to-Image Generator",
    description="Generate images from text using Hugging Face's GPT-Neo model.",
)

# Launch Gradio interface (starts a local web server and blocks).
iface.launch()