# app.py — Hugging Face Space "anything" (author: djrana, commit 7186c16)
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
from PIL import Image
# Load tokenizer and model at import time (downloads from the Hugging Face
# Hub on first run; GPT-Neo 2.7B needs roughly 10+ GB of RAM on CPU —
# NOTE(review): confirm the hosting hardware can hold it).
# Both names are read as module globals by generate_image() below.
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-2.7B")
model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-2.7B")
def generate_image(text):
    """Generate an image containing text continued by GPT-Neo.

    GPT-Neo is a text-only causal language model: its output tokens
    decode to a *string*, never to image bytes.  The original code
    passed that string to ``Image.open`` — which treats a ``str`` as a
    filesystem path — so the app always raised ``FileNotFoundError``.
    Instead, the generated text is rendered onto a blank canvas so the
    Gradio ``image`` output receives a real PIL image.

    Parameters
    ----------
    text : str
        Prompt used to condition the language model.

    Returns
    -------
    PIL.Image.Image
        White canvas with the generated text drawn on it.
    """
    # Tokenize the prompt for the causal LM.
    input_ids = tokenizer.encode(text, return_tensors="pt")
    # Sample one continuation; no_grad avoids building an autograd graph
    # (inference only).  pad_token_id silences the "no pad token" warning
    # GPT-Neo otherwise emits.
    with torch.no_grad():
        output = model.generate(
            input_ids,
            do_sample=True,
            max_length=128,
            num_return_sequences=1,
            pad_token_id=tokenizer.eos_token_id,
        )
    generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
    return _render_text(generated_text)


def _render_text(text, size=(768, 512)):
    """Draw *text*, word-wrapped, onto a white canvas and return the image."""
    # Local imports keep the top-of-file import block untouched; PIL is
    # already a dependency of this app (``from PIL import Image`` above).
    import textwrap
    from PIL import ImageDraw

    image = Image.new("RGB", size, color="white")
    draw = ImageDraw.Draw(image)
    # Fall back to a single space so multiline_text never gets "".
    wrapped = "\n".join(textwrap.wrap(text, width=80)) or " "
    draw.multiline_text((16, 16), wrapped, fill="black")
    return image
# Build the Gradio UI.  ``gr.inputs.Textbox`` and the ``theme="huggingface"``
# string theme belong to the pre-3.0 API and were removed in Gradio 3.x;
# the modern top-level component classes are used instead.
iface = gr.Interface(
    fn=generate_image,
    inputs=gr.Textbox(lines=3, label="Input Text"),
    outputs=gr.Image(label="Generated Image"),
    title="Text-to-Image Generator",
    description="Generate images from text using Hugging Face's GPT-Neo model.",
)

# Launch only when executed as a script (Spaces runs app.py as __main__;
# the guard keeps the module importable without starting a server).
if __name__ == "__main__":
    iface.launch()