import gradio as gr
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM, AutoTokenizer
# Load the Text-to-Image model.
# CuteCartoonRedmond-V2 is a diffusion (text-to-image) checkpoint, not a transformers
# sequence-to-sequence model, so it is loaded with diffusers. Assumption: the repo ships
# SDXL LoRA weights meant for stabilityai/stable-diffusion-xl-base-1.0 (check the model
# card); a GPU is required for the .to("cuda") call.
image_pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
image_pipe.load_lora_weights("artificialguybr/CuteCartoonRedmond-V2")

# Load the Text Generation model.
# Mixtral-8x7B-Instruct is a causal (decoder-only) LM, so AutoModelForCausalLM is the
# correct auto class; device_map="auto" needs accelerate and enough GPU memory.
text_model = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mixtral-8x7B-Instruct-v0.1", torch_dtype=torch.float16, device_map="auto"
)
text_tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1")
# Define a function to generate an image from text
def generate_image(text):
    # The diffusion pipeline returns PIL images directly; no manual tensor decoding is needed.
    return image_pipe(text).images[0]
# Define a function to generate text from text
def generate_text(text):
    inputs = text_tokenizer(text, return_tensors="pt").to(text_model.device)
    output = text_model.generate(**inputs, max_new_tokens=128)
    return text_tokenizer.decode(output[0], skip_special_tokens=True)
# Create a Gradio interface.
# With multiple output components, the function must return a tuple in the same order
# as `outputs`, not a dict.
demo = gr.Interface(
    fn=lambda text: (generate_image(text), generate_text(text)),
    inputs="text",
    outputs=["image", "text"],
    title="Text-to-Image and Text Generation",
    description="Enter a prompt to generate both an image and text!",
)
# Launch the Gradio app
demo.launch()
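
This app imports diffusers and relies on accelerate alongside gradio, torch, and transformers, so the Space's requirements.txt needs to list them. A minimal sketch, assuming the diffusers/LoRA setup above (package names inferred from the imports; exact version pins are not specified here):

# requirements.txt (sketch)
gradio
torch
transformers
diffusers
accelerate  # backs device_map="auto"
peft        # used by load_lora_weights in recent diffusers versions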