# myapp.py
from flask import Flask, request, jsonify
from flask_cors import CORS
import os
from huggingface_hub import InferenceClient
from io import BytesIO
from PIL import Image
import gradio as gr # Import Gradio
# Initialize the Flask app
myapp = Flask(__name__)
CORS(myapp) # Enable CORS for all routes
# Initialize the InferenceClient with your Hugging Face token
HF_TOKEN = os.environ.get("HF_TOKEN")  # set the HF_TOKEN environment variable before running
client = InferenceClient(token=HF_TOKEN)
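
# Added guard (not in the original file): fail fast when the token is missing,
# rather than letting the first inference call fail with a confusing error.
if not HF_TOKEN:
    raise RuntimeError("HF_TOKEN environment variable is not set")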

# Generate an image from a text prompt via the Hugging Face Inference API
def generate_image(prompt, seed=1, model="prompthero/openjourney-v4"):
    try:
        # text_to_image returns a PIL.Image.Image on success
        return client.text_to_image(prompt=prompt, seed=seed, model=model)
    except Exception as e:
        print(f"Error generating image: {e}")
        return None
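
# The request/jsonify imports and the CORS setup above are otherwise unused, so
# here is a minimal sketch of the REST endpoint they suggest. The /generate
# route, its JSON fields, and the PNG response are assumptions, not part of the
# original file.
from flask import send_file  # used only by this hypothetical route

@myapp.route("/generate", methods=["POST"])
def generate_endpoint():
    data = request.get_json(silent=True) or {}
    prompt = data.get("prompt")
    if not prompt:
        return jsonify({"error": "Missing 'prompt' field"}), 400
    image = generate_image(
        prompt,
        seed=int(data.get("seed", 1)),
        model=data.get("model", "prompthero/openjourney-v4"),
    )
    if image is None:
        return jsonify({"error": "Image generation failed"}), 500
    buf = BytesIO()
    image.save(buf, format="PNG")  # stream the PIL image back as a PNG
    buf.seek(0)
    return send_file(buf, mimetype="image/png")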

# Gradio interface function
def gradio_interface(prompt, seed, model_name):
    image = generate_image(prompt, int(seed), model_name)
    if image is None:
        # An "image" output cannot render a string, so raise a visible Gradio error instead
        raise gr.Error("Failed to generate image")
    return image  # gr.Interface accepts a PIL image directly; no PNG/BytesIO conversion needed

# Set up the Gradio interface (assigned to a variable and launched in the main
# block below; calling .launch() at import time would block before the
# __main__ guard is ever reached)
demo = gr.Interface(
    fn=gradio_interface,
    inputs=[
        gr.Textbox(label="Prompt", placeholder="Enter a text prompt", lines=2),
        gr.Number(label="Seed", value=1, precision=0),
        gr.Textbox(label="Model Name", value="prompthero/openjourney-v4", placeholder="Enter model name"),
    ],
    outputs="image",
    title="Image Generation with Hugging Face",
    description="Enter a prompt, seed, and model name to generate an image.",
)

# Launch the Gradio UI when the script is run directly. Gradio binds port 7860
# (the Hugging Face Spaces convention), so the Flask app cannot share that port;
# start it separately (e.g. myapp.run(host="0.0.0.0", port=5000)) if you need
# the REST endpoint as well.
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)
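
# Example call against the hypothetical /generate endpoint above, assuming the
# Flask app is running on port 5000:
#   curl -X POST http://localhost:5000/generate \
#        -H "Content-Type: application/json" \
#        -d '{"prompt": "a watercolor fox", "seed": 42}' \
#        --output fox.png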