File size: 3,458 Bytes
19db560
 
 
462b6f1
19db560
462b6f1
19db560
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
from flask import Flask, request, jsonify, send_file
from flask_cors import CORS
import os
from huggingface_hub import InferenceClient
from io import BytesIO
from PIL import Image

# Initialize the Flask app
app = Flask(__name__)
CORS(app)  # Enable CORS for all routes

# Initialize the InferenceClient with your Hugging Face token
HF_TOKEN = os.environ.get("HF_TOKEN")  # Ensure to set your Hugging Face token in the environment
client = InferenceClient(token=HF_TOKEN)

@app.route('/')
def home():
    return "Welcome to the Image Background Remover!"


# Function to generate an image from a text prompt
def generate_image(prompt, negative_prompt=None, height=512, width=512, model="stabilityai/stable-diffusion-2-1", num_inference_steps=50, guidance_scale=7.5, seed=None):
    try:
        # Generate the image using Hugging Face's inference API with additional parameters
        image = client.text_to_image(
            prompt=prompt, 
            negative_prompt=negative_prompt, 
            height=height, 
            width=width, 
            model=model,
            num_inference_steps=num_inference_steps,  # Control the number of inference steps
            guidance_scale=guidance_scale,  # Control the guidance scale
            seed=seed  # Control the seed for reproducibility
        )
        return image  # Return the generated image
    except Exception as e:
        print(f"Error generating image: {str(e)}")
        return None

# Flask route for the API endpoint to generate an image
@app.route('/generate_image', methods=['POST'])
def generate_api():
    data = request.get_json()

    # Extract required fields from the request
    prompt = data.get('prompt', '')
    negative_prompt = data.get('negative_prompt', None)
    height = data.get('height', 1024)  # Default height
    width = data.get('width', 720)  # Default width
    num_inference_steps = data.get('num_inference_steps', 50)  # Default number of inference steps
    guidance_scale = data.get('guidance_scale', 7.5)  # Default guidance scale
    model_name = data.get('model', 'stabilityai/stable-diffusion-2-1')  # Default model
    seed = data.get('seed', None)  # Seed for reproducibility, default is None

    if not prompt:
        return jsonify({"error": "Prompt is required"}), 400

    try:
        # Call the generate_image function with the provided parameters
        image = generate_image(prompt, negative_prompt, height, width, model_name, num_inference_steps, guidance_scale, seed)

        if image:
            # Save the image to a BytesIO object
            img_byte_arr = BytesIO()
            image.save(img_byte_arr, format='PNG')  # Convert the image to PNG
            img_byte_arr.seek(0)  # Move to the start of the byte stream

            # Send the generated image as a response
            return send_file(
                img_byte_arr, 
                mimetype='image/png', 
                as_attachment=False,  # Send the file as an attachment
                download_name='generated_image.png'  # The file name for download
            )
        else:
            return jsonify({"error": "Failed to generate image"}), 500
    except Exception as e:
        print(f"Error in generate_api: {str(e)}")  # Log the error
        return jsonify({"error": str(e)}), 500

# Add this block to make sure your app runs when called
if __name__ == "__main__":
    app.run(host='0.0.0.0', port=7860)  # Run directly if needed for testing