from flask import Flask, request, jsonify, send_file
from flask_cors import CORS
from transformers import CLIPImageProcessor
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
import torch
import io

myapp = Flask(__name__)
CORS(myapp)  # Enable CORS for all routes

# Load the pre-trained Stable Diffusion 2 pipeline with an explicit safety checker
repo_id = "stabilityai/stable-diffusion-2"
pipe = DiffusionPipeline.from_pretrained(
    repo_id,
    safety_checker=StableDiffusionSafetyChecker.from_pretrained(
        "CompVis/stable-diffusion-safety-checker"
    ),
    feature_extractor=CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32"),
    torch_dtype=torch.float32,  # Use float32 for CPU inference
)

# Swap in the DPM-Solver multistep scheduler for faster sampling
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)

# Move the pipeline to CPU
pipe = pipe.to("cpu")


@myapp.route('/')
def home():
    return "Stable Diffusion API is running!"


@myapp.route('/generate', methods=['POST'])
def generate():
    prompt = request.form.get('prompt')
    if not prompt:
        return jsonify({"error": "No prompt provided!"}), 400

    # Generate the image
    results = pipe(
        prompt,
        guidance_scale=9,
        num_inference_steps=25,
        num_images_per_prompt=1,
    )

    # Reject images flagged by the safety checker
    if not results.nsfw_content_detected[0]:
        img_io = io.BytesIO()
        results.images[0].save(img_io, format='PNG')
        img_io.seek(0)  # Rewind to the beginning of the BytesIO buffer
        return send_file(
            img_io,
            mimetype='image/png',
            as_attachment=True,
            download_name='generated_image.png',  # Flask >= 2.0: download_name replaces attachment_filename
        )
    else:
        return jsonify({"error": "NSFW content detected!"}), 400


if __name__ == '__main__':
    myapp.run(host="0.0.0.0", port=8080, debug=True)
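
# Example client call (a minimal sketch, not part of this server): assuming the app
# is running locally on port 8080 as configured above, a POST with a form-encoded
# "prompt" field should return a PNG attachment. The `requests` dependency and the
# example prompt are assumptions for illustration only.
#
#   import requests
#   resp = requests.post(
#       "http://localhost:8080/generate",
#       data={"prompt": "a watercolor painting of a lighthouse at dusk"},
#   )
#   if resp.status_code == 200:
#       with open("generated_image.png", "wb") as f:
#           f.write(resp.content)  # Save the returned PNG bytes
#   else:
#       print(resp.json())  # {"error": ...} on a missing prompt or NSFW detection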