Update app.py
app.py
CHANGED
@@ -1,77 +1,11 @@
-from flask import Flask, request, jsonify, send_file
-from flask_cors import CORS
-import asyncio
-import tempfile
-import os
-from threading import RLock
-from huggingface_hub import InferenceClient
-from all_models import models  # Importing models from all_models
-
-app = Flask(__name__)
-CORS(app)  # Enable CORS for all routes
-
-lock = RLock()
-HF_TOKEN = os.environ.get("HF_TOKEN")  # Hugging Face token
-
-inference_timeout = 600  # Set timeout for inference
-
-# Function to dynamically load models from the "models" list
-def get_model_from_name(model_name):
-    return model_name if model_name in models else None
-
-# Asynchronous function to perform inference
-async def infer(client, prompt, seed=1, timeout=inference_timeout, model="prompthero/openjourney-v4"):
-    task = asyncio.create_task(
-        asyncio.to_thread(client.text_to_image, prompt=prompt, seed=seed, model=model)
-    )
-    await asyncio.sleep(0)
-    try:
-        result = await asyncio.wait_for(task, timeout=timeout)
-    except (Exception, asyncio.TimeoutError) as e:
-        print(e)
-        print(f"Task timed out for model: {model}")
-        if not task.done():
-            task.cancel()
-        result = None
-
-    if task.done() and result is not None:
-        with lock:
-            temp_image = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
-            with open(temp_image.name, "wb") as f:
-                f.write(result)  # Save the result image as a temporary file
-            return temp_image.name  # Return the path to the saved image
-    return None
-
-# Flask route for the API endpoint
-@app.route('/generate_api', methods=['POST'])
-def generate_api():
-    data = request.get_json()
+# app.py
 
-    prompt = data.get('prompt')
-
-    seed = data.get('seed', 1)
-    model_name = data.get('model', 'prompthero/openjourney-v4')  # Default to "prompthero/openjourney-v4" if not provided
-
-    if not prompt:
-        return jsonify({"error": "Prompt is required"}), 400
-
-    # Get the model from all_models
-    model = get_model_from_name(model_name)
-    if not model:
-        return jsonify({"error": f"Model '{model_name}' not found in available models"}), 400
-
-    try:
-        # Create a generic InferenceClient for the model
-        client = InferenceClient(token=HF_TOKEN)  # Pass Hugging Face token if needed
-
-        # Call the async inference function
-        result_path = asyncio.run(infer(client, prompt, seed, model=model))
-        if result_path:
-            return send_file(result_path, mimetype='image/png')  # Send back the generated image file
-        else:
-            return jsonify({"error": "Failed to generate image"}), 500
-    except Exception as e:
-        return jsonify({"error": str(e)}), 500
+import os
+import subprocess
 
 if __name__ == "__main__":
-
+    # Run awake.py in the background
+    subprocess.Popen(["python", "wk.py"])  # Start awake.py
+
+    # Run the Flask app using Gunicorn
+    os.system("gunicorn -w 4 -b 0.0.0.0:7860 myapp:myapp")  # 4 worker processes
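For context, the removed /generate_api route accepted a JSON body with a required "prompt" plus optional "seed" and "model" fields, and answered with the generated PNG (or a JSON error). A minimal hypothetical client sketch, assuming the old Flask app is still serving on localhost:7860 and that the requests package is available:

# Hypothetical client for the removed /generate_api endpoint; the host, port,
# and prompt text are assumptions, not part of this commit.
import requests

resp = requests.post(
    "http://localhost:7860/generate_api",
    json={"prompt": "a watercolor fox", "seed": 1, "model": "prompthero/openjourney-v4"},
    timeout=600,  # generous client timeout to match the server's inference_timeout
)
if resp.ok:
    with open("out.png", "wb") as f:
        f.write(resp.content)  # the route returned raw PNG bytes via send_file
else:
    print(resp.json())  # errors came back as JSON: {"error": "..."}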
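The new launcher points Gunicorn at myapp:myapp, i.e. a module named myapp exposing a WSGI callable also named myapp; that module is not part of this commit (note, too, that the comment mentions awake.py while the command actually starts wk.py). A minimal sketch of what such a module would need to provide, with an illustrative route only:

# myapp.py -- hypothetical sketch of the module the Gunicorn command expects.
# "gunicorn -w 4 -b 0.0.0.0:7860 myapp:myapp" imports the module `myapp`
# and serves the WSGI callable named `myapp` that it finds there.
from flask import Flask

myapp = Flask(__name__)

@myapp.route("/health")
def health():
    return "ok"  # placeholder; the real app would define its own routes

Because subprocess.Popen returns immediately while os.system blocks, wk.py keeps running in the background for as long as the Gunicorn process serves requests.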