Commit ec6657c (committed by Ubuntu)
Parent(s): 034cfa9
serve loras update for logging

Changed files: serve_loras.py (+16, -2)

serve_loras.py CHANGED
@@ -1,6 +1,7 @@
 from compel import Compel, ReturnedEmbeddingsType
 import logging
 from abc import ABC
+import uuid
 
 import diffusers
 import torch
@@ -24,6 +25,9 @@ import os
 logger = logging.getLogger(__name__)
 logger.info("Diffusers version %s", diffusers.__version__)
 
+from axiom_logger import AxiomLogger
+axiom_logger = AxiomLogger()
+
 sentry_sdk.init(
     dsn="https://f750d1b039d66541f344ee6151d38166@o4505891057696768.ingest.sentry.io/4506071735205888",
 )
@@ -37,6 +41,7 @@ class DiffusersHandler(ABC):
 
     def __init__(self):
         self.initialized = False
+        self.req_id = None
 
     def initialize(self, properties):
         """In this initialize function, the Stable Diffusion model is loaded and
@@ -51,6 +56,7 @@ class DiffusersHandler(ABC):
 
 
         device_str = "cuda:" + str(properties.get("gpu_id")) if torch.cuda.is_available() and properties.get("gpu_id") is not None else "cpu"
+        self.device_str = device_str
 
         print("my device is " + device_str)
         self.device = torch.device(device_str)
@@ -65,6 +71,7 @@ class DiffusersHandler(ABC):
 
         logger.info(self.device)
         logger.info("Diffusion model from path %s loaded successfully")
+        axiom_logger.info("Diffusion model initialized", device=self.device_str)
 
         self.initialized = True
 
@@ -91,6 +98,7 @@ class DiffusersHandler(ABC):
         }
 
         logger.info("Processed request: '%s'", processed_request)
+        axiom_logger.info("Processed request:" + str(processed_request), request_id=self.req_id, device=self.device_str)
         return processed_request
 
 
@@ -125,6 +133,7 @@ class DiffusersHandler(ABC):
         self.pipe.unload_lora_weights()
 
         logger.info("Generated image: '%s'", inferences)
+        axiom_logger.info("Generated images", request_id=self.req_id, device=self.device_str)
         return inferences
 
     def postprocess(self, inference_outputs):
@@ -152,8 +161,9 @@ class DiffusersHandler(ABC):
             # generate txt file with the image name and the prompt inside
            # blob = bucket.blob(image_name + '.txt')
            # blob.upload_from_string(self.prompt)
-
-            outputs.append(
+            url_name = 'https://storage.googleapis.com/' + bucket_name + '/' + image_name + '.png'
+            outputs.append(url_name)
+            axiom_logger.info("Pushed image to google cloud: " + url_name, request_id=self.req_id, device=self.device_str)
         return outputs
 
 
@@ -173,6 +183,7 @@ handler_index = 0
 
 @app.route('/generate', methods=['POST'])
 def generate_image():
+    req_id = str(uuid.uuid4())
     global handler_index
     try:
         # Extract raw requests from HTTP POST body
@@ -181,14 +192,17 @@ def generate_image():
         with handler_lock:
             selected_handler = handlers[handler_index]
             handler_index = (handler_index + 1) % gpu_count  # Rotate to the next handler
+        selected_handler.req_id = req_id
 
         processed_request = selected_handler.preprocess([raw_requests])
         inferences = selected_handler.inference(processed_request)
         outputs = selected_handler.postprocess(inferences)
+        selected_handler.req_id = None
 
         return jsonify({"image_urls": outputs})
     except Exception as e:
         logger.error("Error during image generation: %s", str(e))
+        axiom_logger.critical("Error during image generation: " + str(e), request_id=req_id)
         return jsonify({"error": "Failed to generate image", "details": str(e)}), 500
 
 if __name__ == '__main__':
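The new logging calls go through an AxiomLogger instance imported from a local axiom_logger module that is not part of this commit. A minimal sketch of what that wrapper might look like, assuming it only needs the info()/critical() call shape used above (a message plus arbitrary keyword fields such as request_id and device) and that it simply forwards structured events to the standard logging module; the committed module presumably ships these events to Axiom instead:

# Hypothetical stand-in for axiom_logger.AxiomLogger; not the committed implementation.
import json
import logging

class AxiomLogger:
    def __init__(self, dataset="serve-loras"):  # dataset name is an assumption
        self._logger = logging.getLogger("axiom")
        self.dataset = dataset

    def _emit(self, level, message, **fields):
        # Serialize the message plus keyword fields (request_id, device, ...)
        # as one structured event; the real module would forward this to Axiom.
        event = {"message": message, "dataset": self.dataset, **fields}
        self._logger.log(level, json.dumps(event))

    def info(self, message, **fields):
        self._emit(logging.INFO, message, **fields)

    def critical(self, message, **fields):
        self._emit(logging.CRITICAL, message, **fields)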
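For reference, a hypothetical client call against the /generate route shown in the diff. The image_urls key in the response comes from the diff; the host, port, and the "prompt" field in the request body are assumptions, since the request parsing happens outside the changed hunks:

# Hypothetical request against the /generate endpoint.
import requests

resp = requests.post(
    "http://localhost:8080/generate",    # host and port are assumptions
    json={"prompt": "a watercolor fox"},  # request schema is an assumption
    timeout=300,
)
resp.raise_for_status()
print(resp.json()["image_urls"])  # response key taken from the diff

Each call is tagged server-side with a fresh uuid4 request_id, so the Axiom events emitted during preprocess, inference, and the Google Cloud upload can be correlated per request.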