Commit: Update inference_manager.py
File changed: inference_manager.py (+45 lines, −2 lines)
@@ -4,6 +4,7 @@ import json
|
|
4 |
import time
|
5 |
import copy
|
6 |
import torch
|
|
|
7 |
from diffusers import AutoPipelineForText2Image, StableDiffusionPipeline,DiffusionPipeline, StableDiffusionXLPipeline, AutoencoderKL, AutoencoderTiny, UNet2DConditionModel
|
8 |
from huggingface_hub import hf_hub_download, snapshot_download
|
9 |
from pathlib import Path
|
@@ -21,7 +22,9 @@ import glob
|
|
21 |
import traceback
|
22 |
from insightface.app import FaceAnalysis
|
23 |
import cv2
|
|
|
24 |
import gradio as gr
|
|
|
25 |
|
26 |
#from onediffx import compile_pipe, save_pipe, load_pipe
|
27 |
|
@@ -308,7 +311,47 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
|
|
308 |
if randomize_seed:
|
309 |
seed = random.randint(0, MAX_SEED)
|
310 |
return seed
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
311 |
|
|
|
|
|
|
|
|
|
|
|
|
|
312 |
class ModelManager:
|
313 |
def __init__(self, model_directory):
|
314 |
"""
|
@@ -506,7 +549,7 @@ class ModelManager:
|
|
506 |
cfg = model.cfg
|
507 |
p = inference_params.get("prompt")
|
508 |
negative_prompt = inference_params.get("negative_prompt", cfg.get("negative_prompt", ""))
|
509 |
-
|
510 |
guidance_scale = inference_params.get("guidance_scale", cfg.get("guidance_scale", 7))
|
511 |
width = inference_params.get("width", cfg.get("width", 512))
|
512 |
height = inference_params.get("height", cfg.get("height", 512))
|
@@ -534,7 +577,7 @@ class ModelManager:
|
|
534 |
width=width,
|
535 |
height=height,
|
536 |
guidance_scale=guidance_scale,
|
537 |
-
num_inference_steps=
|
538 |
generator=generator,
|
539 |
num_images_per_prompt=1,
|
540 |
output_type="pil",
|
|
|
4 |
import time
|
5 |
import copy
|
6 |
import torch
|
7 |
+
import random
|
8 |
from diffusers import AutoPipelineForText2Image, StableDiffusionPipeline,DiffusionPipeline, StableDiffusionXLPipeline, AutoencoderKL, AutoencoderTiny, UNet2DConditionModel
|
9 |
from huggingface_hub import hf_hub_download, snapshot_download
|
10 |
from pathlib import Path
|
|
|
22 |
import traceback
|
23 |
from insightface.app import FaceAnalysis
|
24 |
import cv2
|
25 |
+
import re
|
26 |
import gradio as gr
|
27 |
+
from PIL import Image
|
28 |
|
29 |
#from onediffx import compile_pipe, save_pipe, load_pipe
|
30 |
|
|
|
311 |
if randomize_seed:
|
312 |
seed = random.randint(0, MAX_SEED)
|
313 |
return seed
|
314 |
+
|
315 |
+
|
316 |
+
# Pre-compiled pattern for detecting child-related terms in prompts.
# \b anchors restrict matching to whole words; without them the filter
# substring-matched inside benign words ("person" contains "son",
# "kidney" contains "kid") and mangled harmless prompts.
child_related_regex = re.compile(
    r'\b(child|children|kid|kids|baby|babies|toddler|infant|juvenile|minor|underage|preteen|adolescent|youngster|youth|son|daughter|young|kindergarten|preschool|'
    r'([1-9]|1[0-7])[\s_\-|\.\,]*year(s)?[\s_\-|\.\,]*old|'  # 1 to 17 years old, with various separators
    r'little|small|tiny|short|young|new[\s_\-|\.\,]*born[\s_\-|\.\,]*(boy|girl|male|man|bro|brother|sis|sister))\b',
    re.IGNORECASE
)


# Function to remove child-related content from a prompt
def remove_child_related_content(prompt):
    """Strip child-related terms from *prompt*.

    Matched terms are removed, runs of whitespace left behind by the
    removal are collapsed to a single space, and the result is stripped
    of leading/trailing whitespace.
    """
    cleaned_prompt = child_related_regex.sub('', prompt)
    # Deleting a word between two spaces leaves a double space; tidy it up.
    cleaned_prompt = re.sub(r'\s{2,}', ' ', cleaned_prompt)
    return cleaned_prompt.strip()


# Function to check if a prompt contains child-related content
def contains_child_related_content(prompt):
    """Return True if *prompt* contains any child-related term, else False."""
    return bool(child_related_regex.search(prompt))
|
333 |
+
|
334 |
+
def save_image(img):
    """Save *img* under ./tmp as a uniquely named WebP file.

    Parameters
    ----------
    img : PIL.Image.Image
        Image to persist; it is converted to RGB before encoding
        (WebP here is written without an alpha channel).

    Returns
    -------
    tuple
        (webp_image, unique_name): the image re-opened from disk as a
        PIL Image (i.e. including WebP compression artifacts), and the
        path it was saved to.
    """
    path = "./tmp/"
    # exist_ok=True avoids the check-then-create race of the previous
    # os.path.exists() + os.makedirs() pair.
    os.makedirs(path, exist_ok=True)

    # Generate a unique filename so concurrent calls never collide.
    unique_name = os.path.join(path, str(uuid.uuid4()) + ".webp")

    # Ensure the image is in RGB mode before WebP encoding.
    webp_img = img.convert("RGB")

    # Save in WebP format with high quality.
    webp_img.save(unique_name, "WEBP", quality=90)

    # Re-open the saved file so the caller receives the image exactly as
    # encoded on disk; copy() detaches it from the file handle, which the
    # context manager then closes.
    with Image.open(unique_name) as webp_file:
        webp_image = webp_file.copy()

    return webp_image, unique_name
|
354 |
+
|
355 |
class ModelManager:
|
356 |
def __init__(self, model_directory):
|
357 |
"""
|
|
|
549 |
cfg = model.cfg
|
550 |
p = inference_params.get("prompt")
|
551 |
negative_prompt = inference_params.get("negative_prompt", cfg.get("negative_prompt", ""))
|
552 |
+
steps = inference_params.get("steps", cfg.get("inference_steps", 30))
|
553 |
guidance_scale = inference_params.get("guidance_scale", cfg.get("guidance_scale", 7))
|
554 |
width = inference_params.get("width", cfg.get("width", 512))
|
555 |
height = inference_params.get("height", cfg.get("height", 512))
|
|
|
577 |
width=width,
|
578 |
height=height,
|
579 |
guidance_scale=guidance_scale,
|
580 |
+
num_inference_steps=steps,
|
581 |
generator=generator,
|
582 |
num_images_per_prompt=1,
|
583 |
output_type="pil",
|