Spaces: Running on Zero
Upload 2 files

- engage_studios_logo.png +0 -0
- ui_model.py +70 -0
engage_studios_logo.png
ADDED
ui_model.py
ADDED
@@ -0,0 +1,70 @@
import torch
import cv2
import numpy as np

from torchvision.transforms.functional import to_tensor, center_crop, resize
from PIL import Image
from EngageEngine.pipeline import EngagePipeline

from diffusers import (
    EulerAncestralDiscreteScheduler,
    AutoencoderKL, ControlNetModel,
)

def process_sketch(x, im_size=(1024, 1024), sketch_detail=0.5, sketch_softness=0.5):
    # Flatten the RGBA sketch onto a white background, then crop/resize it.
    x_b = Image.new("RGBA", x.size, "WHITE")
    x_b.paste(x, mask=x)
    x = to_tensor(x_b.convert('RGB')).unsqueeze(0)
    x = center_crop(x, x.shape[-1])
    x = resize(x, im_size)

    # Map the detail/softness sliders to Canny hysteresis thresholds.
    u_th = (1 - sketch_detail) * 190 + 10
    l_th = (1 - sketch_detail) ** (sketch_softness * 8 + 1) * 185 + 5

    edges = [cv2.Canny(x[i].mul(255).permute(1, 2, 0).numpy().astype(np.uint8),
                       u_th, l_th, L2gradient=True) for i in range(len(x))]
    edges = torch.stack([torch.tensor(e).div(255).unsqueeze(0) for e in edges], dim=0)
    # Replicate the single edge channel to 3 channels for the ControlNet input.
    edges = torch.concatenate([edges, edges, edges], dim=1)
    return edges
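
# Note: at the default sliders (sketch_detail=0.5, sketch_softness=0.5) the
# mapping above gives u_th = 0.5 * 190 + 10 = 105 and
# l_th = 0.5 ** 5 * 185 + 5 ≈ 10.8; raising sketch_detail lowers both
# thresholds, so Canny keeps finer edges from the sketch.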

def process_mask(x, mask, im_size=(1024, 1024)):
    # Crop/resize the base image and its mask to the working resolution.
    x = to_tensor(x.convert('RGB')).unsqueeze(0)
    x = center_crop(x, x.shape[-1])
    x = resize(x, im_size)

    mask = to_tensor(mask.convert('L')).unsqueeze(0)
    mask = center_crop(mask, mask.shape[-1])
    mask = resize(mask, im_size)

    return x, mask

def fetch_model():
    # Load VAE component
    vae = AutoencoderKL.from_pretrained(
        "madebyollin/sdxl-vae-fp16-fix",
        torch_dtype=torch.float16
    )

    # Load the Canny ControlNet that consumes the processed sketch edges.
    controlnet = ControlNetModel.from_pretrained(
        "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16
    )

    # Configure the pipeline
    pipe = EngagePipeline.from_pretrained(
        "dataautogpt3/ProteusV0.4-Lightning",
        vae=vae,
        controlnet=controlnet,
        torch_dtype=torch.float16
    )
    pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
    pipe.load_lora_weights("EngageEngine/ENGAGE_LORA.safetensors", adapter_name="ENGAGE_LORA")
    pipe.load_lora_weights("EngageEngine/FILM_LORA.safetensors", adapter_name="FILM_LORA")
    pipe.load_lora_weights("EngageEngine/MJ_LORA.safetensors", adapter_name="MJ_LORA")
    pipe.load_lora_weights("EngageEngine/MORE_ART_LORA.safetensors", adapter_name="MORE_ART_LORA")
    # All adapters start at weight 0.0; presumably re-weighted later via set_adapters.
    pipe.set_adapters(["ENGAGE_LORA", "FILM_LORA", "MJ_LORA", "MORE_ART_LORA"], adapter_weights=[0.0, 0.0, 0.0, 0.0])
    pipe.to('cuda')

    return pipe
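
A minimal usage sketch (not part of this commit) showing how these helpers could be wired together. It assumes EngagePipeline follows the standard diffusers ControlNet __call__ signature (prompt, image, controlnet_conditioning_scale); the real signature lives in EngageEngine/pipeline.py, which is not included here, and the input file and prompt are placeholders.

import torch
from PIL import Image
from ui_model import fetch_model, process_sketch

pipe = fetch_model()

# Hypothetical input: an RGBA sketch with transparency.
sketch = Image.open("sketch.png").convert("RGBA")
edges = process_sketch(sketch, sketch_detail=0.7, sketch_softness=0.3)

result = pipe(
    prompt="concept art of a sports car",      # placeholder prompt
    image=edges.to("cuda", torch.float16),     # 3-channel Canny edge map
    controlnet_conditioning_scale=0.8,
    num_inference_steps=8,                     # Lightning-distilled models use few steps
).images[0]
result.save("out.png")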