Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -14,6 +14,7 @@ from huggingface_hub import hf_hub_download
|
|
14 |
### We use the ckpt of 79999_iter.pth: https://drive.google.com/open?id=154JgKpzCPW82qINcVieuPH3fZ2e0P812
|
15 |
### Thanks for the open source of face-parsing model.
|
16 |
from models.BiSeNet.model import BiSeNet
|
|
|
17 |
|
18 |
zero = torch.Tensor([0]).cuda()
|
19 |
print(zero.device) # <-- 'cpu' 🤔
|
@@ -36,10 +37,15 @@ pipe = ConsistentIDStableDiffusionPipeline.from_pretrained(
|
|
36 |
|
37 |
### Load other pretrained models
|
38 |
## BiSenet
|
39 |
-
bise_net = BiSeNet(n_classes = 19)
|
40 |
-
bise_net.cuda() # CUDA must not be initialized in the main process on Spaces with Stateless GPU environment
|
41 |
bise_net_cp_path = hf_hub_download(repo_id="JackAILab/ConsistentID", filename="face_parsing.pth", repo_type="model")
|
42 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
43 |
|
44 |
### Load consistentID_model checkpoint
|
45 |
pipe.load_ConsistentID_model(
|
@@ -51,6 +57,16 @@ pipe.load_ConsistentID_model(
|
|
51 |
)
|
52 |
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
|
53 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
54 |
@spaces.GPU
|
55 |
def process(inputImage,prompt,negative_prompt):
|
56 |
|
|
|
### We use the ckpt of 79999_iter.pth: https://drive.google.com/open?id=154JgKpzCPW82qINcVieuPH3fZ2e0P812
### Thanks for the open source of face-parsing model.
from models.BiSeNet.model import BiSeNet
from multiprocessing import Process, Queue

# Sanity probe for the Spaces Stateless GPU environment: `.cuda()` is called
# at module level, yet the device prints as 'cpu' in the main process
# (presumably the ZeroGPU shim defers real CUDA init — TODO confirm).
zero = torch.Tensor([0]).cuda()
print(zero.device)  # <-- 'cpu' 🤔
|
|
|
### Load other pretrained models
## BiSenet
bise_net_cp_path = hf_hub_download(repo_id="JackAILab/ConsistentID", filename="face_parsing.pth", repo_type="model")

# Load the BiSeNet face-parsing weights in a child process so that CUDA is
# never initialized in the main process (a restriction of Spaces' Stateless
# GPU environment); the loaded model is handed back through the queue.
# NOTE(review): `load_model` is defined further down in this file — at module
# level this line executes *before* that definition and would raise NameError;
# confirm the definition precedes this call in the deployed app.py.
queue = Queue()
p = Process(target=load_model, args=(queue, bise_net_cp_path))
p.start()
p.join()  # block until the child has finished loading
bise_net = queue.get()
|
49 |
|
50 |
### Load consistentID_model checkpoint
|
51 |
pipe.load_ConsistentID_model(
|
|
|
57 |
)
|
58 |
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
|
59 |
|
60 |
@spaces.GPU
def load_model(queue, bise_net_cp_path):
    """Load the BiSeNet face-parsing model onto the GPU and return it via *queue*.

    Intended to run in a child process so CUDA is never initialized in the
    main process (required on Spaces with the Stateless GPU environment).

    Args:
        queue: ``multiprocessing.Queue`` the loaded model is put on.
        bise_net_cp_path: filesystem path to the ``face_parsing.pth`` checkpoint.
    """
    device = torch.device('cuda')
    bise_net = BiSeNet(n_classes=19)
    bise_net.to(device)
    # map_location makes the load robust regardless of the device the
    # checkpoint was saved from; parameters already live on `device`
    # thanks to the .to() above, so load_state_dict copies in place.
    bise_net.load_state_dict(torch.load(bise_net_cp_path, map_location=device))
    bise_net.eval()  # inference only — disable dropout/batch-norm updates
    queue.put(bise_net)
|
69 |
+
|
70 |
@spaces.GPU
|
71 |
def process(inputImage,prompt,negative_prompt):
|
72 |
|