aifeifei798 committed on
Commit
3a56c9c
1 Parent(s): be93891

Update app.py

Files changed (1)
  1. app.py +39 -10
app.py CHANGED
@@ -1,24 +1,53 @@
 import gradio as gr
 import numpy as np
 import random
-import spaces
 import torch
-from diffusers import DiffusionPipeline, AutoencoderTiny
+import spaces
+from diffusers import (
+    DiffusionPipeline,
+    AutoencoderTiny,
+)
 from huggingface_hub import hf_hub_download
 
-dtype = torch.bfloat16
-device = "cuda" if torch.cuda.is_available() else "cpu"
 
-taef1 = AutoencoderTiny.from_pretrained("aifeifei798/taef1", torch_dtype=dtype).to(device)
-
-pipe = DiffusionPipeline.from_pretrained("aifeifei798/DarkIdol-flux-v1", torch_dtype=dtype, vae=taef1).to(device)
-
-pipe.load_lora_weights(hf_hub_download("aifeifei798/feifei-flux-lora-v1", "feifei.safetensors"),adapter_name="feifei")
-
-pipe.set_adapters(["feifei"],adapter_weights=[0.65])
-
-pipe.fuse_lora(adapter_name=["feifei"],lora_scale=1.0)
-
+def feifeimodload():
+
+    dtype = torch.bfloat16
+    device = "cuda" if torch.cuda.is_available() else "cpu"
+    taef1 = AutoencoderTiny.from_pretrained("aifeifei798/taef1", torch_dtype=dtype).to(
+        device
+    )
+
+    pipe = DiffusionPipeline.from_pretrained(
+        "aifeifei798/DarkIdol-flux-v1", torch_dtype=dtype, vae=taef1
+    ).to(device)
+
+    pipe.load_lora_weights(
+        hf_hub_download("aifeifei798/feifei-flux-lora-v1", "feifei.safetensors"),
+        adapter_name="feifei",
+    )
+
+    pipe.load_lora_weights(
+        hf_hub_download(
+            "aifeifei798/feifei-flux-lora-v1", "Shadow-Projection.safetensors"
+        ),
+        adapter_name="Shadow-Projection",
+    )
+
+    pipe.set_adapters(
+        ["feifei","Shadow-Projection"],
+        adapter_weights=[0.75,0.35],
+    )
+    pipe.fuse_lora(
+        adapter_name=["feifei","Shadow-Projection"],
+        lora_scale=1.0,
+    )
+
+    pipe.unload_lora_weights()
+    torch.cuda.empty_cache()
+    return pipe
+
+pipe = feifeimodload()
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 2048
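
For context, a minimal usage sketch (not part of this commit) of how the module-level pipe returned by feifeimodload() might be driven from the Space's Gradio inference function. The function name infer, its parameters, and the guidance_scale=0.0 setting are illustrative assumptions, not taken from this repository; the sketch relies on the names imported and defined in the diffed app.py above (spaces, random, torch, MAX_SEED, pipe).

# Illustrative sketch only; `infer` and its defaults are assumptions, not part of this commit.
@spaces.GPU()
def infer(prompt, seed=0, randomize_seed=True, width=896, height=1152, num_inference_steps=4):
    # Pick a fresh seed when requested, then seed a generator for reproducible sampling.
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator().manual_seed(seed)
    image = pipe(
        prompt=prompt,
        width=width,
        height=height,
        num_inference_steps=num_inference_steps,
        generator=generator,
        guidance_scale=0.0,  # assumption: schnell-style FLUX checkpoints are typically run without CFG
    ).images[0]
    return image, seed

Note the design choice the commit makes inside feifeimodload(): the two LoRAs are weighted via set_adapters(), baked into the base weights with fuse_lora(), and then unload_lora_weights() plus torch.cuda.empty_cache() free the standalone adapter layers before the pipeline is handed to the app.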