Huage001 committed
Commit dacd526
Parent(s): ca9a0d6

Update app.py

Files changed (1)
  app.py (+17 -12)
app.py CHANGED
@@ -10,7 +10,8 @@ from src.linfusion import LinFusion
 device = "cuda" if torch.cuda.is_available() else "cpu"
 all_model_id = {
     "DreamShaper-8": "Lykon/dreamshaper-8",
-    "RealisticVision-v4.0": "SG161222/Realistic_Vision_V4.0_noVAE"
+    "RealisticVision-v4.0": "SG161222/Realistic_Vision_V4.0_noVAE",
+    "SD-v1.4": "CompVis/stable-diffusion-v1-4"
 }
 
 if torch.cuda.is_available():
@@ -20,6 +21,18 @@ else:
 
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1024
+pipes = {}
+for model_id, repo_id in all_model_id.items():
+    pipes[model_id + '_t2i'] = StableDiffusionPipeline.from_pretrained(repo_id, torch_dtype=torch_dtype)
+    LinFusion.construct_for(pipes[model_id + '_t2i'])
+
+    pipes[model_id + '_ip_adapter'] = StableDiffusionPipeline.from_pretrained(repo_id, torch_dtype=torch_dtype)
+    pipes[model_id + '_ip_adapter'].load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter-plus_sd15.bin")
+    pipes[model_id + '_ip_adapter'].set_ip_adapter_scale(scale)
+    LinFusion.construct_for(pipes[model_id + '_ip_adapter'])
+
+    pipes[model_id + '_i2i'] = StableDiffusionImg2ImgPipeline.from_pretrained(repo_id, torch_dtype=torch_dtype)
+    LinFusion.construct_for(pipes[model_id + '_i2i'])
 
 @spaces.GPU
 def infer_t2i(model, prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, progress=gr.Progress(track_tqdm=True)):
@@ -29,9 +42,7 @@ def infer_t2i(model, prompt, negative_prompt, seed, randomize_seed, width, heigh
 
     generator = torch.Generator().manual_seed(seed)
 
-    pipe = StableDiffusionPipeline.from_pretrained(all_model_id[model], torch_dtype=torch_dtype)
-    pipe = pipe.to(device)
-    linfusion = LinFusion.construct_for(pipe)
+    pipe = pipes[model + '_t2i'].to(device)
 
     image = pipe(
         prompt = prompt,
@@ -53,9 +64,7 @@ def infer_i2i(model, prompt, image, strength, negative_prompt, seed, randomize_s
 
     generator = torch.Generator().manual_seed(seed)
 
-    pipe = StableDiffusionImg2ImgPipeline.from_pretrained(all_model_id[model], torch_dtype=torch_dtype)
-    pipe = pipe.to(device)
-    linfusion = LinFusion.construct_for(pipe)
+    pipe = pipes[model + '_i2i'].to(device)
 
     image = pipe(
         prompt = prompt,
@@ -79,11 +88,7 @@ def infer_ip_adapter(model, prompt, image, scale, negative_prompt, seed, randomi
 
     generator = torch.Generator().manual_seed(seed)
 
-    pipe = StableDiffusionPipeline.from_pretrained(all_model_id[model], torch_dtype=torch_dtype)
-    pipe = pipe.to(device)
-    pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter-plus_sd15.bin")
-    pipe.set_ip_adapter_scale(scale)
-    linfusion = LinFusion.construct_for(pipe)
+    pipe = pipes[model + '_ip_adapter'].to(device)
 
     image = pipe(
         prompt = prompt,
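
The change above moves pipeline construction out of the request handlers: every model in all_model_id now gets a text-to-image, IP-Adapter, and image-to-image pipeline built once at startup, with LinFusion attached, and each handler only looks up the cached pipeline and moves it to the device. The following is a minimal sketch of that pattern, not the committed file: the float16/float32 dtype switch (the diff truncates that branch), the handler name infer_ip_adapter_sketch, and calling set_ip_adapter_scale inside the handler (where the per-request scale argument is available, rather than in the startup loop) are assumptions made so the example is self-contained.

import torch
from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline
from src.linfusion import LinFusion  # local package used by this Space

device = "cuda" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32  # assumed dtype switch

all_model_id = {
    "DreamShaper-8": "Lykon/dreamshaper-8",
    "RealisticVision-v4.0": "SG161222/Realistic_Vision_V4.0_noVAE",
    "SD-v1.4": "CompVis/stable-diffusion-v1-4",
}

# Build every pipeline variant once at startup so handlers never re-download or re-construct weights.
pipes = {}
for model_id, repo_id in all_model_id.items():
    t2i = StableDiffusionPipeline.from_pretrained(repo_id, torch_dtype=torch_dtype)
    LinFusion.construct_for(t2i)  # apply LinFusion to the pipeline
    pipes[model_id + "_t2i"] = t2i

    ip = StableDiffusionPipeline.from_pretrained(repo_id, torch_dtype=torch_dtype)
    ip.load_ip_adapter("h94/IP-Adapter", subfolder="models",
                       weight_name="ip-adapter-plus_sd15.bin")
    LinFusion.construct_for(ip)
    pipes[model_id + "_ip_adapter"] = ip

    i2i = StableDiffusionImg2ImgPipeline.from_pretrained(repo_id, torch_dtype=torch_dtype)
    LinFusion.construct_for(i2i)
    pipes[model_id + "_i2i"] = i2i

def infer_ip_adapter_sketch(model, prompt, ip_image, scale, seed=0):
    # Hypothetical handler: reuse the cached pipeline; apply the request's IP-Adapter scale here.
    pipe = pipes[model + "_ip_adapter"].to(device)
    pipe.set_ip_adapter_scale(scale)
    generator = torch.Generator().manual_seed(seed)
    return pipe(prompt=prompt, ip_adapter_image=ip_image, generator=generator).images[0]

The trade-off is longer startup and more host memory in exchange for lower per-request latency: each @spaces.GPU handler now only calls .to(device) on an already-constructed pipeline instead of running from_pretrained and LinFusion.construct_for on every request.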