Spaces: nebula / Running on Zero

Menyu committed
Commit ef4fdd8 (1 parent: 77414f0)

Update app.py

Files changed (1):
  1. app.py +18 -2
app.py CHANGED
@@ -4,6 +4,7 @@ import numpy as np
 import spaces
 import torch
 from diffusers import AutoPipelineForText2Image, AutoencoderKL #,EulerDiscreteScheduler
+from sd_embed.embedding_funcs import get_weighted_text_embeddings_sdxl
 
 if not torch.cuda.is_available():
     DESCRIPTION += "\n<p>你现在运行在CPU上 但是只支持GPU.</p>"
@@ -44,9 +45,24 @@ def infer(
 ):
     seed = int(randomize_seed_fn(seed, randomize_seed))
     generator = torch.Generator().manual_seed(seed)
+
+    (
+        prompt_embeds
+        , prompt_neg_embeds
+        , pooled_prompt_embeds
+        , negative_pooled_prompt_embeds
+    ) = get_weighted_text_embeddings_sdxl(
+        pipe
+        , prompt = prompt
+        , negative_prompt = negative_prompt
+    )
     image = pipe(
-        prompt=prompt,
-        negative_prompt=negative_prompt,
+        prompt_embeds = prompt_embeds
+        , negative_prompt_embeds = prompt_neg_embeds
+        , pooled_prompt_embeds = pooled_prompt_embeds
+        , negative_pooled_prompt_embeds = negative_pooled_prompt_embeds
+        #prompt=prompt,
+        #negative_prompt=negative_prompt,
         width=width,
         height=height,
         guidance_scale=guidance_scale,
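
The commit swaps the pipeline's raw prompt strings for precomputed embeddings from sd_embed, whose get_weighted_text_embeddings_sdxl helper is meant to handle A1111-style (token:weight) weighting and prompts longer than CLIP's 77-token limit. Below is a minimal, self-contained sketch of the same pattern; the model id, prompts, seed, and sampler settings are illustrative placeholders, not values taken from this Space's app.py.

import torch
from diffusers import AutoPipelineForText2Image
from sd_embed.embedding_funcs import get_weighted_text_embeddings_sdxl

# Placeholder SDXL checkpoint; the Space loads its own model elsewhere in app.py.
pipe = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
).to("cuda")

prompt = "a (detailed:1.3) photo of a red fox in a snowy forest"
negative_prompt = "lowres, blurry, (bad anatomy:1.2)"

# sd_embed returns the four tensors an SDXL pipeline expects, so the raw
# prompt strings are no longer passed to pipe() at all.
(
    prompt_embeds,
    prompt_neg_embeds,
    pooled_prompt_embeds,
    negative_pooled_prompt_embeds,
) = get_weighted_text_embeddings_sdxl(pipe, prompt=prompt, negative_prompt=negative_prompt)

image = pipe(
    prompt_embeds=prompt_embeds,
    negative_prompt_embeds=prompt_neg_embeds,
    pooled_prompt_embeds=pooled_prompt_embeds,
    negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
    width=1024,
    height=1024,
    guidance_scale=7.0,
    generator=torch.Generator().manual_seed(0),
).images[0]
image.save("fox.png")

Note that the old prompt=prompt / negative_prompt=negative_prompt arguments are commented out in the commit rather than kept alongside the embeddings: diffusers pipelines reject calls that supply both a raw prompt and prompt_embeds.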