benjamin-paine committed on
Commit
1fb83ef
·
verified ·
1 Parent(s): 52d0ae6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -3
app.py CHANGED
@@ -25,8 +25,11 @@ default_system_prompt = "You are an assistant designed to generate superior imag
25
  device = "cuda" if torch.cuda.is_available() else "cpu"
26
  model_repo_id = "Alpha-VLLM/Lumina-Image-2.0"
27
  transformer_repo_id = "benjamin-paine/Lumina-Image-2.0" # Temporarily fixed, change when main repo gets updated
28
- torch_dtype = torch.float32
29
-
 
 
 
30
  ###
31
  transformer = Lumina2Transformer2DModel.from_pretrained(transformer_repo_id, subfolder="transformer")
32
  vae = AutoencoderKL.from_pretrained(model_repo_id, subfolder="vae")
@@ -47,7 +50,7 @@ pipe.to(device, torch_dtype)
47
  MAX_SEED = np.iinfo(np.int32).max
48
  MAX_IMAGE_SIZE = 1536
49
 
50
- @spaces.GPU(duration=65)
51
  def infer(
52
  prompt,
53
  negative_prompt="",
 
25
  device = "cuda" if torch.cuda.is_available() else "cpu"
26
  model_repo_id = "Alpha-VLLM/Lumina-Image-2.0"
27
  transformer_repo_id = "benjamin-paine/Lumina-Image-2.0" # Temporarily fixed, change when main repo gets updated
28
+ if torch.cuda.is_available():
29
+ torch_dtype = torch.bfloat16
30
+ else:
31
+ torch_dtype = torch.float32
32
+
33
  ###
34
  transformer = Lumina2Transformer2DModel.from_pretrained(transformer_repo_id, subfolder="transformer")
35
  vae = AutoencoderKL.from_pretrained(model_repo_id, subfolder="vae")
 
50
  MAX_SEED = np.iinfo(np.int32).max
51
  MAX_IMAGE_SIZE = 1536
52
 
53
+ @spaces.GPU(duration=60)
54
  def infer(
55
  prompt,
56
  negative_prompt="",