Update app.py
app.py CHANGED
@@ -3,7 +3,7 @@ import numpy as np
 import random
 
 import spaces #[uncomment to use ZeroGPU]
-
+from diffusers import DiffusionPipeline ,AutoencoderTiny
 import torch
 from diffusers import AutoencoderTiny, StableDiffusionPipeline , DPMSolverMultistepScheduler
 from huggingface_hub import login
@@ -12,8 +12,8 @@ a=os.getenv('hf_key')
 login(token=a )
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
-
-model_repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
+model_repo_id = "stabilityai/sdxl-turbo" # Replace to the model you would like to use
+#model_repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
 
 
 
@@ -57,7 +57,7 @@ var_1="nota-ai/bk-sdm-base-2m"
 var_2="nota-ai/bk-sdm-small"
 
 
-pipe =
+pipe = DiffusionPipeline.from_pretrained(
 model_repo_id, torch_dtype=torch_dtype, use_safetensors=True)
 #pipe.vae = AutoencoderTiny.from_pretrained(
 # "sayakpaul/taesd-diffusers", torch_dtype=torch_dtype, use_safetensors=True)
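Putting the changed lines in context: the sketch below assembles the updated imports, model selection, and pipeline construction from this diff into a self-contained script. It assumes torch_dtype is derived from the device as in the stock diffusers Space template; the final generation call (prompt, step count, guidance scale) is illustrative and not taken from app.py.

import torch
from diffusers import AutoencoderTiny, DiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
# Assumption: dtype follows the device, as in the standard Space template.
torch_dtype = torch.float16 if device == "cuda" else torch.float32

model_repo_id = "stabilityai/sdxl-turbo"  # Replace with the model you would like to use
# model_repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"

# DiffusionPipeline selects the right pipeline class for the checkpoint.
pipe = DiffusionPipeline.from_pretrained(
    model_repo_id, torch_dtype=torch_dtype, use_safetensors=True)
pipe = pipe.to(device)

# Optional, still commented out in app.py: swap in the tiny VAE.
# pipe.vae = AutoencoderTiny.from_pretrained(
#     "sayakpaul/taesd-diffusers", torch_dtype=torch_dtype, use_safetensors=True)

# Illustrative call: sdxl-turbo is built for very few steps and no guidance.
image = pipe("an astronaut riding a horse", num_inference_steps=2,
             guidance_scale=0.0).images[0]
image.save("sample.png")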