Added safety checker back
Files changed:
- app.py (+17 -10)
- assets/masks/diamond.png (+0 -0)
app.py CHANGED

```diff
@@ -8,19 +8,27 @@ from diffusers import DPMSolverMultistepScheduler
 
 deviceStr = "cuda" if torch.cuda.is_available() else "cpu"
 device = torch.device(deviceStr)
+latents = None
+
+def GenerateNewLatentsForInference():
+    global latents
+    if deviceStr == "cuda":
+        latents = torch.randn((1, 4, 64, 64), device=device, dtype=torch.float16)
+    else:
+        latents = torch.randn((1, 4, 64, 64), device=device)
 
 if deviceStr == "cuda":
     pipeline = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting",
                                                               revision="fp16",
-                                                              torch_dtype=torch.float16
-                                                              safety_checker=lambda images, **kwargs: (images, False))
+                                                              torch_dtype=torch.float16)
+                                                              #safety_checker=lambda images, **kwargs: (images, False))
     pipeline.to(device)
     pipeline.enable_xformers_memory_efficient_attention()
-    latents = torch.randn((1, 4, 64, 64), device=device, dtype=torch.float16)
 else:
-    pipeline = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting"
-                                                              safety_checker=lambda images, **kwargs: (images, False))
-
+    pipeline = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")
+    #safety_checker=lambda images, **kwargs: (images, False))
+
+GenerateNewLatentsForInference()
 
 imageSize = (512, 512)
 lastImage = Image.new(mode="RGB", size=imageSize)
```
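This hunk is the change the commit message refers to: the `safety_checker` override (a lambda that passed images through unfiltered) is commented out, so the checker bundled with the checkpoint is active again. Closing the call after `torch_dtype=torch.float16)` also fixes the missing comma that preceded the old `safety_checker` argument. A hedged sketch of the two configurations, assuming the standard `diffusers` `from_pretrained` API:

```python
# Sketch only, not a verbatim copy of app.py.
from diffusers import StableDiffusionInpaintPipeline

# After this commit: no override, so the checkpoint's bundled safety checker
# screens generated images for NSFW content.
pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting")

# Before this commit: a stand-in callable (as in the removed lines) returned
# the images untouched and reported nothing flagged, bypassing the filter.
# diffusers also accepts safety_checker=None for a similar effect.
pipe_unfiltered = StableDiffusionInpaintPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    safety_checker=lambda images, **kwargs: (images, False))
```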
```diff
@@ -35,10 +43,7 @@ def diffuse(staticLatents, inputImage, mask, pauseInference, prompt, negativePro
         return lastImage
 
     if staticLatents is False:
-        if deviceStr == "cuda":
-            latents = torch.randn((1, 4, 64, 64), device=device, dtype=torch.float16)
-        else:
-            latents = torch.randn((1, 4, 64, 64), device=device)
+        GenerateNewLatentsForInference()
 
     if lastSeed != seed:
         generator = torch.Generator(device).manual_seed(seed)
```
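Alongside the latents cache, `diffuse` keeps its `torch.Generator` in step with the seed slider, rebuilding it only when the value changes. A minimal sketch of that caching pattern (the globals and helper name here are assumptions mirroring the hunk above):

```python
import torch

lastSeed = None   # assumed module-level state, as in the app
generator = None

def get_generator(device: torch.device, seed: int) -> torch.Generator:
    """Rebuild the generator only when the seed slider moves, so an
    unchanged seed keeps sampling deterministic across live calls."""
    global lastSeed, generator
    if generator is None or lastSeed != seed:
        generator = torch.Generator(device).manual_seed(seed)
        lastSeed = seed
    return generator
```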
```diff
@@ -69,6 +74,8 @@ numInferenceSteps = gradio.Slider(label="Number of Inference Steps", maximum=100
 seed = gradio.Slider(label="Generator Seed", maximum=10000, value=4096)
 staticLatents =gradio.Checkbox(label="Static Latents", value=True)
 pauseInference = gradio.Checkbox(label="Pause Inference", value=False)
+#generateNewLatents = gradio.Button(label="Generate New Latents")
+#generateNewLatents.click(GenerateNewLatentsForInference)
 
 inputs=[staticLatents, inputImage, mask, pauseInference, prompt, negativePrompt, guidanceScale, numInferenceSteps, seed]
 ux = gradio.Interface(fn=diffuse, title="View Diffusion", inputs=inputs, outputs=outputImage, live=True)
```
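The new button stays commented out, plausibly because `.click()` event wiring needs a `gradio.Blocks` layout rather than the bare `gradio.Interface` this app uses. A hedged sketch of how it could be hooked up under Blocks (layout assumed, not taken from the commit; note a Button's display text is passed as its value):

```python
import gradio

with gradio.Blocks() as demo:
    # Button text is passed as the value, not a label kwarg.
    generateNewLatents = gradio.Button("Generate New Latents")
    # Resample the cached noise; the handler takes no inputs and returns nothing.
    generateNewLatents.click(fn=GenerateNewLatentsForInference,
                             inputs=None, outputs=None)

demo.launch()
```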
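Putting the pieces together: inside `diffuse`, the cached `latents` and the seeded `generator` would typically be forwarded to the pipeline call. That call site is not shown in this diff, so the following is an assumption based on the `StableDiffusionInpaintPipeline` keyword arguments (`latents` and `generator` are both accepted):

```python
# Assumed call site inside diffuse(); variable names follow the diff,
# keyword names follow the diffusers inpainting pipeline API.
result = pipeline(prompt=prompt,
                  negative_prompt=negativePrompt,
                  image=inputImage,
                  mask_image=mask,
                  guidance_scale=guidanceScale,
                  num_inference_steps=numInferenceSteps,
                  generator=generator,
                  latents=latents)
lastImage = result.images[0]
```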
assets/masks/diamond.png ADDED