AP123 committed
Commit 68bba7b · verified · 1 Parent(s): b40804f

Update app.py

Files changed (1)
  1. app.py +10 -8
app.py CHANGED
@@ -2,14 +2,13 @@ import gradio as gr
 import torch
 from PIL import Image
 from diffusers import AutoPipelineForText2Image, DDIMScheduler
-from transformers import CLIPVisionModelWithProjection
 import numpy as np
+import spaces  # Make sure to import spaces
 
-# Initialize the pipeline with GPU support
+# Initialize the pipeline without specifying the device; this will be handled by the @spaces.GPU decorator
 pipeline = AutoPipelineForText2Image.from_pretrained(
     "stabilityai/stable-diffusion-xl-base-1.0",
-    torch_dtype=torch.float16,
-    device="cuda",  # Use GPU device if available
+    torch_dtype=torch.float16
 )
 
 # Configure the scheduler for the pipeline
@@ -26,10 +25,11 @@ pipeline.load_ip_adapter(
 )
 pipeline.set_ip_adapter_scale([0.7, 0.5])
 
-# Ensure the model and its components are moved to GPU
-pipeline.to("cuda")
-
+# Decorate the transform_image function to run on GPU
+@spaces.GPU
 def transform_image(face_image):
+    # Move the pipeline to GPU inside the function
+    pipeline.to("cuda")
     generator = torch.Generator(device="cuda").manual_seed(0)
 
     # Process the input face image
@@ -49,10 +49,12 @@ def transform_image(face_image):
         prompt="soyjak",
         ip_adapter_image=[style_image, processed_face_image],
         negative_prompt="monochrome, lowres, bad anatomy, worst quality, low quality",
-        num_inference_steps=30,
+        num_inference_steps=50,
         generator=generator,
     ).images[0]
 
+    # Move the pipeline back to CPU after processing to release GPU resources
+    pipeline.to("cpu")
     return image
 
 # Gradio interface setup
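
The diff ends at the "# Gradio interface setup" comment, so the interface itself is not shown in this commit view. For context, here is a minimal sketch of how the @spaces.GPU-decorated transform_image function could be wired into a Gradio interface; the component types and labels are assumptions for illustration, not part of the commit:

# Hypothetical wiring (assumes transform_image takes and returns a PIL image)
demo = gr.Interface(
    fn=transform_image,
    inputs=gr.Image(type="pil", label="Face image"),        # assumed input component
    outputs=gr.Image(type="pil", label="Stylized result"),  # assumed output component
)

demo.launch()

On ZeroGPU Spaces, the @spaces.GPU decorator requests a GPU only for the duration of the decorated call, which matches this commit moving pipeline.to("cuda") inside transform_image rather than running it at import time.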