Commit b0ebe32 (verified) · committed by Yanzuo · 1 parent: f560cbd

Update app.py

Files changed (1): app.py (+8, -7)
app.py CHANGED
@@ -41,10 +41,11 @@ with gr.Blocks() as demo:
     with gr.Column():
         with gr.Row():
             with gr.Column():
-                num_images = gr.Slider(label="Number of Images", minimum=1, maximum=8, step=1, value=4, interactive=True)
+                # num_images = gr.Slider(label="Number of Images", minimum=1, maximum=8, step=1, value=4, interactive=True)
                 height = gr.Number(label="Image Height", value=1024, interactive=True)
                 width = gr.Number(label="Image Width", value=1024, interactive=True)
-                # steps = gr.Slider(label="Inference Steps", minimum=1, maximum=8, step=1, value=1, interactive=True)
+                steps = gr.Slider(label="Inference Steps", minimum=6, maximum=25, step=1, value=8, interactive=True)
+                scales = gr.Number(label="Guidance Scale", value=3.5, interactive=True)
                 # eta = gr.Number(label="Eta (Corresponds to parameter eta (η) in the DDIM paper, i.e. 0.0 eqauls DDIM, 1.0 equals LCM)", value=1., interactive=True)
                 prompt = gr.Text(label="Prompt", value="a photo of a cat", interactive=True)
                 seed = gr.Number(label="Seed", value=3413, interactive=True)
@@ -53,19 +54,19 @@ with gr.Blocks() as demo:
         output = gr.Gallery(height=1024)
 
     @spaces.GPU
-    def process_image(num_images, height, width, prompt, seed):
+    def process_image(height, width, steps, scales, prompt, seed):
         global pipe
         with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16), timer("inference"):
             return pipe(
-                prompt=[prompt]*num_images,
+                prompt=[prompt],
                 generator=torch.Generator().manual_seed(int(seed)),
-                num_inference_steps=8,
-                guidance_scale=3.5,
+                num_inference_steps=steps,
+                guidance_scale=scales,
                 height=int(height),
                 width=int(width)
             ).images
 
-    reactive_controls = [num_images, height, width, prompt, seed]
+    reactive_controls = [height, width, steps, scales, prompt, seed]
 
     # for control in reactive_controls:
     #     control.change(fn=process_image, inputs=reactive_controls, outputs=[output])
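For reference, below is a minimal, self-contained sketch of how the updated controls and handler could be wired together. It is an illustration only: the real pipeline call is replaced by a stub (pipe, @spaces.GPU, and the timer helper are omitted), the demo.launch() call is assumed, and the .change() wiring shown enabled here is still commented out in the committed app.py.

import gradio as gr

def process_image(height, width, steps, scales, prompt, seed):
    # Stub for illustration: the committed app calls the diffusion pipeline here
    # (num_inference_steps=steps, guidance_scale=scales) and returns its images.
    return []

with gr.Blocks() as demo:
    with gr.Column():
        with gr.Row():
            with gr.Column():
                height = gr.Number(label="Image Height", value=1024, interactive=True)
                width = gr.Number(label="Image Width", value=1024, interactive=True)
                steps = gr.Slider(label="Inference Steps", minimum=6, maximum=25, step=1, value=8, interactive=True)
                scales = gr.Number(label="Guidance Scale", value=3.5, interactive=True)
                prompt = gr.Text(label="Prompt", value="a photo of a cat", interactive=True)
                seed = gr.Number(label="Seed", value=3413, interactive=True)
        output = gr.Gallery(height=1024)

    reactive_controls = [height, width, steps, scales, prompt, seed]

    # Commented out in the committed file; shown enabled here so any control change
    # re-runs process_image and refreshes the gallery.
    for control in reactive_controls:
        control.change(fn=process_image, inputs=reactive_controls, outputs=[output])

demo.launch()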