asahi417 committed
Commit 988fe48 · verified · 1 Parent(s): bea31cf

Upload folder using huggingface_hub
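This is the default commit message produced when a local folder is pushed with huggingface_hub. A minimal sketch of such an upload, assuming the standard HfApi.upload_folder call; the folder path and repo id below are placeholders, not taken from this commit:

from huggingface_hub import HfApi

api = HfApi()
# Push a local folder to a Space repo; generates a commit like this one.
api.upload_folder(
    folder_path="./my-space",        # placeholder local path
    repo_id="user/space-name",       # placeholder Space repo id
    repo_type="space",
)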

Files changed (1): app.py (+14 -7)
app.py CHANGED
@@ -1,6 +1,7 @@
 import torch
 import gradio as gr
 from clip_interrogator import Config, Interrogator
+import spaces
 
 
 config = Config()
@@ -10,9 +11,15 @@ config.chunk_size = 2048
 config.flavor_intermediate_count = 512
 config.blip_num_beams = 64
 ci = Interrogator(config)
+css = """
+#col-container {
+    margin: 0 auto;
+    max-width: 580px;
+}
+"""
 
-
-def inference(image, mode, best_max_flavors):
+@spaces.GPU
+def infer(image, mode, best_max_flavors):
     image = image.convert('RGB')
     if mode == 'best':
         prompt_result = ci.interrogate(image, max_flavors=int(best_max_flavors))
@@ -23,17 +30,17 @@ def inference(image, mode, best_max_flavors):
     return prompt_result
 
 
-with gr.Blocks() as demo:
-    with gr.Column():
+with gr.Blocks(css=css) as demo:
+    with gr.Column(elem_id="col-container"):
         gr.Markdown("# CLIP Interrogator")
         input_image = gr.Image(type='pil', elem_id="input-img")
         with gr.Row():
             mode_input = gr.Radio(['best', 'classic', 'fast'], label='Select mode', value='best')
             flavor_input = gr.Slider(minimum=2, maximum=48, step=2, value=32, label='best mode max flavors')
-        submit_btn = gr.Button("Submit")
+        run_button = gr.Button("Submit")
         output_text = gr.Textbox(label="Description Output")
-    submit_btn.click(
-        fn=inference,
+    run_button.click(
+        fn=infer,
         inputs=[input_image, mode_input, flavor_input],
         outputs=[output_text],
         concurrency_limit=10
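The functional change here is the @spaces.GPU decorator, which on ZeroGPU Spaces attaches a GPU only while the decorated function runs; the rest of the diff is cosmetic (CSS container width, renamed button and handler). A minimal sketch of the decorator pattern, assuming the standard behavior of the spaces package; the function name and body below are illustrative only, not this Space's code:

import spaces
import torch

@spaces.GPU(duration=60)  # optional hint for how long the GPU is needed, in seconds
def run_on_gpu(x: torch.Tensor) -> torch.Tensor:
    # hypothetical GPU-bound work; the real Space wraps ci.interrogate(...) this way
    return (x.to("cuda") * 2).cpu()

Calls outside the decorated function (model construction, Gradio wiring) stay on CPU, which is why ci = Interrogator(config) remains at module level in the diff above.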