Shuaizhang7 commited on
Commit
8c7401e
1 Parent(s): 21e0607

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -7
app.py CHANGED
@@ -18,11 +18,11 @@ inputs = [
18
  gr.inputs.Textbox(lines=1,
19
  label="Candidate Labels 候选分类标签"),
20
  gr.inputs.Radio(choices=[
21
- "openAI-ViT/B-16",
22
- "ViT/B-16",
23
- "ViT/L-14",
24
- "ViT/L-14@336px",
25
- "ViT/H-14",
26
  ], type="value", default="ViT/B-16", label="Model 模型规模"),
27
  gr.inputs.Textbox(lines=1,
28
  label="Prompt Template Prompt模板 ({}指代候选标签)",
@@ -43,8 +43,8 @@ en_lei = "Motor Vehicle Lane,Non-motor Vehicle Lane,Mixed Traffic Road,Zebra Cro
43
  iface = gr.Interface(shot,
44
  inputs,
45
  "label",
46
- examples=[["street.jpg", lei, "ViT/B-16", "一张{}的图片。"],
47
- ["street.jpg", en_lei, "openAI-ViT/B-16", "A picture of {}."]
48
  ],
49
 
50
  description="""<p>Chinese CLIP is a contrastive-learning-based vision-language foundation model pretrained on large-scale Chinese data. For more information, please refer to the paper and the official GitHub repository. Also, Chinese CLIP has already been merged into Huggingface Transformers! <br><br>
 
18
  gr.inputs.Textbox(lines=1,
19
  label="Candidate Labels 候选分类标签"),
20
  gr.inputs.Radio(choices=[
21
+ "OpenAI-ViT/B-16",
22
+ "CN-ViT/B-16",
23
+ "CN-ViT/L-14",
24
+ "CN-ViT/L-14@336px",
25
+ "CN-ViT/H-14",
26
  ], type="value", default="CN-ViT/B-16", label="Model 模型规模"),
27
  gr.inputs.Textbox(lines=1,
28
  label="Prompt Template Prompt模板 ({}指代候选标签)",
 
43
  iface = gr.Interface(shot,
44
  inputs,
45
  "label",
46
+ examples=[["street.jpg", lei, "CN-ViT/B-16", "一张{}的图片。"],
47
+ ["street.jpg", en_lei, "OpenAI-ViT/B-16", "A picture of {}."]
48
  ],
49
 
50
  description="""<p>Chinese CLIP is a contrastive-learning-based vision-language foundation model pretrained on large-scale Chinese data. For more information, please refer to the paper and the official GitHub repository. Also, Chinese CLIP has already been merged into Huggingface Transformers! <br><br>