lwdragon committed on
Commit 548eb77 · 1 Parent(s): f81db27

add zidongtaichu

__pycache__/utils.cpython-39.pyc ADDED
Binary file (817 Bytes).
 
app.py ADDED
@@ -0,0 +1,141 @@
+ import os
+ import requests
+ import gradio as gr
+
+ from utils import get_token
+
+ # Inference endpoints are supplied through environment variables.
+ url_caption = os.environ["CAPTION_NODE"]
+ url_vqa = os.environ["VQA_NODE"]
+
+
+ def image_caption(file_path):
+     """Send an image to the caption endpoint and return its description."""
+     token = get_token()
+
+     headers = {"X-Auth-Token": token}
+     with open(file_path, "rb") as f:
+         files = {"file": f}
+         resp = requests.post(url_caption,
+                              files=files,
+                              headers=headers,
+                              verify=False)
+     resp = resp.json()
+     desc = resp["inference_result"]["instances"]["image"][0]
+     return desc
+
+
+ def vqa(file_path, question):
+     """Send an image and a question to the VQA endpoint and return the answer."""
+     token = get_token()
+
+     headers = {"X-Auth-Token": token}
+     data = {"question": question}
+     with open(file_path, "rb") as f:
+         files = {"file": f}
+         resp = requests.post(url_vqa,
+                              files=files,
+                              data=data,
+                              headers=headers,
+                              verify=False)
+     resp = resp.json()
+     ans = resp["inference_result"]["instances"]
+     return ans
+
+
+ def read_content(file_path):
+     with open(file_path, 'r', encoding='utf-8') as f:
+         content = f.read()
+     return content
+
+
+ examples_caption = [
+     os.path.join(os.path.dirname(__file__), "examples/caption/00.jpg"),
+     os.path.join(os.path.dirname(__file__), "examples/caption/01.jpg"),
+     os.path.join(os.path.dirname(__file__), "examples/caption/02.jpg"),
+     os.path.join(os.path.dirname(__file__), "examples/caption/03.jpg"),
+     os.path.join(os.path.dirname(__file__), "examples/caption/04.jpg"),
+     os.path.join(os.path.dirname(__file__), "examples/caption/05.jpg")
+ ]
+ examples_vqa = [
+     os.path.join(os.path.dirname(__file__), "examples/vqa/00.jpg"),
+     os.path.join(os.path.dirname(__file__), "examples/vqa/01.jpg"),
+     os.path.join(os.path.dirname(__file__), "examples/vqa/02.jpg"),
+     os.path.join(os.path.dirname(__file__), "examples/vqa/03.jpg"),
+     os.path.join(os.path.dirname(__file__), "examples/vqa/04.jpg"),
+     os.path.join(os.path.dirname(__file__), "examples/vqa/05.jpg")
+ ]
+
+ css = """
+ .gradio-container {background-image: url('file=./background.jpg'); background-size: cover; background-repeat: no-repeat;}
+
+ #infer {
+     background: linear-gradient(to bottom right, #FFD8B4, #FFB066);
+     border: 1px solid #ffd8b4;
+     border-radius: 8px;
+     color: #ee7400
+ }
+ """
+
+ with gr.Blocks(css=css) as demo:
+     gr.HTML(read_content("./header.html"))
+     gr.Markdown("# MindSpore Zidongtaichu")
+     gr.Markdown(
+         "\nOPT (Omni-Perception Pre-Trainer) is a full-scene perception pre-training model"
+         " and an important achievement of the Institute of Automation, Chinese Academy of Sciences,"
+         " and Huawei on the road to exploring general artificial intelligence."
+         " This cross-modal 100-billion-parameter model, whose Chinese name is Zidong.Taichu,"
+         " supports efficient collaboration among the text, vision, and voice modalities,"
+         " and can support industrial applications such as film and television creation,"
+         " industrial quality inspection, and intelligent driving."
+     )
+
+     with gr.Tab("以图生文 (Image Caption)"):
+         with gr.Row():
+             caption_input = gr.Image(
+                 type="filepath",
+                 value=examples_caption[0],
+             )
+             caption_output = gr.TextArea(label="description",
+                                          interactive=False)
+         caption_button = gr.Button("Submit", elem_id="infer")
+         gr.Examples(
+             examples=examples_caption,
+             inputs=caption_input,
+         )
+
+         caption_button.click(image_caption,
+                              inputs=[caption_input],
+                              outputs=[caption_output])
+
+     with gr.Tab("视觉问答 (VQA)"):
+         with gr.Row():
+             with gr.Column():
+                 q_pic_input = gr.Image(type="filepath",
+                                        label="step 1: select a picture")
+                 gr.Examples(
+                     examples=examples_vqa,
+                     inputs=q_pic_input,
+                 )
+             with gr.Column():
+                 vqa_question = gr.TextArea(
+                     label="step 2: question",
+                     lines=5,
+                     placeholder="Please enter a question related to the picture."
+                 )
+                 vqa_answer = gr.TextArea(label="answer",
+                                          lines=5,
+                                          interactive=False)
+                 vqa_button = gr.Button("Submit", elem_id="infer")
+
+         vqa_button.click(vqa,
+                          inputs=[q_pic_input, vqa_question],
+                          outputs=[vqa_answer])
+
+     with gr.Accordion("Open for More!"):
+         gr.Markdown(
+             "- If you want to know more about the foundation models of MindSpore, please visit "
+             "[the Foundation Models Platform for MindSpore](https://xihe.mindspore.cn/)"
+         )
+         gr.Markdown(
+             "- If you want to know more about OPT models, please visit "
+             "[OPT](https://gitee.com/mindspore/zidongtaichu)")
+         gr.Markdown(
+             "- Try the [zidongtaichu model on the Foundation Models Platform for MindSpore]"
+             "(https://xihe.mindspore.cn/modelzoo/taichug)")
+
+ demo.queue(concurrency_count=5)
+ demo.launch(enable_queue=True)
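
Note: image_caption() and vqa() share one request pattern — fetch an IAM token, POST the image (and, for VQA, the question) as multipart form data, then unwrap "inference_result" from the JSON reply. Below is a minimal sketch of that pattern factored into a single helper, assuming the same endpoints and payload shape as app.py; the timeout value and the raise_for_status() guard are assumptions not present in the original code.

    import requests

    from utils import get_token


    def post_inference(url, file_path, data=None, timeout=30):
        # Same headers/files/data layout as image_caption() and vqa() above.
        # timeout and raise_for_status() are assumed safeguards; verify=False
        # is kept from the original app.
        headers = {"X-Auth-Token": get_token()}
        with open(file_path, "rb") as f:
            resp = requests.post(url, files={"file": f}, data=data,
                                 headers=headers, verify=False, timeout=timeout)
        resp.raise_for_status()
        return resp.json()["inference_result"]["instances"]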
background.jpg ADDED
examples/caption/00.jpg ADDED
examples/caption/01.jpg ADDED
examples/caption/02.jpg ADDED
examples/caption/03.jpg ADDED
examples/caption/04.jpg ADDED
examples/caption/05.jpg ADDED
examples/vqa/00.jpg ADDED
examples/vqa/01.jpg ADDED
examples/vqa/02.jpg ADDED
examples/vqa/03.jpg ADDED
examples/vqa/04.jpg ADDED
examples/vqa/05.jpg ADDED
header.html ADDED
@@ -0,0 +1,27 @@
+ <div style="text-align: center; max-width: 1920px; margin: 0 auto;">
+     <div
+         style="
+             display: inline-flex;
+             gap: 0.8rem;
+             font-size: 1.75rem;
+             margin-bottom: 10px;
+             margin-left: 220px;
+             justify-content: center;
+         "
+     >
+     </div>
+     <div
+         style="
+             display: inline-flex;
+             align-items: center;
+             gap: 0.8rem;
+             font-size: 1.75rem;
+             margin-bottom: 10px;
+             justify-content: center;
+         ">
+         <a href="https://github.com/mindspore-ai/mindspore"><h1 style="font-weight: 900; align-items: center; margin-bottom: 7px;">
+         </h1></a>
+     </div>
+     <a href="https://github.com/mindspore-ai/mindspore"><img src="https://xihe.mindspore.cn/assets/modelzoo1.57220d1e.jpg" width="100%"></a>
+
+ </div>
utils.py ADDED
@@ -0,0 +1,46 @@
+ import os
+ import requests
+
+
+ def get_token():
+     """Obtain an IAM token via password authentication.
+
+     The token is read from the X-Subject-Token header of the response,
+     not from the response body.
+     """
+     # Credentials are supplied through environment variables.
+     username = os.environ["USER_NAME"]
+     domain_name = os.environ["DOMAIN_NAME"]
+     domain_pwd = os.environ["DOMAIN_PWD"]
+     url = os.environ["IAM_URL"]
+
+     requests_json = {
+         "auth": {
+             "identity": {
+                 "methods": ["password"],
+                 "password": {
+                     "user": {
+                         "name": username,
+                         "password": domain_pwd,
+                         "domain": {
+                             "name": domain_name
+                         }
+                     }
+                 }
+             },
+             "scope": {
+                 "project": {
+                     "name": "cn-central-221"
+                 }
+             }
+         }
+     }
+
+     headers = {
+         "Content-Type": "application/json"
+     }
+
+     response = requests.post(url, json=requests_json, headers=headers)
+
+     result = response.headers
+     print("token success")
+
+     return result["X-Subject-Token"]
+
+
+ if __name__ == "__main__":
+     get_token()
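
Note: get_token() performs an OpenStack-style IAM password authentication: the JSON body names the user, password, and account domain, the scope pins the cn-central-221 project, and the service returns the token in the X-Subject-Token response header. A quick usage sketch follows; every value shown is a placeholder for illustration only — real deployments supply these variables as Space secrets, not hard-coded strings.

    import os

    # Placeholder credentials and endpoint, assumptions for illustration;
    # never hard-code real secrets like this.
    os.environ["USER_NAME"] = "iam-user"
    os.environ["DOMAIN_NAME"] = "account-domain"
    os.environ["DOMAIN_PWD"] = "iam-password"
    os.environ["IAM_URL"] = "https://iam.example.com/v3/auth/tokens"

    from utils import get_token

    token = get_token()  # value comes from the X-Subject-Token header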