Xianbao QIAN committed
Commit a678ce3 · 0 Parent(s)

Add a demo for GLM4, inspired by QWen's demo.

Files changed (3)
  1. app.py +43 -0
  2. requirements.txt +2 -0
  3. utils.py +76 -0
app.py ADDED
@@ -0,0 +1,43 @@
+ import utils
+ import gradio as gr
+
+
+ # response = response.choices[0].message
+ # print(response)
+
+ # role = response.role
+ # content = response.content
+ # system, history = messages_to_history(messages + [{'role': role, 'content': content}])
+ # yield '', history, system
+
+
+ with gr.Blocks() as demo:
+     gr.Markdown("""<center><font size=8>ChatGLM-4</center>""")
+
+     with gr.Row():
+         with gr.Column(scale=3):
+             system_input = gr.Textbox(value=utils.default_system, lines=1, label='System')
+         with gr.Column(scale=1):
+             modify_system = gr.Button("🛠️ Set system prompt and clear history", scale=2)
+         system_state = gr.Textbox(value=utils.default_system, visible=False)
+     chatbot = gr.Chatbot(label='ChatGLM-4')
+     textbox = gr.Textbox(lines=2, label='Input')
+
+     with gr.Row():
+         clear_history = gr.Button("🧹 Clear history")
+         submit = gr.Button("🚀 Send")
+
+     submit.click(utils.model_chat,
+                  inputs=[textbox, chatbot, system_state],
+                  outputs=[textbox, chatbot, system_input],
+                  concurrency_limit=5)
+     clear_history.click(fn=utils.clear_session,
+                         inputs=[],
+                         outputs=[textbox, chatbot])
+     modify_system.click(fn=utils.modify_system_session,
+                         inputs=[system_input],
+                         outputs=[system_state, system_input, chatbot])
+
+ demo.queue(api_open=False)
+ demo.launch(max_threads=5)
+
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ gradio
+ zhipuai
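
Note on running the demo: utils.py reads the ZhipuAI key from the YOUR_API_TOKEN environment variable, so a quick preflight check before launching app.py avoids a confusing failure on the first request. A minimal sketch, assuming the two dependencies above are installed; only the env-var name comes from utils.py, the check itself is illustrative:

import os
import sys

# utils.py constructs ZhipuAI(api_key=os.getenv('YOUR_API_TOKEN')),
# so fail early with a readable message if the variable is unset.
if not os.getenv('YOUR_API_TOKEN'):
    sys.exit('Set YOUR_API_TOKEN to a ZhipuAI API key, then run: python app.py')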
utils.py ADDED
@@ -0,0 +1,76 @@
+ import os
+ from zhipuai import ZhipuAI
+ from typing import List, Optional, Tuple, Dict
+ from http import HTTPStatus
+
+
+
+ History = List[Tuple[str, str]]
+ Messages = List[Dict[str, str]]
+
+ default_system = 'You are a helpful assistant.'
+
+ YOUR_API_TOKEN = os.getenv('YOUR_API_TOKEN')
+
+
+ SYSTEM = "system"
+ USER = "user"
+ ASSISTANT = "assistant"
+
+ def clear_session() -> Tuple[str, History]:
+     return '', []
+
+ def modify_system_session(system: str) -> Tuple[str, str, History]:
+     if system is None or len(system) == 0:
+         system = default_system
+     return system, system, []
+
+ def history_to_messages(history: History, system: str) -> Messages:
+     messages = [{'role': SYSTEM, 'content': system}]
+     for h in history:
+         messages.append({'role': USER, 'content': h[0]})
+         messages.append({'role': ASSISTANT, 'content': h[1]})
+     return messages
+
+
+ def messages_to_history(messages: Messages) -> Tuple[str, History]:
+     assert messages[0]['role'] == SYSTEM
+     system = messages[0]['content']
+     history = []
+     for q, r in zip(messages[1::2], messages[2::2]):
+         history.append([q['content'], r['content']])
+     return system, history
+
+
+ def model_chat(query: Optional[str], history: Optional[History], system: str
+                ) -> Tuple[str, History, str]:
+     if query is None:
+         query = ''
+     if history is None:
+         history = []
+     messages = history_to_messages(history, system)
+     messages.append({'role': USER, 'content': query})
+
+     client = ZhipuAI(api_key=YOUR_API_TOKEN)  # fill in your own API key
+     gen = client.chat.completions.create(
+         model="glm-4",  # name of the model to call
+         messages=messages,
+         stream=True
+     )
+
+     content = ""
+     for response in gen:
+         # print(response)
+         role = response.choices[0].delta.role or ASSISTANT
+         content += response.choices[0].delta.content or ''
+
+         system, history = messages_to_history(messages + [{'role': role, 'content': content}])
+         yield '', history, system
+
+
+ if __name__ == '__main__':
+     print("hi")
+     output = model_chat("who are you?", [], "You are a helpful assistant.")
+     for o in output:
+         print(o)
+     print("done")