mikeee committed on
Commit
f2500aa
1 Parent(s): 2e24a4e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +127 -2
app.py CHANGED
@@ -1,4 +1,129 @@
 
 
 
 
 
 
 
 
1
  import gradio as gr
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
 
3
- model_name = "models/THUDM/chatglm2-6b-int4"
4
- gr.load(model_name).lauch()
 
1
# Earlier single-line approach, kept for reference (original had a typo:
# .lauch should be .launch):
# gr.load("models/THUDM/chatglm2-6b-int4").launch()

# %%writefile demo-4bit.py

from transformers import AutoModel, AutoTokenizer
import gradio as gr
import mdtex2html
import torch  # hoisted from mid-file: imports belong in the top import group

model_name = "THUDM/chatglm2-6b"

# trust_remote_code is required: ChatGLM2 ships its own modeling/tokenizer code.
# NOTE(review): tokenizer comes from "chatglm2-6b" while weights below come from
# "chatglm2-6b-int4" — presumably they share a vocabulary; confirm upstream.
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)

# Upstream note (translated): adjust as needed; only 4/8-bit quantization is
# currently supported, e.g.:
# model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).quantize(4).cuda()

has_cuda = torch.cuda.is_available()
# has_cuda = False  # uncomment to force CPU inference

if has_cuda:
    # int4-quantized checkpoint on GPU ("3.92" in the original comment —
    # presumably GB of VRAM; TODO confirm)
    model = AutoModel.from_pretrained("THUDM/chatglm2-6b-int4", trust_remote_code=True).cuda()
else:
    # CPU fallback: run the same checkpoint in float32
    model = AutoModel.from_pretrained("THUDM/chatglm2-6b-int4", trust_remote_code=True).float()

# Inference only: put the model in eval mode (disables dropout etc.)
model = model.eval()
32
+
33
+ """Override Chatbot.postprocess"""
34
+
35
+
36
+ def postprocess(self, y):
37
+ if y is None:
38
+ return []
39
+ for i, (message, response) in enumerate(y):
40
+ y[i] = (
41
+ None if message is None else mdtex2html.convert((message)),
42
+ None if response is None else mdtex2html.convert(response),
43
+ )
44
+ return y
45
+
46
+
47
+ gr.Chatbot.postprocess = postprocess
48
+
49
+
50
# Escapes applied inside fenced code blocks. A single str.translate pass is
# equivalent to the original chain of 12 .replace() calls because no
# replacement string contains a character that is itself a translation target.
# The backtick mapping uses a raw string: the original "\`" was an invalid
# escape sequence (SyntaxWarning today, SyntaxError in future CPython).
_CODE_ESCAPES = str.maketrans({
    "`": r"\`",
    "<": "&lt;",
    ">": "&gt;",
    " ": "&nbsp;",
    "*": "&ast;",
    "_": "&lowbar;",
    "-": "&#45;",
    ".": "&#46;",
    "!": "&#33;",
    "(": "&#40;",
    ")": "&#41;",
    "$": "&#36;",
})


def parse_text(text):
    """Render *text* as HTML for the chatbot.

    Adapted from https://github.com/GaiZhenbiao/ChuanhuChatGPT/.
    Fenced ``` blocks become <pre><code> sections; inside a fence,
    markdown/HTML-significant characters are escaped to HTML entities.
    Empty lines are dropped and remaining line breaks become <br>.
    """
    lines = [line for line in text.split("\n") if line != ""]
    count = 0  # number of ``` fences seen so far; odd => inside a code block
    for i, line in enumerate(lines):
        if "```" in line:
            count += 1
            items = line.split("`")
            if count % 2 == 1:
                # opening fence: text after ``` is the language tag
                lines[i] = f'<pre><code class="language-{items[-1]}">'
            else:
                # closing fence (no interpolation needed, so no f-string)
                lines[i] = "<br></code></pre>"
        else:
            if i > 0:
                if count % 2 == 1:
                    # inside a code block: escape special characters
                    line = line.translate(_CODE_ESCAPES)
                lines[i] = "<br>" + line
    return "".join(lines)
81
+
82
+
83
def predict(input, chatbot, max_length, top_p, temperature, history, past_key_values):
    """Stream a ChatGLM2 reply for *input*, yielding updated UI state.

    Appends the user turn to *chatbot*, then yields
    (chatbot, history, past_key_values) once per streamed chunk so the
    Gradio frontend can update incrementally.
    (Parameter name *input* shadows the builtin; kept so positional wiring
    from the UI stays unchanged.)
    """
    chatbot.append((parse_text(input), ""))
    stream = model.stream_chat(
        tokenizer,
        input,
        history,
        past_key_values=past_key_values,
        return_past_key_values=True,
        max_length=max_length,
        top_p=top_p,
        temperature=temperature,
    )
    for response, history, past_key_values in stream:
        chatbot[-1] = (parse_text(input), parse_text(response))
        yield chatbot, history, past_key_values
92
+
93
+
94
def reset_user_input():
    """Clear the user input textbox."""
    return gr.update(value="")
96
+
97
+
98
def reset_state():
    """Reset chatbot display, chat history, and cached past_key_values."""
    return [], [], None
100
+
101
+
102
# Assemble the chat UI: a chatbot pane, an input column with a submit button,
# and a sidebar with generation controls.
with gr.Blocks() as demo:
    gr.HTML("""<h1 align="center">ChatGLM2-6B</h1>""")

    chatbot = gr.Chatbot()
    with gr.Row():
        with gr.Column(scale=4):
            with gr.Column(scale=12):
                user_input = gr.Textbox(
                    show_label=False, placeholder="Input...", lines=10
                ).style(container=False)
            with gr.Column(min_width=32, scale=1):
                submit_btn = gr.Button("Submit", variant="primary")
        with gr.Column(scale=1):
            empty_btn = gr.Button("Clear History")
            max_length = gr.Slider(0, 32768, value=8192, step=1.0, label="Maximum length", interactive=True)
            top_p = gr.Slider(0, 1, value=0.8, step=0.01, label="Top P", interactive=True)
            temperature = gr.Slider(0, 1, value=0.95, step=0.01, label="Temperature", interactive=True)

    # Per-session state carried between predict() calls.
    history = gr.State([])
    past_key_values = gr.State(None)

    # Submit streams the reply, then a second handler clears the textbox.
    submit_btn.click(
        predict,
        [user_input, chatbot, max_length, top_p, temperature, history, past_key_values],
        [chatbot, history, past_key_values],
        show_progress=True,
    )
    submit_btn.click(reset_user_input, [], [user_input])

    empty_btn.click(reset_state, outputs=[chatbot, history, past_key_values], show_progress=True)

# demo.queue().launch(share=False, inbrowser=True)
demo.queue().launch(share=True, inbrowser=True, debug=True)