Update app.py
app.py CHANGED
@@ -6,15 +6,6 @@ api_key=os.environ.get('qwen_API_KEY')
 For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
 """
 client = InferenceClient("Qwen/Qwen2.5-72B-Instruct",token=api_key)
-latex_delimiters = [
-    {"left": "\\(", "right": "\\)", "display": False},
-    {"left": "\\[", "right": "\\]", "display": True},
-    {"left": "\\begin{equation}", "right": "\\end{equation}", "display": True},
-    {"left": "\\begin{align}", "right": "\\end{align}", "display": True},
-    {"left": "\\begin{alignat}", "right": "\\end{alignat}", "display": True},
-    {"left": "\\begin{gather}", "right": "\\end{gather}", "display": True},
-    {"left": "\\begin{CD}", "right": "\\end{CD}", "display": True},
-]
 
 def respond(
     message,
@@ -22,8 +13,7 @@ def respond(
     system_message,
     max_tokens,
     temperature,
-    top_p,
-    latex_delimiters=None
+    top_p
 ):
     messages = [{"role": "system", "content": system_message}]
 
@@ -42,16 +32,20 @@ def respond(
         max_tokens=max_tokens,
         stream=True,
         temperature=temperature,
-        top_p=top_p,
-        latex_delimiters=latex_delimiters
+        top_p=top_p
     ):
         token = message.choices[0].delta.content
 
         response += token
         yield response
 
-
-
+
+# Gradio ChatInterface setup with MathJax
+mathjax_script = """
+<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.13.11/katex.min.css" integrity="sha384-Um5gpz1odJg5Z4HAmzptBBvMrqERW5Z4icqS+8iu7r5vL20nmpEQ6/6BwYIuL8" crossorigin="anonymous">
+<script defer src="https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.13.11/katex.min.js" integrity="sha384-YNHdsYkH6gFf146yyNCToHNauALlYwViumE/lp77cA3E2g8VoECF1Sn4aa/n9I" crossorigin="anonymous"></script>
+<script defer src="https://cdn.jsdelivr.net/npm/katex@0.13.11/dist/contrib/auto-render.min.js" integrity="sha384-vZTG03m+2yp6N6BNi5iM4rW4oIwk5DfcNdFfxkk9ZWpDriOkXX8vo6LrT9HH" crossorigin="anonymous"
+    onload="renderMathInElement(document.body);"></script>
 """
 demo = gr.ChatInterface(
     respond,
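A side note on the streaming loop this hunk keeps: with `stream=True`, `chat_completion` can emit chunks whose `delta.content` is `None` (for example a final chunk that carries only a finish reason), so `response += token` can raise a `TypeError`. A minimal guard, sketched over the same loop with the diff's own names; only the `None` check is new:

    for message in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p
    ):
        token = message.choices[0].delta.content
        if token is None:
            # some stream chunks carry no text; skip instead of appending None
            continue
        response += token
        yield response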
@@ -65,7 +59,7 @@ demo = gr.ChatInterface(
         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
         gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
     ],
-
+    css=mathjax_script  # add MathJax script
 )
 
 if __name__ == "__main__":
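A note on the new `css=mathjax_script` argument: the string built in the previous hunk is HTML (`<link>` and `<script>` tags), while Gradio's `css` parameter expects stylesheet text, which is injected into a `<style>` tag and never executed as script. Gradio's `head` parameter is the documented place for extra tags in the page's `<head>`. A minimal sketch of that route, assuming a Gradio 4.x build in which `ChatInterface` accepts `head` the way `Blocks` does:

    import gradio as gr

    # Same KaTeX auto-render snippet as in the diff, but passed through
    # `head` so the <link>/<script> tags land in the page's <head>.
    katex_head = """
    <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.13.11/katex.min.css">
    <script defer src="https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.13.11/katex.min.js"></script>
    <script defer src="https://cdn.jsdelivr.net/npm/katex@0.13.11/dist/contrib/auto-render.min.js"
            onload="renderMathInElement(document.body);"></script>
    """

    demo = gr.ChatInterface(
        respond,          # the streaming handler from this commit
        head=katex_head,  # assumption: this Gradio version exposes `head` here
    )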
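Also worth noting: the `latex_delimiters` list this commit deletes is exactly the shape Gradio understands natively. `gr.Chatbot` accepts a `latex_delimiters` argument (a list of `{"left", "right", "display"}` dicts) and renders the math itself, with no external script tags; the configured chatbot can then be handed to `ChatInterface`. A minimal sketch of that alternative, reusing part of the deleted list (assuming a Gradio version recent enough to have both parameters):

    import gradio as gr

    # Delimiters in the format gr.Chatbot expects, taken from the list
    # removed by this commit.
    latex_delimiters = [
        {"left": "\\(", "right": "\\)", "display": False},
        {"left": "\\[", "right": "\\]", "display": True},
        {"left": "\\begin{equation}", "right": "\\end{equation}", "display": True},
    ]

    demo = gr.ChatInterface(
        respond,  # the streaming handler from this commit
        chatbot=gr.Chatbot(latex_delimiters=latex_delimiters),
    )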