File size: 3,432 Bytes
7be5daa
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2dc4998
 
7be5daa
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a538210
7be5daa
 
 
 
 
 
bdaaf4b
7be5daa
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3580d1e
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
# Copyright (c) XVERSE Inc. All Rights Reserved.
#
# -*- encoding: utf-8 -*-

import gradio as gr
import openai
import os

# Application title rendered in the page header Markdown.
TITLE="XChat"

# OpenAI-compatible client pointed at the XVERSE endpoint.
# NOTE(review): raises KeyError at import time if API_KEY is unset — confirm
# deployment always provides it.
client = openai.Client(
    base_url="https://api.xverse.cn/v1",
    api_key=os.environ["API_KEY"]
)

def predict(msg, history=None):
    """Stream a chat completion for *msg*, yielding growing UI state.

    Args:
        msg: The user's new message.
        history: Flat list alternating [user, assistant, user, assistant, ...]
            from previous turns. Mutated in place as the reply streams.

    Yields:
        (tuples, history): `tuples` is a list of (user, assistant) pairs for
        the gr.Chatbot component; `history` is the updated flat list for the
        gr.State component. Yielded once per received content chunk so the
        UI updates incrementally.
    """
    # Avoid the mutable-default-argument pitfall: a shared [] would leak
    # conversation state across independent sessions.
    if history is None:
        history = []
    messages = []
    tuples = []
    # Optional system prompt from the environment.
    if len(os.environ["SYSTEM_PROMPT"]) > 0:
        messages.append({"role": "system", "content": os.environ["SYSTEM_PROMPT"]})
    # Rebuild the API message list and the chatbot pairs from flat history.
    for i in range(0, len(history), 2):
        messages.append({"role": "user", "content": history[i]})
        messages.append({"role": "assistant", "content": history[i + 1]})
        tuples.append((history[i], history[i + 1]))
    messages.append({"role": "user", "content": msg})
    response = client.chat.completions.create(
        model=os.environ["MODEL"],
        messages=messages,
        max_tokens=int(os.environ["MAX_TOKENS"]),
        top_p=float(os.environ["TOP_P"]),
        temperature=float(os.environ["TEMPERATURE"]),
        presence_penalty=float(os.environ["PRESENCE_PENALTY"]),
        stream=True
    )

    # Streaming output: accumulate the assistant reply chunk by chunk.
    snippet = ""
    appended = False
    for chunk in response:
        if chunk is None:
            continue
        delta = chunk.choices[0].delta.content
        # Skip non-content deltas (e.g. the initial role-only chunk).
        # Counting these toward "first chunk" was a bug: the first real
        # content chunk would then overwrite the PREVIOUS turn's entry.
        if delta is None:
            continue
        snippet = snippet + delta
        if not appended:
            # First content chunk of this turn: create the new entries.
            tuples.append((msg, snippet))
            history.append(msg)
            history.append(snippet)
            appended = True
        else:
            # Subsequent chunks: replace the in-progress reply in place.
            tuples[-1] = (msg, snippet)
            history[-1] = snippet
        yield tuples, history

examples = [["你是谁?", None], ["你会干什么?", None], ["写一篇爱护环境的小作文", None]]

def reset():
    """Reset handler: blank the chatbot display and start a fresh history state."""
    fresh_history = []
    return None, fresh_history

def clear_textbox():
    """Return a gradio component update that empties the input textbox."""
    blank = gr.update(value="")
    return blank

# Page-level CSS: center the H1 produced by the Markdown title banner.
css = """
h1 {
    text-align: center;
    display: block;
}
"""

# UI layout and event wiring. Kept as-is: gradio event registration is
# order-sensitive (e.g. predict before clear_textbox on the same trigger).
with gr.Blocks(css=css) as chat_demo:
    # Title banner with the WeChat mini-program logo (URL from environment).
    gr.Markdown("""# <center><font size=8>{}&nbsp;&nbsp;&nbsp;<img src="{}" alt="WeChat Mini Program" title="WeChat Mini Program" width="88" height="88" style="display:inline"></center>""".format(TITLE, os.environ["XIAOCHENGXU_LOGO"]))
    # Link bar to the project's GitHub, web chat, and API docs.
    gr.Markdown("""<center><font size=4>\
        <a href="https://github.com/xverse-ai">GitHub</a>&nbsp; | &nbsp;\
        <a href="https://chat.xverse.cn">Web</a>&nbsp; | &nbsp;\
        <a href="https://help.xverse.cn/docs/api-reference">API</a>\
        </center>"""
    )
    chatbot = gr.Chatbot(elem_id="chatbot", height=550, bubble_full_width=False, likeable=False)
    # Flat conversation history [user, assistant, ...] carried between calls.
    state = gr.State([])

    with gr.Row():
        txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter", container=False)

    with gr.Row():
        submit_btn = gr.Button(value="Submit")
        reset_btn = gr.Button(value="Reset")

    # Both Enter and the Submit button stream predict's output, then clear
    # the textbox via a second handler on the same trigger.
    txt.submit(fn=predict, inputs=[txt, state], outputs=[chatbot, state])
    txt.submit(fn=clear_textbox, inputs=None, outputs=[txt])
    submit_btn.click(fn=predict, inputs=[txt, state], outputs=[chatbot, state])
    submit_btn.click(fn=clear_textbox, inputs=None, outputs=[txt])
    reset_btn.click(fn=reset, inputs=None, outputs=[chatbot, state])
    gr.Examples(examples=examples, inputs=[txt])


if __name__ == "__main__":
    # queue() enables gradio's request queue so predict's generator output
    # streams to the client; launch locally without a public share link.
    chat_demo.queue()
    chat_demo.launch(share=False)