import gradio as gr
import openai
import os

current_dir = os.path.dirname(os.path.abspath(__file__))
css_file = os.path.join(current_dir, "style.css")

initial_prompt = "You are a helpful assistant."

def parse_text(text):
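    """Render model output as HTML: ``` fences become <pre><code> blocks,
    and later lines are escaped with spaces and line breaks preserved."""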
    lines = text.split("\n")
    for i,line in enumerate(lines):
        if "```" in line:
            items = line.split('`')
            if items[-1]:
                lines[i] = f'<pre><code class="{items[-1]}">'
            else:
                lines[i] = f'</code></pre>'
        else:
            if i>0:
                line = line.replace("<", "&lt;")
                line = line.replace(">", "&gt;")
                lines[i] = '<br/>'+line.replace(" ", "&nbsp;")
    return "".join(lines)

def get_response(system, context, raw=False):
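    """Query the OpenAI Completion API with the system prompt plus the conversation
    context. Returns the raw response when raw=True, otherwise a
    (plain text, HTML-rendered text) pair."""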
    openai.api_key = "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"
    response = openai.Completion.create(
        engine="text-davinci-002",
        prompt=f"{system}\n\n{context}",
        temperature=0.5,
        max_tokens=1024,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0
    )

    if raw:
        return response
    else:
        message = response.choices[0].text.strip()
        message_with_stats = f'{message}'
        return message, parse_text(message_with_stats)

def predict(chatbot, input_sentence, system, context):
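    """Append the user message to the context, query the model, and record the
    exchange in both the chatbot display list and the raw context."""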
    if len(input_sentence) == 0:
        return chatbot, context
    context.append(input_sentence)

    message, message_with_stats = get_response(system, context)

    chatbot.append((input_sentence, message_with_stats))

    context.append(message)

    return chatbot, context

def retry(chatbot, system, context):
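    """Drop the last model reply and regenerate it from the remaining context."""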
    if len(chatbot) == 0:
        return chatbot, context
    # Remove the previous reply, then regenerate it from the remaining context.
    context.pop()
    chatbot.pop()
    message, message_with_stats = get_response(system, context)
    chatbot.append((context[-1], message_with_stats))
    context.append(message)

    return chatbot, context

def delete_last_conversation(chatbot, context):
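    """Remove the last question/answer pair from both the chat display and the context."""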
    if len(chatbot) == 0:
        return chatbot, context
    chatbot.pop()
    context.pop()
    context.pop()

    return chatbot, context

def reduce_token(chatbot, system, context):
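    """Summarize the conversation with the model and replace the accumulated
    context with that summary to cut down the prompt's token usage."""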
    if len(chatbot) == 0:
        return chatbot, context

    context.append("Please help me summarize our conversation to reduce token usage. Don't include this sentence in the summary.")
    # Use the raw response so the token-usage statistics are available.
    response = get_response(system, context, raw=True)
    summary = response.choices[0].text.strip()

    statistics = f' This conversation token usage: [{response.usage.total_tokens} / 2048] (Prompt: {response.usage.prompt_tokens}, Response: {response.usage.completion_tokens})'

    chatbot.append(("Please help me summarize our conversation to reduce token usage.", summary + statistics))

    # Replace the full history with the summary so later prompts are shorter.
    context[:] = [f"We talked about {summary}"]
    return chatbot, context

def reset_state():
    return [], []

def update_system(new_system_prompt):
    return new_system_prompt

title = """<h1 align="center">You ask, I answer.</h1>"""
description = """<div align=center>

Not interested in describing your needs to ChatGPT? Use [ChatGPT Shortcut](https://newzone.top/chatgpt/)

</div>
"""

chatbot = []
context = [initial_prompt]
system = initial_prompt

def respond(message):
    # Run one prediction step, then render the whole chat history as a single
    # HTML string for the HTML output component below.
    predict(chatbot, message, system, context)
    return "<br/><br/>".join(f"<b>You:</b> {question}<br/>{answer}" for question, answer in chatbot)

input_text = gr.inputs.Textbox(lines=1, placeholder="Enter your message here...")
chat_history = gr.outputs.HTML(label="Chat history")

gradio_ui = gr.Interface(fn=respond,
                         inputs=input_text,
                         outputs=chat_history,
                         title=title,
                         description=description,
                         theme="compact",
                         allow_flagging=False,
                         layout="vertical",
                         css=css_file,
                         )

gradio_ui.launch()