Spaces: Build error
Update app.py
app.py CHANGED
@@ -1,5 +1,9 @@
 import gradio as gr
 import openai
+import os
+
+current_dir = os.path.dirname(os.path.abspath(__file__))
+css_file = os.path.join(current_dir, "style.css")

 initial_prompt = "You are a helpful assistant."

@@ -19,26 +23,23 @@ def parse_text(text):
         lines[i] = '<br/>'+line.replace(" ", " ")
     return "".join(lines)

-def get_response(system, context, raw
+def get_response(system, context, raw=False):
     openai.api_key = "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"
     response = openai.Completion.create(
         engine="text-davinci-002",
         prompt=f"{system}\n\n{context}",
-        temperature=0.
+        temperature=0.5,
         max_tokens=1024,
         top_p=1,
         frequency_penalty=0,
-        presence_penalty=0
-        stop=None,
+        presence_penalty=0
     )
+
     if raw:
         return response
     else:
-
-
-
-        message_with_stats = f'{message}\n\n{statistics}'
-
+        message = response.choices[0].text.strip()
+        message_with_stats = f'{message}'
         return message, parse_text(message_with_stats)

 def predict(chatbot, input_sentence, system, context):
@@ -48,26 +49,43 @@ def predict(chatbot, input_sentence, system, context):

     message, message_with_stats = get_response(system, context)

-    context.append(message)
-
     chatbot.append((input_sentence, message_with_stats))

+    context.append(message)
+
     return chatbot, context

 def retry(chatbot, system, context):
     if len(context) == 0:
         return [], []
-
-
+    context.pop()
+    chatbot.pop()

-    chatbot[-1] = (context[-2], message_with_stats)
     return chatbot, context

 def delete_last_conversation(chatbot, context):
     if len(context) == 0:
         return [], []
-    chatbot
-    context
+    chatbot.pop()
+    context.pop()
+    context.pop()
+
+    return chatbot, context
+
+def reduce_token(chatbot, system, context):
+    if len(context) == 0:
+        return [], []
+    context.pop()
+
+    context.append("Please help me summarize our conversation to reduce token usage. Don't include this sentence in the summary.")
+    message, message_with_stats = get_response(system, context, raw=True)
+    summary = message.choices[0].text.strip()
+
+    statistics = f'This conversation token usage [{message.total_tokens} / 2048] (Prompt: {message.prompt_length}, Response: {message.choices[0].length})'
+
+    chatbot.append(("Please help me summarize our conversation to reduce token usage.", summary + statistics))
+
+    context.append(f"We talked about {summary}")
     return chatbot, context

 def reset_state():
@@ -76,23 +94,32 @@ def reset_state():
 def update_system(new_system_prompt):
     return new_system_prompt

-title = """<h1 align="center">
+title = """<h1 align="center">You ask, I answer.</h1>"""
 description = """<div align=center>

-
+Not interested in describing your needs to ChatGPT? Use [ChatGPT Shortcut](https://newzone.top/chatgpt/)

 </div>
 """

-gr.
-
-
-
-
-
-
-
-
-
-
-
+with gr.blocks() as demo:
+    gr.html(title)
+    chatbot = []
+    context = [initial_prompt]
+    system = initial_prompt
+
+    input_text = gr.inputs.Textbox(lines=1, placeholder="Enter your message here...")
+    chat_history = gr.outputs.HTML(markdown=False)
+
+    gradio_ui = gr.Interface(fn=lambda message: predict(chatbot, message, system, context),
+                             inputs=input_text,
+                             outputs=chat_history,
+                             title=title,
+                             description=description,
+                             theme="compact",
+                             allow_flagging=False,
+                             layout="vertical",
+                             css=css_file,
+                             )
+
+    gradio_ui.launch()
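
A note on the "Build error" status: Gradio exposes gr.Blocks and gr.HTML, not gr.blocks or gr.html, so the lowercase calls in the added UI block would raise AttributeError as soon as the Space starts. A minimal corrected sketch of the same layout, assuming Gradio 3.x and reusing the predict, initial_prompt, title and css_file names defined above; the Chatbot/State/Textbox wiring is an illustration, not the committed code:

import gradio as gr

with gr.Blocks(css=open(css_file).read()) as demo:  # gr.Blocks, not gr.blocks()
    gr.HTML(title)                                  # gr.HTML, not gr.html()
    chatbot = gr.Chatbot()                          # shows the (user, reply) pairs predict() appends
    context = gr.State([initial_prompt])            # running conversation history
    system = gr.State(initial_prompt)               # system prompt
    input_text = gr.Textbox(lines=1, placeholder="Enter your message here...")
    # predict(chatbot, input_sentence, system, context) -> (chatbot, context)
    input_text.submit(predict, [chatbot, input_text, system, context], [chatbot, context])

demo.launch()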
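
The new reduce_token function reads message.total_tokens, message.prompt_length and message.choices[0].length, and unpacks the raw=True call into two values even though that branch of get_response returns a single Completion response. In the openai 0.x SDK the token counts live under response.usage instead. A hedged sketch of that part under those assumptions (conversation_summary_stats is a hypothetical helper, not code from this commit):

def conversation_summary_stats(system, context):
    # raw=True makes get_response return the full Completion response object.
    response = get_response(system, context, raw=True)
    summary = response.choices[0].text.strip()
    usage = response.usage  # token accounting fields in the openai 0.x SDK
    statistics = (f'This conversation token usage [{usage.total_tokens} / 2048] '
                  f'(Prompt: {usage.prompt_tokens}, Response: {usage.completion_tokens})')
    return summary, statistics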
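
Separately, get_response hardcodes an OpenAI API key in app.py. A common alternative is to read it from the environment; the sketch below assumes the key is stored under the conventional OPENAI_API_KEY name (for example as a Space secret), which this commit does not set up:

import os
import openai

# Read the key from the environment instead of committing it to the repository.
openai.api_key = os.getenv("OPENAI_API_KEY")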