Update app.py

app.py CHANGED
@@ -1,189 +1,122 @@
Previous version (removed lines marked with -; several removed lines did not survive extraction and are shown as bare -):

  import gradio as gr
  import os
  import sys
- import json
  import requests
  import random

-
  MODEL = "o1-preview"
  API_URL = os.getenv("API_URL")
-
- OPENAI_API_KEYS = os.getenv("OPENAI_API_KEYS").split(',')
- print (API_URL)
- print (OPENAI_API_KEYS)
- NUM_THREADS = int(os.getenv("NUM_THREADS"))

- print

  def exception_handler(exception_type, exception, traceback):
      print("%s: %s" % (exception_type.__name__, exception))
  sys.excepthook = exception_handler
  sys.tracebacklimit = 0
-
-
      payload = {
          "model": MODEL,
          "messages": [{"role": "user", "content": f"{inputs}"}],
-         "temperature"
-         "top_p":
-         "n"
          "stream": True,
-         "presence_penalty":0,
-         "frequency_penalty":0,
      }
-
-     print (OPENAI_API_KEY)
-     headers_dict = {key.decode('utf-8'): value.decode('utf-8') for key, value in request.headers.raw}
      headers = {
          "Content-Type": "application/json",
-         "Authorization": f"Bearer {OPENAI_API_KEY}"
-         "Headers": f"{headers_dict}"
      }

-
-     if chat_counter != 0 :
          messages = []
          for i, data in enumerate(history):
-             if i % 2 == 0
-
-             else:
-                 role = 'assistant'
-             message = {}
-             message["role"] = role
-             message["content"] = data
              messages.append(message)
-
-
-         message["role"] = "user"
-         message["content"] = inputs
-         messages.append(message)
          payload = {
              "model": MODEL,
              "messages": messages,
-             "temperature"
              "top_p": top_p,
-             "n"
              "stream": True,
-             "presence_penalty":0,
-             "frequency_penalty":0,
          }

      chat_counter += 1
-
      history.append(inputs)
-     token_counter = 0
-     partial_words = ""
      counter = 0

      try:
-         #
-         response =
-
-         #if response_code.strip() != "<Response [200]>":
-         #    #print(f"response code - {response}")
-         #    raise Exception(f"Sorry, hitting rate limit. Please try again later. {response}")
-
          for chunk in response.iter_lines():
-             #print (chunk)
-             #sys.stdout.flush()
-             #Skipping first chunk
              if counter == 0:
                  counter += 1
                  continue
-
-             # check whether each line is non-empty
-             if chunk.decode() :
                  chunk = chunk.decode()
-                 # decode each line as response data is in bytes
                  if len(chunk) > 12 and "content" in json.loads(chunk[6:])['choices'][0]['delta']:
-                     partial_words
                      if token_counter == 0:
                          history.append(" " + partial_words)
                      else:
                          history[-1] = partial_words
                      token_counter += 1
-                     yield [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2)
      except Exception as e:
-         print
-         yield [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2)
-         print(json.dumps({"chat_counter": chat_counter, "payload": payload, "partial_words": partial_words, "token_counter": token_counter, "counter": counter}))
-

  def reset_textbox():
-     return gr.update(value='', interactive=False)
-
-
-
-
-
-
- User: <utterance>
- Assistant: <utterance>
- User: <utterance>
- Assistant: <utterance>
- ...
- ```
- In this app, you can explore the outputs of a gpt-4 turbo LLM.
- """
-
- theme = gr.themes.Default(primary_hue="green")
-
- with gr.Blocks(css = """#col_container { margin-left: auto; margin-right: auto;}
-                 #chatbot {height: 520px; overflow: auto;}""",
-                theme=theme) as demo:
      gr.HTML(title)
-     gr.HTML("""<h3 align="center" style="color: red;">If this app doesn't respond, consider trying our O1-mini app:<br/><a href="https://huggingface.co/spaces/yuntian-deng/o1mini">https://huggingface.co/spaces/yuntian-deng/o1mini</a></h3>""")
-
-     #gr.HTML('''<center><a href="https://huggingface.co/spaces/ysharma/ChatGPT4?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>Duplicate the Space and run securely with your OpenAI API Key</center>''')
-     with gr.Column(elem_id = "col_container", visible=False) as main_block:
-         #GPT4 API Key is provided by Huggingface
-         #openai_api_key = gr.Textbox(type='password', label="Enter only your GPT4 OpenAI API key here")
-         chatbot = gr.Chatbot(elem_id='chatbot') #c
-         inputs = gr.Textbox(placeholder= "Hi there!", label= "Type an input and press Enter") #t
-         state = gr.State([]) #s
-         with gr.Row():
-             with gr.Column(scale=7):
-                 b1 = gr.Button(visible=not DISABLED) #.style(full_width=True)
-             with gr.Column(scale=3):
-                 server_status_code = gr.Textbox(label="Status code from OpenAI server", )
-
-         #inputs, top_p, temperature, top_k, repetition_penalty
-         with gr.Accordion("Parameters", open=False):
-             top_p = gr.Slider( minimum=-0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p (nucleus sampling)",)
-             temperature = gr.Slider( minimum=-0, maximum=5.0, value=1.0, step=0.1, interactive=True, label="Temperature",)
-             #top_k = gr.Slider( minimum=1, maximum=50, value=4, step=1, interactive=True, label="Top-k",)
-             #repetition_penalty = gr.Slider( minimum=0.1, maximum=3.0, value=1.03, step=0.01, interactive=True, label="Repetition Penalty", )
-             chat_counter = gr.Number(value=0, visible=False, precision=0)

-
-
-
- """)
-     accept_button = gr.Button("I Agree")
-
-     def enable_inputs():
-         return gr.update(visible=False), gr.update(visible=True)
-
-     accept_button.click(None, None, accept_checkbox, js=js, queue=False)
-     accept_checkbox.change(fn=enable_inputs, inputs=[], outputs=[user_consent_block, main_block], queue=False)
-
-     inputs.submit(reset_textbox, [], [inputs, b1], queue=False)
-     inputs.submit(predict, [inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code, inputs, b1],) #openai_api_key
-     b1.click(reset_textbox, [], [inputs, b1], queue=False)
-     b1.click(predict, [inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code, inputs, b1],) #openai_api_key
-
-     demo.queue(max_size=10, default_concurrency_limit=NUM_THREADS, api_open=False).launch(share=False)
Updated version (added lines marked with +; comments translated from Chinese):

  import gradio as gr
  import os
  import sys
+ import json
  import requests
  import random
+ from tenacity import retry, wait_fixed, stop_after_attempt

+ # Model configuration and API URL
  MODEL = "o1-preview"
  API_URL = os.getenv("API_URL")
+ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

+ print(API_URL)
+ print(OPENAI_API_KEY)

+ # Exception handler
  def exception_handler(exception_type, exception, traceback):
      print("%s: %s" % (exception_type.__name__, exception))
  sys.excepthook = exception_handler
  sys.tracebacklimit = 0
+
+ # Retry mechanism: if the request fails, retry up to 5 times, waiting 2 seconds between attempts
+ @retry(stop=stop_after_attempt(5), wait=wait_fixed(2))
+ def call_openai_api(payload, headers):
+     response = requests.post(API_URL, headers=headers, json=payload, stream=True)
+     response.raise_for_status()  # raise an exception if the status code is not 200
+     return response
+
+ # Core function that handles the API request
+ def predict(inputs, top_p, temperature, chat_counter, chatbot, history):
      payload = {
          "model": MODEL,
          "messages": [{"role": "user", "content": f"{inputs}"}],
+         "temperature": temperature,
+         "top_p": top_p,
+         "n": 1,
          "stream": True,
+         "presence_penalty": 0,
+         "frequency_penalty": 0,
      }
+
      headers = {
          "Content-Type": "application/json",
+         "Authorization": f"Bearer {OPENAI_API_KEY}"
      }

+     if chat_counter != 0:
          messages = []
          for i, data in enumerate(history):
+             role = 'user' if i % 2 == 0 else 'assistant'
+             message = {"role": role, "content": data}
              messages.append(message)
+
+         messages.append({"role": "user", "content": inputs})
          payload = {
              "model": MODEL,
              "messages": messages,
+             "temperature": temperature,
              "top_p": top_p,
+             "n": 1,
              "stream": True,
+             "presence_penalty": 0,
+             "frequency_penalty": 0,
          }

      chat_counter += 1
      history.append(inputs)
+     token_counter = 0
+     partial_words = ""
      counter = 0

      try:
+         # API request that uses the retry mechanism
+         response = call_openai_api(payload, headers)
+
          for chunk in response.iter_lines():
              if counter == 0:
                  counter += 1
                  continue
+             if chunk.decode():
                  chunk = chunk.decode()
                  if len(chunk) > 12 and "content" in json.loads(chunk[6:])['choices'][0]['delta']:
+                     partial_words += json.loads(chunk[6:])['choices'][0]["delta"]["content"]
                      if token_counter == 0:
                          history.append(" " + partial_words)
                      else:
                          history[-1] = partial_words
                      token_counter += 1
+                     yield [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2)], history, chat_counter  # update the chatbot contents
+
      except Exception as e:
+         print(f'Error encountered: {e}')
+         yield [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2)], history, chat_counter

+ # Reset the input textbox
  def reset_textbox():
+     return gr.update(value='', interactive=False)
+
+ # Gradio UI
+ title = """<h1 align="center">OpenAI-O1-Preview: Personal Version</h1>"""
+ description = """This app allows a single user to interact with an OpenAI GPT-4 Turbo model."""
+
+ with gr.Blocks() as demo:
      gr.HTML(title)

+     chatbot = gr.Chatbot()  # chat window
+     inputs = gr.Textbox(placeholder="Type your input here", label="Input")
+     state = gr.State([])  # stores the conversation history
+     chat_counter = gr.Number(value=0, visible=False, precision=0)  # message counter
+
+     # Parameter controls
+     with gr.Accordion("Advanced Parameters", open=False):
+         top_p = gr.Slider(minimum=0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p")
+         temperature = gr.Slider(minimum=0, maximum=5.0, value=1.0, step=0.1, interactive=True, label="Temperature")
+
+     # Handle user-submitted input
+     inputs.submit(reset_textbox, [], [inputs], queue=False)
+     inputs.submit(predict, [inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter], queue=False)
+
+ # Launch the Gradio app
+ demo.launch(share=False)
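
The functional core of this update is the tenacity-based retry around the POST request. As a rough standalone illustration (flaky_call and the attempt counter are hypothetical stand-ins for call_openai_api and a transient network error), @retry(stop=stop_after_attempt(5), wait=wait_fixed(2)) re-runs the decorated function whenever it raises, up to five attempts with two seconds between them; if every attempt fails, tenacity raises a RetryError by default:

from tenacity import retry, stop_after_attempt, wait_fixed

attempts = {"count": 0}

# Hypothetical stand-in for call_openai_api: fails twice, then succeeds on the third try.
@retry(stop=stop_after_attempt(5), wait=wait_fixed(2))
def flaky_call():
    attempts["count"] += 1
    if attempts["count"] < 3:
        raise RuntimeError("simulated transient failure")
    return "ok"

print(flaky_call())       # "ok", returned on the third attempt
print(attempts["count"])  # 3

Because raise_for_status() is called inside the decorated function, HTTP error responses (for example a 429 or 500) go through the same retry path as connection failures.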
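
For reference, the streaming loop in predict assumes each non-empty line from response.iter_lines() is an OpenAI-style server-sent event of the form data: {...}, which is why chunk[6:] strips the leading "data: " before json.loads; the len(chunk) > 12 check also filters out short lines such as the terminating "data: [DONE]" event (exactly 12 characters). A minimal sketch of that parsing step, with hypothetical hard-coded chunks in place of a live response:

import json

# Hypothetical chunks in the shape produced by response.iter_lines() for a streamed completion.
sample_chunks = [
    b'data: {"choices": [{"delta": {"role": "assistant"}}]}',
    b'data: {"choices": [{"delta": {"content": "Hello"}}]}',
    b'data: {"choices": [{"delta": {"content": " world"}}]}',
    b'data: [DONE]',
]

partial_words = ""
for chunk in sample_chunks:
    chunk = chunk.decode()
    # Same guard as in predict: drop short lines and deltas that carry no "content".
    if len(chunk) > 12 and "content" in json.loads(chunk[6:])['choices'][0]['delta']:
        partial_words += json.loads(chunk[6:])['choices'][0]["delta"]["content"]

print(partial_words)  # -> Hello world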