curry tang
committed on
Commit
·
d1afe32
1
Parent(s):
c62892c
update
Browse files
app.py
CHANGED
@@ -157,6 +157,62 @@ def translate_doc(_language_input, _language_output, _doc, _chat):
|
|
157 |
|
158 |
with gr.Blocks() as app:
|
159 |
chat_engine = gr.State(value=None)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
160 |
with gr.Tab('智能聊天'):
|
161 |
with gr.Row():
|
162 |
with gr.Column(scale=2, min_width=600):
|
@@ -167,62 +223,8 @@ with gr.Blocks() as app:
|
|
167 |
textbox=gr.MultimodalTextbox(lines=1),
|
168 |
additional_inputs=[chat_engine]
|
169 |
)
|
170 |
-
with gr.Column(scale=1, min_width=300)
|
171 |
-
|
172 |
-
with gr.Column():
|
173 |
-
provider = gr.Dropdown(
|
174 |
-
label='模型厂商',
|
175 |
-
choices=['DeepSeek', 'OpenRouter', 'Tongyi'],
|
176 |
-
value='DeepSeek',
|
177 |
-
info='不同模型厂商参数,效果和价格略有不同,请先设置好对应模型厂商的 API Key。',
|
178 |
-
)
|
179 |
-
|
180 |
-
@gr.render(inputs=provider)
|
181 |
-
def show_model_config_panel(_provider):
|
182 |
-
_support_llm = None
|
183 |
-
if _provider == 'OpenRouter':
|
184 |
-
_support_llm = open_router_llm
|
185 |
-
if _provider == 'Tongyi':
|
186 |
-
_support_llm = tongyi_llm
|
187 |
-
if _provider == 'DeepSeek':
|
188 |
-
_support_llm = deep_seek_llm
|
189 |
-
with gr.Column():
|
190 |
-
model = gr.Dropdown(
|
191 |
-
label='模型',
|
192 |
-
choices=_support_llm.support_models,
|
193 |
-
value=_support_llm.default_model
|
194 |
-
)
|
195 |
-
temperature = gr.Slider(
|
196 |
-
minimum=0.0,
|
197 |
-
maximum=1.0,
|
198 |
-
step=0.1,
|
199 |
-
value=_support_llm.default_temperature,
|
200 |
-
label="Temperature",
|
201 |
-
key="temperature",
|
202 |
-
)
|
203 |
-
max_tokens = gr.Slider(
|
204 |
-
minimum=512,
|
205 |
-
maximum=_support_llm.default_max_tokens,
|
206 |
-
step=128,
|
207 |
-
value=_support_llm.default_max_tokens,
|
208 |
-
label="Max Tokens",
|
209 |
-
key="max_tokens",
|
210 |
-
)
|
211 |
-
model.change(
|
212 |
-
fn=update_chat,
|
213 |
-
inputs=[provider, model, temperature, max_tokens],
|
214 |
-
outputs=[chat_engine],
|
215 |
-
)
|
216 |
-
temperature.change(
|
217 |
-
fn=update_chat,
|
218 |
-
inputs=[provider, model, temperature, max_tokens],
|
219 |
-
outputs=[chat_engine],
|
220 |
-
)
|
221 |
-
max_tokens.change(
|
222 |
-
fn=update_chat,
|
223 |
-
inputs=[provider, model, temperature, max_tokens],
|
224 |
-
outputs=[chat_engine],
|
225 |
-
)
|
226 |
|
227 |
with gr.Tab('代码优化'):
|
228 |
with gr.Row():
|
|
|
157 |
|
158 |
with gr.Blocks() as app:
|
159 |
chat_engine = gr.State(value=None)
|
160 |
+
with gr.Accordion('模型参数设置', open=False):
|
161 |
+
with gr.Row():
|
162 |
+
provider = gr.Dropdown(
|
163 |
+
label='模型厂商',
|
164 |
+
choices=['DeepSeek', 'OpenRouter', 'Tongyi'],
|
165 |
+
value='DeepSeek',
|
166 |
+
info='不同模型厂商参数,效果和价格略有不同,请先设置好对应模型厂商的 API Key。',
|
167 |
+
)
|
168 |
+
|
169 |
+
@gr.render(inputs=provider)
|
170 |
+
def show_model_config_panel(_provider):
|
171 |
+
_support_llm = None
|
172 |
+
if _provider == 'OpenRouter':
|
173 |
+
_support_llm = open_router_llm
|
174 |
+
if _provider == 'Tongyi':
|
175 |
+
_support_llm = tongyi_llm
|
176 |
+
if _provider == 'DeepSeek':
|
177 |
+
_support_llm = deep_seek_llm
|
178 |
+
with gr.Row():
|
179 |
+
model = gr.Dropdown(
|
180 |
+
label='模型',
|
181 |
+
choices=_support_llm.support_models,
|
182 |
+
value=_support_llm.default_model
|
183 |
+
)
|
184 |
+
temperature = gr.Slider(
|
185 |
+
minimum=0.0,
|
186 |
+
maximum=1.0,
|
187 |
+
step=0.1,
|
188 |
+
value=_support_llm.default_temperature,
|
189 |
+
label="Temperature",
|
190 |
+
key="temperature",
|
191 |
+
)
|
192 |
+
max_tokens = gr.Slider(
|
193 |
+
minimum=512,
|
194 |
+
maximum=_support_llm.default_max_tokens,
|
195 |
+
step=128,
|
196 |
+
value=_support_llm.default_max_tokens,
|
197 |
+
label="Max Tokens",
|
198 |
+
key="max_tokens",
|
199 |
+
)
|
200 |
+
model.change(
|
201 |
+
fn=update_chat,
|
202 |
+
inputs=[provider, model, temperature, max_tokens],
|
203 |
+
outputs=[chat_engine],
|
204 |
+
)
|
205 |
+
temperature.change(
|
206 |
+
fn=update_chat,
|
207 |
+
inputs=[provider, model, temperature, max_tokens],
|
208 |
+
outputs=[chat_engine],
|
209 |
+
)
|
210 |
+
max_tokens.change(
|
211 |
+
fn=update_chat,
|
212 |
+
inputs=[provider, model, temperature, max_tokens],
|
213 |
+
outputs=[chat_engine],
|
214 |
+
)
|
215 |
+
|
216 |
with gr.Tab('智能聊天'):
|
217 |
with gr.Row():
|
218 |
with gr.Column(scale=2, min_width=600):
|
|
|
223 |
textbox=gr.MultimodalTextbox(lines=1),
|
224 |
additional_inputs=[chat_engine]
|
225 |
)
|
226 |
+
with gr.Column(scale=1, min_width=300):
|
227 |
+
gr.Radio(["无", "开发助手", "文案助手"], label="类型", info="请选择类型"),
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
228 |
|
229 |
with gr.Tab('代码优化'):
|
230 |
with gr.Row():
|