Update app.py
app.py CHANGED
@@ -769,10 +769,18 @@ def chat_response_stream_multiturn(
     max_tokens: int,
     frequency_penalty: float,
     presence_penalty: float,
-    current_time: Optional[float] = None,
     system_prompt: Optional[str] = SYSTEM_PROMPT_1,
+    current_time: Optional[float] = None,
     profile: Optional[gr.OAuthProfile] = None,
 ) -> str:
+    """
+    gr.Number(value=temperature, label='Temperature (higher -> more random)'),
+    gr.Number(value=max_tokens, label='Max generated tokens (increase if want more generation)'),
+    gr.Number(value=frequence_penalty, label='Frequency penalty (> 0 encourage new tokens over repeated tokens)'),
+    gr.Number(value=presence_penalty, label='Presence penalty (> 0 encourage new tokens, < 0 encourage existing tokens)'),
+    gr.Textbox(value=sys_prompt, label='System prompt', lines=8, interactive=False)
+    gr.Number(value=0, label='current_time', visible=False),
+    """
     global LOG_FILE, LOG_PATH
     if DEBUG:
         yield from debug_chat_response_stream_multiturn(message, history)
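The reorder above matters because these parameters are filled positionally: Gradio passes each additional input, in list order, after (message, history), so a parameter's position rather than its name decides which widget feeds it. A minimal standalone sketch of the mismatch the move avoids (toy signatures, not the app's real ones):

    # Toy sketch: reordering defaulted parameters rebinds positional values.
    def old_sig(message, current_time=None, system_prompt='SYS'):
        return {'current_time': current_time, 'system_prompt': system_prompt}

    def new_sig(message, system_prompt='SYS', current_time=None):
        return {'current_time': current_time, 'system_prompt': system_prompt}

    # Widget values arrive in list order: message, system prompt, current time.
    args = ['hi', 'You are a helpful assistant.', 1700000000.0]

    print(old_sig(*args))  # system prompt lands in current_time: mismatched
    print(new_sig(*args))  # {'current_time': 1700000000.0, 'system_prompt': 'You are a helpful assistant.'}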
@@ -881,8 +889,8 @@ def generate_free_form_stream(
     max_tokens: int,
     frequency_penalty: float,
     presence_penalty: float,
-    current_time: Optional[float] = None,
     stop_strings: str = '<s>,</s>,<|im_start|>,<|im_end|>',
+    current_time: Optional[float] = None,
 ) -> str:
     global LOG_FILE, LOG_PATH
     if DEBUG:
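generate_free_form_stream gets the same treatment, with current_time moved behind stop_strings. Note that stop_strings is a single comma-separated string rather than a list; a plausible way to split it into individual stop sequences before generation (parse_stop_strings is an illustrative helper, not taken from the source):

    def parse_stop_strings(stop_strings: str) -> list:
        # '<s>,</s>,<|im_start|>,<|im_end|>' -> ['<s>', '</s>', '<|im_start|>', '<|im_end|>']
        return [s for s in stop_strings.split(',') if s.strip()]

    assert parse_stop_strings('<s>,</s>,<|im_start|>,<|im_end|>') == \
        ['<s>', '</s>', '<|im_start|>', '<|im_end|>']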
@@ -1432,9 +1440,9 @@ def create_chat_demo(title=None, description=None):
             gr.Number(value=max_tokens, label='Max generated tokens (increase if want more generation)'),
             gr.Number(value=frequence_penalty, label='Frequency penalty (> 0 encourage new tokens over repeated tokens)'),
             gr.Number(value=presence_penalty, label='Presence penalty (> 0 encourage new tokens, < 0 encourage existing tokens)'),
+            gr.Textbox(value=sys_prompt, label='System prompt', lines=8, interactive=False),
             gr.Number(value=0, label='current_time', visible=False),
             # ! Remove the system prompt textbox to avoid jailbreaking
-            gr.Textbox(value=sys_prompt, label='System prompt', lines=8, interactive=False)
         ],
         examples=CHAT_EXAMPLES,
         cache_examples=False
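The input list now mirrors the new parameter order, with the system-prompt Textbox ahead of the hidden current_time Number. A self-contained sketch of the same wiring pattern, assuming a gr.ChatInterface-style demo (the respond function and widget defaults are illustrative, not the app's real ones):

    import gradio as gr

    def respond(message, history, max_tokens, system_prompt, current_time):
        # additional_inputs arrive positionally, mirroring the list below.
        return f"(tokens={max_tokens}, t={current_time}) {system_prompt}: {message}"

    demo = gr.ChatInterface(
        respond,
        additional_inputs=[
            gr.Number(value=1024, label='Max generated tokens'),
            # Shown but locked (interactive=False), so users cannot edit the
            # system prompt to jailbreak the model.
            gr.Textbox(value='You are a helpful assistant.',
                       label='System prompt', lines=8, interactive=False),
            # Hidden bookkeeping value; never exposed in the UI.
            gr.Number(value=0, label='current_time', visible=False),
        ],
    )

    if __name__ == '__main__':
        demo.launch()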