# chat-gpt-batch / app_en.py
# NOTE(review): the lines below were Hugging Face Space page chrome
# (author "hugforziio", commit c147947, "Update app_en.py", 9.63 kB,
# raw/history/blame links) accidentally captured with the source; they
# are kept here as comments so the file parses as valid Python.
# import gradio as gr
import gradio
# import lmdb
# import base64
# import io
# import random
# import time
import json
import copy
# import sqlite3
from urllib.parse import urljoin
import openai
from app_js import api_key__get_from_browser, api_key__save_to_browser, saved_prompts_refresh_btn__click_js, selected_saved_prompt_title__change_js, saved_prompts_delete_btn__click_js, saved_prompts_save_btn__click_js, copy_prompt__click_js, paste_prompt__click_js, chat_copy_history_btn__click_js, chat_copy_history_md_btn__click_js, api_key_refresh_btn__click_js, api_key_save_btn__click_js
from functions import sequential_chat_fn, make_history_file_fn, on_click_send_btn, clear_history, copy_history, update_saved_prompt_titles, save_prompt, load_saved_prompt
# Markdown shown in the "introduction" accordion at the top of the page:
# explains what the tool does, that users supply (and pay for) their own
# OpenAI API key, and the data-logging terms of the public demo space.
introduction = """<center><h2>ChatGPT Batch Tool</h2></center>
<center>Hello. This is a tool for sending messages to ChatGPT in bulk.</center>
<center>With this tool, you can plan and send multiple messages to ChatGPT at once.</center>
Please note:
1. In order to use this tool, you will need to provide your own API Key and assume any associated costs. We do not collect or store your API Key. You can obtain your API Key by visiting https://platform.openai.com/account/api-keys.
2. The space for this demo page is public. For research and code improvement purposes, we need to log the chat content sent through this page, meaning we can see your chat history with ChatGPT in the background. **<span style="color:#ff5656;">By continuing to use this tool on this page, you agree to allow us to view, use, and share your chat data.</span>** If you wish to avoid this, you can [make a copy of this tool to your own private space](https://huggingface.co/spaces/hugforziio/chat-gpt-batch?duplicate=true), which also eliminates waiting in a queue.
"""
# Custom CSS injected into the Blocks app:
# - widen table cell inputs
# - blur the API-key textbox until it is focused (shoulder-surfing guard)
# - spacing for <hr> separators in the chat log markdown
# - cap the chat-log wrapper at 80% viewport height with scrolling
css = """
.table-wrap .cell-wrap input {min-width:80%}
#api-key-textbox textarea {filter:blur(8px); transition: filter 0.25s}
#api-key-textbox textarea:focus {filter:none}
#chat-log-md hr {margin-top: 1rem; margin-bottom: 1rem;}
#chat-markdown-wrap-box {max-height:80vh; overflow: auto !important;}
"""
# Build the Gradio UI. Components are laid out in declaration order; `demo` is
# the resulting Blocks app launched at the bottom of the file.
# NOTE(review): the original file's indentation was lost in extraction; the
# nesting below is reconstructed from the context-manager structure — confirm
# against the upstream Space before relying on exact widget placement.
with gradio.Blocks(title="ChatGPT Batch Tool", css=css) as demo:
    with gradio.Accordion("introduction", open=True):
        gradio.Markdown(introduction)
    with gradio.Accordion("Basic settings", open=True):
        # Whether the system-level prompt is included with each request
        system_prompt_enabled = gradio.Checkbox(label='Enable System level Prompt', info='Whether to use the system level prompt for ChatGPT task description as "System"', value=True)
        # System prompt (the "system" role message describing the batch task)
        system_prompt = gradio.Textbox(label='System level Prompt', info='Description of the task for ChatGPT as "System"', value='You are a part-of-speech classifier. Users will send you a word and you should determine its part-of-speech, such as nouns, verbs, etc.!!Please note!! ⚠️Highest priority!!: You may only directly return the part-of-speech without any extra information. Do not explain why it is this part-of-speech, etc., otherwise the program used by the user will fail and cause serious losses to the user😱!!!')
        # User message template; the placeholder below is substituted per item
        user_message_template = gradio.Textbox(label='User Message Template', info='Template of messages to be sent in bulk', value='Word: ```___```')
        with gradio.Row():
            # Placeholder text replaced inside the user message template
            user_message_template_mask = gradio.Textbox(label='Template Placeholder', info='The part that needs to be replaced in the message template, can be a regular expression', value='___')
            # Whether the placeholder is interpreted as a regular expression
            user_message_template_mask_is_regex = gradio.Checkbox(label='Placeholder is regex', info='Is the placeholder in the message template a regular expression?', value=False)
        # Raw text containing every value to substitute into the template
        user_message_list_text = gradio.Textbox(label='User Message List', info='All messages to be sent', value='animals| trains| between| of| located| what are you doing')
        with gradio.Row():
            # Delimiter used to split the list text into individual messages
            user_message_list_text_splitter = gradio.Textbox(label='User Message Splitter', info='Splitter used to split user message list, such as comma (`,`), line feed (`\n`), or regular expressions', value='\\|\\s+')
            # Whether the delimiter is interpreted as a regular expression
            user_message_list_text_splitter_is_regex = gradio.Checkbox(label='Splitter is regex', info='Is the splitter for the user message list a regular expression?', value=True)
        # How many previous exchanges are sent as context with each request
        history_prompt_num = gradio.Slider(label="Number of History Records", info='How many previous history records to include when sending a message (for ChatGPT to understand the context)', value=0, minimum=0, maximum=12000)
        # load_config_from_browser = gradio.Button("🔄 Load Configuration from Browser")
        # save_config_to_browser = gradio.Button("💾 Save Configuration to Browser")
        # export_config_to_file = gradio.Button("📤 Export Configuration to File")
    # More parameters
    with gradio.Accordion("More settings", open=False):
        # Base sleep between consecutive requests (ms) — crude rate limiting
        sleep_base = gradio.Number(label='sleep between each message (ms)', value=700)
        # Random jitter (ms) added on top of the base sleep
        sleep_rand = gradio.Number(label='sleep float (ms)', value=200)
        # OpenAI chat-completion request parameters, passed through to the API
        prop_stream = gradio.Checkbox(label="use stream", value=True)
        prop_model = gradio.Textbox(label="model", value="gpt-3.5-turbo")
        prop_temperature = gradio.Slider(label="temperature", value=1, minimum=0, maximum=2)
        prop_top_p = gradio.Slider(label="top_p", value=1, minimum=0, maximum=1)
        prop_choices_num = gradio.Slider(label="choices num(n)", value=1, minimum=1, maximum=20)
        prop_max_tokens = gradio.Slider(label="max_tokens", value=-1, minimum=-1, maximum=4096)
        prop_presence_penalty = gradio.Slider(label="presence_penalty", value=0, minimum=-2, maximum=2)
        prop_frequency_penalty = gradio.Slider(label="frequency_penalty", value=0, minimum=-2, maximum=2)
        prop_logit_bias = gradio.Textbox(label="logit_bias", visible=False)
        pass
    # API key widgets (original comment was a Chinese phonetic spelling of "API Key")
    token_text = gradio.Textbox(visible=False)
    with gradio.Row():
        with gradio.Column(scale=10, min_width=100):
            api_key_text = gradio.Textbox(label="Your API key", placeholder="sk-...", elem_id="api-key-textbox")
        with gradio.Column(scale=1, min_width=100):
            api_key_load_btn = gradio.Button("🔄 Load from browser storage")
            # fn=None + _js: runs entirely in the browser to read the stored key
            api_key_load_btn.click(
                None,
                inputs=[],
                outputs=[api_key_text, token_text],
                _js=api_key__get_from_browser,
            )
        with gradio.Column(scale=1, min_width=100):
            api_key_save_btn = gradio.Button("💾 save to browser storage")
            # fn=None + _js: runs entirely in the browser to persist the key
            api_key_save_btn.click(
                None,
                inputs=[api_key_text, token_text],
                outputs=[api_key_text, token_text],
                _js=api_key__save_to_browser,
            )
            pass
        pass
    # Start button
    start_btn = gradio.Button(value='Run!')
    with gradio.Accordion(label="Chat log", elem_id='chat-markdown-wrap-box'):
        # Output area (hidden state holding the raw chat history)
        history = gradio.State(value=[])
        # Output area (rendered as markdown: stable part + streaming tail)
        history_md_stable = gradio.Markdown(value="🙂")
        history_md_stream = gradio.Markdown(value="🤖")
    with gradio.Accordion("Status"):
        tips = gradio.Markdown(value="ready")
    # Stop button
    stop_btn = gradio.Button(value='Stop!')
    with gradio.Accordion("Download", open=False):
        # gradio.Markdown("(Currently unable to download, possibly due to restrictions from Hugging Face. Will update later.)")
        make_file_btn = gradio.Button(value='Generate files')
        with gradio.Row(visible=False) as file_row:
            # Download area (json file)
            history_file_json = gradio.File(label='Download Json', interactive=False)
            # Download area (md file)
            history_file_md = gradio.File(label='Download Markdown', interactive=False)
            pass
        pass
    # Build downloadable JSON/Markdown transcripts from the history state and
    # reveal the (initially hidden) download row.
    make_file_btn.click(
        fn=make_history_file_fn,
        inputs=[history],
        outputs=[history_file_json, history_file_md, file_row],
    )
    # Kick off the batch run; the returned event handle lets the stop button
    # cancel it via `cancels=` below.
    start_event = start_btn.click(
        fn=sequential_chat_fn,
        inputs=[
            history,
            system_prompt_enabled,
            system_prompt,
            user_message_template,
            user_message_template_mask,
            user_message_template_mask_is_regex,
            user_message_list_text,
            user_message_list_text_splitter,
            user_message_list_text_splitter_is_regex,
            history_prompt_num,
            api_key_text, token_text,
            sleep_base,
            sleep_rand,
            prop_stream,
            prop_model,
            prop_temperature,
            prop_top_p,
            prop_choices_num,
            prop_max_tokens,
            prop_presence_penalty,
            prop_frequency_penalty,
            prop_logit_bias,
        ],
        outputs=[
            history,
            history_md_stable,
            history_md_stream,
            tips,
            file_row,
        ],
    )
    # Abort the in-flight batch run (no server-side fn; cancellation only)
    stop_btn.click(
        fn=None,
        inputs=[],
        outputs=[],
        cancels=[start_event],
    )
if __name__ == "__main__":
    # Enable the request queue (needed for streaming/cancellation; up to 200
    # concurrent workers) before starting the web server.
    queued_app = demo.queue(concurrency_count=200)
    queued_app.launch()