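# Gradio Space for Tencent's Hunyuan-Large chat model: streams completions
# from the Hunyuan ChatCompletions API through the Tencent Cloud Python SDK.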
import os
import json
from datetime import datetime

import gradio as gr
from tencentcloud.common import credential
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.common.profile.http_profile import HttpProfile
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.hunyuan.v20230901 import hunyuan_client, models
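

# Helper that prints a timestamped message; defined here but not called in the demo below.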
def print_now(msg):
    now = datetime.now()
    formatted_time = now.strftime("%Y-%m-%d %H:%M:%S.%f")
    print(f"{msg}:{formatted_time}")
    return formatted_time
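

# Build the message list from the chat history, call the Hunyuan ChatCompletions
# API with Stream=True, and yield the partially accumulated reply so Gradio can
# render it as it streams. SECRET_ID and SECRET_KEY must be set in the
# environment (e.g. as Space secrets) for the Tencent Cloud credential.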
def respond(
    message,
    history: list[tuple[str, str]],
    system_message=None,
    max_tokens=None,
    temperature=None,
    top_p=None,
):
    # Defaults added so gr.ChatInterface, which is built below without
    # additional_inputs and therefore passes only (message, history),
    # can call this function; the four extra parameters are not
    # forwarded to the API in this demo.
    try:
        default_system = 'You are a helpful assistant.'
        messages = [{"Role": "system", "Content": default_system}]

        # Credentials are read from the environment.
        secret_id = os.getenv('SECRET_ID')
        secret_key = os.getenv('SECRET_KEY')
        cred = credential.Credential(secret_id, secret_key)

        httpProfile = HttpProfile()
        httpProfile.endpoint = "hunyuan.tencentcloudapi.com"
        clientProfile = ClientProfile()
        clientProfile.httpProfile = httpProfile
        client = hunyuan_client.HunyuanClient(cred, "", clientProfile)
        req = models.ChatCompletionsRequest()

        # Replay previous turns, then append the new user message.
        for val in history:
            if val[0] and val[1]:
                messages.append({"Role": "user", "Content": val[0]})
                messages.append({"Role": "assistant", "Content": val[1]})
        messages.append({"Role": "user", "Content": message})

        params = {
            "Model": "hunyuan-large",
            "Messages": messages,
            "Stream": True,
            "StreamModeration": True,
            "EnableEnhancement": False,
        }
        req.from_json_string(json.dumps(params))
        resp = client.ChatCompletions(req)

        # Each streamed SSE event carries a JSON payload with the next content delta.
        response = ""
        for event in resp:
            data = json.loads(event['data'])
            token = data['Choices'][0]['Delta']['Content']
            response += token
            yield response
    except TencentCloudSDKException as err:
        raise gr.Error(f"Tencent Cloud SDK exception: {err}")
    except Exception as e:
        raise gr.Error(f"An error occurred: {str(e)}")

example_prompts = [
    ["How do I cook the tastiest Kung Pao chicken?"],
    ["Help me write an email sending my greetings to an old friend."],
    ["写一篇关于青春的五言绝句"],  # "Write a five-character quatrain about youth."
    ["一枚反面朝上的硬币,被翻转了15下后,它的上面是正面,这个说法正确吗?"]  # "A coin lying tails up is flipped 15 times; is it correct that it now shows heads?"
]
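
# Delimiters the chatbot renders as LaTeX math: $$…$$ and \[…\] for display
# equations, $…$ and \(…\) for inline math.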
latex_delimiters = [
    {"left": "$$", "right": "$$", "display": True},
    {"left": "\\[", "right": "\\]", "display": True},
    {"left": "$", "right": "$", "display": False},
    {"left": "\\(", "right": "\\)", "display": False},
]

chatbot = gr.Chatbot(latex_delimiters=latex_delimiters, scale=9)
demo = gr.ChatInterface(
    respond,
    title="Hunyuan-Large",
    examples=example_prompts,
    chatbot=chatbot,
)

if __name__ == "__main__":
    demo.queue(default_concurrency_limit=40)
    demo.launch(max_threads=40)
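
# To run this demo locally (a sketch; exact package names are an assumption):
# set SECRET_ID and SECRET_KEY in the environment, then
#     pip install gradio tencentcloud-sdk-python-hunyuan   # or the umbrella tencentcloud-sdk-python
#     python app.py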