import os
import gradio as gr
from huggingface_hub import InferenceClient

client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
def format_prompt(message, history):
    # Mixtral-8x7B-Instruct expects [INST] ... [/INST] turn markers; the prompt
    # must not end with the </s> end-of-sequence token, or generation stops early.
    system_prompt = (
        "I’m an AI chatbot named ChatSherman designed by a super clever student named ShermanAI "
        "at the Department of Electronic and Information Engineering at The Hong Kong Polytechnic "
        "University to help you with your engineering questions. Also, I can assist you with a "
        "wide range of problems."
    )
    prompt = f"<s>{system_prompt}"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
def generate(prompt, history, temperature=0.6, max_new_tokens=2048, top_p=0.95, repetition_penalty=1.2):
    temperature = float(temperature)
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )

    formatted_prompt = format_prompt(prompt, history)
    stream = client.text_generation(
        formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False
    )

    # Stream tokens back to the UI as they arrive.
    output = ""
    for response in stream:
        output += response.token.text
        yield output
examples = [
    ["What is ChatSherman, and how does it work?", []],
    ["Is my personal information and data safe when I use the ChatSherman chatbot?", []],
    ["What are some common applications of deep learning in engineering?", []],
]
gr.ChatInterface(
    fn=generate,
    chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
    title="ChatSherman",
    description="This is an AI chatbot powered by ShermanAI. Enter your question below to get started.",
    examples=examples,
    concurrency_limit=20,
).launch(show_api=False)
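For reference, the short sketch below shows the prompt string that format_prompt builds for a single prior exchange. It is not part of the Space itself, and the question/answer pair is made up purely for illustration.

# Hypothetical illustration (not part of the app): what format_prompt returns
# for one prior turn, given the [INST]/[/INST] markers used above.
history = [["What is ChatSherman?", "ChatSherman is a student-built AI chatbot."]]
print(format_prompt("How do I use it?", history))
# <s>I’m an AI chatbot named ChatSherman ...[INST] What is ChatSherman? [/INST] ChatSherman is a student-built AI chatbot.</s> [INST] How do I use it? [/INST]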