import gradio as gr
from huggingface_hub import InferenceClient

# Initialize the client with your desired model
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")


# Build the prompt: system message first, then prior turns, then the new user message
def format_prompt(message, history):
    prompt = "<s>"
    # Start the conversation with a system message
    prompt += "[INST] You are an AI Dermatologist designed to assist users with skin and hair care.[/INST]"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
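
# Illustrative example: with history = [("Hi", "Hello!")] and message = "I have dry skin",
# format_prompt returns:
#   <s>[INST] You are an AI Dermatologist ...[/INST][INST] Hi [/INST] Hello!</s> [INST] I have dry skin [/INST]
# Note: this is Mistral/Llama-style [INST] formatting; zephyr-7b-beta was trained on a
# different chat template, so treat this raw-prompt layout as a simplification.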

# Generate a streamed response with the AI Dermatologist context
def generate(
    prompt, history, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0
):
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2  # floor to avoid degenerate sampling at ~0
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,  # fixed seed for reproducibility
    )

    formatted_prompt = format_prompt(prompt, history)
    stream = client.text_generation(
        formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False
    )
    output = ""
    for response in stream:
        output += response.token.text
        yield output  # yield the cumulative text so the UI updates token by token
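
# Standalone usage sketch (hypothetical, outside the Gradio UI): each yielded
# value is the cumulative response so far, so the last one is the full reply.
#   for partial in generate("What helps with dry skin?", history=[]):
#       print(partial)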

# Customizable input controls for the chatbot interface.
# Note: the slider order must match the extra parameters of generate().
additional_inputs = [
    gr.Slider(
        label="Temperature",
        value=0.9,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values produce more diverse outputs",
    ),
    gr.Slider(
        label="Max new tokens",
        value=256,
        minimum=0,
        maximum=1024,
        step=64,
        interactive=True,
        info="The maximum number of new tokens to generate",
    ),
    gr.Slider(
        label="Top-p (nucleus sampling)",
        value=0.90,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values sample more low-probability tokens",
    ),
    gr.Slider(
        label="Repetition penalty",
        value=1.2,
        minimum=1.0,
        maximum=2.0,
        step=0.05,
        interactive=True,
        info="Penalize repeated tokens",
    ),
]

# Define the chatbot interface, with the AI Dermatologist system message baked into every prompt
gr.ChatInterface(
    fn=generate,
    chatbot=gr.Chatbot(
        show_label=False,
        show_share_button=False,
        show_copy_button=True,
        likeable=True,
        layout="panel",
    ),
    additional_inputs=additional_inputs,
    title="AI Dermatologist",
).launch(show_api=False)
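
# To run locally (assumed setup, not part of the original Space):
#   pip install gradio huggingface_hub
#   python app.py
# Calls to the hosted Inference API may require an access token, e.g.
#   client = InferenceClient("HuggingFaceH4/zephyr-7b-beta", token="hf_...")  # hypothetical token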