Spaces:
Sleeping
Sleeping
File size: 3,216 Bytes
149b1cf 76a04a0 149b1cf d9cff10 3816b51 149b1cf 76a04a0 149b1cf 76a04a0 e7dd125 1d59cf0 4690606 76a04a0 149b1cf 2d752aa 149b1cf bb274b0 48ca16c 149b1cf 1d59cf0 149b1cf 4cd9f6a 149b1cf 76a04a0 0d35832 b4c6995 a784624 aaa4875 1d59cf0 f5860b8 5b0f68e a7adda9 5b0f68e 149b1cf 76a04a0 149b1cf 76a04a0 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 |
import gradio as gr
from huggingface_hub import InferenceClient
"""
For more information on huggingface_hub Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""
# Previously pointed at a public model id; now targets a dedicated inference endpoint.
# client = InferenceClient("unsloth/Llama-3.2-1B-Instruct")
# Client for a private Hugging Face Inference Endpoint (US-East-1).
# NOTE(review): no token is passed explicitly — presumably HF_TOKEN is picked up
# from the Space's environment; confirm before deploying elsewhere.
client = InferenceClient( model="https://xxr4h3zabmrbgnct.us-east-1.aws.endpoints.huggingface.cloud")
def respond(
    message,
    history: list[tuple[str, str]],
):
    """Stream an assistant reply for *message* given the prior chat *history*.

    Parameters
    ----------
    message : str
        The latest user message.
    history : list[tuple[str, str]]
        Prior (user, assistant) turns as provided by ``gr.ChatInterface``.

    Yields
    ------
    str
        The accumulated response text so far (Gradio re-renders each yield).
    """
    # BUG FIX: the original assigned only the first fragment to
    # system_message — the following string literals were bare expression
    # statements, never concatenated. Parentheses make the implicit
    # concatenation actually happen, so the model receives the full prompt.
    system_message = (
        "You are a Dietician Assistant specializing in providing general guidance on diet, "
        "nutrition, and healthy eating habits. Answer questions thoroughly with scientifically "
        "backed advice, practical tips, and easy-to-understand explanations. Keep in mind that "
        "your role is to assist, not replace a registered dietitian, so kindly remind users to "
        "consult a professional for personalized advice when necessary."
    )
    max_tokens = 512

    # Rebuild the full conversation in OpenAI-style message dicts.
    messages = [{"role": "system", "content": system_message}]
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": message})

    response = ""
    # `chunk` (not `message`) — the original shadowed the user-message parameter.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        # Extra stop strings guard against the endpoint leaking chat-template markers.
        stop=["<|im_end|><|im_end|>", "<|im_end|>", "|im_end|>", "|im_start|>"],
    ):
        token = chunk.choices[0].delta.content
        # An empty/None delta is treated as end-of-stream, matching the
        # original behavior.
        if not token:
            break
        response += token
        # Strip stray '|' characters from partially-emitted stop markers.
        yield response.strip("|")
"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""
# Chat UI wired to `respond`; extra sampling controls stay commented out so
# end users cannot change generation parameters.
demo = gr.ChatInterface(
fn=respond,
#title="Hi there! I'm your Dietician Assistant, here to help you with general advice on diet, nutrition, and healthy eating habits. Let's explore your questions.",
title="Your Personal Dietician Assistant: Expert Guidance on Healthy Eating and Nutrition",
# Clickable starter prompts shown under the chat box.
examples=["How can I lose weight safely?",
"What is a healthy weight loss rate?",
"How do I meal prep efficiently?",],
# additional_inputs=[
#     gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
#     gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
#     gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
#     gr.Slider(
#         minimum=0.1,
#         maximum=1.0,
#         value=0.95,
#         step=0.05,
#         label="Top-p (nucleus sampling)",
#     ),
# ],
# Theme override: green (#64A149) title, message bubbles, and example-card
# hover state. NOTE(review): the svelte-* class hashes are version-specific
# to the installed Gradio build and will break on upgrade — confirm after
# bumping Gradio.
css=".svelte-7ddecg h1 {color: #64A149 !important; } .message{background-color: #64A149 !important; border-color: #64A149 !important;} .message p, .message * {color: white !important; } .example.svelte-1sepu1.svelte-1sepu1:hover {background-color: #64A149 !important; border-color: #64A149 !important;}"
)
# Launch the app only when executed as a script (Spaces runs this directly).
# FIX: removed a stray trailing "|" (scrape/paste residue) that made the
# original line a syntax error.
if __name__ == "__main__":
    demo.launch()