Spaces:
Sleeping
Sleeping
File size: 1,648 Bytes
89dbda5 c19d193 6aae614 9b5b26a feba4a3 89dbda5 feba4a3 89dbda5 8c01ffb 89dbda5 6aae614 ae7a494 89dbda5 e121372 89dbda5 feba4a3 89dbda5 13d500a 8c01ffb feba4a3 861422e 89dbda5 feba4a3 8c01ffb 8fe992b feba4a3 8c01ffb 89dbda5 861422e 8fe992b 89dbda5 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 |
from smolagents import CodeAgent, HfApiModel
import yaml
from tools.final_answer import FinalAnswerTool
from Gradio_UI import GradioUI
# System prompt: the agent must return only final, direct advice —
# no chain-of-thought, no code — so the UI shows clean plain-text tips.
# NOTE: hypertension threshold corrected from "> 140/90" to ">= 140/90";
# the previous wording left a reading of exactly 140/90 mmHg unclassified
# (Borderline tops out at 139/89, and clinically hypertension is >= 140/90).
system_prompt = (
    "You are a health and lifestyle advisor specializing in the early detection and prevention of hypertension. "
    "Diagnostic criteria: Normal BP is < 120/80 mmHg, Borderline BP is 120-139/80-89 mmHg, and Hypertension is >= 140/90 mmHg. "
    "Based solely on the user's details, provide only the final, direct, and concise lifestyle tips. "
    "Do NOT include any internal reasoning, chain-of-thought, or any code snippets in your output. "
    "Only output the final advice as plain text. For example, if the user mentions alcohol consumption, simply say: "
    "'Reduce alcohol intake, as it can raise blood pressure.'"
)
# The agent is given exactly one tool: the one that emits the final answer.
final_answer = FinalAnswerTool()

# Chat model backing the agent (hosted inference via HfApiModel).
model = HfApiModel(
    model_id='deepseek-ai/DeepSeek-R1-Distill-Qwen-32B',  # swap the checkpoint here if needed
    max_tokens=2096,
    temperature=0.5,
    custom_role_conversions=None,
)
# Load the agent's prompt templates shipped alongside this script.
# Encoding is pinned to UTF-8 so parsing does not depend on the
# platform's default locale encoding (e.g. cp1252 on Windows).
with open("prompts.yaml", 'r', encoding="utf-8") as stream:
    prompt_templates = yaml.safe_load(stream)
# Assemble the agent: identity first, then model/tools, then run limits.
agent = CodeAgent(
    name="Hypertension Prevention Advisor",  # NOTE(review): contains spaces — confirm smolagents accepts non-identifier names
    description=system_prompt,  # the system prompt doubles as the agent description
    model=model,
    tools=[final_answer],
    prompt_templates=prompt_templates,
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
)
# Serve the agent through its Gradio web interface.
ui = GradioUI(agent)
ui.launch()
|