File size: 1,648 Bytes
ebbad26
1709ed6
ebbad26
 
9b5b26a
1709ed6
ebbad26
 
1709ed6
 
 
 
 
89dbda5
8c01ffb
1709ed6
 
5c1340d
1709ed6
ebbad26
1709ed6
ebbad26
1709ed6
 
ebbad26
f1f3641
1709ed6
 
 
 
 
f235351
ebbad26
1709ed6
 
 
 
 
 
ebbad26
 
 
0172db2
1709ed6
f235351
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
from smolagents import CodeAgent, HfApiModel
import yaml
from tools.final_answer import FinalAnswerTool
from Gradio_UI import GradioUI

# System prompt for the advisor agent: final, direct lifestyle advice only —
# no chain-of-thought, no code in the output.
# Fix: the original criteria left 140/90 exactly unclassified
# (Borderline capped at 139/89, Hypertension started strictly above 140/90);
# hypertension is now ">= 140/90 mmHg" so every reading falls in one band.
# NOTE(review): thresholds follow the older JNC 7-style classification — confirm
# this is intentional (2017 ACC/AHA uses >= 130/80 for stage 1 hypertension).
system_prompt = (
    "You are a health and lifestyle advisor specializing in the early detection and prevention of hypertension. "
    "Diagnostic criteria: Normal BP is < 120/80 mmHg, Borderline BP is 120-139/80-89 mmHg, and Hypertension is >= 140/90 mmHg. "
    "Based solely on the user's details, provide only the final, direct, and concise lifestyle tips. "
    "Do NOT include any internal reasoning, chain-of-thought, or any code snippets in your output. "
    "Only output the final advice as plain text. For example, if the user mentions alcohol consumption, simply say: "
    "'Reduce alcohol intake, as it can raise blood pressure.'"
)

# Use only the final_answer tool.
# FinalAnswerTool is the single tool exposed to the agent below, so every run
# must terminate by emitting a final answer rather than invoking other tools.
final_answer = FinalAnswerTool()

# Set up your model.
model = HfApiModel(
    max_tokens=2096,  # NOTE(review): 2096 looks like a typo for 2048 — confirm intended limit.
    temperature=0.5,  # moderate sampling randomness for varied but stable advice phrasing
    model_id='deepseek-ai/DeepSeek-R1-Distill-Qwen-32B',  # Adjust if needed.
    custom_role_conversions=None,  # no remapping of chat roles before the API call
)

# Load prompt templates from the YAML file.
# encoding="utf-8" makes the read deterministic across platforms — the default
# text encoding is locale-dependent (e.g. cp1252 on Windows) and can corrupt
# non-ASCII characters in the templates.
# safe_load (not load) is kept deliberately: it refuses to construct arbitrary
# Python objects from the YAML.
with open("prompts.yaml", 'r', encoding="utf-8") as stream:
    prompt_templates = yaml.safe_load(stream)

# Initialize the CodeAgent with the updated system prompt.
agent = CodeAgent(
    model=model,
    tools=[final_answer],          # single-tool setup: only final_answer is available
    max_steps=6,                   # hard cap on agent iterations per query
    verbosity_level=1,
    grammar=None,                  # no constrained-decoding grammar
    planning_interval=None,        # no periodic re-planning step
    name="Hypertension Prevention Advisor",
    description=system_prompt,     # the system prompt doubles as the agent description
    prompt_templates=prompt_templates
)

# Launch the Gradio UI.
# Module-level side effect: importing this file starts the web app.
GradioUI(agent).launch()