Update app.py
app.py
CHANGED
@@ -1,58 +1,45 @@
-import yaml
 from smolagents import CodeAgent, HfApiModel
 from tools.final_answer import FinalAnswerTool
 from Gradio_UI import GradioUI

-#
 system_prompt = (
     "You are a health and lifestyle advisor specializing in the early detection and prevention of hypertension. "
-    "
-    "
 )

-def clean_response(text):
-    """
-    Simple cleanup function that removes extra whitespace and ensures proper formatting.
-    """
-    # Remove extra whitespace
-    text = ' '.join(text.split())
-    # Split into paragraphs for readability
-    paragraphs = text.split('\n\n')
-    cleaned_paragraphs = [p.strip() for p in paragraphs if p.strip()]
-    return '\n\n'.join(cleaned_paragraphs)
-
-# Load prompt templates from YAML
-with open("prompts.yaml", 'r') as stream:
-    prompt_templates = yaml.safe_load(stream)

-#
 model = HfApiModel(
-    max_tokens=
     temperature=0.5,
-    model_id='deepseek-ai/DeepSeek-R1-Distill-Qwen-32B
-
 )

-#
 agent = CodeAgent(
     model=model,
-    tools=[
-    max_steps=
-    verbosity_level=
-
     description=system_prompt,
     prompt_templates=prompt_templates
 )

-
-    """
-    Runs the agent and returns a clean, formatted response.
-    """
-    try:
-        response = agent.run(user_input)
-        return clean_response(response)
-    except Exception as e:
-        return f"I apologize, but I couldn't process your request. Please try again."
-
-# Launch the Gradio UI
 GradioUI(agent).launch()
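Note on the removed helper: clean_response collapsed all whitespace with ' '.join(text.split()) before splitting on '\n\n', so by the time the paragraph split ran there were no blank lines left and the result was always a single paragraph. A sketch that keeps paragraph breaks, shown purely for illustration (it is not part of this commit, and the helper no longer exists in the new file):

import re

def clean_response(text: str) -> str:
    """Normalize whitespace inside each paragraph while keeping blank-line breaks."""
    # Split on blank lines first, then collapse whitespace runs within each paragraph.
    paragraphs = re.split(r"\n\s*\n", text)
    cleaned = [" ".join(p.split()) for p in paragraphs if p.strip()]
    return "\n\n".join(cleaned)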
 from smolagents import CodeAgent, HfApiModel
+import yaml
 from tools.final_answer import FinalAnswerTool
 from Gradio_UI import GradioUI

+# Updated system prompt: provide only final, direct advice with no chain-of-thought or code.
 system_prompt = (
     "You are a health and lifestyle advisor specializing in the early detection and prevention of hypertension. "
+    "Diagnostic criteria: Normal BP is < 120/80 mmHg, Borderline BP is 120-139/80-89 mmHg, and Hypertension is > 140/90 mmHg. "
+    "Based solely on the user's details, provide only the final, direct, and concise lifestyle tips. "
+    "Do NOT include any internal reasoning, chain-of-thought, or any code snippets in your output. "
+    "Only output the final advice as plain text. For example, if the user mentions alcohol consumption, simply say: "
+    "'Reduce alcohol intake, as it can raise blood pressure.'"
 )

+# Use only the final_answer tool.
+final_answer = FinalAnswerTool()

+# Set up your model.
 model = HfApiModel(
+    max_tokens=2096,
     temperature=0.5,
+    model_id='deepseek-ai/DeepSeek-R1-Distill-Qwen-32B',  # Adjust if needed.
+    custom_role_conversions=None,
 )

+# Load prompt templates from the YAML file.
+with open("prompts.yaml", 'r') as stream:
+    prompt_templates = yaml.safe_load(stream)
+
+# Initialize the CodeAgent with the updated system prompt.
 agent = CodeAgent(
     model=model,
+    tools=[final_answer],
+    max_steps=6,
+    verbosity_level=1,
+    grammar=None,
+    planning_interval=None,
+    name="Hypertension Prevention Advisor",
     description=system_prompt,
     prompt_templates=prompt_templates
 )

+# Launch the Gradio UI.
 GradioUI(agent).launch()
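The new app.py goes straight to the Gradio UI. For a quick local check of the agent itself, one could run it once from Python before (or instead of) launching the UI; the snippet below is an illustrative sketch using the agent defined above, and the sample question is hypothetical, not part of the committed file:

# Hypothetical smoke test, assuming the same definitions as app.py are in scope.
if __name__ == "__main__":
    question = "I'm 45, my blood pressure is about 135/85 mmHg, and I drink alcohol most evenings. Any tips?"
    # CodeAgent.run() executes the agent loop and returns its final answer as text.
    print(agent.run(question))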