Update app.py

app.py (CHANGED)
@@ -12,20 +12,19 @@ import pickle
 from dataclasses import dataclass, asdict
 import hashlib
 from collections import defaultdict
-import re

 # Set page configuration
 st.set_page_config(
-    page_title="
     layout="wide",
     initial_sidebar_state="expanded",
-    page_icon="
 )

-# Enhanced CSS for
 st.markdown("""
 <style>
-    /* Medical
     html, body, .stApp, .main {
         background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
         color: #ffffff !important;
@@ -44,716 +43,554 @@ st.markdown("""
     .chat-container {
         background: rgba(255, 255, 255, 0.1) !important;
         border-radius: 15px !important;
-        padding:
         backdrop-filter: blur(10px) !important;
         border: 1px solid rgba(255, 255, 255, 0.2) !important;
         margin-bottom: 1rem !important;
-        max-height:
         overflow-y: auto !important;
-        min-height: 400px !important;
     }

-    .
         background: linear-gradient(45deg, #4CAF50, #66BB6A) !important;
         color: white !important;
-        padding:
-        border-radius:
-        margin: 0.
-        margin-left:
         box-shadow: 0 4px 15px rgba(76, 175, 80, 0.4) !important;
-        position: relative !important;
-    }
-
-    .patient-message::before {
-        content: "π€ Patient";
-        position: absolute;
-        top: -10px;
-        right: 15px;
-        background: #2E7D32;
-        color: white;
-        padding: 2px 8px;
-        border-radius: 10px;
-        font-size: 12px;
-        font-weight: bold;
     }

-    .
         background: rgba(255, 255, 255, 0.15) !important;
         color: white !important;
-        padding:
-        border-radius:
-        margin: 0.
-        margin-right:
         border-left: 4px solid #2196F3 !important;
         backdrop-filter: blur(5px) !important;
-        position: relative !important;
     }

-    .
-        left: 15px;
-        background: #1976D2;
-        color: white;
-        padding: 2px 8px;
-        border-radius: 10px;
-        font-size: 12px;
-        font-weight: bold;
-    }
-
-    .follow-up-questions {
-        background: rgba(33, 150, 243, 0.2) !important;
-        border: 1px solid rgba(33, 150, 243, 0.4) !important;
-        border-radius: 10px !important;
         padding: 1rem !important;
         margin: 0.5rem 0 !important;
     }

-    .
-        background: linear-gradient(45deg, #
         color: white !important;
-        padding: 0.5rem 1rem !important;
-        border-radius: 15px !important;
-        text-align: center !important;
-        margin: 1rem 0 !important;
-        font-weight: bold !important;
-    }
-
-    .typing-indicator {
-        background: rgba(255, 255, 255, 0.1) !important;
         padding: 1rem !important;
-        border-radius:
         margin: 0.5rem 0 !important;
-        margin-right: 3rem !important;
-        animation: pulse 1.5s infinite !important;
     }

-        border-left: 4px solid #FFC107 !important;
-        padding: 1rem !important;
-        border-radius: 8px !important;
-        margin: 0.5rem 0 !important;
-        font-size: 0.9em !important;
     }

-    .
-        background:
         color: white !important;
         border: none !important;
-        border-radius:
-        cursor: pointer !important;
         transition: all 0.3s ease !important;
     }

-    .
-        background: #4CAF50 !important;
         transform: translateY(-2px) !important;
     }

-    .
         padding: 1rem !important;
     }
 </style>
 """, unsafe_allow_html=True)

-@dataclass
-class ConversationState:
-    """Track the current state of the medical conversation"""
-    phase: str = "greeting"  # greeting, history_taking, symptom_exploration, assessment, recommendations, follow_up
-    patient_concerns: List[str] = None
-    symptoms_discussed: Dict[str, dict] = None
-    medical_history: Dict[str, any] = None
-    current_focus: str = ""
-    questions_asked: List[str] = None
-    patient_responses: Dict[str, str] = None
-    conversation_depth: int = 0
-    urgency_level: str = "routine"  # routine, urgent, emergency
-
-    def __post_init__(self):
-        if self.patient_concerns is None:
-            self.patient_concerns = []
-        if self.symptoms_discussed is None:
-            self.symptoms_discussed = {}
-        if self.medical_history is None:
-            self.medical_history = {}
-        if self.questions_asked is None:
-            self.questions_asked = []
-        if self.patient_responses is None:
-            self.patient_responses = {}
-
 @dataclass
 class ConversationEntry:
-    """
     timestamp: str
     user_input: str
     assistant_response: str
-    conversation_phase: str
     symptoms: List[str]
     severity_score: float
     confidence_score: float
     search_queries_used: List[str]
-    agent_insights: Dict[str, str]
-    user_feedback: Optional[int] = None
     was_helpful: Optional[bool] = None

-    def
-        self.
-        self.response_templates = self._load_response_templates()
-
-    def _load_conversation_patterns(self) -> Dict[str, List[str]]:
-        """Load conversational patterns for natural dialogue"""
-        return {
-            "greeting": [
-                "Hello! I'm here to help with your health concerns. What brings you in today?",
-                "Good to see you today. How are you feeling, and what can I help you with?",
-                "Welcome! I'd like to understand what's been concerning you about your health lately."
-            ],
-            "follow_up": [
-                "Can you tell me more about that?",
-                "When did you first notice this?",
-                "How has this been affecting your daily activities?",
-                "Have you noticed any patterns with this symptom?"
-            ],
-            "empathy": [
-                "I can understand how concerning that must be for you.",
-                "That does sound uncomfortable. Let's explore this further.",
-                "Thank you for sharing that with me. It helps me understand better."
-            ],
-            "clarification": [
-                "Just to make sure I understand correctly...",
-                "Let me clarify what you're experiencing...",
-                "I want to make sure I have the full picture..."
-            ]
-        }

-    def
-            "
-                "How long have you been experiencing this?",
-                "Does it happen at specific times of day?",
-                "Have you tried anything to relieve it?"
-            ],
-            "medical_history": [
-                "Do you have any chronic medical conditions?",
-                "Are you currently taking any medications?",
-                "Any family history of similar issues?",
-                "Have you had any surgeries or hospitalizations?",
-                "Any known allergies to medications?"
-            ],
-            "lifestyle": [
-                "How would you describe your stress levels lately?",
-                "How is your sleep quality?",
-                "Any recent changes in diet or exercise?",
-                "Do you smoke or drink alcohol?",
-                "Any recent travel or exposure to illness?"
-            ],
-            "associated_symptoms": [
-                "Are you experiencing any other symptoms alongside this?",
-                "Any fever, nausea, or dizziness?",
-                "How is your appetite?",
-                "Any changes in bowel movements or urination?",
-                "Any skin changes or rashes?"
-            ]
-        }

-    def
-        """
-            "conversation_guidance": self._suggest_conversation_direction(conversation_state),
-            "urgency_assessment": self._assess_urgency(query, conversation_state)
-        }
-        return

-    def
-        if any(keyword in query_lower for keyword in emergency_keywords):
-            state.urgency_level = "emergency"
-            return "emergency_response"
-
-        # Check for urgent indicators
-        urgent_keywords = ["intense", "sudden", "worsening", "spreading", "difficulty"]
-        if any(keyword in query_lower for keyword in urgent_keywords):
-            state.urgency_level = "urgent"
-            return "urgent_response"
-
-        # Determine conversation phase
-        if state.phase == "greeting":
-            return "initial_response"
-        elif state.phase == "history_taking":
-            return "information_gathering"
-        elif state.phase == "symptom_exploration":
-            return "detailed_exploration"
-        else:
-            return "general_response"
-
-    def _craft_specialized_response(self, query: str, state: ConversationState, search_results: str) -> str:
-        """Craft a specialized response based on agent expertise"""
-        response_parts.append(specialist_analysis)
-        if guidance:
-            response_parts.append(guidance)
-        response_parts.append("Please remember that this guidance is for informational purposes. For a proper diagnosis and treatment plan, it's important to consult with a healthcare provider.")

-    def
-        """
-                "How long have you been experiencing this?",
-                "Have you noticed any other symptoms?",
-                "Is this your first time having this issue?"
-            ])
-
-        # Limit to most relevant questions
-        return questions[:3]

-    def
-        """
-        }
-        return insights.get(self.specialization, "this warrants careful evaluation")

-class
-    """
     def __init__(self):
         self.llm = GroqLLM()
         self.search_tool = MedicalSearchTool()
-        self.agents = self.
-        self.conversation_state = ConversationState()
         self.conversation_history = []
         self.conversation_data = []

-    def
-        """Initialize
         return {
-            "
-            "
-            "
-            "
-            "
         }

-    def
-        """Process query
         timestamp = datetime.now().isoformat()
-        self.conversation_state.conversation_depth += 1
-
-        # Update conversation phase based on input and history
-        self._update_conversation_phase(user_input)
-
-        # Generate
-        main_response,
         )

-        for agent_name in supporting_agents:
-            agent = self.agents[agent_name]
-            _, _, insights = agent.generate_conversational_response(user_input, self.conversation_state, search_results)
-            supporting_insights[agent_name] = insights

-        self.
             timestamp=timestamp,
-            user_input=
-            assistant_response=
-            search_queries_used=[user_input] if search_results else [],
-            follow_up_questions=follow_up_questions,
-            agent_insights={primary_agent.agent_id: primary_insights, **supporting_insights}
         )

-        self.conversation_data.append(
         return {
-            'response':
-            'patient_summary': self._get_patient_summary(),
-            'search_performed': bool(search_results)
         }

-    def
-        """
-        if self.conversation_state.conversation_depth == 1:
-            self.conversation_state.phase = "greeting"
-        elif any(word in input_lower for word in ["history", "medical", "medication", "allergy"]):
-            self.conversation_state.phase = "history_taking"
-        elif any(word in input_lower for word in ["pain", "symptom", "feel", "hurt"]):
-            self.conversation_state.phase = "symptom_exploration"
-        elif any(word in input_lower for word in ["what should", "recommend", "treatment", "help"]):
-            self.conversation_state.phase = "recommendations"
-        elif self.conversation_state.conversation_depth > 3:
-            self.conversation_state.phase = "follow_up"
-
-    def _extract_patient_information(self, user_input: str):
-        """Extract and store patient information from conversation"""
-        input_lower = user_input.lower()
-
-        # Extract symptoms
-        symptom_patterns = {
-            'headache': ['headache', 'head pain', 'migraine'],
-            'fever': ['fever', 'temperature', 'hot', 'chills'],
-            'nausea': ['nausea', 'sick', 'throw up', 'vomit'],
-            'fatigue': ['tired', 'exhausted', 'fatigue', 'weak'],
-            'cough': ['cough', 'coughing'],
-            'pain': ['pain', 'hurt', 'ache', 'sore']
-        }
-
-                    'first_mentioned': datetime.now().isoformat(),
-                    'details': []
-                }
-                self.conversation_state.symptoms_discussed[symptom]['details'].append(user_input)
-
-        # Extract duration information
-        duration_patterns = [
-            r'(\d+)\s+(day|days|week|weeks|month|months)',
-            r'(yesterday|today|last night)',
-            r'(few days|several days|about a week)'
-        ]
-                break
-
-    def _select_primary_agent(self, user_input: str) -> ConversationalMedicalAgent:
-        """Select the primary agent to handle this conversation turn"""
-        input_lower = user_input.lower()
-
-        # Emergency situations
-        emergency_keywords = ["chest pain", "can't breathe", "emergency", "severe", "blood"]
-        if any(keyword in input_lower for keyword in emergency_keywords):
-            return self.agents["emergency_consultant"]
-
-        # Mental health focus
-        mental_keywords = ["stress", "anxiety", "depression", "worried", "scared", "panic"]
-        if any(keyword in input_lower for keyword in mental_keywords):
-            return self.agents["mental_health_counselor"]
-
-        # Wellness and prevention
-        wellness_keywords = ["prevent", "healthy", "lifestyle", "diet", "exercise"]
-        if any(keyword in input_lower for keyword in wellness_keywords):
-            return self.agents["wellness_coach"]
-
-        # Symptom analysis
-        symptom_keywords = ["symptom", "pain", "hurt", "feel", "experience"]
-        if any(keyword in input_lower for keyword in symptom_keywords):
-            return self.agents["symptom_specialist"]
-
-        # Default to primary physician
-        return self.agents["primary_physician"]
-
-    def _enhance_with_llm(self, user_input: str, agent_response: str, state: ConversationState) -> str:
-        """Enhance response with LLM for natural conversation"""
-        3. Asks relevant follow-up questions
-        4. Maintains a professional but warm tone
-        5. Always emphasizes the need for professional medical care
-        return agent_response  # Fallback to agent response
-
-    def _get_patient_summary(self) -> Dict:
-        """Generate a summary of the patient's current situation"""
-        return {
-            'chief_concerns': self.conversation_state.patient_concerns,
-            'symptoms_discussed': list(self.conversation_state.symptoms_discussed.keys()),
-            'conversation_depth': self.conversation_state.conversation_depth,
-            'current_phase': self.conversation_state.phase,
-            'urgency_level': self.conversation_state.urgency_level,
-            'questions_asked': len(self.conversation_state.questions_asked)
-        }
-
-# Continue with the rest of the implementation...
-# [The rest of the classes like GroqLLM, MedicalSearchTool would remain the same but with enhanced conversational capabilities]
-
-class GroqLLM:
-    """Medical-optimized LLM client with conversational enhancement"""
-
-    def __init__(self, model_name="openai/gpt-oss-20b"):
-        self.client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
-        self.model_name = model_name
-        self.medical_context = """
-        You are an empathetic AI medical assistant having a natural conversation with a patient.
-        Your responses should be:
-        - Conversational and warm, not clinical or robotic
-        - Empathetic to the patient's concerns
-        - Informative but accessible
-        - Always emphasizing the importance of professional medical care
-
-        Remember: This is a conversation, not a medical exam. Show genuine care and understanding.
-        """

-    def
-        """
-            context += f"\n\nRecent conversation:\n{chr(10).join(recent_history)}"
-
-        full_prompt = f"{context}\n\n{prompt}"
-                temperature=0.7,  # Higher temperature for more natural conversation
-                max_tokens=1000,
-                stream=False
-            )
-
-            response = completion.choices[0].message.content if completion.choices else "I'm here to help. Could you tell me more about what's concerning you?"
-            confidence = self._calculate_conversational_confidence(response, prompt)
-
-            return response, confidence
-
-        except Exception as e:
-            return "I'm here to listen and help. Could you share more about what's been concerning you?", 0.5

-    def
-        """
-        if any(word in response.lower() for word in empathy_words):
-            confidence += 0.2
-            confidence += 0.2
-            confidence += 0.1
-        return

-        self.medical_sources = [
-            "mayoclinic.org", "webmd.com", "healthline.com", "medlineplus.gov",
-            "nih.gov", "who.int", "cdc.gov", "ncbi.nlm.nih.gov"
-        ]
-
-    def search_medical_info(self, query: str, search_type: str = "symptoms") -> str:
-        """Search with conversational context awareness"""
-        try:
-            medical_queries = {
-                "symptoms": f"symptoms causes {query} patient concerns",
-                "treatment": f"treatment options {query} patient care",
-                "prevention": f"prevention {query} patient education",
-                "general": f"patient information {query} healthcare"
-            }
-
-            enhanced_query = medical_queries.get(search_type, medical_queries["general"])
-
-            search_results = list(self.ddgs.text(
-                enhanced_query,
-                max_results=5,
-                region='wt-wt',
-                safesearch='on'
-            ))
-
-            if not search_results:
-                return "I'll help guide our conversation, though I'd recommend discussing this with your healthcare provider for specific medical advice."
-
-            formatted_results = []
-            for idx, result in enumerate(search_results, 1):
-                title = result.get('title', 'No title')
-                snippet = result.get('body', 'No description')
-                url = result.get('href', 'No URL')
-
-                # Prioritize trusted medical sources
-                source_trust = "✓" if any(source in url for source in self.medical_sources) else ""
-
-                formatted_results.append(
-                    f"{idx}. {source_trust} {title}\n"
-                    f"   {snippet}\n"
-                    f"   Source: {url}\n"
-                )
-
-            return "\n".join(formatted_results)
-
-        except Exception as e:
-            return f"I'm having trouble accessing medical resources right now, but I can still help guide our conversation based on general medical knowledge."

-# Initialize session state
-if '
-    st.session_state.
 if 'chat_messages' not in st.session_state:
     st.session_state.chat_messages = []
-if 'conversation_context' not in st.session_state:
-    st.session_state.conversation_context = {}
-if 'typing' not in st.session_state:
-    st.session_state.typing = False

-medical_system = st.session_state.

 # Main interface
 st.markdown("""
 <div class="medical-header">
-    <h1
-    <p>
 </div>
 """, unsafe_allow_html=True)
@@ -761,294 +598,433 @@ st.markdown("""
 st.markdown("""
 <div class="warning-box">
 <h3>⚠️ Important Medical Disclaimer</h3>
-<p>
 </div>
 """, unsafe_allow_html=True)

-# Main
-with
-    st.markdown("### π¬
-
-    if medical_system.conversation_state.phase:
-        phase_display = {
-            "greeting": "π Getting to know you",
-            "history_taking": "π Understanding your background",
-            "symptom_exploration": "π Exploring your symptoms",
-            "assessment": "π§ Assessing the situation",
-            "recommendations": "π‘ Discussing next steps",
-            "follow_up": "π Following up on your concerns"
-        }
-        current_phase = phase_display.get(medical_system.conversation_state.phase, "π¬ In conversation")
-        st.markdown(f'<div class="conversation-phase">{current_phase}</div>', unsafe_allow_html=True)
-
-    # Chat container
     chat_container = st.container()
     with chat_container:
         st.markdown('<div class="chat-container">', unsafe_allow_html=True)

-        # Display conversation history
         for i, message in enumerate(st.session_state.chat_messages):
             if message["role"] == "user":
-                st.markdown(f'<div class="
             else:
-                st.markdown(f'<div class="
-
-                # Show agent insights if available
-                if "insights" in message:
-                    for agent, insight in message["insights"].items():
-                        if insight.get("specialization_note"):
-                            st.markdown(f'<div class="agent-insights"><strong>Specialist Note:</strong> {insight["specialization_note"]}</div>', unsafe_allow_html=True)

-        # Typing indicator
-        if st.session_state.typing:
-            st.markdown('<div class="typing-indicator">Dr. AI is thinking... π</div>', unsafe_allow_html=True)

         st.markdown('</div>', unsafe_allow_html=True)

-    # Chat input
-    st.
-            st.session_state.chat_messages
             st.rerun()
-
-    with starter_col2:
-        if st.button("π€ Ask about a health concern", key="starter_concern"):
-            starter_text = "I have a health concern I'd like to discuss."
-            st.session_state.chat_messages.append({"role": "user", "content": starter_text})
-            st.rerun()
-
-    with starter_col3:
-        if st.button("π©Ί Get a health checkup conversation", key="starter_checkup"):
-            starter_text = "I'd like to have a general health discussion."
-            st.session_state.chat_messages.append({"role": "user", "content": starter_text})
-            st.rerun()
-
-    # Main input area
-    user_input = st.text_area(
-        "What would you like to talk about?",
-        placeholder="For example: 'I've been having headaches for the past few days...' or 'I'm worried about some symptoms I'm having...'",
-        height=100,
-        key="main_input"
-    )
-
-    # Input controls
-    input_col1, input_col2, input_col3, input_col4 = st.columns([2, 1, 1, 2])
-
-    with input_col1:
-        send_message = st.button("π¬ Send Message", type="primary")
-
-    with input_col2:
-        if st.button("π New Topic"):
-            # Save current conversation but start new topic
-            medical_system.conversation_state.phase = "greeting"
-            medical_system.conversation_state.conversation_depth = 0
-            st.success("Ready to discuss a new topic!")
-
-    with input_col3:
-        if st.button("ποΈ Clear Chat"):
-            st.session_state.chat_messages = []
-            st.session_state.conversational_system = ConversationalMedicalSystem()
-            st.rerun()

-with
-    st.markdown("### π€
-
     st.markdown(f"""
-    <div class="
-        <h4
-        <p><strong>
-        <p><strong>
-        <p><strong>
-        <p><strong>
     </div>
     """, unsafe_allow_html=True)
-
-    if summary['symptoms_discussed']:
-        st.markdown("**Symptoms We've Discussed:**")
-        for symptom in summary['symptoms_discussed']:
-            st.markdown(f"• {symptom.title()}")
-
-    # Quick response suggestions
-    st.markdown("### π‘ Quick Responses")
-
-    quick_responses = [
-        "Yes, that's right",
-        "No, that's not quite right",
-        "Can you explain more?",
-        "I'm not sure about that",
-        "That makes sense",
-        "What should I do next?",
-        "How serious is this?",
-        "When should I see a doctor?"
-    ]
-
-    for response in quick_responses[:4]:  # Show only first 4
-        if st.button(response, key=f"quick_{response}", help="Quick response"):
-            st.session_state.chat_messages.append({"role": "user", "content": response})
-            st.rerun()

-    # Emergency reminder
-    st.markdown("""
-    <div style="background: rgba(244, 67, 54, 0.1); border: 2px solid #F44336; border-radius: 8px; padding: 1rem; margin: 1rem 0;">
-        <h4 style="color: #F44336;">π¨ Emergency?</h4>
-        <p style="color: white;">If you're having a medical emergency, please call emergency services immediately rather than using this chat.</p>
-    </div>
-    """, unsafe_allow_html=True)

 # Process user input
 if send_message and user_input:
     # Add user message
     st.session_state.chat_messages.append({"role": "user", "content": user_input})

-    # Show
-    st.
-
-    # Process the conversational query
-    with st.spinner("Having a thoughtful conversation..."):
-        result = medical_system.process_conversational_query(user_input)
-
-        # Create assistant message with all context
-        assistant_message = {
-            "role": "assistant",
-            "content": result['response'],
-            "follow_up_questions": result.get('follow_up_questions', []),
-            "insights": result.get('agent_insights', {}),
-            "phase": result.get('conversation_phase', ''),
-            "urgency": result.get('urgency_level', 'routine')
-        }

-    # Remove typing indicator
-    st.session_state.typing = False
     st.rerun()

-    if (
-
-    if st.
-        st.

 if medical_system.conversation_data:
-    total_exchanges = len(medical_system.conversation_data)
-    avg_response_length = np.mean([len(entry.assistant_response) for entry in medical_system.conversation_data])
-    phases_covered = set([entry.conversation_phase for entry in medical_system.conversation_data])
-
-    st.sidebar.metric("Total Exchanges", total_exchanges)
-    st.sidebar.metric("Avg Response Length", f"{avg_response_length:.0f} chars")
-    st.sidebar.metric("Conversation Phases", len(phases_covered))
-
-    # Conversation flow
-    st.sidebar.markdown("**Conversation Flow:**")
-    for entry in medical_system.conversation_data[-5:]:  # Last 5
-        st.sidebar.markdown(f"• {entry.conversation_phase.replace('_', ' ').title()}")
-
-    # Export conversation
-    if st.sidebar.button("πΎ Save Our Conversation"):
-        if st.session_state.chat_messages:
-            conversation_export = {
-                'timestamp': datetime.now().isoformat(),
-                'conversation_summary': st.session_state.conversation_context,
-                'messages': st.session_state.chat_messages,
-                'patient_summary': medical_system._get_patient_summary(),
-                'total_exchanges': medical_system.conversation_state.conversation_depth
-            }

-            st.

-# Health
 st.markdown("---")
-st.markdown("### π

 st.markdown("""
 """)

-with
 st.markdown("""
 """)

-#
 st.markdown("""
-<div style="text-align: center; padding: 2rem; opacity: 0.8;
-    <p><strong>
-    <p
-    <p><small
 </div>
 """, unsafe_allow_html=True)
 from dataclasses import dataclass, asdict
 import hashlib
 from collections import defaultdict

 # Set page configuration
 st.set_page_config(
+    page_title="MedAssist - AI Medical Preconsultation",
     layout="wide",
     initial_sidebar_state="expanded",
+    page_icon="π₯"
 )

+# Enhanced CSS for medical theme
 st.markdown("""
 <style>
+    /* Medical theme styling */
     html, body, .stApp, .main {
         background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
         color: #ffffff !important;

     .chat-container {
         background: rgba(255, 255, 255, 0.1) !important;
         border-radius: 15px !important;
+        padding: 1rem !important;
         backdrop-filter: blur(10px) !important;
         border: 1px solid rgba(255, 255, 255, 0.2) !important;
         margin-bottom: 1rem !important;
+        max-height: 500px !important;
         overflow-y: auto !important;
     }

+    .user-message {
         background: linear-gradient(45deg, #4CAF50, #66BB6A) !important;
         color: white !important;
+        padding: 1rem !important;
+        border-radius: 15px 15px 5px 15px !important;
+        margin: 0.5rem 0 !important;
+        margin-left: 2rem !important;
         box-shadow: 0 4px 15px rgba(76, 175, 80, 0.4) !important;
     }

+    .assistant-message {
         background: rgba(255, 255, 255, 0.15) !important;
         color: white !important;
+        padding: 1rem !important;
+        border-radius: 15px 15px 15px 5px !important;
+        margin: 0.5rem 0 !important;
+        margin-right: 2rem !important;
         border-left: 4px solid #2196F3 !important;
         backdrop-filter: blur(5px) !important;
     }

+    .agent-status-card {
+        background: rgba(255, 255, 255, 0.15) !important;
+        border: 1px solid rgba(255, 255, 255, 0.3) !important;
+        border-radius: 12px !important;
         padding: 1rem !important;
         margin: 0.5rem 0 !important;
+        backdrop-filter: blur(5px) !important;
     }

+    .evolution-metrics {
+        background: linear-gradient(45deg, #FF6B6B, #FF8E8E) !important;
         color: white !important;
         padding: 1rem !important;
+        border-radius: 10px !important;
         margin: 0.5rem 0 !important;
     }

+    .warning-box {
+        background: rgba(255, 152, 0, 0.2) !important;
+        border: 2px solid #FF9800 !important;
+        border-radius: 10px !important;
+        padding: 1.5rem !important;
+        margin: 1rem 0 !important;
+        color: white !important;
     }

+    .stButton > button {
+        background: linear-gradient(45deg, #2196F3, #64B5F6) !important;
         color: white !important;
         border: none !important;
+        border-radius: 25px !important;
+        font-weight: bold !important;
+        padding: 0.75rem 2rem !important;
         transition: all 0.3s ease !important;
     }

+    .stButton > button:hover {
         transform: translateY(-2px) !important;
+        box-shadow: 0 8px 25px rgba(33, 150, 243, 0.6) !important;
     }

+    .chat-input {
+        position: sticky !important;
+        bottom: 0 !important;
+        background: rgba(255, 255, 255, 0.1) !important;
         padding: 1rem !important;
+        border-radius: 15px !important;
+        backdrop-filter: blur(10px) !important;
+    }
+
+    .spinner {
+        border: 2px solid rgba(255, 255, 255, 0.3);
+        border-radius: 50%;
+        border-top: 2px solid #ffffff;
+        width: 20px;
+        height: 20px;
+        animation: spin 1s linear infinite;
+        display: inline-block;
+    }
+
+    @keyframes spin {
+        0% { transform: rotate(0deg); }
+        100% { transform: rotate(360deg); }
     }
 </style>
 """, unsafe_allow_html=True)
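The stylesheet above is injected once through st.markdown with unsafe_allow_html=True; the chat code later in the file then emits raw <div> markup that uses these class names. An illustrative one-liner (not part of the diff, message text invented) of how such a class is consumed:

# Illustrative only: rendering a bubble with one of the CSS classes defined above.
import streamlit as st
st.markdown(
    '<div class="user-message"><strong>You:</strong> I have a question</div>',
    unsafe_allow_html=True,
)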
 @dataclass
 class ConversationEntry:
+    """Data structure for storing conversation entries"""
     timestamp: str
     user_input: str
     assistant_response: str
     symptoms: List[str]
     severity_score: float
     confidence_score: float
     search_queries_used: List[str]
+    user_feedback: Optional[int] = None  # 1-5 rating
     was_helpful: Optional[bool] = None

+@dataclass
+class AgentPerformance:
+    """Track agent performance metrics"""
+    agent_name: str
+    total_queries: int = 0
+    successful_responses: int = 0
+    average_confidence: float = 0.0
+    user_satisfaction: float = 0.0
+    learning_rate: float = 0.01
+    expertise_areas: Dict[str, float] = None

+    def __post_init__(self):
+        if self.expertise_areas is None:
+            self.expertise_areas = defaultdict(float)
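Since asdict is imported alongside dataclass at the top of the file, entries like these flatten cleanly to plain dictionaries for logging or export. A minimal standalone sketch of that round trip (the dataclass is repeated here only so the snippet runs on its own; the field values are invented):

from dataclasses import dataclass, asdict
from typing import List, Optional
import json

@dataclass
class ConversationEntry:
    timestamp: str
    user_input: str
    assistant_response: str
    symptoms: List[str]
    severity_score: float
    confidence_score: float
    search_queries_used: List[str]
    user_feedback: Optional[int] = None
    was_helpful: Optional[bool] = None

entry = ConversationEntry(
    timestamp="2024-01-01T12:00:00",
    user_input="I have a headache",
    assistant_response="...",
    symptoms=["headache"],
    severity_score=2.5,
    confidence_score=0.8,
    search_queries_used=["I have a headache"],
)
print(json.dumps(asdict(entry), indent=2))  # dataclass -> dict -> JSON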
+class MedicalSearchTool:
+    """Enhanced medical search tool with domain-specific optimization"""
+
+    def __init__(self):
+        self.ddgs = DDGS()
+        self.medical_sources = [
+            "mayoclinic.org", "webmd.com", "healthline.com", "medlineplus.gov",
+            "nih.gov", "who.int", "cdc.gov", "ncbi.nlm.nih.gov"
+        ]
+
+    def search_medical_info(self, query: str, search_type: str = "symptoms") -> str:
+        """Search for medical information with safety considerations"""
+        try:
+            # Add medical context to search
+            medical_queries = {
+                "symptoms": f"medical symptoms {query} causes diagnosis",
+                "treatment": f"medical treatment {query} therapy options",
+                "prevention": f"disease prevention {query} health tips",
+                "general": f"medical information {query} health facts"
+            }
+
+            enhanced_query = medical_queries.get(search_type, medical_queries["general"])
+
+            # Perform search with medical focus
+            search_results = list(self.ddgs.text(
+                enhanced_query,
+                max_results=5,
+                region='wt-wt',
+                safesearch='on'
+            ))
+
+            if not search_results:
+                return "No relevant medical information found. Please consult with a healthcare professional."
+
+            # Filter and format results with medical authority preference
+            formatted_results = []
+            for idx, result in enumerate(search_results, 1):
+                title = result.get('title', 'No title')
+                snippet = result.get('body', 'No description')
+                url = result.get('href', 'No URL')
+
+                # Prioritize trusted medical sources
+                source_trust = "✓" if any(source in url for source in self.medical_sources) else ""
+
+                formatted_results.append(
+                    f"{idx}. {source_trust} {title}\n"
+                    f"   Information: {snippet}\n"
+                    f"   Source: {url}\n"
+                )
+
+            return "\n".join(formatted_results)
+
+        except Exception as e:
+            return f"Search temporarily unavailable: {str(e)}"
|
225 |
+
class GroqLLM:
|
226 |
+
"""Medical-optimized LLM client"""
|
227 |
+
|
228 |
+
def __init__(self, model_name="openai/gpt-oss-20b"):
|
229 |
+
self.client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
|
230 |
+
self.model_name = model_name
|
231 |
+
self.medical_context = """
|
232 |
+
You are a medical AI assistant for preconsultation guidance.
|
233 |
+
IMPORTANT: Always remind users that this is not a substitute for professional medical advice.
|
234 |
+
Provide helpful information while emphasizing the need for proper medical consultation.
|
235 |
+
"""
|
236 |
+
|
237 |
+
def generate_response(self, prompt: str, conversation_history: List[str] = None) -> Tuple[str, float]:
|
238 |
+
"""Generate response with confidence scoring"""
|
239 |
+
try:
|
240 |
+
# Build context with conversation history
|
241 |
+
context = self.medical_context
|
242 |
+
if conversation_history:
|
243 |
+
context += f"\n\nConversation History:\n{chr(10).join(conversation_history[-5:])}"
|
244 |
+
|
245 |
+
full_prompt = f"{context}\n\nUser Query: {prompt}\n\nPlease provide helpful medical guidance while emphasizing the importance of professional medical consultation."
|
246 |
+
|
247 |
+
completion = self.client.chat.completions.create(
|
248 |
+
model=self.model_name,
|
249 |
+
messages=[{"role": "user", "content": full_prompt}],
|
250 |
+
temperature=0.3, # Lower temperature for medical accuracy
|
251 |
+
max_tokens=1500,
|
252 |
+
stream=False
|
253 |
+
)
|
254 |
+
|
255 |
+
response = completion.choices[0].message.content if completion.choices else "Unable to generate response"
|
256 |
+
|
257 |
+
# Calculate confidence score based on response characteristics
|
258 |
+
confidence = self._calculate_confidence(response, prompt)
|
259 |
+
|
260 |
+
return response, confidence
|
261 |
+
|
262 |
+
except Exception as e:
|
263 |
+
return f"LLM temporarily unavailable: {str(e)}", 0.0
|
264 |
|
265 |
+
def _calculate_confidence(self, response: str, query: str) -> float:
|
266 |
+
"""Calculate confidence score based on response quality"""
|
267 |
+
confidence_factors = 0.0
|
268 |
|
269 |
+
# Check for medical disclaimers (increases confidence in safety)
|
270 |
+
if any(phrase in response.lower() for phrase in ["consult", "doctor", "medical professional", "healthcare provider"]):
|
271 |
+
confidence_factors += 0.3
|
272 |
|
273 |
+
# Check response length (adequate detail)
|
274 |
+
if 200 <= len(response) <= 1000:
|
275 |
+
confidence_factors += 0.2
|
276 |
|
277 |
+
# Check for structured information
|
278 |
+
if any(marker in response for marker in ["1.", "β’", "-", "**"]):
|
279 |
+
confidence_factors += 0.2
|
280 |
|
281 |
+
# Check for balanced information (not overly certain)
|
282 |
+
if any(phrase in response.lower() for phrase in ["may", "might", "could", "possible", "typically"]):
|
283 |
+
confidence_factors += 0.3
|
|
|
|
|
|
|
284 |
|
285 |
+
return min(confidence_factors, 1.0)
|
286 |
+
|
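The confidence heuristic above is purely additive and capped at 1.0: a response that carries a consult-a-doctor disclaimer, stays between 200 and 1000 characters, uses list markers, and hedges with words like "may" scores the full 1.0. A quick standalone check of that arithmetic (the scoring logic is restated so no API key or network is needed; the sample text is invented):

# Standalone re-statement of GroqLLM._calculate_confidence for a sanity check.
def calculate_confidence(response: str) -> float:
    score = 0.0
    if any(p in response.lower() for p in ["consult", "doctor", "medical professional", "healthcare provider"]):
        score += 0.3  # safety disclaimer present
    if 200 <= len(response) <= 1000:
        score += 0.2  # adequate level of detail
    if any(m in response for m in ["1.", "•", "-", "**"]):
        score += 0.2  # structured information
    if any(p in response.lower() for p in ["may", "might", "could", "possible", "typically"]):
        score += 0.3  # appropriately hedged language
    return min(score, 1.0)

sample = (
    "Tension headaches may be triggered by stress or poor sleep. "
    "1. Rest and hydration can help. 2. Over-the-counter options exist. "
    "If symptoms are severe or sudden, seek urgent medical care. "
    "Please consult a doctor if the pain persists or worsens."
)
print(calculate_confidence(sample))  # 0.3 + 0.2 + 0.2 + 0.3 = 1.0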
+class EvolutionaryMedicalAgent:
+    """Evolutionary agent with reinforcement learning capabilities"""

+    def __init__(self, agent_id: str, specialization: str):
+        self.agent_id = agent_id
+        self.specialization = specialization
+        self.performance = AgentPerformance(agent_name=agent_id)
+        self.knowledge_base = defaultdict(float)
+        self.response_patterns = {}
+        self.learning_memory = []

+    def process_query(self, query: str, context: str, search_results: str) -> Tuple[str, float]:
+        """Process query and adapt based on specialization"""
+
+        # Update query count
+        self.performance.total_queries += 1

+        # Extract key terms for learning
+        key_terms = self._extract_medical_terms(query)
+
+        # Build specialized response based on agent's expertise
+        specialized_prompt = f"""
+        As a {self.specialization} specialist, analyze this medical query:
+        Query: {query}
+        Context: {context}
+        Search Results: {search_results}

+        Provide specialized insights based on your expertise in {self.specialization}.
+        Always emphasize the need for professional medical consultation.
+        """

+        # Simulate processing (in real implementation, this would use the LLM)
+        response = f"Based on my specialization in {self.specialization}, {query.lower()} suggests several considerations. However, please consult with a healthcare professional for proper diagnosis and treatment."

+        confidence = 0.7 + (self.performance.average_confidence * 0.3)

+        # Update expertise in relevant areas
+        for term in key_terms:
+            self.knowledge_base[term] += 0.1
+
+        return response, confidence

+    def update_from_feedback(self, query: str, response: str, feedback_score: int, was_helpful: bool):
+        """Update agent based on user feedback (reinforcement learning)"""
+
+        # Calculate reward signal
+        reward = (feedback_score - 3) / 2  # Convert 1-5 scale to -1 to 1
+        if was_helpful:
+            reward += 0.2
+
+        # Update performance metrics
+        if feedback_score >= 3:
+            self.performance.successful_responses += 1
+
+        # Update satisfaction and confidence
+        self.performance.user_satisfaction = (
+            (self.performance.user_satisfaction * (self.performance.total_queries - 1) + feedback_score) /
+            self.performance.total_queries
+        )

+        # Store learning memory
+        self.learning_memory.append({
+            'query': query,
+            'response': response,
+            'reward': reward,
+            'timestamp': datetime.now().isoformat()
+        })
+
+        # Adapt learning rate based on performance
+        if self.performance.user_satisfaction > 4.0:
+            self.performance.learning_rate *= 0.95  # Slow down learning when performing well
+        elif self.performance.user_satisfaction < 3.0:
+            self.performance.learning_rate *= 1.1  # Speed up learning when performing poorly
+
+        # Update expertise areas based on feedback
+        terms = self._extract_medical_terms(query)
+        for term in terms:
+            self.knowledge_base[term] += reward * self.performance.learning_rate

+    def _extract_medical_terms(self, text: str) -> List[str]:
+        """Extract medical terms from text for learning"""
+        medical_keywords = [
+            'pain', 'fever', 'headache', 'nausea', 'fatigue', 'cough', 'cold', 'flu',
+            'diabetes', 'hypertension', 'infection', 'allergy', 'asthma', 'arthritis',
+            'anxiety', 'depression', 'insomnia', 'migraine', 'rash', 'swelling'
+        ]
+
+        found_terms = []
+        text_lower = text.lower()
+        for term in medical_keywords:
+            if term in text_lower:
+                found_terms.append(term)
+        return found_terms
+
+    def get_expertise_summary(self) -> Dict:
+        """Get summary of agent's learned expertise"""
+        return {
+            'specialization': self.specialization,
+            'total_queries': self.performance.total_queries,
+            'success_rate': (self.performance.successful_responses / max(1, self.performance.total_queries)) * 100,
+            'user_satisfaction': self.performance.user_satisfaction,
+            'learning_rate': self.performance.learning_rate,
+            'top_expertise_areas': dict(sorted(self.knowledge_base.items(), key=lambda x: x[1], reverse=True)[:5])
+        }
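The feedback update above maps the 1-5 rating to a reward in [-1, 1] via (score - 3) / 2, adds 0.2 when the answer was marked helpful, and then nudges each extracted term's weight by reward * learning_rate. A small numeric illustration of that mapping (plain arithmetic, no dependencies):

# Reward mapping used by update_from_feedback: (score - 3) / 2, plus 0.2 if helpful.
for score in (1, 2, 3, 4, 5):
    base = (score - 3) / 2
    print(score, base, base + 0.2)
# 1 -> -1.0 (or -0.8 if marked helpful)
# 3 ->  0.0 (or  0.2)
# 5 ->  1.0 (or  1.2)
# With the default learning_rate of 0.01, a 5/5 helpful rating therefore adds
# 1.2 * 0.01 = 0.012 to each medical term extracted from the query.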
392 |
+
class MedicalConsultationSystem:
|
393 |
+
"""Main medical consultation system with evolutionary agents"""
|
394 |
|
395 |
def __init__(self):
|
396 |
self.llm = GroqLLM()
|
397 |
self.search_tool = MedicalSearchTool()
|
398 |
+
self.agents = self._initialize_agents()
|
|
|
399 |
self.conversation_history = []
|
400 |
self.conversation_data = []
|
401 |
|
402 |
+
def _initialize_agents(self) -> Dict[str, EvolutionaryMedicalAgent]:
|
403 |
+
"""Initialize specialized medical agents"""
|
404 |
return {
|
405 |
+
"general_practitioner": EvolutionaryMedicalAgent("gp", "General Practice Medicine"),
|
406 |
+
"symptom_analyzer": EvolutionaryMedicalAgent("symptom", "Symptom Analysis and Triage"),
|
407 |
+
"wellness_advisor": EvolutionaryMedicalAgent("wellness", "Preventive Care and Wellness"),
|
408 |
+
"mental_health": EvolutionaryMedicalAgent("mental", "Mental Health and Psychology"),
|
409 |
+
"emergency_assessor": EvolutionaryMedicalAgent("emergency", "Emergency Assessment and Urgent Care")
|
410 |
}
|
411 |
|
412 |
+
def process_medical_query(self, user_query: str) -> Dict:
|
413 |
+
"""Process medical query through evolutionary agent system"""
|
414 |
|
415 |
timestamp = datetime.now().isoformat()
|
|
|
|
|
|
|
|
|
416 |
|
417 |
+
# Determine which agents should handle this query
|
418 |
+
relevant_agents = self._select_relevant_agents(user_query)
|
419 |
|
420 |
+
# Search for medical information
|
421 |
+
search_results = self.search_tool.search_medical_info(user_query, "symptoms")
|
422 |
|
423 |
+
# Build conversation context
|
424 |
+
context = "\n".join(self.conversation_history[-3:]) if self.conversation_history else ""
|
425 |
|
426 |
+
# Get responses from relevant agents
|
427 |
+
agent_responses = {}
|
428 |
+
for agent_name in relevant_agents:
|
429 |
+
agent = self.agents[agent_name]
|
430 |
+
response, confidence = agent.process_query(user_query, context, search_results)
|
431 |
+
agent_responses[agent_name] = {
|
432 |
+
'response': response,
|
433 |
+
'confidence': confidence,
|
434 |
+
'specialization': agent.specialization
|
435 |
+
}
|
436 |
|
437 |
+
# Generate main LLM response
|
438 |
+
main_response, main_confidence = self.llm.generate_response(
|
439 |
+
f"{user_query}\n\nRelevant Information: {search_results}",
|
440 |
+
self.conversation_history
|
441 |
)
|
442 |
|
443 |
+
# Combine responses intelligently
|
444 |
+
final_response = self._combine_responses(main_response, agent_responses)
|
|
|
|
|
|
|
|
|
445 |
|
446 |
+
# Update conversation history
|
447 |
+
self.conversation_history.extend([
|
448 |
+
f"User: {user_query}",
|
449 |
+
f"Assistant: {final_response}"
|
450 |
+
])
|
451 |
|
452 |
+
# Extract symptoms for analysis
|
453 |
+
symptoms = self._extract_symptoms(user_query)
|
454 |
+
severity_score = self._assess_severity(user_query, symptoms)
|
455 |
|
456 |
+
# Store conversation data
|
457 |
+
conversation_entry = ConversationEntry(
|
458 |
timestamp=timestamp,
|
459 |
+
user_input=user_query,
|
460 |
+
assistant_response=final_response,
|
461 |
+
symptoms=symptoms,
|
462 |
+
severity_score=severity_score,
|
463 |
+
confidence_score=main_confidence,
|
464 |
+
search_queries_used=[user_query]
|
|
|
|
|
|
|
465 |
)
|
466 |
|
467 |
+
self.conversation_data.append(conversation_entry)
|
468 |
|
469 |
return {
|
470 |
+
'response': final_response,
|
471 |
+
'confidence': main_confidence,
|
472 |
+
'severity_score': severity_score,
|
473 |
+
'symptoms_detected': symptoms,
|
474 |
+
'agents_consulted': relevant_agents,
|
475 |
+
'agent_responses': agent_responses,
|
476 |
+
'search_performed': True
|
|
|
|
|
477 |
}
|
478 |
|
479 |
+
def _select_relevant_agents(self, query: str) -> List[str]:
|
480 |
+
"""Select most relevant agents for the query"""
|
481 |
+
query_lower = query.lower()
|
482 |
+
relevant_agents = ["general_practitioner"] # Always include GP
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
483 |
|
484 |
+
# Mental health keywords
|
485 |
+
mental_health_keywords = ["stress", "anxiety", "depression", "sleep", "mood", "worry", "panic", "sad"]
|
486 |
+
if any(keyword in query_lower for keyword in mental_health_keywords):
|
487 |
+
relevant_agents.append("mental_health")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
488 |
|
489 |
+
# Emergency keywords
|
490 |
+
emergency_keywords = ["severe", "intense", "emergency", "urgent", "chest pain", "difficulty breathing", "blood"]
|
491 |
+
if any(keyword in query_lower for keyword in emergency_keywords):
|
492 |
+
relevant_agents.append("emergency_assessor")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
493 |
|
494 |
+
# Wellness keywords
|
495 |
+
wellness_keywords = ["prevention", "healthy", "nutrition", "exercise", "lifestyle", "diet"]
|
496 |
+
if any(keyword in query_lower for keyword in wellness_keywords):
|
497 |
+
relevant_agents.append("wellness_advisor")
|
498 |
|
499 |
+
# Always include symptom analyzer for health queries
|
500 |
+
if any(keyword in query_lower for keyword in ["pain", "ache", "hurt", "symptom", "feel"]):
|
501 |
+
relevant_agents.append("symptom_analyzer")
|
502 |
|
503 |
+
return list(set(relevant_agents))
|
504 |
+
|
505 |
+
def _combine_responses(self, main_response: str, agent_responses: Dict) -> str:
|
506 |
+
"""Intelligently combine responses from multiple agents"""
|
507 |
+
if not agent_responses:
|
508 |
+
return main_response
|
509 |
|
510 |
+
combined = main_response + "\n\n**Specialist Insights:**\n"
|
511 |
+
for agent_name, data in agent_responses.items():
|
512 |
+
if data['confidence'] > 0.6: # Only include confident responses
|
513 |
+
combined += f"\nβ’ **{data['specialization']}**: {data['response'][:200]}...\n"
|
514 |
|
515 |
+
return combined
|
516 |
+
|
517 |
+
def _extract_symptoms(self, query: str) -> List[str]:
|
518 |
+
"""Extract symptoms from user query"""
|
519 |
+
common_symptoms = [
|
520 |
+
'fever', 'headache', 'nausea', 'pain', 'cough', 'fatigue', 'dizziness',
|
521 |
+
'rash', 'swelling', 'shortness of breath', 'chest pain', 'abdominal pain'
|
522 |
+
]
|
523 |
|
524 |
+
query_lower = query.lower()
|
525 |
+
detected_symptoms = [symptom for symptom in common_symptoms if symptom in query_lower]
|
526 |
+
return detected_symptoms
|
527 |
+
|
528 |
+
def _assess_severity(self, query: str, symptoms: List[str]) -> float:
|
529 |
+
"""Assess severity of reported symptoms (0-10 scale)"""
|
530 |
+
severity_score = 0.0
|
531 |
+
query_lower = query.lower()
|
532 |
|
533 |
+
# High severity indicators
|
534 |
+
high_severity = ["severe", "intense", "unbearable", "emergency", "chest pain", "difficulty breathing"]
|
535 |
+
medium_severity = ["moderate", "persistent", "recurring", "worse", "concerning"]
|
|
|
|
|
|
|
536 |
|
537 |
+
if any(indicator in query_lower for indicator in high_severity):
|
538 |
+
severity_score += 7.0
|
539 |
+
elif any(indicator in query_lower for indicator in medium_severity):
|
540 |
+
severity_score += 4.0
|
541 |
+
else:
|
542 |
+
severity_score += 2.0
|
543 |
|
544 |
+
# Add points for multiple symptoms
|
545 |
+
severity_score += min(len(symptoms) * 0.5, 2.0)
|
546 |
+
|
547 |
+
return min(severity_score, 10.0)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
548 |
|
549 |
+
def update_agent_performance(self, query_index: int, feedback_score: int, was_helpful: bool):
|
550 |
+
"""Update agent performance based on user feedback"""
|
551 |
+
if query_index < len(self.conversation_data):
|
552 |
+
entry = self.conversation_data[query_index]
|
553 |
+
entry.user_feedback = feedback_score
|
554 |
+
entry.was_helpful = was_helpful
|
|
|
|
|
|
|
555 |
|
556 |
+
# Update all agents that were involved in this query
|
557 |
+
for agent in self.agents.values():
|
558 |
+
agent.update_from_feedback(entry.user_input, entry.assistant_response, feedback_score, was_helpful)
|

+    def get_system_metrics(self) -> Dict:
+        """Get comprehensive system performance metrics"""
+        total_conversations = len(self.conversation_data)

+        if total_conversations == 0:
+            return {"status": "No conversations yet"}

+        avg_confidence = np.mean([entry.confidence_score for entry in self.conversation_data])
+        avg_severity = np.mean([entry.severity_score for entry in self.conversation_data])

+        feedback_entries = [entry for entry in self.conversation_data if entry.user_feedback is not None]
+        avg_feedback = np.mean([entry.user_feedback for entry in feedback_entries]) if feedback_entries else 0

+        return {
+            "total_conversations": total_conversations,
+            "average_confidence": avg_confidence,
+            "average_severity": avg_severity,
+            "average_user_feedback": avg_feedback,
+            "agent_performance": {name: agent.get_expertise_summary() for name, agent in self.agents.items()}
+        }
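+    # Shape of the returned dict (illustrative values): {"total_conversations": 3,
+    # "average_confidence": 0.72, "average_severity": 4.3, "average_user_feedback": 4.0,
+    # "agent_performance": {<agent name>: <get_expertise_summary() dict>, ...}}.
+    # The System Metrics panel and the analytics dashboard below read these keys directly.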

+# Initialize session state
+if 'medical_system' not in st.session_state:
+    st.session_state.medical_system = MedicalConsultationSystem()
 if 'chat_messages' not in st.session_state:
     st.session_state.chat_messages = []

+medical_system = st.session_state.medical_system

 # Main interface
 st.markdown("""
 <div class="medical-header">
+    <h1>π₯ MedAssist - AI Medical Preconsultation</h1>
+    <p>Advanced AI-powered medical guidance with evolutionary learning agents</p>
 </div>
 """, unsafe_allow_html=True)

 st.markdown("""
 <div class="warning-box">
     <h3>β οΈ Important Medical Disclaimer</h3>
+    <p>This AI system provides general health information and is NOT a substitute for professional medical advice, diagnosis, or treatment. Always consult with qualified healthcare professionals for medical concerns. In case of emergency, contact emergency services immediately.</p>
 </div>
 """, unsafe_allow_html=True)

+# Main layout
+col1, col2 = st.columns([3, 1])

+with col1:
+    st.markdown("### π¬ Medical Consultation Chat")

+    # Chat display area
     chat_container = st.container()
     with chat_container:
         st.markdown('<div class="chat-container">', unsafe_allow_html=True)

         for i, message in enumerate(st.session_state.chat_messages):
             if message["role"] == "user":
+                st.markdown(f'<div class="user-message">π€ <strong>You:</strong> {message["content"]}</div>', unsafe_allow_html=True)
             else:
+                st.markdown(f'<div class="assistant-message">π€ <strong>MedAssist:</strong> {message["content"]}</div>', unsafe_allow_html=True)

+                # Add feedback buttons for assistant messages
+                col_a, col_b, col_c = st.columns([1, 1, 8])
+                with col_a:
+                    if st.button("π", key=f"helpful_{i}"):
+                        medical_system.update_agent_performance(i//2, 5, True)
+                        st.success("Feedback recorded!")
+                with col_b:
+                    if st.button("π", key=f"not_helpful_{i}"):
+                        medical_system.update_agent_performance(i//2, 2, False)
+                        st.info("Feedback recorded. We'll improve!")
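+                # Index mapping note (assumption): messages are appended strictly in
+                # user/assistant pairs, so i//2 points at the matching entry in
+                # medical_system.conversation_data used by update_agent_performance().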

         st.markdown('</div>', unsafe_allow_html=True)

+    # Chat input
+    with st.container():
+        st.markdown('<div class="chat-input">', unsafe_allow_html=True)
+        user_input = st.text_input("Describe your symptoms or health concerns:",
+                                   placeholder="e.g., I've been having headaches for 3 days...",
+                                   key="medical_input")
+
+        col_send, col_clear = st.columns([1, 4])
+        with col_send:
+            send_message = st.button("Send π€", type="primary")
+        with col_clear:
+            if st.button("Clear Chat ποΈ"):
+                st.session_state.chat_messages = []
                 st.rerun()
+        st.markdown('</div>', unsafe_allow_html=True)

+with col2:
+    st.markdown("### π€ AI Agent Status")

+    # Agent status display
+    for agent_name, agent in medical_system.agents.items():
+        expertise = agent.get_expertise_summary()

         st.markdown(f"""
+        <div class="agent-status-card">
+            <h4>{agent.specialization}</h4>
+            <p><strong>Queries:</strong> {expertise['total_queries']}</p>
+            <p><strong>Success Rate:</strong> {expertise['success_rate']:.1f}%</p>
+            <p><strong>Satisfaction:</strong> {expertise['user_satisfaction']:.1f}/5</p>
+            <p><strong>Learning Rate:</strong> {expertise['learning_rate']:.3f}</p>
         </div>
         """, unsafe_allow_html=True)

+    st.markdown("### π System Metrics")
+    metrics = medical_system.get_system_metrics()

+    if "total_conversations" in metrics:
+        st.markdown(f"""
+        <div class="evolution-metrics">
+            <p><strong>Total Chats:</strong> {metrics['total_conversations']}</p>
+            <p><strong>Avg Confidence:</strong> {metrics['average_confidence']:.2f}</p>
+            <p><strong>Avg Severity:</strong> {metrics['average_severity']:.1f}/10</p>
+            <p><strong>User Rating:</strong> {metrics['average_user_feedback']:.1f}/5</p>
+        </div>
+        """, unsafe_allow_html=True)

 # Process user input
 if send_message and user_input:
     # Add user message
     st.session_state.chat_messages.append({"role": "user", "content": user_input})

+    # Show thinking indicator
+    with st.spinner("π§ AI agents are analyzing your query..."):
+        # Process the query
+        result = medical_system.process_medical_query(user_input)

+    # Add assistant response
+    response_content = result['response']

+    # Add severity and confidence info
+    if result['severity_score'] > 7:
+        response_content += f"\n\nβ οΈ **High severity detected ({result['severity_score']:.1f}/10). Please seek immediate medical attention if symptoms are severe.**"
+    elif result['severity_score'] > 4:
+        response_content += f"\n\nβ‘ **Moderate severity detected ({result['severity_score']:.1f}/10). Consider scheduling a medical appointment.**"
+
+    if result['symptoms_detected']:
+        response_content += f"\n\nπ **Detected symptoms:** {', '.join(result['symptoms_detected'])}"
+
+    response_content += f"\n\nπ€ **Confidence Score:** {result['confidence']:.2f} | **Agents Consulted:** {', '.join(result['agents_consulted'])}"
+
+    st.session_state.chat_messages.append({"role": "assistant", "content": response_content})

     st.rerun()

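+# Note: the keys read from `result` above (response, severity_score, symptoms_detected,
+# confidence, agents_consulted) form the contract this UI expects from
+# MedicalConsultationSystem.process_medical_query; the quick-topic buttons in the
+# sidebar below reuse the same result dict.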
+# Sidebar with additional features
+with st.sidebar:
+    st.markdown("### π οΈ System Controls")

+    if st.button("π Reset System"):
+        st.session_state.medical_system = MedicalConsultationSystem()
+        st.session_state.chat_messages = []
+        st.rerun()
+
+    st.markdown("### π Learning Analytics")
+    if st.button("π View Detailed Analytics"):
+        st.session_state.show_analytics = True
+
+    if st.button("πΎ Export Chat History"):
+        if st.session_state.chat_messages:
+            chat_data = {
+                'timestamp': datetime.now().isoformat(),
+                'messages': st.session_state.chat_messages,
+                'system_metrics': medical_system.get_system_metrics()
+            }
+            st.download_button(
+                label="Download Chat Data",
+                data=json.dumps(chat_data, indent=2),
+                file_name=f"medical_chat_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json",
+                mime="application/json"
+            )
+        else:
+            st.warning("No chat history to export")
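+    # Illustrative shape of the exported file (example values only):
+    # {
+    #   "timestamp": "2024-01-01T12:00:00",
+    #   "messages": [{"role": "user", "content": "..."}, {"role": "assistant", "content": "..."}],
+    #   "system_metrics": {"total_conversations": 1, "average_confidence": 0.7, ...}
+    # }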
+
+    st.markdown("### π― Quick Health Topics")
+    quick_topics = [
+        "Common cold symptoms",
+        "Headache causes",
+        "Stress management",
+        "Sleep problems",
+        "Healthy diet tips",
+        "Exercise recommendations"
+    ]
+
+    for topic in quick_topics:
+        if st.button(f"π‘ {topic}", key=f"topic_{topic.replace(' ', '_')}"):
+            st.session_state.chat_messages.append({"role": "user", "content": f"Tell me about {topic.lower()}"})
+
+            with st.spinner("π§ Processing..."):
+                result = medical_system.process_medical_query(f"Tell me about {topic.lower()}")
+                response_content = result['response']
+
+                if result['symptoms_detected']:
+                    response_content += f"\n\nπ **Related symptoms:** {', '.join(result['symptoms_detected'])}"
+
+                response_content += f"\n\nπ€ **Confidence:** {result['confidence']:.2f}"
+                st.session_state.chat_messages.append({"role": "assistant", "content": response_content})
+
+            st.rerun()

+# Analytics Dashboard (if requested)
+if st.session_state.get('show_analytics', False):
+    st.markdown("---")
+    st.markdown("## π Detailed System Analytics")
+
+    metrics = medical_system.get_system_metrics()
+
+    if "agent_performance" in metrics:
+        # Agent Performance Comparison
+        st.markdown("### π€ Agent Performance Analysis")
+
+        agent_data = []
+        for agent_name, performance in metrics["agent_performance"].items():
+            agent_data.append({
+                'Agent': performance['specialization'],
+                'Success Rate (%)': performance['success_rate'],
+                'User Satisfaction': performance['user_satisfaction'],
+                'Learning Rate': performance['learning_rate'],
+                'Total Queries': performance['total_queries']
+            })
+
+        if agent_data:
+            df_agents = pd.DataFrame(agent_data)
+            st.dataframe(df_agents, use_container_width=True)
+
+            # Performance charts
+            col1, col2 = st.columns(2)
+
+            with col1:
+                st.markdown("#### Success Rate by Agent")
+                if not df_agents.empty:
+                    st.bar_chart(df_agents.set_index('Agent')['Success Rate (%)'])
+
+            with col2:
+                st.markdown("#### User Satisfaction by Agent")
+                if not df_agents.empty:
+                    st.bar_chart(df_agents.set_index('Agent')['User Satisfaction'])
+
+    # Conversation Analysis
+    st.markdown("### π¬ Conversation Analysis")

     if medical_system.conversation_data:
+        conversation_df = pd.DataFrame([asdict(entry) for entry in medical_system.conversation_data])

+        col1, col2, col3 = st.columns(3)
+
+        with col1:
+            st.metric("Total Conversations", len(conversation_df))
+            avg_confidence = conversation_df['confidence_score'].mean()
+            st.metric("Average Confidence", f"{avg_confidence:.2f}")
+
+        with col2:
+            avg_severity = conversation_df['severity_score'].mean()
+            st.metric("Average Severity", f"{avg_severity:.1f}/10")
+
+            feedback_data = conversation_df[conversation_df['user_feedback'].notna()]
+            if not feedback_data.empty:
+                avg_feedback = feedback_data['user_feedback'].mean()
+                st.metric("Average User Rating", f"{avg_feedback:.1f}/5")
+
+        with col3:
+            symptoms_detected = sum(len(symptoms) for symptoms in conversation_df['symptoms'])
+            st.metric("Total Symptoms Detected", symptoms_detected)
+
+            helpful_responses = conversation_df['was_helpful'].sum() if 'was_helpful' in conversation_df else 0
+            st.metric("Helpful Responses", helpful_responses)
+
+        # Severity distribution
+        st.markdown("#### Severity Score Distribution")
+        severity_counts = conversation_df['severity_score'].value_counts().sort_index()
+        st.bar_chart(severity_counts)
+
+        # Most common symptoms
+        st.markdown("#### Most Common Symptoms")
+        all_symptoms = []
+        for symptoms_list in conversation_df['symptoms']:
+            all_symptoms.extend(symptoms_list)
+
+        if all_symptoms:
+            symptom_counts = pd.Series(all_symptoms).value_counts().head(10)
+            st.bar_chart(symptom_counts)
+        else:
+            st.info("No symptoms data available yet")
+
+        # Timeline analysis
+        st.markdown("#### Usage Timeline")
+        conversation_df['timestamp'] = pd.to_datetime(conversation_df['timestamp'])
+        daily_usage = conversation_df.groupby(conversation_df['timestamp'].dt.date).size()
+        st.line_chart(daily_usage)
+
+    else:
+        st.info("No conversation data available for analysis yet")
+
+    # Learning Progress
+    st.markdown("### π§ AI Learning Progress")
+
+    for agent_name, agent in medical_system.agents.items():
+        with st.expander(f"π {agent.specialization} Learning Details"):
+            expertise = agent.get_expertise_summary()
+
+            st.write(f"**Total Experience:** {expertise['total_queries']} queries processed")
+            st.write(f"**Current Learning Rate:** {expertise['learning_rate']:.4f}")
+            st.write(f"**Performance Trend:** {'Improving' if expertise['user_satisfaction'] > 3.5 else 'Learning'}")
+
+            if expertise['top_expertise_areas']:
+                st.write("**Top Expertise Areas:**")
+                for area, score in expertise['top_expertise_areas'].items():
+                    st.write(f"  β’ {area.title()}: {score:.2f}")
+
+            # Learning memory (last few interactions)
+            if hasattr(agent, 'learning_memory') and agent.learning_memory:
+                st.write("**Recent Learning Events:**")
+                for memory in agent.learning_memory[-3:]:
+                    reward_emoji = "β…" if memory['reward'] > 0 else "β" if memory['reward'] < 0 else "β‘οΈ"
+                    st.write(f"  {reward_emoji} Reward: {memory['reward']:.2f} | Query: {memory['query'][:50]}...")
+
+    if st.button("π Close Analytics"):
+        st.session_state.show_analytics = False
+        st.rerun()

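+# Note on assumed interfaces: the analytics above expect get_expertise_summary() to expose
+# total_queries, success_rate, user_satisfaction, learning_rate and top_expertise_areas,
+# and conversation entries to serialize (via asdict) into columns such as timestamp,
+# confidence_score, severity_score, symptoms, user_feedback and was_helpful.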
+# Health Tips Section
 st.markdown("---")
+st.markdown("### π Daily Health Tips")
+
+health_tips = [
+    "π§ Stay hydrated: Aim for 8-10 glasses of water daily",
+    "πΆ Take regular walks: Even 10 minutes can boost your mood",
+    "π΄ Maintain sleep hygiene: 7-9 hours of quality sleep is essential",
+    "π₯ Eat colorful foods: Variety ensures you get different nutrients",
+    "π§ Practice mindfulness: Just 5 minutes of meditation can reduce stress",
+    "π± Take breaks from screens: Follow the 20-20-20 rule",
+    "π€ Stay connected: Social connections are vital for mental health",
+    "βοΈ Get sunlight: 15 minutes of sunlight helps with Vitamin D"
+]
+
+# Display a random tip
+import random
+daily_tip = random.choice(health_tips)
+st.info(f"**π‘ Today's Health Tip:** {daily_tip}")

+# Emergency Resources Section
+st.markdown("### π¨ Emergency Resources")

+emergency_col1, emergency_col2 = st.columns(2)
+
+with emergency_col1:
     st.markdown("""
+**π When to Seek Immediate Help:**
+- Chest pain or difficulty breathing
+- Severe allergic reactions
+- Loss of consciousness
+- Severe bleeding
+- Signs of stroke (FAST test)
+- Severe burns
     """)

+with emergency_col2:
     st.markdown("""
+**π Emergency Contacts:**
+- Emergency Services: 911 (US), 112 (EU)
+- Poison Control: 1-800-222-1222 (US)
+- Mental Health Crisis: 988 (US)
+- Text HOME to 741741 (Crisis Text Line)
+
+**π₯ Find Nearest Hospital:**
+Use your maps app or call emergency services
     """)

+# Data Persistence and Learning Enhancement
+class DataPersistence:
+    """Handle data persistence for learning and analytics"""
+
+    def __init__(self, data_dir: str = "medical_ai_data"):
+        self.data_dir = data_dir
+        os.makedirs(data_dir, exist_ok=True)
+
+    def save_conversation_data(self, system: MedicalConsultationSystem):
+        """Save conversation data for future learning"""
+        try:
+            data_file = os.path.join(self.data_dir, f"conversations_{datetime.now().strftime('%Y%m%d')}.json")
+
+            conversations = []
+            for entry in system.conversation_data:
+                conversations.append(asdict(entry))
+
+            with open(data_file, 'w') as f:
+                json.dump(conversations, f, indent=2)
+
+            return True
+        except Exception as e:
+            st.error(f"Failed to save data: {str(e)}")
+            return False
+
+    def save_agent_knowledge(self, system: MedicalConsultationSystem):
+        """Save agent learning data"""
+        try:
+            for agent_name, agent in system.agents.items():
+                agent_file = os.path.join(self.data_dir, f"agent_{agent_name}_knowledge.pkl")
+
+                agent_data = {
+                    'knowledge_base': dict(agent.knowledge_base),
+                    'performance': asdict(agent.performance),
+                    'learning_memory': agent.learning_memory[-100:]  # Keep last 100 entries
+                }
+
+                with open(agent_file, 'wb') as f:
+                    pickle.dump(agent_data, f)
+
+            return True
+        except Exception as e:
+            st.error(f"Failed to save agent knowledge: {str(e)}")
+            return False
+
+    def load_agent_knowledge(self, system: MedicalConsultationSystem):
+        """Load previously saved agent knowledge"""
+        try:
+            for agent_name, agent in system.agents.items():
+                agent_file = os.path.join(self.data_dir, f"agent_{agent_name}_knowledge.pkl")
+
+                if os.path.exists(agent_file):
+                    with open(agent_file, 'rb') as f:
+                        agent_data = pickle.load(f)
+
+                    # Restore knowledge base
+                    agent.knowledge_base = defaultdict(float, agent_data.get('knowledge_base', {}))
+
+                    # Restore learning memory
+                    agent.learning_memory = agent_data.get('learning_memory', [])
+
+                    # Restore performance metrics
+                    if 'performance' in agent_data:
+                        perf_data = agent_data['performance']
+                        agent.performance.total_queries = perf_data.get('total_queries', 0)
+                        agent.performance.successful_responses = perf_data.get('successful_responses', 0)
+                        agent.performance.average_confidence = perf_data.get('average_confidence', 0.0)
+                        agent.performance.user_satisfaction = perf_data.get('user_satisfaction', 0.0)
+                        agent.performance.learning_rate = perf_data.get('learning_rate', 0.01)
+
+            return True
+        except Exception as e:
+            st.error(f"Failed to load agent knowledge: {str(e)}")
+            return False
+
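+# Typical round trip (illustrative sketch; file names follow the class above):
+#     persistence = DataPersistence()                      # creates ./medical_ai_data/
+#     persistence.save_conversation_data(medical_system)   # conversations_YYYYMMDD.json
+#     persistence.save_agent_knowledge(medical_system)     # agent_<name>_knowledge.pkl per agent
+#     persistence.load_agent_knowledge(medical_system)     # restores the pickled state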
+# Initialize data persistence
+if 'data_persistence' not in st.session_state:
+    st.session_state.data_persistence = DataPersistence()
+
+# Load previous learning data when the system starts
+if 'knowledge_loaded' not in st.session_state:
+    st.session_state.data_persistence.load_agent_knowledge(medical_system)
+    st.session_state.knowledge_loaded = True
+
+# Auto-save functionality
+if len(st.session_state.chat_messages) > 0 and len(st.session_state.chat_messages) % 10 == 0:
+    # Save data every 10 messages
+    st.session_state.data_persistence.save_conversation_data(medical_system)
+    st.session_state.data_persistence.save_agent_knowledge(medical_system)
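+# Note: Streamlit re-runs this script on every interaction, so the check above fires on
+# each rerun while the message count sits at a multiple of 10, not strictly once per
+# tenth message.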
+
+# Footer with system information
+st.markdown("---")
 st.markdown("""
+<div style="text-align: center; padding: 2rem; opacity: 0.8;">
+    <p><strong>MedAssist v1.0</strong> | AI-Powered Medical Preconsultation System</p>
+    <p>π€ Evolutionary Learning Agents β’ π Real-time Medical Search β’ π¬ Intelligent Chat Interface</p>
+    <p><small>β οΈ This system is for informational purposes only and is not a substitute for professional medical advice</small></p>
 </div>
 """, unsafe_allow_html=True)