Create app.py

app.py ADDED
@@ -0,0 +1,1030 @@
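Note on running this Space: the code below reads the Groq key from the environment via os.environ.get("GROQ_API_KEY"), so a GROQ_API_KEY secret is expected, and the imports imply that streamlit, pandas, numpy, groq and duckduckgo_search are installed. A minimal requirements.txt sketch, inferred from the imports rather than taken from this commit, might look like:

    streamlit
    pandas
    numpy
    groq
    duckduckgo-search

Locally the same file would be started with "streamlit run app.py".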
import streamlit as st
import pandas as pd
import json
import os
from typing import Union, List, Dict, Optional, Tuple
from groq import Groq
from duckduckgo_search import DDGS
from datetime import datetime, timedelta
import time
import numpy as np
import pickle
from dataclasses import dataclass, asdict
import hashlib
from collections import defaultdict

# Set page configuration
st.set_page_config(
    page_title="MedAssist - AI Medical Preconsultation",
    layout="wide",
    initial_sidebar_state="expanded",
    page_icon="🏥"
)

# Enhanced CSS for medical theme
st.markdown("""
<style>
    /* Medical theme styling */
    html, body, .stApp, .main {
        background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
        color: #ffffff !important;
    }

    .medical-header {
        background: linear-gradient(45deg, #2c5aa0, #4a90e2) !important;
        color: white !important;
        padding: 2rem !important;
        border-radius: 15px !important;
        text-align: center !important;
        margin-bottom: 2rem !important;
        box-shadow: 0 8px 32px rgba(31, 38, 135, 0.37) !important;
    }

    .chat-container {
        background: rgba(255, 255, 255, 0.1) !important;
        border-radius: 15px !important;
        padding: 1rem !important;
        backdrop-filter: blur(10px) !important;
        border: 1px solid rgba(255, 255, 255, 0.2) !important;
        margin-bottom: 1rem !important;
        max-height: 500px !important;
        overflow-y: auto !important;
    }

    .user-message {
        background: linear-gradient(45deg, #4CAF50, #66BB6A) !important;
        color: white !important;
        padding: 1rem !important;
        border-radius: 15px 15px 5px 15px !important;
        margin: 0.5rem 0 !important;
        margin-left: 2rem !important;
        box-shadow: 0 4px 15px rgba(76, 175, 80, 0.4) !important;
    }

    .assistant-message {
        background: rgba(255, 255, 255, 0.15) !important;
        color: white !important;
        padding: 1rem !important;
        border-radius: 15px 15px 15px 5px !important;
        margin: 0.5rem 0 !important;
        margin-right: 2rem !important;
        border-left: 4px solid #2196F3 !important;
        backdrop-filter: blur(5px) !important;
    }

    .agent-status-card {
        background: rgba(255, 255, 255, 0.15) !important;
        border: 1px solid rgba(255, 255, 255, 0.3) !important;
        border-radius: 12px !important;
        padding: 1rem !important;
        margin: 0.5rem 0 !important;
        backdrop-filter: blur(5px) !important;
    }

    .evolution-metrics {
        background: linear-gradient(45deg, #FF6B6B, #FF8E8E) !important;
        color: white !important;
        padding: 1rem !important;
        border-radius: 10px !important;
        margin: 0.5rem 0 !important;
    }

    .warning-box {
        background: rgba(255, 152, 0, 0.2) !important;
        border: 2px solid #FF9800 !important;
        border-radius: 10px !important;
        padding: 1.5rem !important;
        margin: 1rem 0 !important;
        color: white !important;
    }

    .stButton > button {
        background: linear-gradient(45deg, #2196F3, #64B5F6) !important;
        color: white !important;
        border: none !important;
        border-radius: 25px !important;
        font-weight: bold !important;
        padding: 0.75rem 2rem !important;
        transition: all 0.3s ease !important;
    }

    .stButton > button:hover {
        transform: translateY(-2px) !important;
        box-shadow: 0 8px 25px rgba(33, 150, 243, 0.6) !important;
    }

    .chat-input {
        position: sticky !important;
        bottom: 0 !important;
        background: rgba(255, 255, 255, 0.1) !important;
        padding: 1rem !important;
        border-radius: 15px !important;
        backdrop-filter: blur(10px) !important;
    }

    .spinner {
        border: 2px solid rgba(255, 255, 255, 0.3);
        border-radius: 50%;
        border-top: 2px solid #ffffff;
        width: 20px;
        height: 20px;
        animation: spin 1s linear infinite;
        display: inline-block;
    }

    @keyframes spin {
        0% { transform: rotate(0deg); }
        100% { transform: rotate(360deg); }
    }
</style>
""", unsafe_allow_html=True)

@dataclass
class ConversationEntry:
    """Data structure for storing conversation entries"""
    timestamp: str
    user_input: str
    assistant_response: str
    symptoms: List[str]
    severity_score: float
    confidence_score: float
    search_queries_used: List[str]
    user_feedback: Optional[int] = None  # 1-5 rating
    was_helpful: Optional[bool] = None

@dataclass
class AgentPerformance:
    """Track agent performance metrics"""
    agent_name: str
    total_queries: int = 0
    successful_responses: int = 0
    average_confidence: float = 0.0
    user_satisfaction: float = 0.0
    learning_rate: float = 0.01
    expertise_areas: Dict[str, float] = None

    def __post_init__(self):
        if self.expertise_areas is None:
            self.expertise_areas = defaultdict(float)

class MedicalSearchTool:
    """Enhanced medical search tool with domain-specific optimization"""

    def __init__(self):
        self.ddgs = DDGS()
        self.medical_sources = [
            "mayoclinic.org", "webmd.com", "healthline.com", "medlineplus.gov",
            "nih.gov", "who.int", "cdc.gov", "ncbi.nlm.nih.gov"
        ]

    def search_medical_info(self, query: str, search_type: str = "symptoms") -> str:
        """Search for medical information with safety considerations"""
        try:
            # Add medical context to search
            medical_queries = {
                "symptoms": f"medical symptoms {query} causes diagnosis",
                "treatment": f"medical treatment {query} therapy options",
                "prevention": f"disease prevention {query} health tips",
                "general": f"medical information {query} health facts"
            }

            enhanced_query = medical_queries.get(search_type, medical_queries["general"])

            # Perform search with medical focus
            search_results = list(self.ddgs.text(
                enhanced_query,
                max_results=5,
                region='wt-wt',
                safesearch='on'
            ))

            if not search_results:
                return "No relevant medical information found. Please consult with a healthcare professional."

            # Filter and format results with medical authority preference
            formatted_results = []
            for idx, result in enumerate(search_results, 1):
                title = result.get('title', 'No title')
                snippet = result.get('body', 'No description')
                url = result.get('href', 'No URL')

                # Prioritize trusted medical sources
                source_trust = "✓" if any(source in url for source in self.medical_sources) else ""

                formatted_results.append(
                    f"{idx}. {source_trust} {title}\n"
                    f"   Information: {snippet}\n"
                    f"   Source: {url}\n"
                )

            return "\n".join(formatted_results)

        except Exception as e:
            return f"Search temporarily unavailable: {str(e)}"

+
class GroqLLM:
|
226 |
+
"""Medical-optimized LLM client"""
|
227 |
+
|
228 |
+
def __init__(self, model_name="llama-3.1-70b-versatile"):
|
229 |
+
self.client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
|
230 |
+
self.model_name = model_name
|
231 |
+
self.medical_context = """
|
232 |
+
You are a medical AI assistant for preconsultation guidance.
|
233 |
+
IMPORTANT: Always remind users that this is not a substitute for professional medical advice.
|
234 |
+
Provide helpful information while emphasizing the need for proper medical consultation.
|
235 |
+
"""
|
236 |
+
|
237 |
+
def generate_response(self, prompt: str, conversation_history: List[str] = None) -> Tuple[str, float]:
|
238 |
+
"""Generate response with confidence scoring"""
|
239 |
+
try:
|
240 |
+
# Build context with conversation history
|
241 |
+
context = self.medical_context
|
242 |
+
if conversation_history:
|
243 |
+
context += f"\n\nConversation History:\n{chr(10).join(conversation_history[-5:])}"
|
244 |
+
|
245 |
+
full_prompt = f"{context}\n\nUser Query: {prompt}\n\nPlease provide helpful medical guidance while emphasizing the importance of professional medical consultation."
|
246 |
+
|
247 |
+
completion = self.client.chat.completions.create(
|
248 |
+
model=self.model_name,
|
249 |
+
messages=[{"role": "user", "content": full_prompt}],
|
250 |
+
temperature=0.3, # Lower temperature for medical accuracy
|
251 |
+
max_tokens=1500,
|
252 |
+
stream=False
|
253 |
+
)
|
254 |
+
|
255 |
+
response = completion.choices[0].message.content if completion.choices else "Unable to generate response"
|
256 |
+
|
257 |
+
# Calculate confidence score based on response characteristics
|
258 |
+
confidence = self._calculate_confidence(response, prompt)
|
259 |
+
|
260 |
+
return response, confidence
|
261 |
+
|
262 |
+
except Exception as e:
|
263 |
+
return f"LLM temporarily unavailable: {str(e)}", 0.0
|
264 |
+
|
265 |
+
def _calculate_confidence(self, response: str, query: str) -> float:
|
266 |
+
"""Calculate confidence score based on response quality"""
|
267 |
+
confidence_factors = 0.0
|
268 |
+
|
269 |
+
# Check for medical disclaimers (increases confidence in safety)
|
270 |
+
if any(phrase in response.lower() for phrase in ["consult", "doctor", "medical professional", "healthcare provider"]):
|
271 |
+
confidence_factors += 0.3
|
272 |
+
|
273 |
+
# Check response length (adequate detail)
|
274 |
+
if 200 <= len(response) <= 1000:
|
275 |
+
confidence_factors += 0.2
|
276 |
+
|
277 |
+
# Check for structured information
|
278 |
+
if any(marker in response for marker in ["1.", "β’", "-", "**"]):
|
279 |
+
confidence_factors += 0.2
|
280 |
+
|
281 |
+
# Check for balanced information (not overly certain)
|
282 |
+
if any(phrase in response.lower() for phrase in ["may", "might", "could", "possible", "typically"]):
|
283 |
+
confidence_factors += 0.3
|
284 |
+
|
285 |
+
return min(confidence_factors, 1.0)
|
286 |
+
|
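# Worked example for _calculate_confidence above (illustrative): a reply that tells the user to
# "consult a doctor" (+0.3), runs between 200 and 1000 characters (+0.2), contains a numbered or
# bulleted list (+0.2), and hedges with words such as "may" or "possible" (+0.3) scores
# 0.3 + 0.2 + 0.2 + 0.3 = 1.0, which is also the cap enforced by min(confidence_factors, 1.0).
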
class EvolutionaryMedicalAgent:
    """Evolutionary agent with reinforcement learning capabilities"""

    def __init__(self, agent_id: str, specialization: str):
        self.agent_id = agent_id
        self.specialization = specialization
        self.performance = AgentPerformance(agent_name=agent_id)
        self.knowledge_base = defaultdict(float)
        self.response_patterns = {}
        self.learning_memory = []

    def process_query(self, query: str, context: str, search_results: str) -> Tuple[str, float]:
        """Process query and adapt based on specialization"""

        # Update query count
        self.performance.total_queries += 1

        # Extract key terms for learning
        key_terms = self._extract_medical_terms(query)

        # Build specialized response based on agent's expertise
        specialized_prompt = f"""
        As a {self.specialization} specialist, analyze this medical query:
        Query: {query}
        Context: {context}
        Search Results: {search_results}

        Provide specialized insights based on your expertise in {self.specialization}.
        Always emphasize the need for professional medical consultation.
        """

        # Simulate processing (in real implementation, this would use the LLM)
        response = f"Based on my specialization in {self.specialization}, {query.lower()} suggests several considerations. However, please consult with a healthcare professional for proper diagnosis and treatment."

        confidence = 0.7 + (self.performance.average_confidence * 0.3)

        # Update expertise in relevant areas
        for term in key_terms:
            self.knowledge_base[term] += 0.1

        return response, confidence

    def update_from_feedback(self, query: str, response: str, feedback_score: int, was_helpful: bool):
        """Update agent based on user feedback (reinforcement learning)"""

        # Calculate reward signal
        reward = (feedback_score - 3) / 2  # Convert 1-5 scale to -1 to 1
        if was_helpful:
            reward += 0.2

        # Update performance metrics
        if feedback_score >= 3:
            self.performance.successful_responses += 1

        # Update satisfaction and confidence
        self.performance.user_satisfaction = (
            (self.performance.user_satisfaction * (self.performance.total_queries - 1) + feedback_score) /
            self.performance.total_queries
        )

        # Store learning memory
        self.learning_memory.append({
            'query': query,
            'response': response,
            'reward': reward,
            'timestamp': datetime.now().isoformat()
        })

        # Adapt learning rate based on performance
        if self.performance.user_satisfaction > 4.0:
            self.performance.learning_rate *= 0.95  # Slow down learning when performing well
        elif self.performance.user_satisfaction < 3.0:
            self.performance.learning_rate *= 1.1  # Speed up learning when performing poorly

        # Update expertise areas based on feedback
        terms = self._extract_medical_terms(query)
        for term in terms:
            self.knowledge_base[term] += reward * self.performance.learning_rate

    def _extract_medical_terms(self, text: str) -> List[str]:
        """Extract medical terms from text for learning"""
        medical_keywords = [
            'pain', 'fever', 'headache', 'nausea', 'fatigue', 'cough', 'cold', 'flu',
            'diabetes', 'hypertension', 'infection', 'allergy', 'asthma', 'arthritis',
            'anxiety', 'depression', 'insomnia', 'migraine', 'rash', 'swelling'
        ]

        found_terms = []
        text_lower = text.lower()
        for term in medical_keywords:
            if term in text_lower:
                found_terms.append(term)
        return found_terms

    def get_expertise_summary(self) -> Dict:
        """Get summary of agent's learned expertise"""
        return {
            'specialization': self.specialization,
            'total_queries': self.performance.total_queries,
            'success_rate': (self.performance.successful_responses / max(1, self.performance.total_queries)) * 100,
            'user_satisfaction': self.performance.user_satisfaction,
            'learning_rate': self.performance.learning_rate,
            'top_expertise_areas': dict(sorted(self.knowledge_base.items(), key=lambda x: x[1], reverse=True)[:5])
        }

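# Worked example of the feedback update in update_from_feedback above (illustrative):
#   feedback_score=5, was_helpful=True  -> reward = (5 - 3) / 2 + 0.2 = 1.2
#   feedback_score=1, was_helpful=False -> reward = (1 - 3) / 2       = -1.0
# Each medical term matched in the query then moves by reward * learning_rate, e.g.
# 1.2 * 0.01 = +0.012 added to that term's weight in knowledge_base at the default learning rate.
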
class MedicalConsultationSystem:
    """Main medical consultation system with evolutionary agents"""

    def __init__(self):
        self.llm = GroqLLM()
        self.search_tool = MedicalSearchTool()
        self.agents = self._initialize_agents()
        self.conversation_history = []
        self.conversation_data = []

    def _initialize_agents(self) -> Dict[str, EvolutionaryMedicalAgent]:
        """Initialize specialized medical agents"""
        return {
            "general_practitioner": EvolutionaryMedicalAgent("gp", "General Practice Medicine"),
            "symptom_analyzer": EvolutionaryMedicalAgent("symptom", "Symptom Analysis and Triage"),
            "wellness_advisor": EvolutionaryMedicalAgent("wellness", "Preventive Care and Wellness"),
            "mental_health": EvolutionaryMedicalAgent("mental", "Mental Health and Psychology"),
            "emergency_assessor": EvolutionaryMedicalAgent("emergency", "Emergency Assessment and Urgent Care")
        }

    def process_medical_query(self, user_query: str) -> Dict:
        """Process medical query through evolutionary agent system"""

        timestamp = datetime.now().isoformat()

        # Determine which agents should handle this query
        relevant_agents = self._select_relevant_agents(user_query)

        # Search for medical information
        search_results = self.search_tool.search_medical_info(user_query, "symptoms")

        # Build conversation context
        context = "\n".join(self.conversation_history[-3:]) if self.conversation_history else ""

        # Get responses from relevant agents
        agent_responses = {}
        for agent_name in relevant_agents:
            agent = self.agents[agent_name]
            response, confidence = agent.process_query(user_query, context, search_results)
            agent_responses[agent_name] = {
                'response': response,
                'confidence': confidence,
                'specialization': agent.specialization
            }

        # Generate main LLM response
        main_response, main_confidence = self.llm.generate_response(
            f"{user_query}\n\nRelevant Information: {search_results}",
            self.conversation_history
        )

        # Combine responses intelligently
        final_response = self._combine_responses(main_response, agent_responses)

        # Update conversation history
        self.conversation_history.extend([
            f"User: {user_query}",
            f"Assistant: {final_response}"
        ])

        # Extract symptoms for analysis
        symptoms = self._extract_symptoms(user_query)
        severity_score = self._assess_severity(user_query, symptoms)

        # Store conversation data
        conversation_entry = ConversationEntry(
            timestamp=timestamp,
            user_input=user_query,
            assistant_response=final_response,
            symptoms=symptoms,
            severity_score=severity_score,
            confidence_score=main_confidence,
            search_queries_used=[user_query]
        )

        self.conversation_data.append(conversation_entry)

        return {
            'response': final_response,
            'confidence': main_confidence,
            'severity_score': severity_score,
            'symptoms_detected': symptoms,
            'agents_consulted': relevant_agents,
            'agent_responses': agent_responses,
            'search_performed': True
        }

+
def _select_relevant_agents(self, query: str) -> List[str]:
|
480 |
+
"""Select most relevant agents for the query"""
|
481 |
+
query_lower = query.lower()
|
482 |
+
relevant_agents = ["general_practitioner"] # Always include GP
|
483 |
+
|
484 |
+
# Mental health keywords
|
485 |
+
mental_health_keywords = ["stress", "anxiety", "depression", "sleep", "mood", "worry", "panic", "sad"]
|
486 |
+
if any(keyword in query_lower for keyword in mental_health_keywords):
|
487 |
+
relevant_agents.append("mental_health")
|
488 |
+
|
489 |
+
# Emergency keywords
|
490 |
+
emergency_keywords = ["severe", "intense", "emergency", "urgent", "chest pain", "difficulty breathing", "blood"]
|
491 |
+
if any(keyword in query_lower for keyword in emergency_keywords):
|
492 |
+
relevant_agents.append("emergency_assessor")
|
493 |
+
|
494 |
+
# Wellness keywords
|
495 |
+
wellness_keywords = ["prevention", "healthy", "nutrition", "exercise", "lifestyle", "diet"]
|
496 |
+
if any(keyword in query_lower for keyword in wellness_keywords):
|
497 |
+
relevant_agents.append("wellness_advisor")
|
498 |
+
|
499 |
+
# Always include symptom analyzer for health queries
|
500 |
+
if any(keyword in query_lower for keyword in ["pain", "ache", "hurt", "symptom", "feel"]):
|
501 |
+
relevant_agents.append("symptom_analyzer")
|
502 |
+
|
503 |
+
return list(set(relevant_agents))
|
504 |
+
|
505 |
+
def _combine_responses(self, main_response: str, agent_responses: Dict) -> str:
|
506 |
+
"""Intelligently combine responses from multiple agents"""
|
507 |
+
if not agent_responses:
|
508 |
+
return main_response
|
509 |
+
|
510 |
+
combined = main_response + "\n\n**Specialist Insights:**\n"
|
511 |
+
for agent_name, data in agent_responses.items():
|
512 |
+
if data['confidence'] > 0.6: # Only include confident responses
|
513 |
+
combined += f"\nβ’ **{data['specialization']}**: {data['response'][:200]}...\n"
|
514 |
+
|
515 |
+
return combined
|
516 |
+
|
517 |
+
def _extract_symptoms(self, query: str) -> List[str]:
|
518 |
+
"""Extract symptoms from user query"""
|
519 |
+
common_symptoms = [
|
520 |
+
'fever', 'headache', 'nausea', 'pain', 'cough', 'fatigue', 'dizziness',
|
521 |
+
'rash', 'swelling', 'shortness of breath', 'chest pain', 'abdominal pain'
|
522 |
+
]
|
523 |
+
|
524 |
+
query_lower = query.lower()
|
525 |
+
detected_symptoms = [symptom for symptom in common_symptoms if symptom in query_lower]
|
526 |
+
return detected_symptoms
|
527 |
+
|
528 |
+
def _assess_severity(self, query: str, symptoms: List[str]) -> float:
|
529 |
+
"""Assess severity of reported symptoms (0-10 scale)"""
|
530 |
+
severity_score = 0.0
|
531 |
+
query_lower = query.lower()
|
532 |
+
|
533 |
+
# High severity indicators
|
534 |
+
high_severity = ["severe", "intense", "unbearable", "emergency", "chest pain", "difficulty breathing"]
|
535 |
+
medium_severity = ["moderate", "persistent", "recurring", "worse", "concerning"]
|
536 |
+
|
537 |
+
if any(indicator in query_lower for indicator in high_severity):
|
538 |
+
severity_score += 7.0
|
539 |
+
elif any(indicator in query_lower for indicator in medium_severity):
|
540 |
+
severity_score += 4.0
|
541 |
+
else:
|
542 |
+
severity_score += 2.0
|
543 |
+
|
544 |
+
# Add points for multiple symptoms
|
545 |
+
severity_score += min(len(symptoms) * 0.5, 2.0)
|
546 |
+
|
547 |
+
return min(severity_score, 10.0)
|
548 |
+
|
549 |
+
def update_agent_performance(self, query_index: int, feedback_score: int, was_helpful: bool):
|
550 |
+
"""Update agent performance based on user feedback"""
|
551 |
+
if query_index < len(self.conversation_data):
|
552 |
+
entry = self.conversation_data[query_index]
|
553 |
+
entry.user_feedback = feedback_score
|
554 |
+
entry.was_helpful = was_helpful
|
555 |
+
|
556 |
+
# Update all agents that were involved in this query
|
557 |
+
for agent in self.agents.values():
|
558 |
+
agent.update_from_feedback(entry.user_input, entry.assistant_response, feedback_score, was_helpful)
|
559 |
+
|
560 |
+
def get_system_metrics(self) -> Dict:
|
561 |
+
"""Get comprehensive system performance metrics"""
|
562 |
+
total_conversations = len(self.conversation_data)
|
563 |
+
|
564 |
+
if total_conversations == 0:
|
565 |
+
return {"status": "No conversations yet"}
|
566 |
+
|
567 |
+
avg_confidence = np.mean([entry.confidence_score for entry in self.conversation_data])
|
568 |
+
avg_severity = np.mean([entry.severity_score for entry in self.conversation_data])
|
569 |
+
|
570 |
+
feedback_entries = [entry for entry in self.conversation_data if entry.user_feedback is not None]
|
571 |
+
avg_feedback = np.mean([entry.user_feedback for entry in feedback_entries]) if feedback_entries else 0
|
572 |
+
|
573 |
+
return {
|
574 |
+
"total_conversations": total_conversations,
|
575 |
+
"average_confidence": avg_confidence,
|
576 |
+
"average_severity": avg_severity,
|
577 |
+
"average_user_feedback": avg_feedback,
|
578 |
+
"agent_performance": {name: agent.get_expertise_summary() for name, agent in self.agents.items()}
|
579 |
+
}
|
580 |
+
|
# Initialize session state
if 'medical_system' not in st.session_state:
    st.session_state.medical_system = MedicalConsultationSystem()
if 'chat_messages' not in st.session_state:
    st.session_state.chat_messages = []

medical_system = st.session_state.medical_system

# Main interface
st.markdown("""
<div class="medical-header">
    <h1>🏥 MedAssist - AI Medical Preconsultation</h1>
    <p>Advanced AI-powered medical guidance with evolutionary learning agents</p>
</div>
""", unsafe_allow_html=True)

# Medical disclaimer
st.markdown("""
<div class="warning-box">
    <h3>⚠️ Important Medical Disclaimer</h3>
    <p>This AI system provides general health information and is NOT a substitute for professional medical advice, diagnosis, or treatment. Always consult with qualified healthcare professionals for medical concerns. In case of emergency, contact emergency services immediately.</p>
</div>
""", unsafe_allow_html=True)

# Main layout
col1, col2 = st.columns([3, 1])

with col1:
    st.markdown("### 💬 Medical Consultation Chat")

    # Chat display area
    chat_container = st.container()
    with chat_container:
        st.markdown('<div class="chat-container">', unsafe_allow_html=True)

        for i, message in enumerate(st.session_state.chat_messages):
            if message["role"] == "user":
                st.markdown(f'<div class="user-message">👤 <strong>You:</strong> {message["content"]}</div>', unsafe_allow_html=True)
            else:
                st.markdown(f'<div class="assistant-message">🤖 <strong>MedAssist:</strong> {message["content"]}</div>', unsafe_allow_html=True)

                # Add feedback buttons for assistant messages
                col_a, col_b, col_c = st.columns([1, 1, 8])
                with col_a:
                    if st.button("👍", key=f"helpful_{i}"):
                        medical_system.update_agent_performance(i//2, 5, True)
                        st.success("Feedback recorded!")
                with col_b:
                    if st.button("👎", key=f"not_helpful_{i}"):
                        medical_system.update_agent_performance(i//2, 2, False)
                        st.info("Feedback recorded. We'll improve!")

        st.markdown('</div>', unsafe_allow_html=True)

    # Chat input
    with st.container():
        st.markdown('<div class="chat-input">', unsafe_allow_html=True)
        user_input = st.text_input("Describe your symptoms or health concerns:",
                                   placeholder="e.g., I've been having headaches for 3 days...",
                                   key="medical_input")

        col_send, col_clear = st.columns([1, 4])
        with col_send:
            send_message = st.button("Send 📤", type="primary")
        with col_clear:
            if st.button("Clear Chat 🗑️"):
                st.session_state.chat_messages = []
                st.rerun()
        st.markdown('</div>', unsafe_allow_html=True)

with col2:
    st.markdown("### 🤖 AI Agent Status")

    # Agent status display
    for agent_name, agent in medical_system.agents.items():
        expertise = agent.get_expertise_summary()

        st.markdown(f"""
        <div class="agent-status-card">
            <h4>{agent.specialization}</h4>
            <p><strong>Queries:</strong> {expertise['total_queries']}</p>
            <p><strong>Success Rate:</strong> {expertise['success_rate']:.1f}%</p>
            <p><strong>Satisfaction:</strong> {expertise['user_satisfaction']:.1f}/5</p>
            <p><strong>Learning Rate:</strong> {expertise['learning_rate']:.3f}</p>
        </div>
        """, unsafe_allow_html=True)

    st.markdown("### 📊 System Metrics")
    metrics = medical_system.get_system_metrics()

    if "total_conversations" in metrics:
        st.markdown(f"""
        <div class="evolution-metrics">
            <p><strong>Total Chats:</strong> {metrics['total_conversations']}</p>
            <p><strong>Avg Confidence:</strong> {metrics['average_confidence']:.2f}</p>
            <p><strong>Avg Severity:</strong> {metrics['average_severity']:.1f}/10</p>
            <p><strong>User Rating:</strong> {metrics['average_user_feedback']:.1f}/5</p>
        </div>
        """, unsafe_allow_html=True)

# Process user input
if send_message and user_input:
    # Add user message
    st.session_state.chat_messages.append({"role": "user", "content": user_input})

    # Show thinking indicator
    with st.spinner("🧠 AI agents are analyzing your query..."):
        # Process the query
        result = medical_system.process_medical_query(user_input)

        # Add assistant response
        response_content = result['response']

        # Add severity and confidence info
        if result['severity_score'] > 7:
            response_content += f"\n\n⚠️ **High severity detected ({result['severity_score']:.1f}/10). Please seek immediate medical attention if symptoms are severe.**"
        elif result['severity_score'] > 4:
            response_content += f"\n\n⚡ **Moderate severity detected ({result['severity_score']:.1f}/10). Consider scheduling a medical appointment.**"

        if result['symptoms_detected']:
            response_content += f"\n\n🔍 **Detected symptoms:** {', '.join(result['symptoms_detected'])}"

        response_content += f"\n\n🤖 **Confidence Score:** {result['confidence']:.2f} | **Agents Consulted:** {', '.join(result['agents_consulted'])}"

        st.session_state.chat_messages.append({"role": "assistant", "content": response_content})

    st.rerun()

# Sidebar with additional features
with st.sidebar:
    st.markdown("### 🛠️ System Controls")

    if st.button("🔄 Reset System"):
        st.session_state.medical_system = MedicalConsultationSystem()
        st.session_state.chat_messages = []
        st.rerun()

    st.markdown("### 📈 Learning Analytics")
    if st.button("📊 View Detailed Analytics"):
        st.session_state.show_analytics = True

    if st.button("💾 Export Chat History"):
        if st.session_state.chat_messages:
            chat_data = {
                'timestamp': datetime.now().isoformat(),
                'messages': st.session_state.chat_messages,
                'system_metrics': medical_system.get_system_metrics()
            }
            st.download_button(
                label="Download Chat Data",
                # default=str keeps the dump working when the metrics contain non-JSON-native
                # values such as numpy floats returned by np.mean
                data=json.dumps(chat_data, indent=2, default=str),
                file_name=f"medical_chat_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json",
                mime="application/json"
            )
        else:
            st.warning("No chat history to export")

    st.markdown("### 🎯 Quick Health Topics")
    quick_topics = [
        "Common cold symptoms",
        "Headache causes",
        "Stress management",
        "Sleep problems",
        "Healthy diet tips",
        "Exercise recommendations"
    ]

    for topic in quick_topics:
        if st.button(f"💡 {topic}", key=f"topic_{topic.replace(' ', '_')}"):
            st.session_state.chat_messages.append({"role": "user", "content": f"Tell me about {topic.lower()}"})

            with st.spinner("🧠 Processing..."):
                result = medical_system.process_medical_query(f"Tell me about {topic.lower()}")
                response_content = result['response']

                if result['symptoms_detected']:
                    response_content += f"\n\n🔍 **Related symptoms:** {', '.join(result['symptoms_detected'])}"

                response_content += f"\n\n🤖 **Confidence:** {result['confidence']:.2f}"
                st.session_state.chat_messages.append({"role": "assistant", "content": response_content})

            st.rerun()

# Analytics Dashboard (if requested)
if st.session_state.get('show_analytics', False):
    st.markdown("---")
    st.markdown("## 📊 Detailed System Analytics")

    metrics = medical_system.get_system_metrics()

    if "agent_performance" in metrics:
        # Agent Performance Comparison
        st.markdown("### 🤖 Agent Performance Analysis")

        agent_data = []
        for agent_name, performance in metrics["agent_performance"].items():
            agent_data.append({
                'Agent': performance['specialization'],
                'Success Rate (%)': performance['success_rate'],
                'User Satisfaction': performance['user_satisfaction'],
                'Learning Rate': performance['learning_rate'],
                'Total Queries': performance['total_queries']
            })

        if agent_data:
            df_agents = pd.DataFrame(agent_data)
            st.dataframe(df_agents, use_container_width=True)

            # Performance charts
            col1, col2 = st.columns(2)

            with col1:
                st.markdown("#### Success Rate by Agent")
                if not df_agents.empty:
                    st.bar_chart(df_agents.set_index('Agent')['Success Rate (%)'])

            with col2:
                st.markdown("#### User Satisfaction by Agent")
                if not df_agents.empty:
                    st.bar_chart(df_agents.set_index('Agent')['User Satisfaction'])

    # Conversation Analysis
    st.markdown("### 💬 Conversation Analysis")

    if medical_system.conversation_data:
        conversation_df = pd.DataFrame([asdict(entry) for entry in medical_system.conversation_data])

        col1, col2, col3 = st.columns(3)

        with col1:
            st.metric("Total Conversations", len(conversation_df))
            avg_confidence = conversation_df['confidence_score'].mean()
            st.metric("Average Confidence", f"{avg_confidence:.2f}")

        with col2:
            avg_severity = conversation_df['severity_score'].mean()
            st.metric("Average Severity", f"{avg_severity:.1f}/10")

            feedback_data = conversation_df[conversation_df['user_feedback'].notna()]
            if not feedback_data.empty:
                avg_feedback = feedback_data['user_feedback'].mean()
                st.metric("Average User Rating", f"{avg_feedback:.1f}/5")

        with col3:
            symptoms_detected = sum(len(symptoms) for symptoms in conversation_df['symptoms'])
            st.metric("Total Symptoms Detected", symptoms_detected)

            helpful_responses = conversation_df['was_helpful'].sum() if 'was_helpful' in conversation_df else 0
            st.metric("Helpful Responses", helpful_responses)

        # Severity distribution
        st.markdown("#### Severity Score Distribution")
        severity_counts = conversation_df['severity_score'].value_counts().sort_index()
        st.bar_chart(severity_counts)

        # Most common symptoms
        st.markdown("#### Most Common Symptoms")
        all_symptoms = []
        for symptoms_list in conversation_df['symptoms']:
            all_symptoms.extend(symptoms_list)

        if all_symptoms:
            symptom_counts = pd.Series(all_symptoms).value_counts().head(10)
            st.bar_chart(symptom_counts)
        else:
            st.info("No symptoms data available yet")

        # Timeline analysis
        st.markdown("#### Usage Timeline")
        conversation_df['timestamp'] = pd.to_datetime(conversation_df['timestamp'])
        daily_usage = conversation_df.groupby(conversation_df['timestamp'].dt.date).size()
        st.line_chart(daily_usage)

    else:
        st.info("No conversation data available for analysis yet")

    # Learning Progress
    st.markdown("### 🧠 AI Learning Progress")

    for agent_name, agent in medical_system.agents.items():
        with st.expander(f"📚 {agent.specialization} Learning Details"):
            expertise = agent.get_expertise_summary()

            st.write(f"**Total Experience:** {expertise['total_queries']} queries processed")
            st.write(f"**Current Learning Rate:** {expertise['learning_rate']:.4f}")
            st.write(f"**Performance Trend:** {'Improving' if expertise['user_satisfaction'] > 3.5 else 'Learning'}")

            if expertise['top_expertise_areas']:
                st.write("**Top Expertise Areas:**")
                for area, score in expertise['top_expertise_areas'].items():
                    st.write(f"  • {area.title()}: {score:.2f}")

            # Learning memory (last few interactions)
            if hasattr(agent, 'learning_memory') and agent.learning_memory:
                st.write("**Recent Learning Events:**")
                for memory in agent.learning_memory[-3:]:
                    reward_emoji = "✅" if memory['reward'] > 0 else "❌" if memory['reward'] < 0 else "➡️"
                    st.write(f"  {reward_emoji} Reward: {memory['reward']:.2f} | Query: {memory['query'][:50]}...")

    if st.button("🔒 Close Analytics"):
        st.session_state.show_analytics = False
        st.rerun()

# Health Tips Section
st.markdown("---")
st.markdown("### 🌟 Daily Health Tips")

health_tips = [
    "💧 Stay hydrated: Aim for 8-10 glasses of water daily",
    "🚶 Take regular walks: Even 10 minutes can boost your mood",
    "😴 Maintain sleep hygiene: 7-9 hours of quality sleep is essential",
    "🥗 Eat colorful foods: Variety ensures you get different nutrients",
    "🧘 Practice mindfulness: Just 5 minutes of meditation can reduce stress",
    "📱 Take breaks from screens: Follow the 20-20-20 rule",
    "🤝 Stay connected: Social connections are vital for mental health",
    "☀️ Get sunlight: 15 minutes of sunlight helps with Vitamin D"
]

# Display a random tip
import random
daily_tip = random.choice(health_tips)
st.info(f"**💡 Today's Health Tip:** {daily_tip}")

# Emergency Resources Section
st.markdown("### 🚨 Emergency Resources")

emergency_col1, emergency_col2 = st.columns(2)

with emergency_col1:
    st.markdown("""
    **🚑 When to Seek Immediate Help:**
    - Chest pain or difficulty breathing
    - Severe allergic reactions
    - Loss of consciousness
    - Severe bleeding
    - Signs of stroke (FAST test)
    - Severe burns
    """)

with emergency_col2:
    st.markdown("""
    **📞 Emergency Contacts:**
    - Emergency Services: 911 (US), 112 (EU)
    - Poison Control: 1-800-222-1222 (US)
    - Mental Health Crisis: 988 (US)
    - Text HOME to 741741 (Crisis Text Line)

    **🏥 Find Nearest Hospital:**
    Use your maps app or call emergency services
    """)

# Data Persistence and Learning Enhancement
class DataPersistence:
    """Handle data persistence for learning and analytics"""

    def __init__(self, data_dir: str = "medical_ai_data"):
        self.data_dir = data_dir
        os.makedirs(data_dir, exist_ok=True)

    def save_conversation_data(self, system: MedicalConsultationSystem):
        """Save conversation data for future learning"""
        try:
            data_file = os.path.join(self.data_dir, f"conversations_{datetime.now().strftime('%Y%m%d')}.json")

            conversations = []
            for entry in system.conversation_data:
                conversations.append(asdict(entry))

            with open(data_file, 'w') as f:
                json.dump(conversations, f, indent=2)

            return True
        except Exception as e:
            st.error(f"Failed to save data: {str(e)}")
            return False

    def save_agent_knowledge(self, system: MedicalConsultationSystem):
        """Save agent learning data"""
        try:
            for agent_name, agent in system.agents.items():
                agent_file = os.path.join(self.data_dir, f"agent_{agent_name}_knowledge.pkl")

                agent_data = {
                    'knowledge_base': dict(agent.knowledge_base),
                    'performance': asdict(agent.performance),
                    'learning_memory': agent.learning_memory[-100:]  # Keep last 100 entries
                }

                with open(agent_file, 'wb') as f:
                    pickle.dump(agent_data, f)

            return True
        except Exception as e:
            st.error(f"Failed to save agent knowledge: {str(e)}")
            return False

    def load_agent_knowledge(self, system: MedicalConsultationSystem):
        """Load previously saved agent knowledge"""
        try:
            for agent_name, agent in system.agents.items():
                agent_file = os.path.join(self.data_dir, f"agent_{agent_name}_knowledge.pkl")

                if os.path.exists(agent_file):
                    with open(agent_file, 'rb') as f:
                        agent_data = pickle.load(f)

                    # Restore knowledge base
                    agent.knowledge_base = defaultdict(float, agent_data.get('knowledge_base', {}))

                    # Restore learning memory
                    agent.learning_memory = agent_data.get('learning_memory', [])

                    # Restore performance metrics
                    if 'performance' in agent_data:
                        perf_data = agent_data['performance']
                        agent.performance.total_queries = perf_data.get('total_queries', 0)
                        agent.performance.successful_responses = perf_data.get('successful_responses', 0)
                        agent.performance.average_confidence = perf_data.get('average_confidence', 0.0)
                        agent.performance.user_satisfaction = perf_data.get('user_satisfaction', 0.0)
                        agent.performance.learning_rate = perf_data.get('learning_rate', 0.01)

            return True
        except Exception as e:
            st.error(f"Failed to load agent knowledge: {str(e)}")
            return False

# Initialize data persistence
if 'data_persistence' not in st.session_state:
    st.session_state.data_persistence = DataPersistence()

# Load previous learning data when system starts
if 'knowledge_loaded' not in st.session_state:
    st.session_state.data_persistence.load_agent_knowledge(medical_system)
    st.session_state.knowledge_loaded = True

# Auto-save functionality
if len(st.session_state.chat_messages) > 0 and len(st.session_state.chat_messages) % 10 == 0:
    # Save data every 10 messages
    st.session_state.data_persistence.save_conversation_data(medical_system)
    st.session_state.data_persistence.save_agent_knowledge(medical_system)

# Footer with system information
st.markdown("---")
st.markdown("""
<div style="text-align: center; padding: 2rem; opacity: 0.8;">
    <p><strong>MedAssist v1.0</strong> | AI-Powered Medical Preconsultation System</p>
    <p>🤖 Evolutionary Learning Agents • 🔍 Real-time Medical Search • 💬 Intelligent Chat Interface</p>
    <p><small>⚠️ This system is for informational purposes only and is not a substitute for professional medical advice</small></p>
</div>
""", unsafe_allow_html=True)