Upload folder using huggingface_hub
Changed files:
- nbs/01_clinical_tutor.ipynb (+14 -9)
- nbs/02_learning_interface.ipynb (+18 -15)
- wardbuddy/clinical_tutor.py (+15 -9)
- wardbuddy/learning_interface.py (+18 -15)
nbs/01_clinical_tutor.ipynb
CHANGED
@@ -230,11 +230,11 @@
 " Returns:\n",
 " list: Generated SMART goals\n",
 " \"\"\"\n",
-" prompt = f\"\"\"Generate {num_goals}
+" prompt = f\"\"\"Generate {num_goals} specific learning goals for a medical trainee in {specialty} ({setting}).\n",
 " \n",
 " For each goal:\n",
 " 1. Select an appropriate category from: {', '.join(cat.value for cat in LearningCategory)}\n",
-" 2. Write a specific, measurable goal that builds clinical competence\n",
+" 2. Write a specific, measurable goal that builds clinical competence. Write the specific goal only for the next case discussion (i.e. it is not longitudinal across several cases). Also, this goal needs to be able to be evaluated by you - there is limited access to doctors to verify facts. \n",
 " \n",
 " Format as JSON array with fields:\n",
 " - category: Learning category name\n",
@@ -244,11 +244,11 @@
 " [\n",
 " {{\n",
 " \"category\": \"Clinical Reasoning\",\n",
-" \"smart_version\": \"
+" \"smart_version\": \"Identify a comprehensive list of differential diagnoses for a patient with acute shortness of breath.\"\n",
 " }},\n",
 " {{\n",
 " \"category\": \"Management\",\n",
-" \"smart_version\": \"
+" \"smart_version\": \"Outline a detailed management plan for a patient with heart failure.\"\n",
 " }}\n",
 " ]\n",
 " \n",
@@ -322,12 +322,12 @@
 " Returns:\n",
 " SmartGoal: Generated SMART goal\n",
 " \"\"\"\n",
-" prompt = f\"\"\"Convert this learning goal into a
+" prompt = f\"\"\"Convert this learning goal into a more specific goal (Specific, Measurable, Achievable, Relevant) for a patient in {specialty} ({setting}):\n",
 "\n",
 "\"{user_input}\"\n",
 "\n",
 "1. Select the most appropriate category from: {', '.join(cat.value for cat in LearningCategory)}\n",
-"2. Rewrite as a
+"2. Rewrite as a specific goal specific to {setting} in {specialty}\n",
 "\n",
 "Format as JSON with fields:\n",
 "- category: Learning category name\n",
@@ -490,9 +490,14 @@
 " \"\"\"\n",
 " return self.current_discussion\n",
 " \n",
-" def clear_discussion(
-" \"\"\"Clear
-"
+" def clear_discussion() -> Tuple[List, str, Dict]:\n",
+" \"\"\"Clear chat history.\"\"\"\n",
+" return [], \"\", {\n",
+" \"discussion_active\": False,\n",
+" \"suggested_goals\": [],\n",
+" \"discussion_start\": None,\n",
+" \"last_message\": None\n",
+" }\n"
 ]
 },
 {
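The new `generate_smart_goals` prompt (mirrored in `wardbuddy/clinical_tutor.py` further down) now asks the model for a JSON array of objects with `category` and `smart_version` fields, scoped to a single upcoming case discussion. As an illustration only, not code from this commit, a response shaped like that could be parsed along these lines; `GoalSuggestion` and `parse_goal_response` are hypothetical names, and the repository's own `SmartGoal` model may differ:

```python
import json
from dataclasses import dataclass
from typing import List

@dataclass
class GoalSuggestion:
    """Hypothetical container for one suggested goal."""
    category: str
    smart_version: str

def parse_goal_response(raw: str) -> List[GoalSuggestion]:
    """Parse the model's JSON array, tolerating a markdown code fence."""
    text = raw.strip().strip("`")      # drop any surrounding fence marks
    if text.lower().startswith("json"):
        text = text[4:]                # drop a leading "json" language tag
    items = json.loads(text)
    return [GoalSuggestion(item["category"], item["smart_version"]) for item in items]

# Example shaped like the prompt requests.
sample = """
[
  {"category": "Clinical Reasoning",
   "smart_version": "Identify a comprehensive list of differential diagnoses for a patient with acute shortness of breath."}
]
"""
print(parse_goal_response(sample))
```

Tolerating a fenced-JSON reply is a small robustness measure, since chat models often wrap structured output that way.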
nbs/02_learning_interface.ipynb
CHANGED
@@ -206,10 +206,10 @@
 " async def process_chat(\n",
 " self,\n",
 " message: str,\n",
-" history: List[List[str]],\n",
+" history: List[List[Dict[str, str]]],\n",
 " state: Dict[str, Any]\n",
-" ) -> AsyncGenerator[Tuple[List[List[str]], str, Dict[str, Any]], None]:\n",
-" \"\"\"Process chat messages with streaming.\"\"\"\n",
+" ) -> AsyncGenerator[Tuple[List[List[Dict[str, str]]], str, Dict[str, Any]], None]:\n",
+" \"\"\"Process chat messages with streaming and proper message format.\"\"\"\n",
 " try:\n",
 " if not message.strip():\n",
 " yield history, \"\", state\n",
@@ -224,8 +224,10 @@
 " if history is None:\n",
 " history = []\n",
 " \n",
-" # Add user message\n",
-"
+" # Add user message in correct format\n",
+" user_message = {\"role\": \"user\", \"content\": message}\n",
+" assistant_message = {\"role\": \"assistant\", \"content\": \"\"}\n",
+" history.append([user_message, assistant_message])\n",
 " \n",
 " # Update display\n",
 " yield history, \"\", state\n",
@@ -234,7 +236,7 @@
 " current_response = \"\"\n",
 " async for token in self.tutor.discuss_case(message):\n",
 " current_response += token\n",
-" history[-1][1] = current_response\n",
+" history[-1][1][\"content\"] = current_response\n",
 " yield history, \"\", state\n",
 " \n",
 " state[\"last_message\"] = datetime.now().isoformat()\n",
@@ -244,9 +246,9 @@
 " logger.error(f\"Error in chat: {str(e)}\")\n",
 " if history is None:\n",
 " history = []\n",
-"
+" error_message = {\"role\": \"assistant\", \"content\": \"I apologize, but I encountered an error. Please try again.\"}\n",
+" history.append([{\"role\": \"user\", \"content\": message}, error_message])\n",
 " yield history, \"\", state\n",
-"\n",
 " def _update_discussion_status(self, state: Dict[str, Any]) -> str:\n",
 " \"\"\"Update discussion status display.\"\"\"\n",
 " try:\n",
@@ -475,12 +477,6 @@
 " return self._update_displays(state)\n",
 " return [[], [], [], \"No active learning goal\"]\n",
 "\n",
-" def process_audio(audio: np.ndarray) -> str:\n",
-" \"\"\"Convert audio to text.\"\"\"\n",
-" if audio is None:\n",
-" return \"\"\n",
-" return \"Audio transcription would appear here\"\n",
-"\n",
 " def clear_discussion() -> Tuple[List, str, Dict]:\n",
 " \"\"\"Clear chat history.\"\"\"\n",
 " return [], \"\", {\n",
@@ -489,7 +485,14 @@
 " \"discussion_start\": None,\n",
 " \"last_message\": None\n",
 " }\n",
-"\n",
+" \n",
+" def process_audio(audio: np.ndarray) -> str:\n",
+" \"\"\"Convert audio to text.\"\"\"\n",
+" if audio is None:\n",
+" return \"\"\n",
+" # Add your audio processing logic here\n",
+" return \"Audio transcription would appear here\"\n",
+" \n",
 " # Wire up events\n",
 " specialty.change(\n",
 " update_context,\n",
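The notebook's `process_chat` now keeps each turn as a pair of `{"role": ..., "content": ...}` dicts and streams tokens into the assistant entry via `history[-1][1]["content"]`. A self-contained sketch of that streaming pattern, with a stubbed token source standing in for `tutor.discuss_case` (all names below are illustrative, not from the repository):

```python
import asyncio
from typing import AsyncGenerator, Dict, List, Tuple

async def fake_token_stream(message: str) -> AsyncGenerator[str, None]:
    """Stand-in for tutor.discuss_case: yields a canned reply token by token."""
    for token in ["Let's ", "work ", "through ", "this ", "case."]:
        await asyncio.sleep(0)  # hand control back to the event loop
        yield token

async def process_chat(
    message: str,
    history: List[List[Dict[str, str]]],
) -> AsyncGenerator[Tuple[List[List[Dict[str, str]]], str], None]:
    """Append the user turn, then stream the assistant turn into history."""
    history = history or []
    history.append([
        {"role": "user", "content": message},
        {"role": "assistant", "content": ""},
    ])
    yield history, ""  # show the user message immediately
    response = ""
    async for token in fake_token_stream(message):
        response += token
        history[-1][1]["content"] = response
        yield history, ""  # re-yield so the UI can re-render the partial reply

async def main() -> None:
    history: List[List[Dict[str, str]]] = []
    async for history, _ in process_chat("65M with acute shortness of breath", history):
        pass
    print(history[-1][1]["content"])  # -> "Let's work through this case."

asyncio.run(main())
```

Worth noting: recent Gradio `Chatbot` components expect either `[user, assistant]` string pairs or, with `type="messages"`, a flat list of role/content dicts, so the pair-of-dicts layout used in this commit assumes the consuming component accepts that shape.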
wardbuddy/clinical_tutor.py
CHANGED
@@ -138,11 +138,11 @@ class ClinicalTutor:
 Returns:
 list: Generated SMART goals
 """
-prompt = f"""Generate {num_goals}
+prompt = f"""Generate {num_goals} specific learning goals for a medical trainee in {specialty} ({setting}).
 
 For each goal:
 1. Select an appropriate category from: {', '.join(cat.value for cat in LearningCategory)}
-2. Write a specific, measurable goal that builds clinical competence
+2. Write a specific, measurable goal that builds clinical competence. Write the specific goal only for the next case discussion (i.e. it is not longitudinal across several cases). Also, this goal needs to be able to be evaluated by you - there is limited access to doctors to verify facts.
 
 Format as JSON array with fields:
 - category: Learning category name
@@ -152,11 +152,11 @@ class ClinicalTutor:
 [
 {{
 "category": "Clinical Reasoning",
-"smart_version": "
+"smart_version": "Identify a comprehensive list of differential diagnoses for a patient with acute shortness of breath."
 }},
 {{
 "category": "Management",
-"smart_version": "
+"smart_version": "Outline a detailed management plan for a patient with heart failure."
 }}
 ]
 
@@ -230,12 +230,12 @@ class ClinicalTutor:
 Returns:
 SmartGoal: Generated SMART goal
 """
-prompt = f"""Convert this learning goal into a
+prompt = f"""Convert this learning goal into a more specific goal (Specific, Measurable, Achievable, Relevant) for a patient in {specialty} ({setting}):
 
 "{user_input}"
 
 1. Select the most appropriate category from: {', '.join(cat.value for cat in LearningCategory)}
-2. Rewrite as a
+2. Rewrite as a specific goal specific to {setting} in {specialty}
 
 Format as JSON with fields:
 - category: Learning category name
@@ -398,6 +398,12 @@ Format as JSON with fields:
 """
 return self.current_discussion
 
-def clear_discussion(
-"""Clear
-
+def clear_discussion() -> Tuple[List, str, Dict]:
+"""Clear chat history."""
+return [], "", {
+"discussion_active": False,
+"suggested_goals": [],
+"discussion_start": None,
+"last_message": None
+}
+
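`clear_discussion`, added here and in the notebook above, returns a `(history, message, state)` triple whose state dict matches the initial discussion state. A small check of that contract follows; the button wiring in the comment is an assumption about the surrounding Gradio code, not something this commit contains:

```python
from typing import Dict, List, Tuple

def clear_discussion() -> Tuple[List, str, Dict]:
    """Reset chat history, the message box, and the discussion state."""
    return [], "", {
        "discussion_active": False,
        "suggested_goals": [],
        "discussion_start": None,
        "last_message": None,
    }

history, message, state = clear_discussion()
assert history == [] and message == ""
assert state["discussion_active"] is False
assert state["suggested_goals"] == []
# Hypothetical wiring: clear_btn.click(clear_discussion, outputs=[chatbot, msg_box, state])
print("discussion state reset:", state)
```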
wardbuddy/learning_interface.py
CHANGED
@@ -117,10 +117,10 @@ class LearningInterface:
 async def process_chat(
 self,
 message: str,
-history: List[List[str]],
+history: List[List[Dict[str, str]]],
 state: Dict[str, Any]
-) -> AsyncGenerator[Tuple[List[List[str]], str, Dict[str, Any]], None]:
-"""Process chat messages with streaming."""
+) -> AsyncGenerator[Tuple[List[List[Dict[str, str]]], str, Dict[str, Any]], None]:
+"""Process chat messages with streaming and proper message format."""
 try:
 if not message.strip():
 yield history, "", state
@@ -135,8 +135,10 @@ class LearningInterface:
 if history is None:
 history = []
 
-# Add user message
-
+# Add user message in correct format
+user_message = {"role": "user", "content": message}
+assistant_message = {"role": "assistant", "content": ""}
+history.append([user_message, assistant_message])
 
 # Update display
 yield history, "", state
@@ -145,7 +147,7 @@ class LearningInterface:
 current_response = ""
 async for token in self.tutor.discuss_case(message):
 current_response += token
-history[-1][1] = current_response
+history[-1][1]["content"] = current_response
 yield history, "", state
 
 state["last_message"] = datetime.now().isoformat()
@@ -155,9 +157,9 @@ class LearningInterface:
 logger.error(f"Error in chat: {str(e)}")
 if history is None:
 history = []
-
+error_message = {"role": "assistant", "content": "I apologize, but I encountered an error. Please try again."}
+history.append([{"role": "user", "content": message}, error_message])
 yield history, "", state
-
 def _update_discussion_status(self, state: Dict[str, Any]) -> str:
 """Update discussion status display."""
 try:
@@ -386,12 +388,6 @@ class LearningInterface:
 return self._update_displays(state)
 return [[], [], [], "No active learning goal"]
 
-def process_audio(audio: np.ndarray) -> str:
-"""Convert audio to text."""
-if audio is None:
-return ""
-return "Audio transcription would appear here"
-
 def clear_discussion() -> Tuple[List, str, Dict]:
 """Clear chat history."""
 return [], "", {
@@ -400,7 +396,14 @@ class LearningInterface:
 "discussion_start": None,
 "last_message": None
 }
-
+
+def process_audio(audio: np.ndarray) -> str:
+"""Convert audio to text."""
+if audio is None:
+return ""
+# Add your audio processing logic here
+return "Audio transcription would appear here"
+
 # Wire up events
 specialty.change(
 update_context,
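`process_audio` is re-added after `clear_discussion` but still returns a placeholder string. As a sketch of one way the stub could eventually be filled in (not part of this commit), assuming the `openai-whisper` package and 16 kHz mono input:

```python
import numpy as np
import whisper  # assumption: pip install openai-whisper

_model = whisper.load_model("base")  # loaded once, reused across calls

def process_audio(audio: np.ndarray) -> str:
    """Convert audio to text with a local Whisper model (illustrative only)."""
    if audio is None or audio.size == 0:
        return ""
    # Whisper expects float32 PCM at 16 kHz; normalize int16 microphone data.
    if audio.dtype != np.float32:
        audio = audio.astype(np.float32) / 32768.0
    result = _model.transcribe(audio)
    return result["text"].strip()
```

Gradio's microphone input typically arrives as a `(sample_rate, np.ndarray)` tuple, so a real implementation would also unpack the tuple and resample before transcribing.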