# tima_chatbot / app_enhanced.py
"""
Enhanced Mental Health Chatbot with LangTrace Monitoring and Custom Gradio Blocks.
This implementation combines:
1. Custom Gradio Blocks interface with streaming responses
2. LangTrace monitoring for API calls and conversation tracking
3. Enhanced user experience with like/dislike functionality
4. Crisis intervention with local resources
Based on:
- Gradio Blocks documentation for custom chatbot interfaces
- LangTrace OpenAI integration for monitoring and observability
"""
import os
import time
import logging
import json
from datetime import datetime
from logging.handlers import RotatingFileHandler
from logging import StreamHandler
from typing import Generator, List, Dict, Tuple
import gradio as gr
import openai
import httpx
from tenacity import retry, retry_if_exception_type, stop_after_attempt, wait_exponential
# Initialize LangTrace BEFORE importing any LLM modules
try:
from langtrace_python_sdk import langtrace
# Initialize LangTrace if API key is available
LANGTRACE_API_KEY = os.getenv("LANGTRACE_API_KEY2")
if LANGTRACE_API_KEY:
langtrace.init(api_key=LANGTRACE_API_KEY)
print("βœ… LangTrace monitoring initialized")
else:
print("⚠️ LANGTRACE_API_KEY not found - monitoring disabled")
except ImportError:
print("⚠️ langtrace-python-sdk not installed - monitoring disabled")
from prompts import load_system_prompt
# Constants
MAX_RETRIES = 5
INITIAL_RETRY_DELAY = 1 # seconds
MAX_RETRY_DELAY = 60 # seconds
RATE_LIMIT_CALLS = 40  # Cerebras-recommended rate limit (informational; not enforced below)
RATE_LIMIT_PERIOD = 60  # seconds (1-minute window)
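# Minimal sketch (not wired in) of how RATE_LIMIT_CALLS / RATE_LIMIT_PERIOD could back
# a client-side throttle; the helper below is hypothetical and is not used elsewhere:
#
#   from time import monotonic, sleep
#
#   _call_times: List[float] = []
#
#   def throttle() -> None:
#       """Block until another call fits within RATE_LIMIT_CALLS per RATE_LIMIT_PERIOD."""
#       now = monotonic()
#       _call_times[:] = [t for t in _call_times if now - t < RATE_LIMIT_PERIOD]
#       if len(_call_times) >= RATE_LIMIT_CALLS:
#           sleep(RATE_LIMIT_PERIOD - (now - _call_times[0]))
#       _call_times.append(monotonic())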
# Simple logging setup with console and file output
def setup_logging():
"""Setup simple logging with console and file output."""
os.makedirs("logs", exist_ok=True)
# Create main logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.handlers.clear()
# Console output
console_handler = StreamHandler()
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(
logging.Formatter(
"%(asctime)s - %(levelname)s - %(message)s", datefmt="%H:%M:%S"
)
)
# File output with rotation
file_handler = RotatingFileHandler(
"logs/tima_app.log",
maxBytes=10 * 1024 * 1024, # 10MB
backupCount=3,
encoding="utf-8",
)
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(
logging.Formatter(
"%(asctime)s - %(levelname)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
)
)
logger.addHandler(console_handler)
logger.addHandler(file_handler)
return logger
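# With the handlers above, console lines look like "14:05:02 - INFO - ..." while the
# rotating file (logs/tima_app.log, ~10 MB x 3 backups) also records the date, e.g.:
#   2024-01-01 14:05:02 - INFO - ...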
# Initialize logging
logger = setup_logging()
# Load environment variables
TIMA_API_KEY = os.getenv("TIMA_API_KEY", os.getenv("CEREBRAS_API_KEY"))
if not TIMA_API_KEY:
    raise ValueError(
        "Neither TIMA_API_KEY nor CEREBRAS_API_KEY environment variable was found"
    )
class ConversationTracker:
"""Track conversation metrics and quality with enhanced logging."""
def __init__(self):
self.session_start = time.time()
self.session_id = f"session_{int(self.session_start)}"
self.message_count = 0
self.total_response_time = 0
self.user_feedback = {}
self.user_comments = {}
self.crisis_detected = False
self.conversation_log = []
# Log session start
logger.info(f"πŸš€ New conversation session started: {self.session_id}")
def log_message(
self,
user_message: str,
response_time: float,
tokens_used: int = 0,
bot_response: str = "",
):
"""Log message metrics."""
self.message_count += 1
self.total_response_time += response_time
# Simple logging
logger.info(
f"πŸ’¬ Message {self.message_count} | Response: {response_time:.2f}s | Tokens: {tokens_used} | User: {len(user_message)} chars | Bot: {len(bot_response)} chars"
)
def log_feedback(
self, message_index: int, feedback_type: str, message_content: str
):
"""Log user feedback."""
self.user_feedback[message_index] = feedback_type
# Simple logging
feedback_emoji = {"positive": "πŸ‘", "negative": "πŸ‘Ž", "neutral": "😐"}
logger.info(
f"πŸ“ Feedback {feedback_emoji.get(feedback_type, '❓')} | Message {message_index} | Type: {feedback_type} | Preview: {message_content[:50]}{'...' if len(message_content) > 50 else ''}"
)
def log_comment(self, message_index: int, comment: str, message_content: str):
"""Log user comments."""
self.user_comments[message_index] = comment
# Simple logging
logger.info(
f"πŸ’­ Comment | Message {message_index} | Length: {len(comment)} chars | Comment: {comment[:50]}{'...' if len(comment) > 50 else ''} | Context: {message_content[:30]}{'...' if len(message_content) > 30 else ''}"
)
def export_session_summary(self) -> Dict:
"""Export session summary."""
summary = {
"session_id": self.session_id,
"session_start": self.session_start,
"session_end": time.time(),
"total_duration": time.time() - self.session_start,
"message_count": self.message_count,
"crisis_detected": self.crisis_detected,
"feedback_count": len(self.user_feedback),
"comment_count": len(self.user_comments),
}
# Log session summary
logger.info(
f"πŸ“Š Session Summary | Duration: {summary['total_duration']:.1f}s | Messages: {summary['message_count']} | Feedback: {summary['feedback_count']}"
)
return summary
# Global conversation tracker
conversation_tracker = ConversationTracker()
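# Illustrative calls (the handlers below invoke these with real values):
#   conversation_tracker.log_message("I can't sleep", response_time=1.4, tokens_used=120, bot_response="...")
#   conversation_tracker.log_feedback(0, "positive", "I'm sorry to hear that...")
#   summary = conversation_tracker.export_session_summary()  # dict of counts and durations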
class APIError(Exception):
"""Base exception for API errors"""
pass
class RateLimitError(APIError):
"""Exception for rate limit errors"""
pass
class TokenLimitError(APIError):
"""Exception for token limit errors"""
pass
class InvalidRequestError(APIError):
"""Exception for invalid request errors"""
pass
class AuthenticationError(APIError):
"""Exception for authentication errors"""
pass
class ServerError(APIError):
"""Exception for server-side errors"""
pass
def detect_crisis_keywords(text: str) -> Dict[str, List[str]]:
"""Enhanced crisis detection with keyword categorization."""
text_lower = text.lower()
crisis_categories = {
"suicide": [
"kill myself",
"end it all",
"better off dead",
"suicide",
"kill me",
"want to die",
],
"self_harm": [
"cut myself",
"hurt myself",
"self harm",
"self-harm",
"harm myself",
],
"hopelessness": [
"no point",
"give up",
"hopeless",
"no hope",
"can't go on",
"nothing matters",
],
"severe_depression": [
"want to disappear",
"can't take it",
"too much pain",
"can't cope",
],
}
detected = {}
for category, keywords in crisis_categories.items():
found_keywords = [kw for kw in keywords if kw in text_lower]
if found_keywords:
detected[category] = found_keywords
return detected
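# Example (illustrative), based on the keyword lists above:
#   >>> detect_crisis_keywords("There's no point anymore, I just want to die")
#   {'suicide': ['want to die'], 'hopelessness': ['no point']}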
def get_enhanced_crisis_response(detected_keywords: Dict[str, List[str]]) -> str:
"""Generate crisis response based on detected keyword categories."""
response_parts = []
# Immediate validation
response_parts.append(
"I hear you, and I want you to know that reaching out shows incredible strength. "
"What you're experiencing sounds overwhelming, and you deserve immediate support."
)
# Immediate crisis resources with local Nairobi contacts
response_parts.append(
"\n\nπŸ†˜ **IMMEDIATE HELP AVAILABLE:**"
"\nβ€’ **Kenya Emergency Services:** 999 or 112"
"\nβ€’ **Befrienders Kenya:** +254 722 178 177 (24/7 emotional support)"
"\nβ€’ **AMREF Flying Doctors:** +254 699 395 395 (emergency medical)"
)
# Local mental health professionals for crisis intervention
response_parts.append(
"\n\nπŸ₯ **URGENT Mental Health Support in Nairobi:**"
"\n\n**Dr. Joseph Irungu Mwangi** - Psychiatrist"
"\n β€’ Location: Nelson Awori Center, Ralph Bunche Rd"
"\n β€’ Emergency Contact: 0715687388"
"\n\n**Dr. Judy Kamau** - Psychiatrist"
"\n β€’ Location: Scripture Union, Hurlingham"
"\n β€’ Contact: +254202712852"
"\n\n**Nairobi Hospital Mental Health Unit**"
"\n β€’ 24/7 psychiatric emergency services"
"\n β€’ Contact: +254 20 2845000"
)
# Safety planning
response_parts.append(
"\n\nπŸ’ͺ **RIGHT NOW - Let's create a safety plan:**"
"\n1. Can you remove any means of self-harm from your immediate area?"
"\n2. Is there someone you trust who can stay with you tonight?"
"\n3. Can you call one of the numbers above within the next hour?"
"\n\n**You matter. Your life has value. This pain you're feeling is temporary, even though it doesn't feel that way right now.**"
)
# Mark crisis in conversation tracker
conversation_tracker.crisis_detected = True
# Log crisis response generation
logger.warning(
f"πŸ†˜ Crisis response generated with {len(detected_keywords)} keyword categories"
)
return "".join(response_parts)
def handle_api_error(e: Exception) -> APIError:
"""Convert API exceptions to custom exception types with enhanced logging."""
error_msg = str(e).lower()
    # Log the original error before mapping it to a custom exception type
logger.error(f"API Error occurred: {type(e).__name__}: {str(e)}")
if "rate limit" in error_msg:
return RateLimitError("Rate limit exceeded. Please try again later.")
elif "token limit" in error_msg or "context_length_exceeded" in error_msg:
return TokenLimitError(
"Input too long. Please reduce the length of your message."
)
elif "authentication" in error_msg or "api key" in error_msg:
return AuthenticationError("Authentication failed. Please check your API key.")
elif "invalid request" in error_msg:
return InvalidRequestError("Invalid request. Please check your input.")
elif any(code in error_msg for code in ["502", "503", "504"]):
return ServerError("Server is temporarily unavailable. Please try again later.")
return APIError(f"API error occurred: {str(e)}")
# NOTE: tenacity's `retry=` expects a retry strategy such as retry_if_exception_type,
# not a bare predicate on the exception object. Also, because the decorated function
# is a generator, errors raised while the caller iterates the stream are not replayed
# by tenacity, so callers still need their own error handling.
@retry(
    stop=stop_after_attempt(MAX_RETRIES),
    wait=wait_exponential(
        multiplier=INITIAL_RETRY_DELAY, min=INITIAL_RETRY_DELAY, max=MAX_RETRY_DELAY
    ),
    retry=retry_if_exception_type((ServerError, RateLimitError)),
    reraise=True,
)
def create_chat_completion_with_monitoring(
messages: List[Dict[str, str]],
) -> Generator[str, None, None]:
"""Create chat completion with LangTrace monitoring and enhanced error handling."""
start_time = time.time()
try:
# Initialize OpenAI client with Cerebras endpoint
client = openai.OpenAI(
base_url="https://api.cerebras.ai/v1",
api_key=TIMA_API_KEY,
timeout=60.0,
max_retries=0, # We handle retries ourselves
)
logger.info("Starting monitored chat completion request")
# Check for crisis situations
user_message = messages[-1].get("content", "") if messages else ""
crisis_keywords = detect_crisis_keywords(user_message)
if crisis_keywords:
logger.warning(f"🚨 CRISIS DETECTED - Keywords: {crisis_keywords}")
conversation_tracker.crisis_detected = True
response = get_enhanced_crisis_response(crisis_keywords)
yield response
# Log crisis intervention to conversation tracker
response_time = time.time() - start_time
conversation_tracker.log_message(user_message, response_time, 0, response)
return
try:
# Create completion with LangTrace automatic monitoring
stream = client.chat.completions.create(
model="llama-3.3-70b",
messages=messages,
temperature=0.8,
max_tokens=500,
top_p=0.9,
stream=True,
)
except openai.APIError as e:
raise handle_api_error(e)
except httpx.TimeoutException:
raise ServerError("Request timed out. Please try again.")
except httpx.RequestError as e:
raise ServerError(f"Network error occurred: {str(e)}")
# Stream response with token counting
full_response = ""
token_count = 0
for chunk in stream:
if (
chunk.choices
and chunk.choices[0].delta
and chunk.choices[0].delta.content
):
content = chunk.choices[0].delta.content
full_response += content
token_count += len(content.split()) # Rough token estimation
yield content
# Log completion metrics with enhanced tracking
response_time = time.time() - start_time
conversation_tracker.log_message(
user_message, response_time, token_count, full_response
)
logger.info(
f"βœ… Chat completion successful - Response time: {response_time:.2f}s, Tokens: {token_count}, Response length: {len(full_response)} chars"
)
except APIError as e:
logger.error(f"API Error in monitored chat completion: {str(e)}")
raise
except Exception as e:
logger.error(
f"Unexpected error in monitored chat completion: {str(e)}", exc_info=True
)
raise APIError(f"Unexpected error occurred: {str(e)}")
# Gradio Blocks Implementation with Enhanced Features
def add_message(history: List[List[str]], message: str) -> Tuple[List[List[str]], str]:
"""Add user message to chat history and clear input field."""
if message.strip():
history.append([message, None])
logger.info(
f"πŸ“ User message | Length: {len(message)} chars | Preview: {message[:100]}{'...' if len(message) > 100 else ''}"
)
return history, ""
def bot_response(history: List[List[str]]) -> Generator[List[List[str]], None, None]:
"""Generate bot response with streaming and monitoring."""
if not history or history[-1][1] is not None:
return
user_message = history[-1][0]
# Format messages for API
system_prompt = load_system_prompt()
formatted_messages = [{"role": "system", "content": system_prompt}]
# Add conversation history
for user_msg, assistant_msg in history[:-1]:
if user_msg:
formatted_messages.append({"role": "user", "content": user_msg})
if assistant_msg:
formatted_messages.append({"role": "assistant", "content": assistant_msg})
# Add current user message
formatted_messages.append({"role": "user", "content": user_message})
# Stream response with monitoring
try:
bot_message = ""
for chunk in create_chat_completion_with_monitoring(formatted_messages):
bot_message += chunk
history[-1][1] = bot_message
yield history
logger.info(
f"πŸ€– Bot response completed | Length: {len(bot_message)} chars | Preview: {bot_message[:100]}{'...' if len(bot_message) > 100 else ''}"
)
except Exception as e:
logger.error(f"❌ Error in bot_response: {e}", exc_info=True)
        history[-1][1] = (
            "I apologize, but I encountered an error. Please try again. "
            "If this persists, consider reaching out to the mental health "
            "professionals I can recommend."
        )
yield history
def handle_feedback(feedback_type: str, history):
"""Handle user feedback with enhanced tracking."""
if not history:
return (
"<div style='color: #ff6b6b;'>⚠️ No messages to provide feedback on.</div>"
)
message_index = len(conversation_tracker.user_feedback)
last_message = history[-1][1] if history[-1][1] else "No bot response"
conversation_tracker.log_feedback(message_index, feedback_type, last_message)
feedback_messages = {
"positive": "<div style='color: #28a745;'>βœ… Thank you! Your positive feedback helps me improve.</div>",
"neutral": "<div style='color: #6c757d;'>😐 Thank you for the neutral feedback. I'll keep working to be more helpful.</div>",
"negative": "<div style='color: #dc3545;'>πŸ”„ Thank you for the feedback. I'll try to do better next time.</div>",
}
return feedback_messages.get(
feedback_type, "<div style='color: #6c757d;'>πŸ“ Feedback received.</div>"
)
def submit_comment_feedback(comment: str, history):
"""Handle comment submission."""
if not comment.strip():
return (
"<div style='color: #ff6b6b;'>⚠️ Please enter a comment before submitting.</div>",
"",
)
if not history:
return (
"<div style='color: #ff6b6b;'>⚠️ No messages to comment on.</div>",
comment,
)
message_index = len(conversation_tracker.user_comments)
last_message = history[-1][1] if history[-1][1] else "No bot response"
conversation_tracker.log_comment(message_index, comment.strip(), last_message)
return (
"<div style='color: #28a745;'>πŸ’¬ Thank you for your detailed feedback! Your comments help me improve.</div>",
"",
)
def create_enhanced_css() -> str:
"""Enhanced CSS with better visual design."""
return """
.main-container {
max-width: 1200px !important;
margin: 0 auto !important;
}
.chatbot {
border-radius: 15px !important;
border: 2px solid #e0e0e0 !important;
box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1) !important;
background: linear-gradient(135deg, #f5f7fa 0%, #c3cfe2 100%) !important;
}
.chatbot .message.user {
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
color: white !important;
border-radius: 18px 18px 5px 18px !important;
margin: 10px 0 !important;
padding: 12px 16px !important;
}
.chatbot .message.bot {
background: linear-gradient(135deg, #f093fb 0%, #f5576c 100%) !important;
color: white !important;
border-radius: 18px 18px 18px 5px !important;
margin: 10px 0 !important;
padding: 12px 16px !important;
}
.input-container {
border-radius: 25px !important;
border: 2px solid #e0e0e0 !important;
background: white !important;
padding: 5px !important;
}
.submit-btn {
border-radius: 20px !important;
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
border: none !important;
color: white !important;
font-weight: bold !important;
transition: all 0.3s ease !important;
}
.submit-btn:hover {
transform: translateY(-2px) !important;
box-shadow: 0 4px 8px rgba(0, 0, 0, 0.2) !important;
}
.sidebar {
background: linear-gradient(135deg, #f8f9fa 0%, #e9ecef 100%) !important;
border-radius: 15px !important;
padding: 20px !important;
margin: 10px !important;
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1) !important;
}
.example-btn {
margin: 5px !important;
border-radius: 15px !important;
border: 1px solid #ddd !important;
background: linear-gradient(135deg, #ffffff 0%, #f8f9fa 100%) !important;
color: #333 !important;
transition: all 0.3s ease !important;
font-size: 0.9em !important;
}
.example-btn:hover {
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
color: white !important;
transform: translateY(-2px) !important;
box-shadow: 0 4px 8px rgba(0, 0, 0, 0.15) !important;
}
.crisis-alert {
background: linear-gradient(135deg, #ff9a9e 0%, #fecfef 100%) !important;
border: 2px solid #ff6b6b !important;
border-radius: 10px !important;
padding: 15px !important;
margin: 10px 0 !important;
}
.title {
text-align: center !important;
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
-webkit-background-clip: text !important;
-webkit-text-fill-color: transparent !important;
font-size: 2.5em !important;
font-weight: bold !important;
margin-bottom: 10px !important;
}
.description {
text-align: center !important;
color: #666 !important;
font-size: 1.2em !important;
margin-bottom: 20px !important;
padding: 0 20px !important;
}
.feedback-section {
background: linear-gradient(135deg, #f8f9fa 0%, #e9ecef 100%) !important;
border-radius: 10px !important;
padding: 15px !important;
margin: 10px 0 !important;
border: 1px solid #dee2e6 !important;
}
.feedback-btn {
margin: 3px !important;
border-radius: 20px !important;
transition: all 0.3s ease !important;
font-size: 0.9em !important;
min-width: 80px !important;
}
.feedback-btn:hover {
transform: translateY(-2px) !important;
box-shadow: 0 4px 8px rgba(0, 0, 0, 0.15) !important;
}
.comment-box {
border-radius: 10px !important;
border: 2px solid #e9ecef !important;
margin-top: 10px !important;
}
.comment-box:focus {
border-color: #667eea !important;
box-shadow: 0 0 0 3px rgba(102, 126, 234, 0.1) !important;
}
"""
def create_enhanced_interface():
"""Create enhanced Gradio Blocks interface with monitoring."""
with gr.Blocks(
theme=gr.themes.Soft(
primary_hue="blue",
secondary_hue="pink",
neutral_hue="gray",
),
css=create_enhanced_css(),
title="Tima - Enhanced Mental Health Companion",
) as demo:
# Enhanced header
gr.HTML(
"""
<div class="main-container">
<div class="title">
🌟 Tima - Your Enhanced Mental Health Companion
</div>
<div class="description">
A safe, monitored space for mental health support with real-time assistance.<br>
<em>Professional-grade monitoring β€’ Crisis intervention β€’ Local resources</em><br>
<small>⚠️ This is not a replacement for professional medical advice</small>
</div>
</div>
"""
)
with gr.Row():
# Enhanced sidebar
with gr.Column(scale=1):
gr.HTML(
"""
<div class="sidebar">
<h3>🀝 Enhanced Features:</h3>
<ul>
<li>βœ… Real-time monitoring</li>
<li>πŸ†˜ Crisis detection & intervention</li>
<li>πŸ₯ Local Nairobi resources</li>
<li>πŸ’¬ Conversation quality tracking</li>
<li>🎯 Personalized responses</li>
<li>πŸ“Š Session analytics</li>
</ul>
<div class="crisis-alert">
<h3>⚑ Emergency Resources</h3>
<p><strong>Kenya Emergency:</strong> 999/112</p>
<p><strong>Befrienders:</strong> +254 722 178 177</p>
<p><strong>Crisis Support:</strong> Available 24/7</p>
</div>
</div>
"""
)
# Enhanced example prompts
with gr.Column():
gr.HTML(
"<h3 style='text-align: center; margin: 20px 0;'>πŸ’­ Try these examples:</h3>"
)
example_prompts = [
"I feel like giving up on everything",
"I'm feeling really anxious and can't sleep",
"I've been feeling hopeless for weeks",
"I need help finding a therapist in Nairobi",
"Can you help me with my depression?",
"I'm having thoughts of self-harm",
]
example_buttons = []
for i, prompt in enumerate(example_prompts):
btn = gr.Button(
prompt,
elem_classes=["example-btn"],
size="sm",
elem_id=f"example_{i}",
)
example_buttons.append(btn)
# Main chat area
with gr.Column(scale=3):
chatbot = gr.Chatbot(
[],
elem_id="chatbot",
bubble_full_width=False,
height=450,
elem_classes=["chatbot"],
show_label=False,
avatar_images=["πŸ§‘β€πŸ’»", "🌟"], # User and bot avatars
)
# Feedback section
with gr.Row():
with gr.Column(elem_classes=["feedback-section"]):
gr.HTML(
"<h4 style='margin: 10px 0 5px 0;'>πŸ’­ How was the last response?</h4>"
)
with gr.Row():
positive_btn = gr.Button(
"πŸ‘ Helpful",
variant="secondary",
size="sm",
elem_classes=["feedback-btn"],
)
neutral_btn = gr.Button(
"😐 Neutral",
variant="secondary",
size="sm",
elem_classes=["feedback-btn"],
)
negative_btn = gr.Button(
"πŸ‘Ž Not Helpful",
variant="secondary",
size="sm",
elem_classes=["feedback-btn"],
)
comment_box = gr.Textbox(
placeholder="Share your thoughts or suggestions (optional)...",
lines=2,
max_lines=3,
label="Comments",
show_label=True,
elem_classes=["comment-box"],
)
submit_comment_btn = gr.Button(
"πŸ’¬ Submit Feedback",
variant="primary",
size="sm",
elem_classes=["feedback-btn"],
)
# Enhanced input area
with gr.Row():
msg = gr.Textbox(
placeholder="Share what's on your mind... I'm here to listen and support you.",
container=False,
scale=7,
elem_classes=["input-container"],
lines=2,
max_lines=5,
)
submit_btn = gr.Button(
"Send πŸ’¬",
variant="primary",
scale=1,
elem_classes=["submit-btn"],
)
# Enhanced control buttons
with gr.Row():
clear_btn = gr.Button("πŸ—‘οΈ Clear Chat", variant="secondary")
regenerate_btn = gr.Button(
"πŸ”„ Regenerate Response", variant="secondary"
)
export_btn = gr.Button(
"πŸ“Š Export Conversation", variant="secondary"
)
# Feedback status display
feedback_status = gr.HTML("")
# Event handlers with enhanced functionality
def submit_message(history, message):
"""Handle message submission with validation."""
if not message.strip():
return history, message
return add_message(history, message)
# Message submission (Enter key and button)
msg.submit(submit_message, [chatbot, msg], [chatbot, msg], queue=False).then(
bot_response, [chatbot], [chatbot]
).then(lambda: gr.Textbox(interactive=True), None, [msg])
submit_btn.click(
submit_message, [chatbot, msg], [chatbot, msg], queue=False
).then(bot_response, [chatbot], [chatbot]).then(
lambda: gr.Textbox(interactive=True), None, [msg]
)
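        # Both submit paths above follow the same chain:
        #   1. submit_message appends the user turn and clears the textbox (queue=False for an instant UI update)
        #   2. bot_response streams the assistant turn into the last history entry
        #   3. the final .then() returns an interactive Textbox so the user can type again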
# Example button handlers
for i, (btn, prompt) in enumerate(zip(example_buttons, example_prompts)):
btn.click(lambda prompt=prompt: prompt, None, [msg])
# Clear chat
clear_btn.click(lambda: ([], ""), None, [chatbot, msg], queue=False)
# Regenerate response
def regenerate_response(history):
if history and history[-1][1] is not None:
history[-1][1] = None
return history
regenerate_btn.click(
regenerate_response, [chatbot], [chatbot], queue=False
).then(bot_response, [chatbot], [chatbot])
# Export conversation
def export_conversation(history):
"""Export conversation with enhanced metadata and logging."""
if not history:
logger.warning("πŸ“€ Export attempted with no conversation data")
return "No conversation to export."
# Get comprehensive session summary
session_summary = conversation_tracker.export_session_summary()
export_data = {
**session_summary,
"user_feedback": conversation_tracker.user_feedback,
"user_comments": conversation_tracker.user_comments,
"conversation_history": history,
"export_timestamp": time.time(),
"export_datetime": datetime.now().isoformat(),
}
filename = f"exports/tima_session_{conversation_tracker.session_id}.json"
os.makedirs("exports", exist_ok=True)
with open(filename, "w", encoding="utf-8") as f:
json.dump(export_data, f, indent=2, ensure_ascii=False)
logger.info(
f"πŸ“€ Conversation exported to {filename} | Messages: {len(history)} | Duration: {export_data['total_duration']:.1f}s"
)
return f"βœ… Conversation exported to {filename}"
        export_btn.click(export_conversation, [chatbot], [feedback_status])  # surface export status in the UI
# Feedback handling
positive_btn.click(
lambda history: handle_feedback("positive", history),
[chatbot],
[feedback_status],
)
neutral_btn.click(
lambda history: handle_feedback("neutral", history),
[chatbot],
[feedback_status],
)
negative_btn.click(
lambda history: handle_feedback("negative", history),
[chatbot],
[feedback_status],
)
submit_comment_btn.click(
submit_comment_feedback,
[comment_box, chatbot],
[feedback_status, comment_box],
)
# Enhanced footer with monitoring info
gr.HTML(
"""
<div style="text-align: center; margin-top: 30px; padding: 20px; background: linear-gradient(135deg, #f8f9fa 0%, #e9ecef 100%); border-radius: 15px; box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);">
<h3 style="color: #495057; margin-bottom: 15px;">πŸ›‘οΈ Enhanced Safety & Support</h3>
<p style="margin: 5px 0;"><strong>Real-time monitoring</strong> ensures quality responses and crisis detection</p>
<p style="margin: 5px 0;"><strong>Local resources</strong> connect you with Nairobi mental health professionals</p>
<p style="margin: 15px 0; padding: 10px; background: #fff3cd; border-radius: 8px; border-left: 4px solid #ffc107;">
<strong>πŸ†˜ Crisis Support:</strong> Kenya Emergency (999/112) | Befrienders (+254 722 178 177)
</p>
<p style="font-size: 0.9em; color: #6c757d;">
Powered by LangTrace monitoring β€’ Enhanced with Gradio Blocks β€’ Crisis intervention enabled
</p>
</div>
"""
)
return demo
def main():
"""Main function to launch the enhanced interface with comprehensive logging."""
# Log application startup
logger.info("=" * 60)
logger.info("πŸš€ Starting Enhanced Tima Mental Health Companion")
logger.info("=" * 60)
logger.info(f"πŸ“… Started at: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
logger.info(
"πŸ”§ Features: LangTrace monitoring, Crisis detection, Enhanced feedback"
)
logger.info("πŸ“ Log file: logs/tima_app.log")
# Ensure all directories exist
for directory in ["logs", "exports"]:
os.makedirs(directory, exist_ok=True)
logger.info(f"πŸ“‚ Directory ensured: {directory}/")
try:
demo = create_enhanced_interface()
# Enable queuing for streaming responses
demo.queue(
max_size=50, # Increased queue size
default_concurrency_limit=20, # Increased concurrency
)
logger.info("🌐 Launching web interface...")
logger.info("πŸ”— Server: http://0.0.0.0:7863")
logger.info("πŸ›‘οΈ Monitoring: Enabled")
logger.info("πŸ’Ύ Auto-logging: Enabled")
# Launch with enhanced settings
demo.launch(
server_name="0.0.0.0",
server_port=7863, # Different port to avoid conflicts
share=False,
max_threads=32, # Increased for better performance
show_error=True,
inbrowser=True,
show_api=True,
enable_monitoring=True,
auth=None, # Add authentication if needed
favicon_path=None, # Add custom favicon if available
)
except Exception as e:
logger.error(f"❌ Failed to start application: {e}", exc_info=True)
raise
finally:
logger.info("πŸ›‘ Application shutdown")
logger.info("=" * 60)
if __name__ == "__main__":
main()