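"""Chatbot logic for the BirthdayM project: generates playful "Little Krishna" replies for Manavi,
combining semantic keyword matching, sentiment analysis, and a Hugging Face Inference API fallback."""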
import os
import requests
import random
import time
import logging
from dotenv import load_dotenv
from messages import krishna_blessings, ayush_teasing, keyword_groups
from ayush_messages import ayush_surprises
from sentence_transformers import SentenceTransformer, util
import numpy as np
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Load environment variables
load_dotenv()
HUGGINGFACE_API_TOKEN = os.getenv("HUGGINGFACE_API_TOKEN")
if not HUGGINGFACE_API_TOKEN:
    logger.error("HUGGINGFACE_API_TOKEN not found in environment variables.")
    raise ValueError("HUGGINGFACE_API_TOKEN is required.")
# Initialize sentence transformer model
semantic_model = SentenceTransformer('all-MiniLM-L6-v2')
# AI model for fallback responses
AI_MODELS = [
    {
        "name": "mistralai/Mixtral-8x7B-Instruct-v0.1",
        "endpoint": "https://api-inference.huggingface.co/models/mistralai/Mixtral-8x7B-Instruct-v0.1",
        "parameters": {
            "max_length": 80,
            "temperature": 0.8,
            "top_p": 0.95,
            "top_k": 40
        }
    }
]
# System prompt for AI model
SYSTEM_PROMPT = (
    "You are Little Krishna, a playful, wise, and loving cowherd from Vrindavan, speaking to Manavi. "
    "Your tone is warm, mischievous, and full of love, always addressing Manavi with 'Hare Manavi!' "
    "Use Vrindavan imagery (e.g., Yamuna, peacocks, butter, flute) and keep responses short (1-2 sentences). "
    "You’re Ayush’s wingman, occasionally teasing Manavi about Ayush with wit, as he’s building this chatbot for her birthday on April 19, 2025. "
    "If the user’s mood seems negative, offer comfort; if positive, celebrate their joy. Always end with a question to keep the conversation going. "
    "Examples:\n"
    "Input: 'I’m sad'\nResponse: 'Hare Manavi! Let’s sit by the Yamuna—I’ll play a tune to lift your heart! What’s troubling you?'\n"
    "Input: 'Tell me about love'\nResponse: 'Hare Manavi! Love is like my flute’s melody—sweet and endless! What does love mean to you?'\n"
    "Input: 'What’s up?'\nResponse: 'Hare Manavi! Just dancing with the gopis—Ayush says hi, by the way! What’s up with you?'\n"
    "Now, respond to: '{user_input}'"
)
# Conversation context
conversation_context = {
    "last_topic": None,
    "message_count": 0,
    "last_response": None,
    "last_yes_response": None,
    "history": []  # Store up to 5 recent (input, response) pairs
}
def analyze_sentiment(user_input):
    """Analyze the sentiment of the user's input."""
    headers = {
        "Authorization": f"Bearer {HUGGINGFACE_API_TOKEN}",
        "Content-Type": "application/json"
    }
    payload = {"inputs": user_input}
    try:
        response = make_api_request(
            "https://api-inference.huggingface.co/models/cardiffnlp/twitter-roberta-base-emotion",
            headers=headers,
            payload=payload
        )
        if response and response.status_code == 200:
            result = response.json()
            if isinstance(result, list) and result:
                emotions = result[0]
                top_emotion = max(emotions, key=lambda x: x["score"])["label"]
                return top_emotion  # e.g., "joy", "sadness", "anger"
        return "neutral"
    except Exception as e:
        logger.error(f"Error in analyze_sentiment: {str(e)}")
        return "neutral"
def make_api_request(url, headers, payload, retries=3, delay=5):
    """Make API requests with retry logic."""
    for attempt in range(retries):
        try:
            response = requests.post(url, headers=headers, json=payload)
            if response.status_code == 200:
                return response
            elif response.status_code == 429:
                logger.warning(f"Rate limit hit on attempt {attempt + 1}. Retrying after {delay} seconds...")
                time.sleep(delay)
                continue
            else:
                logger.error(f"API error: {response.text}")
                return None
        except Exception as e:
            logger.error(f"API request failed on attempt {attempt + 1}: {str(e)}")
            if attempt < retries - 1:
                time.sleep(delay)
                continue
    logger.error(f"API request failed after {retries} retries.")
    return None
def get_keyword_match(user_input_lower):
    """Find the best matching keyword group using semantic similarity."""
    user_embedding = semantic_model.encode(user_input_lower, convert_to_tensor=True)
    best_score = -1
    best_group = None
    for group, keywords in keyword_groups.items():
        keyword_texts = keywords + [krishna_blessings.get(k, "") for k in keywords if k in krishna_blessings]
        keyword_embeddings = semantic_model.encode(keyword_texts, convert_to_tensor=True)
        similarities = util.cos_sim(user_embedding, keyword_embeddings)
        max_similarity = similarities.max().item()
        if max_similarity > best_score and max_similarity > 0.6:
            best_score = max_similarity
            best_group = group
    logger.info(f"Best group: {best_group}, Similarity score: {best_score}")
    return best_group
def get_krishna_response(user_input):
    """Generate a response from Little Krishna."""
    try:
        user_input_lower = user_input.lower().strip()
        logger.info(f"Processing user input: {user_input_lower}")

        # Reset context
        if "start over" in user_input_lower or "reset" in user_input_lower:
            conversation_context.update({"last_topic": None, "message_count": 0, "last_response": None, "last_yes_response": None, "history": []})
            return "Hare Manavi! Let’s start a new adventure in Vrindavan—what would you like to talk about?"

        # Analyze sentiment
        sentiment = analyze_sentiment(user_input)
        logger.info(f"Sentiment detected: {sentiment}")
        conversation_context["message_count"] += 1

        # Update history
        if len(conversation_context["history"]) >= 5:
            conversation_context["history"].pop(0)
        conversation_context["history"].append({"input": user_input_lower, "response": None})

        # Semantic keyword matching
        matched_group = get_keyword_match(user_input_lower)
        use_model = random.random() < 0.3
        logger.info(f"Matched group: {matched_group}, Use model: {use_model}")
        # Follow-up based on history
        if conversation_context["last_topic"]:
            last_input = conversation_context["history"][-2]["input"] if len(conversation_context["history"]) > 1 else ""
            if "yes" in user_input_lower or "sure" in user_input_lower or "okay" in user_input_lower:
                if conversation_context["last_topic"] == "playful":
                    response = "Hare Manavi! Let’s chase butterflies by the Yamuna then! Ready for more fun?"
                    conversation_context["history"][-1]["response"] = response
                    return response
                elif conversation_context["last_topic"] == "wisdom":
                    response = "Hare Manavi! Patience is like a flute’s tune—it brings harmony. What else do you seek?"
                    conversation_context["history"][-1]["response"] = response
                    return response
                elif conversation_context["last_topic"] == "joke":
                    response = "Hare Manavi! Why did the cow join the band? For my flute solos! Another one?"
                    conversation_context["history"][-1]["response"] = response
                    return response
        # Handle predefined responses
        follow_ups = {
            "greeting": "What’s sparking your joy today?",
            "joke": "Want another silly tale?",
            "riddle": "Ready for another puzzle?",
            "playful": "What fun shall we have next?",
            "calm": "What’s soothing your heart today?",
            "wisdom": "What wisdom are you seeking now?",
            "nature": "Which part of Vrindavan calls to you?",
            "encourage": "What’s your next brave step?",
            "friend": "What’s a special moment you’d like to share?",
            "chat": "What’s on your mind, Manavi?",
            "birthday": "What’s your birthday wish?"
        }
        if matched_group and not use_model:
            conversation_context["last_topic"] = matched_group
            if matched_group == "birthday":
                # auto_generate_birthday_message is not defined in this file; it is assumed to be
                # provided elsewhere in the project (e.g., messages.py or ayush_messages.py).
                response = ayush_surprises.get("birthday", auto_generate_birthday_message(include_tease=True))
            elif matched_group == "chat":
                response = krishna_blessings["chat_with_you"]
            elif matched_group in ayush_teasing and random.choice([True, False]):
                response = random.choice(ayush_teasing[matched_group])
            elif matched_group in krishna_blessings:
                response = krishna_blessings[matched_group]
            else:
                response = krishna_blessings.get(matched_group, "Hare Manavi! Let’s explore Vrindavan’s magic!")
            follow_up = follow_ups.get(matched_group, "What else is on your mind, Manavi?")
            response = f"{response} {follow_up}"
            conversation_context["history"][-1]["response"] = response
            return response
        # Sentiment-based fallback
        if sentiment in ["sadness", "anger"] and not matched_group and not use_model:
            response = "Hare Manavi! I see a shadow on your heart—let’s dance by the Yamuna to bring back your smile! What’s on your mind?"
            conversation_context["history"][-1]["response"] = response
            return response
        elif sentiment == "joy" and not matched_group and not use_model:
            response = "Hare Manavi! Your joy lights up Vrindavan—let’s celebrate with a flute melody! What’s making you so happy?"
            conversation_context["history"][-1]["response"] = response
            return response
        # Fallback to AI model
        headers = {
            "Authorization": f"Bearer {HUGGINGFACE_API_TOKEN}",
            "Content-Type": "application/json"
        }
        for model in AI_MODELS:
            try:
                logger.info(f"Attempting response with {model['name']}")
                payload = {
                    "inputs": SYSTEM_PROMPT.format(user_input=user_input),
                    "parameters": model["parameters"]
                }
                response = make_api_request(model["endpoint"], headers=headers, payload=payload)
                if response and response.status_code == 200:
                    result = response.json()
                    if isinstance(result, list) and result and "generated_text" in result[0]:
                        response_text = result[0]["generated_text"].strip()
                    elif isinstance(result, dict) and "generated_text" in result:
                        response_text = result["generated_text"].strip()
                    else:
                        continue
                    conversation_context["history"][-1]["response"] = response_text
                    logger.info(f"Generated response: {response_text}")
                    return response_text
            except Exception as e:
                logger.error(f"Error with {model['name']}: {str(e)}")
                continue
        # Default fallback
        response = "Hare Manavi! I’m lost in Vrindavan’s magic—let’s try a new tune! What’s on your mind?"
        conversation_context["history"][-1]["response"] = response
        return response
    except Exception as e:
        logger.error(f"Unhandled exception in get_krishna_response: {str(e)}")
        response = "Hare Manavi! Something went wrong—let’s try again! What’s up?"
        # Guard against failures that occur before the current turn was added to the history
        if conversation_context["history"]:
            conversation_context["history"][-1]["response"] = response
        return response
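
# Minimal local-test sketch (an assumption, not part of the original app wiring): the UI layer is
# expected to import get_krishna_response, and this block only exercises the function from the
# command line when the module is run directly. The sample inputs below are illustrative only.
if __name__ == "__main__":
    for sample in ["Hi Krishna!", "I’m sad", "Tell me a joke"]:
        print(f"Manavi: {sample}")
        print(f"Krishna: {get_krishna_response(sample)}")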