# Little Krishna chatbot — response engine (HuggingFace inference API + sentence-transformers).
import os
import requests
import random
import time
import logging
from dotenv import load_dotenv
from messages import krishna_blessings, ayush_teasing, keyword_groups
from ayush_messages import ayush_surprises
from sentence_transformers import SentenceTransformer, util
import numpy as np
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Load environment variables
load_dotenv()
HUGGINGFACE_API_TOKEN = os.getenv("HUGGINGFACE_API_TOKEN")
if not HUGGINGFACE_API_TOKEN:
    # Fail fast at import time: every API call in this module needs the token.
    logger.error("HUGGINGFACE_API_TOKEN not found in environment variables.")
    raise ValueError("HUGGINGFACE_API_TOKEN is required.")

# Initialize sentence transformer model
# NOTE(review): loaded at import time — the first run downloads model weights.
semantic_model = SentenceTransformer('all-MiniLM-L6-v2')

# AI model for fallback responses (tried in order by get_krishna_response).
AI_MODELS = [
    {
        "name": "mistralai/Mixtral-8x7B-Instruct-v0.1",
        "endpoint": "https://api-inference.huggingface.co/models/mistralai/Mixtral-8x7B-Instruct-v0.1",
        "parameters": {
            "max_length": 80,       # keep replies short (1-2 sentences, per prompt)
            "temperature": 0.8,
            "top_p": 0.95,
            "top_k": 40
        }
    }
]

# System prompt for AI model; '{user_input}' is filled in via str.format.
SYSTEM_PROMPT = (
    "You are Little Krishna, a playful, wise, and loving cowherd from Vrindavan, speaking to Manavi. "
    "Your tone is warm, mischievous, and full of love, always addressing Manavi with 'Hare Manavi!' "
    "Use Vrindavan imagery (e.g., Yamuna, peacocks, butter, flute) and keep responses short (1-2 sentences). "
    "You’re Ayush’s wingman, occasionally teasing Manavi about Ayush with wit, as he’s building this chatbot for her birthday on April 19, 2025. "
    "If the user’s mood seems negative, offer comfort; if positive, celebrate their joy. Always end with a question to keep the conversation going. "
    "Examples:\n"
    "Input: 'I’m sad'\nResponse: 'Hare Manavi! Let’s sit by the Yamuna—I’ll play a tune to lift your heart! What’s troubling you?'\n"
    "Input: 'Tell me about love'\nResponse: 'Hare Manavi! Love is like my flute’s melody—sweet and endless! What does love mean to you?'\n"
    "Input: 'What’s up?'\nResponse: 'Hare Manavi! Just dancing with the gopis—Ayush says hi, by the way! What’s up with you?'\n"
    "Now, respond to: '{user_input}'"
)

# Conversation context — module-level mutable state shared across calls
# to get_krishna_response (single-conversation design; not thread-safe).
conversation_context = {
    "last_topic": None,          # last matched keyword group, drives follow-ups
    "message_count": 0,
    "last_response": None,
    "last_yes_response": None,
    "history": []                # Store up to 5 recent (input, response) pairs
}
def analyze_sentiment(user_input):
    """Classify the dominant emotion of *user_input*.

    Sends the text to the cardiffnlp/twitter-roberta-base-emotion model on
    the HuggingFace inference API and returns the top-scoring emotion label
    (e.g. "joy", "sadness", "anger"). Returns "neutral" on any API failure,
    unexpected payload shape, or exception.
    """
    headers = {
        "Authorization": f"Bearer {HUGGINGFACE_API_TOKEN}",
        "Content-Type": "application/json"
    }
    payload = {"inputs": user_input}
    try:
        # BUG FIX: make_api_request's signature is (url, headers, payload);
        # the previous call passed json=payload, an unknown keyword that
        # raised TypeError on every invocation.
        response = make_api_request(
            "https://api-inference.huggingface.co/models/cardiffnlp/twitter-roberta-base-emotion",
            headers,
            payload
        )
        if response and response.status_code == 200:
            result = response.json()
            if isinstance(result, list) and result:
                # Response shape: [[{"label": ..., "score": ...}, ...]] —
                # take the label with the highest score.
                emotions = result[0]
                top_emotion = max(emotions, key=lambda x: x["score"])["label"]
                return top_emotion  # e.g., "joy", "sadness", "anger"
        return "neutral"
    except Exception as e:
        logger.error(f"Error in analyze_sentiment: {str(e)}")
        return "neutral"
def make_api_request(url, headers, payload, retries=3, delay=5):
    """POST *payload* as JSON to *url*, retrying transient failures.

    Retries up to *retries* times, sleeping *delay* seconds after a 429
    (rate limit) or a raised exception. Returns the successful Response,
    or None on a non-retryable HTTP error or once retries are exhausted.
    """
    for attempt_number in range(1, retries + 1):
        try:
            resp = requests.post(url, headers=headers, json=payload)
        except Exception as exc:
            logger.error(f"API request failed on attempt {attempt_number}: {str(exc)}")
            # Back off before the next try, but not after the final one.
            if attempt_number < retries:
                time.sleep(delay)
            continue
        if resp.status_code == 200:
            return resp
        if resp.status_code == 429:
            # Rate-limited: wait and retry.
            logger.warning(f"Rate limit hit on attempt {attempt_number}. Retrying after {delay} seconds...")
            time.sleep(delay)
            continue
        # Any other HTTP error is treated as permanent.
        logger.error(f"API error: {resp.text}")
        return None
    logger.error(f"API request failed after {retries} retries.")
    return None
def get_keyword_match(user_input_lower):
    """Return the keyword group most semantically similar to the input.

    The input is embedded once; each group is scored by the best cosine
    similarity between that embedding and the group's keywords (plus any
    blessing texts keyed by those keywords). A group only qualifies when
    its score exceeds 0.6; returns the top qualifying group, else None.
    """
    query_embedding = semantic_model.encode(user_input_lower, convert_to_tensor=True)
    top_group = None
    top_score = -1
    for group_name, group_keywords in keyword_groups.items():
        # Score against the raw keywords and any matching blessing texts.
        candidate_texts = list(group_keywords)
        candidate_texts.extend(krishna_blessings[k] for k in group_keywords if k in krishna_blessings)
        candidate_embeddings = semantic_model.encode(candidate_texts, convert_to_tensor=True)
        group_score = util.cos_sim(query_embedding, candidate_embeddings).max().item()
        if group_score > top_score and group_score > 0.6:
            top_score = group_score
            top_group = group_name
    logger.info(f"Best group: {top_group}, Similarity score: {top_score}")
    return top_group
def get_krishna_response(user_input):
    """Generate Little Krishna's reply to *user_input*.

    Resolution order: context reset -> "yes"-style follow-up on the previous
    topic -> semantically matched predefined response -> sentiment-based
    comfort/celebration -> HuggingFace model fallback -> canned default.
    Mutates the module-level conversation_context (topic, message count,
    rolling history of the last 5 exchanges) as a side effect.
    """
    try:
        user_input_lower = user_input.lower().strip()
        logger.info(f"Processing user input: {user_input_lower}")

        # Reset context on explicit request.
        if "start over" in user_input_lower or "reset" in user_input_lower:
            conversation_context.update({"last_topic": None, "message_count": 0, "last_response": None, "last_yes_response": None, "history": []})
            return "Hare Manavi! Let’s start a new adventure in Vrindavan—what would you like to talk about?"

        # Analyze sentiment
        sentiment = analyze_sentiment(user_input)
        logger.info(f"Sentiment detected: {sentiment}")
        conversation_context["message_count"] += 1

        # Update rolling history (bounded at 5 entries; reply filled in later).
        if len(conversation_context["history"]) >= 5:
            conversation_context["history"].pop(0)
        conversation_context["history"].append({"input": user_input_lower, "response": None})

        # Semantic keyword matching; ~30% of replies are delegated to the AI
        # model for variety even when a keyword group matches.
        matched_group = get_keyword_match(user_input_lower)
        use_model = random.random() < 0.3
        logger.info(f"Matched group: {matched_group}, Use model: {use_model}")

        # Follow-up when the user agrees ("yes"/"sure"/"okay") to the last topic.
        if conversation_context["last_topic"]:
            if "yes" in user_input_lower or "sure" in user_input_lower or "okay" in user_input_lower:
                if conversation_context["last_topic"] == "playful":
                    response = "Hare Manavi! Let’s chase butterflies by the Yamuna then! Ready for more fun?"
                    conversation_context["history"][-1]["response"] = response
                    return response
                elif conversation_context["last_topic"] == "wisdom":
                    response = "Hare Manavi! Patience is like a flute’s tune—it brings harmony. What else do you seek?"
                    conversation_context["history"][-1]["response"] = response
                    return response
                elif conversation_context["last_topic"] == "joke":
                    response = "Hare Manavi! Why did the cow join the band? For my flute solos! Another one?"
                    conversation_context["history"][-1]["response"] = response
                    return response

        # Per-topic follow-up questions appended to predefined responses.
        follow_ups = {
            "greeting": "What’s sparking your joy today?",
            "joke": "Want another silly tale?",
            "riddle": "Ready for another puzzle?",
            "playful": "What fun shall we have next?",
            "calm": "What’s soothing your heart today?",
            "wisdom": "What wisdom are you seeking now?",
            "nature": "Which part of Vrindavan calls to you?",
            "encourage": "What’s your next brave step?",
            "friend": "What’s a special moment you’d like to share?",
            "chat": "What’s on your mind, Manavi?",
            "birthday": "What’s your birthday wish?"
        }
        if matched_group and not use_model:
            conversation_context["last_topic"] = matched_group
            if matched_group == "birthday":
                # BUG FIX: dict.get evaluated auto_generate_birthday_message
                # eagerly, invoking it even when the "birthday" key exists;
                # branch lazily instead. (auto_generate_birthday_message is
                # presumably defined elsewhere in the project — verify.)
                if "birthday" in ayush_surprises:
                    response = ayush_surprises["birthday"]
                else:
                    response = auto_generate_birthday_message(include_tease=True)
            elif matched_group == "chat":
                response = krishna_blessings["chat_with_you"]
            elif matched_group in ayush_teasing and random.choice([True, False]):
                # 50/50 chance to tease about Ayush when the group supports it.
                response = random.choice(ayush_teasing[matched_group])
            elif matched_group in krishna_blessings:
                response = krishna_blessings[matched_group]
            else:
                response = krishna_blessings.get(matched_group, "Hare Manavi! Let’s explore Vrindavan’s magic!")
            follow_up = follow_ups.get(matched_group, "What else is on your mind, Manavi?")
            response = f"{response} {follow_up}"
            conversation_context["history"][-1]["response"] = response
            return response

        # Sentiment-based fallback when no keyword group matched.
        if sentiment in ["sadness", "anger"] and not matched_group and not use_model:
            response = "Hare Manavi! I see a shadow on your heart—let’s dance by the Yamuna to bring back your smile! What’s on your mind?"
            conversation_context["history"][-1]["response"] = response
            return response
        elif sentiment == "joy" and not matched_group and not use_model:
            response = "Hare Manavi! Your joy lights up Vrindavan—let’s celebrate with a flute melody! What’s making you so happy?"
            conversation_context["history"][-1]["response"] = response
            return response

        # Fallback to AI model
        headers = {
            "Authorization": f"Bearer {HUGGINGFACE_API_TOKEN}",
            "Content-Type": "application/json"
        }
        for model in AI_MODELS:
            try:
                logger.info(f"Attempting response with {model['name']}")
                payload = {
                    "inputs": SYSTEM_PROMPT.format(user_input=user_input),
                    "parameters": model["parameters"]
                }
                # BUG FIX: make_api_request takes (url, headers, payload);
                # passing json=payload raised TypeError on every fallback.
                response = make_api_request(model["endpoint"], headers, payload)
                if response and response.status_code == 200:
                    result = response.json()
                    # The API returns either a list of generations or a dict.
                    if isinstance(result, list) and result and "generated_text" in result[0]:
                        response_text = result[0]["generated_text"].strip()
                    elif isinstance(result, dict) and "generated_text" in result:
                        response_text = result["generated_text"].strip()
                    else:
                        continue
                    conversation_context["history"][-1]["response"] = response_text
                    logger.info(f"Generated response: {response_text}")
                    return response_text
            except Exception as e:
                logger.error(f"Error with {model['name']}: {str(e)}")
                continue

        # Default fallback
        response = "Hare Manavi! I’m lost in Vrindavan’s magic—let’s try a new tune! What’s on your mind?"
        conversation_context["history"][-1]["response"] = response
        return response
    except Exception as e:
        logger.error(f"Unhandled exception in get_krishna_response: {str(e)}")
        response = "Hare Manavi! Something went wrong—let’s try again! What’s up?"
        # BUG FIX: the failure may occur before anything was appended to
        # history (e.g. right after a reset), so guard against IndexError.
        if conversation_context["history"]:
            conversation_context["history"][-1]["response"] = response
        return response