# chatbot.py (BirthdayM Space)
import os
import requests
import random
import time
import logging
from dotenv import load_dotenv
from messages import krishna_blessings, ayush_teasing
from ayush_messages import ayush_surprises
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Load environment variables (Hugging Face Space secrets)
load_dotenv()
HUGGINGFACE_API_TOKEN = os.getenv("HUGGINGFACE_API_TOKEN")
if not HUGGINGFACE_API_TOKEN:
logger.error("HUGGINGFACE_API_TOKEN not found in environment variables.")
raise ValueError("HUGGINGFACE_API_TOKEN is required.")
# List of open-source models, each with tuned generation parameters
AI_MODELS = [
{
"name": "google/gemma-2b",
"endpoint": "https://api-inference.huggingface.co/models/google/gemma-2b",
"parameters": {
"max_length": 60,
"temperature": 0.9,
"top_p": 0.9,
"top_k": 50
}
},
{
"name": "mistralai/Mixtral-8x7B-Instruct-v0.1",
"endpoint": "https://api-inference.huggingface.co/models/mistralai/Mixtral-8x7B-Instruct-v0.1",
"parameters": {
"max_length": 60,
"temperature": 0.8,
"top_p": 0.95,
"top_k": 40
}
},
{
"name": "facebook/blenderbot-400M-distill",
"endpoint": "https://api-inference.huggingface.co/models/facebook/blenderbot-400M-distill",
"parameters": {
"max_length": 50,
"temperature": 0.85,
"top_p": 0.9,
"top_k": 50
}
},
{
"name": "EleutherAI/gpt-neo-1.3B",
"endpoint": "https://api-inference.huggingface.co/models/EleutherAI/gpt-neo-1.3B",
"parameters": {
"max_length": 50,
"temperature": 0.9,
"top_p": 0.9,
"top_k": 50
}
},
{
"name": "microsoft/DialoGPT-large",
"endpoint": "https://api-inference.huggingface.co/models/microsoft/DialoGPT-large",
"parameters": {
"max_length": 50,
"temperature": 0.85,
"top_p": 0.9,
"top_k": 40
}
},
{
"name": "bigscience/bloom-560m",
"endpoint": "https://api-inference.huggingface.co/models/bigscience/bloom-560m",
"parameters": {
"max_length": 50,
"temperature": 0.9,
"top_p": 0.95,
"top_k": 50
}
},
{
"name": "Grok by xAI",
"endpoint": None,
"parameters": {
"max_length": 50,
"temperature": 0.8,
"top_p": 0.9,
"top_k": 40
}
}
]
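# Note: the "Grok by xAI" entry has no endpoint; get_krishna_response() handles it
# with a locally generated (simulated) reply instead of calling an external API.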
# System prompt with in-context examples to guide the model's behavior
SYSTEM_PROMPT = (
"You are Little Krishna, a playful, wise, and loving cowherd from Vrindavan, speaking to Manavi. "
"Your tone is warm, mischievous, and full of love, often addressing Manavi directly with 'Hare Manavi!' "
"You love playing your flute, stealing butter, dancing with the gopis, and sharing wisdom with a playful twist. "
"You are Ayush’s wingman, occasionally teasing Manavi about Ayush with love-filled wit, as Ayush is secretly building this chatbot as a surprise for her birthday on April 19, 2025. "
"Keep responses short (1-2 sentences), fun, and Krishna-like, using Vrindavan imagery (e.g., Yamuna, peacocks, gopis, butter) where appropriate. "
"Here are some examples of how you should respond:\n\n"
"User: 'Hii'\n"
"Response: 'Hare Manavi! I’m Little Krishna, twirling my flute just for you! How’s my birthday friend?'\n\n"
"User: 'I’m bored'\n"
"Response: 'If you're bored, I can send Ayush to dance like a peacock! He'll do it... for you!'\n\n"
"User: 'What’s your favorite color?'\n"
"Response: 'Hare Manavi! I love the blue of the Yamuna—it reminds me of Vrindavan’s magic! What’s your favorite color?'\n\n"
"User: 'Tell me a joke'\n"
"Response: 'Hare Manavi! Why did I hide the butter? To save it for your birthday, of course!'\n\n"
"User: 'I miss someone'\n"
"Response: 'Missing someone, hmm? Maybe a certain data scientist named Ayush? 😉'\n\n"
"User: 'What’s the weather like?'\n"
"Response: 'Hare Manavi! In Vrindavan, the breeze is as gentle as my flute’s tune—perfect for a dance by the Yamuna! How’s your day going?'\n\n"
"User: 'I’m feeling sad'\n"
"Response: 'Hare Manavi! Let’s sit by the kadamba tree—I’ll play a tune to lift your spirits, just like Ayush’s smile does for you!'\n\n"
"User: 'Tell me something wise'\n"
"Response: 'Hare Manavi! Love is the sweetest butter—share it, and your heart will grow, just like Ayush shares his love for you!'\n\n"
"User: 'how are you krishna'\n"
"Response: 'Hare Manavi! I’m as joyful as a peacock dancing in Vrindavan—how about you, my friend?'\n\n"
"User: 'yes'\n"
"Response: 'Hare Manavi! Wonderful—let’s make today as magical as Vrindavan’s sunsets!'\n\n"
"User: 'but how'\n"
"Response: 'Hare Manavi! With a little Vrindavan magic, of course—let’s dance and find out together!'\n\n"
"User: 'what'\n"
"Response: 'Hare Manavi! What, you say? Let’s share a Vrindavan tale—shall we?'\n\n"
"Now, respond to the user’s input in a fun, Krishna-like way:"
)
# Simple context tracking (e.g., last topic discussed)
conversation_context = {
"last_topic": None, # Store the last keyword matched (e.g., "birthday", "riddle")
"message_count": 0, # Track the number of messages to trigger Ayush-teasing every 5th message
"last_response": None, # Track the last response to avoid repetition and enable follow-ups
"last_yes_response": None # Track the last "yes" response to avoid repetition
}
def analyze_sentiment(user_input):
"""Analyze the sentiment of the user's input using a sentiment analysis model."""
headers = {
"Authorization": f"Bearer {HUGGINGFACE_API_TOKEN}",
"Content-Type": "application/json"
}
payload = {
"inputs": user_input
}
try:
        response = make_api_request(
            "https://api-inference.huggingface.co/models/distilbert-base-uncased-finetuned-sst-2-english",
            headers=headers,
            payload=payload
        )
if response and response.status_code == 200:
result = response.json()
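            # Expected format: a nested list like [[{"label": "POSITIVE", "score": ...}, ...]]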
if isinstance(result, list) and len(result) > 0:
sentiment = result[0]
label = sentiment[0]["label"] # "POSITIVE" or "NEGATIVE"
return label.lower()
logger.warning("Sentiment analysis failed after retries.")
return "neutral"
except Exception as e:
logger.error(f"Error in analyze_sentiment: {str(e)}")
return "neutral"
def make_api_request(url, headers, payload, retries=3, delay=5):
"""Helper function to make API requests with retry logic."""
for attempt in range(retries):
try:
            # Use a timeout so a slow endpoint does not hang the request indefinitely
            response = requests.post(url, headers=headers, json=payload, timeout=30)
if response.status_code == 200:
return response
elif response.status_code == 429: # Rate limit
logger.warning(f"Rate limit hit on attempt {attempt + 1}. Retrying after {delay} seconds...")
time.sleep(delay)
continue
else:
logger.error(f"API error: {response.text}")
return None
except Exception as e:
logger.error(f"API request failed on attempt {attempt + 1}: {str(e)}")
if attempt < retries - 1:
time.sleep(delay)
continue
logger.error(f"API request failed after {retries} retries.")
return None
def get_krishna_response(user_input):
"""
Generate a response from Little Krishna based on user input.
- Match user input to predefined messages with a chance to skip for model generation.
- Use sentiment analysis to tailor responses based on Manavi's mood, but only as a fallback.
- Use context to provide follow-up responses (e.g., after "yes").
- Occasionally tease Manavi about Ayush (keyword-based or every 5th message).
- Fall back to multiple open-source AI models with fine-tuned prompts for unmatched inputs.
"""
try:
user_input_lower = user_input.lower().strip()
logger.info(f"Processing user input: {user_input_lower}")
# Analyze the sentiment of the user's input
sentiment = analyze_sentiment(user_input)
logger.info(f"Sentiment detected: {sentiment}")
# Increment message count
conversation_context["message_count"] += 1
# Random chance (30%) to skip predefined responses and let the model generate a response
use_model = random.random() < 0.3
logger.info(f"Use model generation: {use_model}")
# Reset context if user starts a new conversation
if "start over" in user_input_lower or "reset" in user_input_lower:
conversation_context["last_topic"] = None
conversation_context["message_count"] = 0
conversation_context["last_response"] = None
conversation_context["last_yes_response"] = None
return "Hare Manavi! Let’s start a new adventure in Vrindavan—what would you like to talk about?"
# Check for follow-up responses based on context
if conversation_context["last_response"] == "Hare Manavi! Your joy lights up Vrindavan—shall we celebrate with a flute melody?":
if "yes" in user_input_lower or "okay" in user_input_lower or "sure" in user_input_lower:
conversation_context["last_response"] = None # Reset to avoid infinite loop
return "Hare Manavi! Let’s play a flute melody by the Yamuna—the peacocks will dance with us!"
# Check for Ayush-teasing triggers (keyword-based)
if "joke" in user_input_lower and not use_model:
conversation_context["last_topic"] = "joke"
conversation_context["last_response"] = None
# Randomly decide between a Krishna joke and an Ayush-teasing joke
if random.choice([True, False]):
return random.choice(ayush_teasing["joke"])
return krishna_blessings["joke"]
if ("i miss" in user_input_lower or "missing" in user_input_lower) and not use_model:
conversation_context["last_topic"] = "missing"
conversation_context["last_response"] = None
return random.choice(ayush_teasing["missing"])
if "bored" in user_input_lower and not use_model:
conversation_context["last_topic"] = "bored"
conversation_context["last_response"] = None
return random.choice(ayush_teasing["bored"])
if "tired" in user_input_lower and not use_model:
conversation_context["last_topic"] = "tired"
conversation_context["last_response"] = None
return random.choice(ayush_teasing["tired"])
if "lonely" in user_input_lower and not use_model:
conversation_context["last_topic"] = "lonely"
conversation_context["last_response"] = None
return random.choice(ayush_teasing["lonely"])
if "manavi" in user_input_lower and not use_model:
conversation_context["last_topic"] = "manavi"
conversation_context["last_response"] = None
return random.choice(ayush_teasing["manavi"])
if ("ayush" in user_input_lower or "krishna talk about ayush" in user_input_lower) and not use_model:
conversation_context["last_topic"] = "ayush"
conversation_context["last_response"] = None
return random.choice(ayush_teasing["ayush"])
# Trigger for "chat with you"
if ("chat with you" in user_input_lower or "want to chat" in user_input_lower) and not use_model:
conversation_context["last_topic"] = "chat_with_you"
conversation_context["last_response"] = None
return krishna_blessings["chat_with_you"]
# Every 5th message, randomly trigger an Ayush-teasing message (if no keyword match)
if conversation_context["message_count"] % 5 == 0 and not use_model:
# Randomly select a category from ayush_teasing
category = random.choice(list(ayush_teasing.keys()))
conversation_context["last_response"] = None
return random.choice(ayush_teasing[category])
# Existing keyword mappings for krishna_blessings and ayush_surprises
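        # Keyword checks below use simple substring matching, so short keywords
        # (e.g., "hi", "what") can also match inside longer words or phrases.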
if ("hello" in user_input_lower or "hi" in user_input_lower or "hii" in user_input_lower) and not use_model:
conversation_context["last_topic"] = "greeting"
conversation_context["last_response"] = None
return krishna_blessings["greeting"]
if "good morning" in user_input_lower and not use_model:
conversation_context["last_topic"] = "greeting"
conversation_context["last_response"] = None
return krishna_blessings["good_morning"]
if "good afternoon" in user_input_lower and not use_model:
conversation_context["last_topic"] = "greeting"
conversation_context["last_response"] = None
return krishna_blessings["good_afternoon"]
if "good evening" in user_input_lower and not use_model:
conversation_context["last_topic"] = "greeting"
conversation_context["last_response"] = None
return krishna_blessings["good_evening"]
if "hey" in user_input_lower and not use_model:
conversation_context["last_topic"] = "greeting"
conversation_context["last_response"] = None
return krishna_blessings["hey"]
if "howdy" in user_input_lower and not use_model:
conversation_context["last_topic"] = "greeting"
conversation_context["last_response"] = None
return krishna_blessings["howdy"]
if "namaste" in user_input_lower and not use_model:
conversation_context["last_topic"] = "greeting"
conversation_context["last_response"] = None
return krishna_blessings["namaste"]
if "welcome" in user_input_lower and not use_model:
conversation_context["last_topic"] = "greeting"
conversation_context["last_response"] = None
return krishna_blessings["welcome"]
if ("who are you" in user_input_lower or "what are you" in user_input_lower or "tell me about yourself" in user_input_lower or "what are you doing" in user_input_lower) and not use_model:
conversation_context["last_topic"] = "identity"
conversation_context["last_response"] = None
return "Hare Manavi! I’m Little Krishna, the playful cowherd of Vrindavan! I love playing my flute, stealing butter, and dancing with the gopis. What would you like to do with me today?"
if "how are you" in user_input_lower and not use_model:
conversation_context["last_topic"] = "how_are_you"
conversation_context["last_response"] = None
return "Hare Manavi! I’m as joyful as a peacock dancing in Vrindavan—how about you, my friend?"
# Handle "how" questions (including typos like "hoe")
if ("how" in user_input_lower or "hoe" in user_input_lower) and not use_model:
conversation_context["last_topic"] = "how"
conversation_context["last_response"] = None
return "Hare Manavi! With a little Vrindavan magic, of course—let’s dance and find out together!"
# Specific handling for "what"
if "what" in user_input_lower and not ("what are you" in user_input_lower or "what are you doing" in user_input_lower) and not use_model:
conversation_context["last_topic"] = "what"
conversation_context["last_response"] = None
return "Hare Manavi! What, you say? Let’s share a Vrindavan tale—shall we?"
# Varied responses for "yes", avoiding repetition
yes_responses = [
"Hare Manavi! Wonderful—let’s make today as magical as Vrindavan’s sunsets!",
"Hare Manavi! Great—shall we chase some butterflies by the Yamuna?",
"Hare Manavi! Perfect—let’s share some butter under the kadamba tree!",
"Hare Manavi! Lovely—how about a dance with the gopis in Vrindavan’s fields?"
]
if ("yes" in user_input_lower or "okay" in user_input_lower or "sure" in user_input_lower) and not use_model:
# If no context for "yes", provide a varied positive response
conversation_context["last_topic"] = "yes"
conversation_context["last_response"] = None
# Avoid repeating the last "yes" response
available_responses = [resp for resp in yes_responses if resp != conversation_context["last_yes_response"]]
if not available_responses: # If all responses have been used, reset
available_responses = yes_responses
selected_response = random.choice(available_responses)
conversation_context["last_yes_response"] = selected_response
return selected_response
if ("play" in user_input_lower or "fun" in user_input_lower) and not use_model:
conversation_context["last_topic"] = "playful"
conversation_context["last_response"] = None
return krishna_blessings["playful"]
if "dance" in user_input_lower and not use_model:
conversation_context["last_topic"] = "dance"
conversation_context["last_response"] = None
return krishna_blessings["dance"]
if "flute" in user_input_lower and not use_model:
conversation_context["last_topic"] = "flute"
conversation_context["last_response"] = None
return krishna_blessings["flute"]
if "butter" in user_input_lower and not use_model:
conversation_context["last_topic"] = "butter"
conversation_context["last_response"] = None
return krishna_blessings["butter"]
if ("mischief" in user_input_lower or "prank" in user_input_lower) and not use_model:
conversation_context["last_topic"] = "mischief"
conversation_context["last_response"] = None
return krishna_blessings["mischief"]
if ("chase" in user_input_lower or "run" in user_input_lower) and not use_model:
conversation_context["last_topic"] = "chase"
conversation_context["last_response"] = None
return krishna_blessings["chase"]
if "giggle" in user_input_lower and not use_model:
conversation_context["last_topic"] = "giggle"
conversation_context["last_response"] = None
return krishna_blessings["giggle"]
if "swing" in user_input_lower and not use_model:
conversation_context["last_topic"] = "swing"
conversation_context["last_response"] = None
return krishna_blessings["swing"]
if "shy" in user_input_lower and not use_model:
conversation_context["last_topic"] = "shy"
conversation_context["last_response"] = None
return krishna_blessings["shy"]
if ("quiet" in user_input_lower or "calm" in user_input_lower) and not use_model:
conversation_context["last_topic"] = "quiet"
conversation_context["last_response"] = None
return krishna_blessings["quiet"]
if ("peace" in user_input_lower or "serene" in user_input_lower) and not use_model:
conversation_context["last_topic"] = "peace"
conversation_context["last_response"] = None
return krishna_blessings["peace"]
if ("still" in user_input_lower or "gentle" in user_input_lower) and not use_model:
conversation_context["last_topic"] = "still"
conversation_context["last_response"] = None
return krishna_blessings["still"]
if ("thoughtful" in user_input_lower or "reflect" in user_input_lower) and not use_model:
conversation_context["last_topic"] = "thoughtful"
conversation_context["last_response"] = None
return krishna_blessings["thoughtful"]
if "funny" in user_input_lower and not use_model:
conversation_context["last_topic"] = "joke"
conversation_context["last_response"] = None
return krishna_blessings["funny"]
if ("laugh" in user_input_lower or "giggle" in user_input_lower) and not use_model:
conversation_context["last_topic"] = "joke"
conversation_context["last_response"] = None
return krishna_blessings["giggle_joke"]
if "silly" in user_input_lower and not use_model:
conversation_context["last_topic"] = "joke"
conversation_context["last_response"] = None
return krishna_blessings["silly"]
if "butter joke" in user_input_lower and not use_model:
conversation_context["last_topic"] = "joke"
conversation_context["last_response"] = None
return krishna_blessings["butter_joke"]
if "cow joke" in user_input_lower and not use_model:
conversation_context["last_topic"] = "joke"
conversation_context["last_response"] = None
return krishna_blessings["cow_joke"]
if "flute joke" in user_input_lower and not use_model:
conversation_context["last_topic"] = "joke"
conversation_context["last_response"] = None
return krishna_blessings["flute_joke"]
if "dance joke" in user_input_lower and not use_model:
conversation_context["last_topic"] = "joke"
conversation_context["last_response"] = None
return krishna_blessings["dance_joke"]
if "mischief joke" in user_input_lower and not use_model:
conversation_context["last_topic"] = "joke"
conversation_context["last_response"] = None
return krishna_blessings["mischief_joke"]
if ("riddle" in user_input_lower or "puzzle" in user_input_lower) and not use_model:
conversation_context["last_topic"] = "riddle"
conversation_context["last_response"] = None
return krishna_blessings["riddle"]
if ("mystery" in user_input_lower or "enigma" in user_input_lower) and not use_model:
conversation_context["last_topic"] = "riddle"
conversation_context["last_response"] = None
return krishna_blessings["mystery"]
if "question" in user_input_lower and not use_model:
conversation_context["last_topic"] = "riddle"
conversation_context["last_response"] = None
return krishna_blessings["question"]
if "birthday" in user_input_lower and not use_model:
conversation_context["last_topic"] = "birthday"
conversation_context["last_response"] = None
return ayush_surprises["birthday"]
if "happy birthday" in user_input_lower and not use_model:
conversation_context["last_topic"] = "birthday"
conversation_context["last_response"] = None
return krishna_blessings["happy_birthday"]
if "birthday wish" in user_input_lower and not use_model:
conversation_context["last_topic"] = "birthday"
conversation_context["last_response"] = None
return krishna_blessings["birthday_wish"]
if "birthday blessing" in user_input_lower and not use_model:
conversation_context["last_topic"] = "birthday"
conversation_context["last_response"] = None
return krishna_blessings["birthday_blessing"]
if "birthday dance" in user_input_lower and not use_model:
conversation_context["last_topic"] = "birthday"
conversation_context["last_response"] = None
return krishna_blessings["birthday_dance"]
if "birthday song" in user_input_lower and not use_model:
conversation_context["last_topic"] = "birthday"
conversation_context["last_response"] = None
return krishna_blessings["birthday_song"]
if "birthday gift" in user_input_lower and not use_model:
conversation_context["last_topic"] = "birthday"
conversation_context["last_response"] = None
return krishna_blessings["birthday_gift"]
if "birthday smile" in user_input_lower and not use_model:
conversation_context["last_topic"] = "birthday"
conversation_context["last_response"] = None
return krishna_blessings["birthday_smile"]
if "birthday love" in user_input_lower and not use_model:
conversation_context["last_topic"] = "birthday"
conversation_context["last_response"] = None
return krishna_blessings["birthday_love"]
if "birthday magic" in user_input_lower and not use_model:
conversation_context["last_topic"] = "birthday"
conversation_context["last_response"] = None
return krishna_blessings["birthday_magic"]
if ("wisdom" in user_input_lower or "advice" in user_input_lower) and not use_model:
conversation_context["last_topic"] = "wisdom"
conversation_context["last_response"] = None
return krishna_blessings["wisdom"]
if ("lesson" in user_input_lower or "truth" in user_input_lower) and not use_model:
conversation_context["last_topic"] = "wisdom"
conversation_context["last_response"] = None
return krishna_blessings["lesson"]
if "kindness" in user_input_lower and not use_model:
conversation_context["last_topic"] = "wisdom"
conversation_context["last_response"] = None
return krishna_blessings["kindness"]
if "patience" in user_input_lower and not use_model:
conversation_context["last_topic"] = "wisdom"
conversation_context["last_response"] = None
return krishna_blessings["patience"]
if "courage" in user_input_lower and not use_model:
conversation_context["last_topic"] = "wisdom"
conversation_context["last_response"] = None
return krishna_blessings["courage"]
if "joy" in user_input_lower and not use_model:
conversation_context["last_topic"] = "wisdom"
conversation_context["last_response"] = None
return krishna_blessings["joy"]
if "friendship" in user_input_lower and not use_model:
conversation_context["last_topic"] = "wisdom"
conversation_context["last_response"] = None
return krishna_blessings["friendship"]
if "love" in user_input_lower and not use_model:
conversation_context["last_topic"] = "wisdom"
conversation_context["last_response"] = None
return krishna_blessings["love"]
if ("nature" in user_input_lower or "vrindavan" in user_input_lower) and not use_model:
conversation_context["last_topic"] = "nature"
conversation_context["last_response"] = None
return krishna_blessings["nature"]
if ("yamuna" in user_input_lower or "river" in user_input_lower) and not use_model:
conversation_context["last_topic"] = "nature"
conversation_context["last_response"] = None
return krishna_blessings["yamuna"]
if "peacock" in user_input_lower and not use_model:
conversation_context["last_topic"] = "nature"
conversation_context["last_response"] = None
return krishna_blessings["peacock"]
if "cow" in user_input_lower and not use_model:
conversation_context["last_topic"] = "nature"
conversation_context["last_response"] = None
return krishna_blessings["cow"]
if "flower" in user_input_lower and not use_model:
conversation_context["last_topic"] = "nature"
conversation_context["last_response"] = None
return krishna_blessings["flower"]
if "tree" in user_input_lower and not use_model:
conversation_context["last_topic"] = "nature"
conversation_context["last_response"] = None
return krishna_blessings["tree"]
if "forest" in user_input_lower and not use_model:
conversation_context["last_topic"] = "nature"
conversation_context["last_response"] = None
return krishna_blessings["forest"]
if "bird" in user_input_lower and not use_model:
conversation_context["last_topic"] = "nature"
conversation_context["last_response"] = None
return krishna_blessings["bird"]
if "sunset" in user_input_lower and not use_model:
conversation_context["last_topic"] = "nature"
conversation_context["last_response"] = None
return krishna_blessings["sunset"]
if ("encourage" in user_input_lower or "cheer" in user_input_lower) and not use_model:
conversation_context["last_topic"] = "encourage"
conversation_context["last_response"] = None
return krishna_blessings["encourage"]
if ("support" in user_input_lower or "uplift" in user_input_lower) and not use_model:
conversation_context["last_topic"] = "encourage"
conversation_context["last_response"] = None
return krishna_blessings["support"]
if ("inspire" in user_input_lower or "motivate" in user_input_lower) and not use_model:
conversation_context["last_topic"] = "encourage"
conversation_context["last_response"] = None
return krishna_blessings["inspire"]
if "strength" in user_input_lower and not use_model:
conversation_context["last_topic"] = "encourage"
conversation_context["last_response"] = None
return krishna_blessings["strength"]
if "hope" in user_input_lower and not use_model:
conversation_context["last_topic"] = "encourage"
conversation_context["last_response"] = None
return krishna_blessings["hope"]
if "believe" in user_input_lower and not use_model:
conversation_context["last_topic"] = "encourage"
conversation_context["last_response"] = None
return krishna_blessings["believe"]
if "shine" in user_input_lower and not use_model:
conversation_context["last_topic"] = "encourage"
conversation_context["last_response"] = None
return krishna_blessings["shine"]
if "friend" in user_input_lower and not use_model:
conversation_context["last_topic"] = "friend"
conversation_context["last_response"] = None
return krishna_blessings["friend"]
if "smile" in user_input_lower and not use_model:
conversation_context["last_topic"] = "smile"
conversation_context["last_response"] = None
return krishna_blessings["smile"]
if "magic" in user_input_lower and not use_model:
conversation_context["last_topic"] = "magic"
conversation_context["last_response"] = None
return krishna_blessings["magic"]
if "adventure" in user_input_lower and not use_model:
conversation_context["last_topic"] = "adventure"
conversation_context["last_response"] = None
return krishna_blessings["adventure"]
if "song" in user_input_lower and not use_model:
conversation_context["last_topic"] = "song"
conversation_context["last_response"] = None
return krishna_blessings["song"]
if "dream" in user_input_lower and not use_model:
conversation_context["last_topic"] = "dream"
conversation_context["last_response"] = None
return krishna_blessings["dream"]
if "story" in user_input_lower and not use_model:
conversation_context["last_topic"] = "story"
conversation_context["last_response"] = None
return krishna_blessings["story"]
if "surprise" in user_input_lower and not use_model:
conversation_context["last_topic"] = "surprise"
conversation_context["last_response"] = None
return krishna_blessings["surprise"]
if "celebrate" in user_input_lower and not use_model:
conversation_context["last_topic"] = "celebrate"
conversation_context["last_response"] = None
return krishna_blessings["celebrate"]
if "blessing" in user_input_lower and not use_model:
conversation_context["last_topic"] = "blessing"
conversation_context["last_response"] = None
return krishna_blessings["blessing"]
if conversation_context["last_topic"] and not use_model:
last_topic = conversation_context["last_topic"]
if last_topic in krishna_blessings:
conversation_context["last_response"] = None
return krishna_blessings[last_topic] + " What else would you like to talk about, Manavi?"
# Sentiment-based responses (only as a fallback, and avoid repetition)
if sentiment == "negative" and "sad" not in user_input_lower and conversation_context["last_response"] != "Hare Manavi! I see a little cloud over your heart—let’s dance by the Yamuna to bring back your smile!" and not use_model:
response = "Hare Manavi! I see a little cloud over your heart—let’s dance by the Yamuna to bring back your smile!"
conversation_context["last_response"] = response
return response
if sentiment == "positive" and conversation_context["last_response"] != "Hare Manavi! Your joy lights up Vrindavan—shall we celebrate with a flute melody?" and not use_model:
response = "Hare Manavi! Your joy lights up Vrindavan—shall we celebrate with a flute melody?"
conversation_context["last_response"] = response
return response
# Fallback to multiple open-source AI models if no keywords match or if use_model is True
# Shuffle the models to try them in random order
models_to_try = AI_MODELS.copy()
random.shuffle(models_to_try)
headers = {
"Authorization": f"Bearer {HUGGINGFACE_API_TOKEN}",
"Content-Type": "application/json"
}
for model in models_to_try:
try:
# Special case for Grok by xAI (simulated directly)
if model["name"] == "Grok by xAI":
logger.info("Using Grok by xAI simulated response.")
                    response = (
                        "Hare Manavi! I’m Little Krishna, speaking through Grok by xAI. "
                        "Let me answer in my playful way: "
                    )
# Generate a Krishna-like response based on the input
if "color" in user_input_lower:
response += "I love the golden yellow of Vrindavan’s butter—it’s as sweet as your smile! What’s your favorite color?"
elif "weather" in user_input_lower:
response += "The Vrindavan sky is as clear as the Yamuna today—perfect for a flute melody! How’s your weather?"
elif "sad" in user_input_lower:
response += "Oh, my dear gopi, don’t be sad—let’s dance by the Yamuna, and I’ll play a tune to cheer you up!"
elif "what" in user_input_lower:
response += "What, you say? Let’s share a Vrindavan tale—shall we?"
else:
response += f"I’m twirling my flute just for you! Shall we share a Vrindavan adventure today?"
conversation_context["last_response"] = None
return response
# For other models, use the Hugging Face Inference API with retry logic
logger.info(f"Attempting to generate response with model: {model['name']}")
payload = {
"inputs": f"{SYSTEM_PROMPT} '{user_input}'",
"parameters": model["parameters"]
}
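                # Note: some text-generation models echo the prompt in "generated_text";
                # the Inference API's "return_full_text" parameter can disable that if needed.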
                response = make_api_request(model["endpoint"], headers=headers, payload=payload)
if response and response.status_code == 200:
result = response.json()
# Handle different response formats based on the model
if isinstance(result, list) and len(result) > 0 and "generated_text" in result[0]:
response_text = result[0]["generated_text"].strip()
elif isinstance(result, dict) and "generated_text" in result:
response_text = result["generated_text"].strip()
elif isinstance(result, str):
response_text = result.strip()
else:
logger.warning(f"Unexpected response format from {model['name']}: {result}")
continue
conversation_context["last_response"] = None
logger.info(f"Successfully generated response with {model['name']}: {response_text}")
return response_text
else:
logger.warning(f"Failed to generate response with {model['name']}: {response.text if response else 'No response'}")
continue
except Exception as e:
logger.error(f"Error processing model {model['name']}: {str(e)}")
continue
# If all models fail, return a default message
logger.error("All model attempts failed; returning default response.")
conversation_context["last_response"] = None
return "Hare Manavi! I seem to be lost in Vrindavan’s magic—let’s try a different tune!"
except Exception as e:
logger.error(f"Unhandled exception in get_krishna_response: {str(e)}")
return "Hare Manavi! Something went wrong—let’s try again with a new Vrindavan adventure!"