Update chatbot.py
Browse files- chatbot.py +519 -495
chatbot.py
CHANGED
@@ -2,13 +2,21 @@ import os
|
|
2 |
import requests
|
3 |
import random
|
4 |
import time
|
|
|
5 |
from dotenv import load_dotenv
|
6 |
from messages import krishna_blessings, ayush_teasing
|
7 |
from ayush_messages import ayush_surprises
|
8 |
|
|
|
|
|
|
|
|
|
9 |
# Load environment variables (Hugging Face Space secrets)
|
10 |
load_dotenv()
|
11 |
HUGGINGFACE_API_TOKEN = os.getenv("HUGGINGFACE_API_TOKEN")
|
|
|
|
|
|
|
12 |
|
13 |
# List of open-source models with fine-tuned parameters
|
14 |
AI_MODELS = [
|
@@ -136,19 +144,23 @@ def analyze_sentiment(user_input):
|
|
136 |
payload = {
|
137 |
"inputs": user_input
|
138 |
}
|
139 |
-
|
140 |
-
|
141 |
-
|
142 |
-
|
143 |
-
|
144 |
-
|
145 |
-
|
146 |
-
|
147 |
-
|
148 |
-
|
149 |
-
|
150 |
-
|
151 |
-
|
|
|
|
|
|
|
|
|
152 |
|
153 |
def make_api_request(url, headers, payload, retries=3, delay=5):
|
154 |
"""Helper function to make API requests with retry logic."""
|
@@ -158,17 +170,18 @@ def make_api_request(url, headers, payload, retries=3, delay=5):
|
|
158 |
if response.status_code == 200:
|
159 |
return response
|
160 |
elif response.status_code == 429: # Rate limit
|
161 |
-
|
162 |
time.sleep(delay)
|
163 |
continue
|
164 |
else:
|
165 |
-
|
166 |
return None
|
167 |
except Exception as e:
|
168 |
-
|
169 |
if attempt < retries - 1:
|
170 |
time.sleep(delay)
|
171 |
continue
|
|
|
172 |
return None
|
173 |
|
174 |
def get_krishna_response(user_input):
|
@@ -180,509 +193,520 @@ def get_krishna_response(user_input):
|
|
180 |
- Occasionally tease Manavi about Ayush (keyword-based or every 5th message).
|
181 |
- Fall back to multiple open-source AI models with fine-tuned prompts for unmatched inputs.
|
182 |
"""
|
183 |
-
|
|
|
|
|
184 |
|
185 |
-
|
186 |
-
|
|
|
187 |
|
188 |
-
|
189 |
-
|
190 |
|
191 |
-
|
192 |
-
|
|
|
193 |
|
194 |
-
|
195 |
-
|
196 |
-
|
197 |
-
|
198 |
-
|
199 |
-
|
200 |
-
|
201 |
-
|
202 |
-
|
203 |
-
|
204 |
-
|
205 |
-
|
206 |
-
|
207 |
-
|
208 |
-
|
209 |
-
|
210 |
-
|
211 |
-
|
212 |
-
|
213 |
-
|
214 |
-
|
215 |
-
|
216 |
-
|
217 |
-
|
218 |
-
|
219 |
-
|
220 |
-
|
221 |
-
|
222 |
-
|
223 |
-
|
224 |
-
|
225 |
-
|
226 |
-
|
227 |
-
|
228 |
-
|
229 |
-
|
230 |
-
|
231 |
-
|
232 |
-
|
233 |
-
|
234 |
-
|
235 |
-
|
236 |
-
|
237 |
-
|
238 |
-
|
239 |
-
|
240 |
-
|
241 |
-
# Trigger for "chat with you"
|
242 |
-
if ("chat with you" in user_input_lower or "want to chat" in user_input_lower) and not use_model:
|
243 |
-
conversation_context["last_topic"] = "chat_with_you"
|
244 |
-
conversation_context["last_response"] = None
|
245 |
-
return krishna_blessings["chat_with_you"]
|
246 |
-
|
247 |
-
# Every 5th message, randomly trigger an Ayush-teasing message (if no keyword match)
|
248 |
-
if conversation_context["message_count"] % 5 == 0 and not use_model:
|
249 |
-
# Randomly select a category from ayush_teasing
|
250 |
-
category = random.choice(list(ayush_teasing.keys()))
|
251 |
-
conversation_context["last_response"] = None
|
252 |
-
return random.choice(ayush_teasing[category])
|
253 |
-
|
254 |
-
# Existing keyword mappings for krishna_blessings and ayush_surprises
|
255 |
-
if ("hello" in user_input_lower or "hi" in user_input_lower or "hii" in user_input_lower) and not use_model:
|
256 |
-
conversation_context["last_topic"] = "greeting"
|
257 |
-
conversation_context["last_response"] = None
|
258 |
-
return krishna_blessings["greeting"]
|
259 |
-
if "good morning" in user_input_lower and not use_model:
|
260 |
-
conversation_context["last_topic"] = "greeting"
|
261 |
-
conversation_context["last_response"] = None
|
262 |
-
return krishna_blessings["good_morning"]
|
263 |
-
if "good afternoon" in user_input_lower and not use_model:
|
264 |
-
conversation_context["last_topic"] = "greeting"
|
265 |
-
conversation_context["last_response"] = None
|
266 |
-
return krishna_blessings["good_afternoon"]
|
267 |
-
if "good evening" in user_input_lower and not use_model:
|
268 |
-
conversation_context["last_topic"] = "greeting"
|
269 |
-
conversation_context["last_response"] = None
|
270 |
-
return krishna_blessings["good_evening"]
|
271 |
-
if "hey" in user_input_lower and not use_model:
|
272 |
-
conversation_context["last_topic"] = "greeting"
|
273 |
-
conversation_context["last_response"] = None
|
274 |
-
return krishna_blessings["hey"]
|
275 |
-
if "howdy" in user_input_lower and not use_model:
|
276 |
-
conversation_context["last_topic"] = "greeting"
|
277 |
-
conversation_context["last_response"] = None
|
278 |
-
return krishna_blessings["howdy"]
|
279 |
-
if "namaste" in user_input_lower and not use_model:
|
280 |
-
conversation_context["last_topic"] = "greeting"
|
281 |
-
conversation_context["last_response"] = None
|
282 |
-
return krishna_blessings["namaste"]
|
283 |
-
if "welcome" in user_input_lower and not use_model:
|
284 |
-
conversation_context["last_topic"] = "greeting"
|
285 |
-
conversation_context["last_response"] = None
|
286 |
-
return krishna_blessings["welcome"]
|
287 |
-
|
288 |
-
if ("who are you" in user_input_lower or "what are you" in user_input_lower or "tell me about yourself" in user_input_lower or "what are you doing" in user_input_lower) and not use_model:
|
289 |
-
conversation_context["last_topic"] = "identity"
|
290 |
-
conversation_context["last_response"] = None
|
291 |
-
return "Hare Manavi! I’m Little Krishna, the playful cowherd of Vrindavan! I love playing my flute, stealing butter, and dancing with the gopis. What would you like to do with me today?"
|
292 |
|
293 |
-
|
294 |
-
|
295 |
-
|
296 |
-
|
|
|
297 |
|
298 |
-
|
299 |
-
|
300 |
-
|
301 |
-
|
302 |
-
|
|
|
303 |
|
304 |
-
|
305 |
-
|
306 |
-
|
307 |
-
|
308 |
-
|
309 |
-
|
310 |
-
|
311 |
-
|
312 |
-
|
313 |
-
"
|
314 |
-
|
315 |
-
|
316 |
-
|
317 |
-
|
318 |
-
|
319 |
-
|
320 |
-
|
321 |
-
|
322 |
-
|
323 |
-
|
324 |
-
|
325 |
-
|
326 |
-
|
327 |
-
|
328 |
-
|
329 |
-
|
330 |
-
|
331 |
-
|
332 |
-
|
333 |
-
|
334 |
-
|
335 |
-
|
336 |
-
|
337 |
-
if "flute" in user_input_lower and not use_model:
|
338 |
-
conversation_context["last_topic"] = "flute"
|
339 |
-
conversation_context["last_response"] = None
|
340 |
-
return krishna_blessings["flute"]
|
341 |
-
if "butter" in user_input_lower and not use_model:
|
342 |
-
conversation_context["last_topic"] = "butter"
|
343 |
-
conversation_context["last_response"] = None
|
344 |
-
return krishna_blessings["butter"]
|
345 |
-
if ("mischief" in user_input_lower or "prank" in user_input_lower) and not use_model:
|
346 |
-
conversation_context["last_topic"] = "mischief"
|
347 |
-
conversation_context["last_response"] = None
|
348 |
-
return krishna_blessings["mischief"]
|
349 |
-
if ("chase" in user_input_lower or "run" in user_input_lower) and not use_model:
|
350 |
-
conversation_context["last_topic"] = "chase"
|
351 |
-
conversation_context["last_response"] = None
|
352 |
-
return krishna_blessings["chase"]
|
353 |
-
if "giggle" in user_input_lower and not use_model:
|
354 |
-
conversation_context["last_topic"] = "giggle"
|
355 |
-
conversation_context["last_response"] = None
|
356 |
-
return krishna_blessings["giggle"]
|
357 |
-
if "swing" in user_input_lower and not use_model:
|
358 |
-
conversation_context["last_topic"] = "swing"
|
359 |
-
conversation_context["last_response"] = None
|
360 |
-
return krishna_blessings["swing"]
|
361 |
|
362 |
-
|
363 |
-
|
364 |
-
|
365 |
-
|
366 |
-
if ("quiet" in user_input_lower or "calm" in user_input_lower) and not use_model:
|
367 |
-
conversation_context["last_topic"] = "quiet"
|
368 |
-
conversation_context["last_response"] = None
|
369 |
-
return krishna_blessings["quiet"]
|
370 |
-
if ("peace" in user_input_lower or "serene" in user_input_lower) and not use_model:
|
371 |
-
conversation_context["last_topic"] = "peace"
|
372 |
-
conversation_context["last_response"] = None
|
373 |
-
return krishna_blessings["peace"]
|
374 |
-
if ("still" in user_input_lower or "gentle" in user_input_lower) and not use_model:
|
375 |
-
conversation_context["last_topic"] = "still"
|
376 |
-
conversation_context["last_response"] = None
|
377 |
-
return krishna_blessings["still"]
|
378 |
-
if ("thoughtful" in user_input_lower or "reflect" in user_input_lower) and not use_model:
|
379 |
-
conversation_context["last_topic"] = "thoughtful"
|
380 |
-
conversation_context["last_response"] = None
|
381 |
-
return krishna_blessings["thoughtful"]
|
382 |
|
383 |
-
|
384 |
-
|
385 |
-
|
386 |
-
|
387 |
-
if ("laugh" in user_input_lower or "giggle" in user_input_lower) and not use_model:
|
388 |
-
conversation_context["last_topic"] = "joke"
|
389 |
-
conversation_context["last_response"] = None
|
390 |
-
return krishna_blessings["giggle_joke"]
|
391 |
-
if "silly" in user_input_lower and not use_model:
|
392 |
-
conversation_context["last_topic"] = "joke"
|
393 |
-
conversation_context["last_response"] = None
|
394 |
-
return krishna_blessings["silly"]
|
395 |
-
if "butter joke" in user_input_lower and not use_model:
|
396 |
-
conversation_context["last_topic"] = "joke"
|
397 |
-
conversation_context["last_response"] = None
|
398 |
-
return krishna_blessings["butter_joke"]
|
399 |
-
if "cow joke" in user_input_lower and not use_model:
|
400 |
-
conversation_context["last_topic"] = "joke"
|
401 |
-
conversation_context["last_response"] = None
|
402 |
-
return krishna_blessings["cow_joke"]
|
403 |
-
if "flute joke" in user_input_lower and not use_model:
|
404 |
-
conversation_context["last_topic"] = "joke"
|
405 |
-
conversation_context["last_response"] = None
|
406 |
-
return krishna_blessings["flute_joke"]
|
407 |
-
if "dance joke" in user_input_lower and not use_model:
|
408 |
-
conversation_context["last_topic"] = "joke"
|
409 |
-
conversation_context["last_response"] = None
|
410 |
-
return krishna_blessings["dance_joke"]
|
411 |
-
if "mischief joke" in user_input_lower and not use_model:
|
412 |
-
conversation_context["last_topic"] = "joke"
|
413 |
-
conversation_context["last_response"] = None
|
414 |
-
return krishna_blessings["mischief_joke"]
|
415 |
|
416 |
-
|
417 |
-
|
418 |
-
|
419 |
-
|
420 |
-
|
421 |
-
conversation_context["last_topic"] = "riddle"
|
422 |
-
conversation_context["last_response"] = None
|
423 |
-
return krishna_blessings["mystery"]
|
424 |
-
if "question" in user_input_lower and not use_model:
|
425 |
-
conversation_context["last_topic"] = "riddle"
|
426 |
-
conversation_context["last_response"] = None
|
427 |
-
return krishna_blessings["question"]
|
428 |
|
429 |
-
|
430 |
-
|
431 |
-
|
432 |
-
|
433 |
-
|
434 |
-
|
435 |
-
|
436 |
-
|
437 |
-
|
438 |
-
|
439 |
-
|
440 |
-
|
441 |
-
|
442 |
-
|
443 |
-
|
444 |
-
|
445 |
-
|
446 |
-
|
447 |
-
|
448 |
-
|
449 |
-
|
450 |
-
|
451 |
-
|
452 |
-
|
453 |
-
|
454 |
-
|
455 |
-
|
456 |
-
|
457 |
-
|
458 |
-
|
459 |
-
|
460 |
-
|
461 |
-
|
462 |
-
|
463 |
-
|
464 |
-
|
465 |
-
|
466 |
-
|
467 |
-
|
468 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
469 |
|
470 |
-
|
471 |
-
|
472 |
-
|
473 |
-
|
474 |
-
|
475 |
-
|
476 |
-
|
477 |
-
|
478 |
-
|
479 |
-
|
480 |
-
|
481 |
-
|
482 |
-
|
483 |
-
|
484 |
-
|
485 |
-
|
486 |
-
|
487 |
-
|
488 |
-
|
489 |
-
|
490 |
-
if "joy" in user_input_lower and not use_model:
|
491 |
-
conversation_context["last_topic"] = "wisdom"
|
492 |
-
conversation_context["last_response"] = None
|
493 |
-
return krishna_blessings["joy"]
|
494 |
-
if "friendship" in user_input_lower and not use_model:
|
495 |
-
conversation_context["last_topic"] = "wisdom"
|
496 |
-
conversation_context["last_response"] = None
|
497 |
-
return krishna_blessings["friendship"]
|
498 |
-
if "love" in user_input_lower and not use_model:
|
499 |
-
conversation_context["last_topic"] = "wisdom"
|
500 |
-
conversation_context["last_response"] = None
|
501 |
-
return krishna_blessings["love"]
|
502 |
|
503 |
-
|
504 |
-
|
505 |
-
|
506 |
-
|
507 |
-
|
508 |
-
|
509 |
-
|
510 |
-
|
511 |
-
|
512 |
-
|
513 |
-
|
514 |
-
|
515 |
-
|
516 |
-
|
517 |
-
|
518 |
-
|
519 |
-
|
520 |
-
|
521 |
-
|
522 |
-
|
523 |
-
|
524 |
-
|
525 |
-
|
526 |
-
|
527 |
-
|
528 |
-
|
529 |
-
|
530 |
-
|
531 |
-
|
532 |
-
|
533 |
-
|
534 |
-
|
535 |
-
if "sunset" in user_input_lower and not use_model:
|
536 |
-
conversation_context["last_topic"] = "nature"
|
537 |
-
conversation_context["last_response"] = None
|
538 |
-
return krishna_blessings["sunset"]
|
539 |
|
540 |
-
|
541 |
-
|
542 |
-
|
543 |
-
|
544 |
-
|
545 |
-
|
546 |
-
|
547 |
-
|
548 |
-
|
549 |
-
|
550 |
-
|
551 |
-
|
552 |
-
if "strength" in user_input_lower and not use_model:
|
553 |
-
conversation_context["last_topic"] = "encourage"
|
554 |
-
conversation_context["last_response"] = None
|
555 |
-
return krishna_blessings["strength"]
|
556 |
-
if "hope" in user_input_lower and not use_model:
|
557 |
-
conversation_context["last_topic"] = "encourage"
|
558 |
-
conversation_context["last_response"] = None
|
559 |
-
return krishna_blessings["hope"]
|
560 |
-
if "believe" in user_input_lower and not use_model:
|
561 |
-
conversation_context["last_topic"] = "encourage"
|
562 |
-
conversation_context["last_response"] = None
|
563 |
-
return krishna_blessings["believe"]
|
564 |
-
if "shine" in user_input_lower and not use_model:
|
565 |
-
conversation_context["last_topic"] = "encourage"
|
566 |
-
conversation_context["last_response"] = None
|
567 |
-
return krishna_blessings["shine"]
|
568 |
|
569 |
-
|
570 |
-
|
571 |
-
|
572 |
-
|
573 |
-
|
574 |
-
|
575 |
-
|
576 |
-
|
577 |
-
|
578 |
-
|
579 |
-
|
580 |
-
|
581 |
-
|
582 |
-
|
583 |
-
|
584 |
-
|
585 |
-
|
586 |
-
|
587 |
-
|
588 |
-
|
589 |
-
|
590 |
-
|
591 |
-
|
592 |
-
|
593 |
-
|
594 |
-
|
595 |
-
|
596 |
-
|
597 |
-
|
598 |
-
|
599 |
-
|
600 |
-
|
601 |
-
|
602 |
-
|
603 |
-
|
604 |
-
|
605 |
-
|
606 |
-
|
607 |
-
|
608 |
-
|
609 |
|
610 |
-
|
611 |
-
|
612 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
613 |
conversation_context["last_response"] = None
|
614 |
-
return krishna_blessings[
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
615 |
|
616 |
-
|
617 |
-
|
618 |
-
|
619 |
-
|
620 |
-
|
621 |
-
|
622 |
-
|
623 |
-
|
624 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
625 |
|
626 |
-
|
627 |
-
|
628 |
-
|
629 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
630 |
|
631 |
-
|
632 |
-
|
633 |
-
|
634 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
635 |
|
636 |
-
|
637 |
-
|
638 |
-
|
639 |
-
if model["name"] == "Grok by xAI":
|
640 |
-
# Simulate Grok's response (I, Grok, will generate the response directly)
|
641 |
-
response = (
|
642 |
-
f"Hare Manavi! I’m Little Krishna, speaking through Grok by xAI. "
|
643 |
-
f"Let me answer in my playful way: "
|
644 |
-
)
|
645 |
-
# Generate a Krishna-like response based on the input
|
646 |
-
if "color" in user_input_lower:
|
647 |
-
response += "I love the golden yellow of Vrindavan’s butter—it’s as sweet as your smile! What’s your favorite color?"
|
648 |
-
elif "weather" in user_input_lower:
|
649 |
-
response += "The Vrindavan sky is as clear as the Yamuna today—perfect for a flute melody! How’s your weather?"
|
650 |
-
elif "sad" in user_input_lower:
|
651 |
-
response += "Oh, my dear gopi, don’t be sad—let’s dance by the Yamuna, and I’ll play a tune to cheer you up!"
|
652 |
-
elif "what" in user_input_lower:
|
653 |
-
response += "What, you say? Let’s share a Vrindavan tale—shall we?"
|
654 |
-
else:
|
655 |
-
response += f"I’m twirling my flute just for you! Shall we share a Vrindavan adventure today?"
|
656 |
conversation_context["last_response"] = None
|
657 |
-
return
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
658 |
|
659 |
-
|
660 |
-
|
661 |
-
|
662 |
-
|
663 |
-
|
664 |
-
|
665 |
-
|
666 |
-
|
667 |
-
|
668 |
-
|
669 |
-
|
670 |
-
|
671 |
-
|
672 |
-
|
673 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
674 |
else:
|
675 |
-
|
676 |
continue
|
677 |
-
|
678 |
-
|
679 |
-
else:
|
680 |
-
print(f"Error with {model['name']}: {response.text if response else 'No response'}")
|
681 |
continue
|
682 |
-
except Exception as e:
|
683 |
-
print(f"Error connecting to {model['name']}: {str(e)}")
|
684 |
-
continue
|
685 |
|
686 |
-
|
687 |
-
|
688 |
-
|
|
|
|
|
|
|
|
|
|
|
|
2 |
import requests
|
3 |
import random
|
4 |
import time
|
5 |
+
import logging
|
6 |
from dotenv import load_dotenv
|
7 |
from messages import krishna_blessings, ayush_teasing
|
8 |
from ayush_messages import ayush_surprises
|
9 |
|
10 |
+
# Configure logging
|
11 |
+
logging.basicConfig(level=logging.INFO)
|
12 |
+
logger = logging.getLogger(__name__)
|
13 |
+
|
14 |
# Load environment variables (Hugging Face Space secrets)
|
15 |
load_dotenv()
|
16 |
HUGGINGFACE_API_TOKEN = os.getenv("HUGGINGFACE_API_TOKEN")
|
17 |
+
if not HUGGINGFACE_API_TOKEN:
|
18 |
+
logger.error("HUGGINGFACE_API_TOKEN not found in environment variables.")
|
19 |
+
raise ValueError("HUGGINGFACE_API_TOKEN is required.")
|
20 |
|
21 |
# List of open-source models with fine-tuned parameters
|
22 |
AI_MODELS = [
|
|
|
144 |
payload = {
|
145 |
"inputs": user_input
|
146 |
}
|
147 |
+
try:
|
148 |
+
response = make_api_request(
|
149 |
+
"https://api-inference.huggingface.co/models/distilbert-base-uncased-finetuned-sst-2-english",
|
150 |
+
headers=headers,
|
151 |
+
json=payload
|
152 |
+
)
|
153 |
+
if response and response.status_code == 200:
|
154 |
+
result = response.json()
|
155 |
+
if isinstance(result, list) and len(result) > 0:
|
156 |
+
sentiment = result[0]
|
157 |
+
label = sentiment[0]["label"] # "POSITIVE" or "NEGATIVE"
|
158 |
+
return label.lower()
|
159 |
+
logger.warning("Sentiment analysis failed after retries.")
|
160 |
+
return "neutral"
|
161 |
+
except Exception as e:
|
162 |
+
logger.error(f"Error in analyze_sentiment: {str(e)}")
|
163 |
+
return "neutral"
|
164 |
|
165 |
def make_api_request(url, headers, payload, retries=3, delay=5):
|
166 |
"""Helper function to make API requests with retry logic."""
|
|
|
170 |
if response.status_code == 200:
|
171 |
return response
|
172 |
elif response.status_code == 429: # Rate limit
|
173 |
+
logger.warning(f"Rate limit hit on attempt {attempt + 1}. Retrying after {delay} seconds...")
|
174 |
time.sleep(delay)
|
175 |
continue
|
176 |
else:
|
177 |
+
logger.error(f"API error: {response.text}")
|
178 |
return None
|
179 |
except Exception as e:
|
180 |
+
logger.error(f"API request failed on attempt {attempt + 1}: {str(e)}")
|
181 |
if attempt < retries - 1:
|
182 |
time.sleep(delay)
|
183 |
continue
|
184 |
+
logger.error(f"API request failed after {retries} retries.")
|
185 |
return None
|
186 |
|
187 |
def get_krishna_response(user_input):
|
|
|
193 |
- Occasionally tease Manavi about Ayush (keyword-based or every 5th message).
|
194 |
- Fall back to multiple open-source AI models with fine-tuned prompts for unmatched inputs.
|
195 |
"""
|
196 |
+
try:
|
197 |
+
user_input_lower = user_input.lower().strip()
|
198 |
+
logger.info(f"Processing user input: {user_input_lower}")
|
199 |
|
200 |
+
# Analyze the sentiment of the user's input
|
201 |
+
sentiment = analyze_sentiment(user_input)
|
202 |
+
logger.info(f"Sentiment detected: {sentiment}")
|
203 |
|
204 |
+
# Increment message count
|
205 |
+
conversation_context["message_count"] += 1
|
206 |
|
207 |
+
# Random chance (30%) to skip predefined responses and let the model generate a response
|
208 |
+
use_model = random.random() < 0.3
|
209 |
+
logger.info(f"Use model generation: {use_model}")
|
210 |
|
211 |
+
# Reset context if user starts a new conversation
|
212 |
+
if "start over" in user_input_lower or "reset" in user_input_lower:
|
213 |
+
conversation_context["last_topic"] = None
|
214 |
+
conversation_context["message_count"] = 0
|
215 |
+
conversation_context["last_response"] = None
|
216 |
+
conversation_context["last_yes_response"] = None
|
217 |
+
return "Hare Manavi! Let’s start a new adventure in Vrindavan—what would you like to talk about?"
|
218 |
+
|
219 |
+
# Check for follow-up responses based on context
|
220 |
+
if conversation_context["last_response"] == "Hare Manavi! Your joy lights up Vrindavan—shall we celebrate with a flute melody?":
|
221 |
+
if "yes" in user_input_lower or "okay" in user_input_lower or "sure" in user_input_lower:
|
222 |
+
conversation_context["last_response"] = None # Reset to avoid infinite loop
|
223 |
+
return "Hare Manavi! Let’s play a flute melody by the Yamuna—the peacocks will dance with us!"
|
224 |
+
|
225 |
+
# Check for Ayush-teasing triggers (keyword-based)
|
226 |
+
if "joke" in user_input_lower and not use_model:
|
227 |
+
conversation_context["last_topic"] = "joke"
|
228 |
+
conversation_context["last_response"] = None
|
229 |
+
# Randomly decide between a Krishna joke and an Ayush-teasing joke
|
230 |
+
if random.choice([True, False]):
|
231 |
+
return random.choice(ayush_teasing["joke"])
|
232 |
+
return krishna_blessings["joke"]
|
233 |
+
if ("i miss" in user_input_lower or "missing" in user_input_lower) and not use_model:
|
234 |
+
conversation_context["last_topic"] = "missing"
|
235 |
+
conversation_context["last_response"] = None
|
236 |
+
return random.choice(ayush_teasing["missing"])
|
237 |
+
if "bored" in user_input_lower and not use_model:
|
238 |
+
conversation_context["last_topic"] = "bored"
|
239 |
+
conversation_context["last_response"] = None
|
240 |
+
return random.choice(ayush_teasing["bored"])
|
241 |
+
if "tired" in user_input_lower and not use_model:
|
242 |
+
conversation_context["last_topic"] = "tired"
|
243 |
+
conversation_context["last_response"] = None
|
244 |
+
return random.choice(ayush_teasing["tired"])
|
245 |
+
if "lonely" in user_input_lower and not use_model:
|
246 |
+
conversation_context["last_topic"] = "lonely"
|
247 |
+
conversation_context["last_response"] = None
|
248 |
+
return random.choice(ayush_teasing["lonely"])
|
249 |
+
if "manavi" in user_input_lower and not use_model:
|
250 |
+
conversation_context["last_topic"] = "manavi"
|
251 |
+
conversation_context["last_response"] = None
|
252 |
+
return random.choice(ayush_teasing["manavi"])
|
253 |
+
if ("ayush" in user_input_lower or "krishna talk about ayush" in user_input_lower) and not use_model:
|
254 |
+
conversation_context["last_topic"] = "ayush"
|
255 |
+
conversation_context["last_response"] = None
|
256 |
+
return random.choice(ayush_teasing["ayush"])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
257 |
|
258 |
+
# Trigger for "chat with you"
|
259 |
+
if ("chat with you" in user_input_lower or "want to chat" in user_input_lower) and not use_model:
|
260 |
+
conversation_context["last_topic"] = "chat_with_you"
|
261 |
+
conversation_context["last_response"] = None
|
262 |
+
return krishna_blessings["chat_with_you"]
|
263 |
|
264 |
+
# Every 5th message, randomly trigger an Ayush-teasing message (if no keyword match)
|
265 |
+
if conversation_context["message_count"] % 5 == 0 and not use_model:
|
266 |
+
# Randomly select a category from ayush_teasing
|
267 |
+
category = random.choice(list(ayush_teasing.keys()))
|
268 |
+
conversation_context["last_response"] = None
|
269 |
+
return random.choice(ayush_teasing[category])
|
270 |
|
271 |
+
# Existing keyword mappings for krishna_blessings and ayush_surprises
|
272 |
+
if ("hello" in user_input_lower or "hi" in user_input_lower or "hii" in user_input_lower) and not use_model:
|
273 |
+
conversation_context["last_topic"] = "greeting"
|
274 |
+
conversation_context["last_response"] = None
|
275 |
+
return krishna_blessings["greeting"]
|
276 |
+
if "good morning" in user_input_lower and not use_model:
|
277 |
+
conversation_context["last_topic"] = "greeting"
|
278 |
+
conversation_context["last_response"] = None
|
279 |
+
return krishna_blessings["good_morning"]
|
280 |
+
if "good afternoon" in user_input_lower and not use_model:
|
281 |
+
conversation_context["last_topic"] = "greeting"
|
282 |
+
conversation_context["last_response"] = None
|
283 |
+
return krishna_blessings["good_afternoon"]
|
284 |
+
if "good evening" in user_input_lower and not use_model:
|
285 |
+
conversation_context["last_topic"] = "greeting"
|
286 |
+
conversation_context["last_response"] = None
|
287 |
+
return krishna_blessings["good_evening"]
|
288 |
+
if "hey" in user_input_lower and not use_model:
|
289 |
+
conversation_context["last_topic"] = "greeting"
|
290 |
+
conversation_context["last_response"] = None
|
291 |
+
return krishna_blessings["hey"]
|
292 |
+
if "howdy" in user_input_lower and not use_model:
|
293 |
+
conversation_context["last_topic"] = "greeting"
|
294 |
+
conversation_context["last_response"] = None
|
295 |
+
return krishna_blessings["howdy"]
|
296 |
+
if "namaste" in user_input_lower and not use_model:
|
297 |
+
conversation_context["last_topic"] = "greeting"
|
298 |
+
conversation_context["last_response"] = None
|
299 |
+
return krishna_blessings["namaste"]
|
300 |
+
if "welcome" in user_input_lower and not use_model:
|
301 |
+
conversation_context["last_topic"] = "greeting"
|
302 |
+
conversation_context["last_response"] = None
|
303 |
+
return krishna_blessings["welcome"]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
304 |
|
305 |
+
if ("who are you" in user_input_lower or "what are you" in user_input_lower or "tell me about yourself" in user_input_lower or "what are you doing" in user_input_lower) and not use_model:
|
306 |
+
conversation_context["last_topic"] = "identity"
|
307 |
+
conversation_context["last_response"] = None
|
308 |
+
return "Hare Manavi! I’m Little Krishna, the playful cowherd of Vrindavan! I love playing my flute, stealing butter, and dancing with the gopis. What would you like to do with me today?"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
309 |
|
310 |
+
if "how are you" in user_input_lower and not use_model:
|
311 |
+
conversation_context["last_topic"] = "how_are_you"
|
312 |
+
conversation_context["last_response"] = None
|
313 |
+
return "Hare Manavi! I’m as joyful as a peacock dancing in Vrindavan—how about you, my friend?"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
314 |
|
315 |
+
# Handle "how" questions (including typos like "hoe")
|
316 |
+
if ("how" in user_input_lower or "hoe" in user_input_lower) and not use_model:
|
317 |
+
conversation_context["last_topic"] = "how"
|
318 |
+
conversation_context["last_response"] = None
|
319 |
+
return "Hare Manavi! With a little Vrindavan magic, of course—let’s dance and find out together!"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
320 |
|
321 |
+
# Specific handling for "what"
|
322 |
+
if "what" in user_input_lower and not ("what are you" in user_input_lower or "what are you doing" in user_input_lower) and not use_model:
|
323 |
+
conversation_context["last_topic"] = "what"
|
324 |
+
conversation_context["last_response"] = None
|
325 |
+
return "Hare Manavi! What, you say? Let’s share a Vrindavan tale—shall we?"
|
326 |
+
|
327 |
+
# Varied responses for "yes", avoiding repetition
|
328 |
+
yes_responses = [
|
329 |
+
"Hare Manavi! Wonderful—let’s make today as magical as Vrindavan’s sunsets!",
|
330 |
+
"Hare Manavi! Great—shall we chase some butterflies by the Yamuna?",
|
331 |
+
"Hare Manavi! Perfect—let’s share some butter under the kadamba tree!",
|
332 |
+
"Hare Manavi! Lovely—how about a dance with the gopis in Vrindavan’s fields?"
|
333 |
+
]
|
334 |
+
if ("yes" in user_input_lower or "okay" in user_input_lower or "sure" in user_input_lower) and not use_model:
|
335 |
+
# If no context for "yes", provide a varied positive response
|
336 |
+
conversation_context["last_topic"] = "yes"
|
337 |
+
conversation_context["last_response"] = None
|
338 |
+
# Avoid repeating the last "yes" response
|
339 |
+
available_responses = [resp for resp in yes_responses if resp != conversation_context["last_yes_response"]]
|
340 |
+
if not available_responses: # If all responses have been used, reset
|
341 |
+
available_responses = yes_responses
|
342 |
+
selected_response = random.choice(available_responses)
|
343 |
+
conversation_context["last_yes_response"] = selected_response
|
344 |
+
return selected_response
|
345 |
+
|
346 |
+
if ("play" in user_input_lower or "fun" in user_input_lower) and not use_model:
|
347 |
+
conversation_context["last_topic"] = "playful"
|
348 |
+
conversation_context["last_response"] = None
|
349 |
+
return krishna_blessings["playful"]
|
350 |
+
if "dance" in user_input_lower and not use_model:
|
351 |
+
conversation_context["last_topic"] = "dance"
|
352 |
+
conversation_context["last_response"] = None
|
353 |
+
return krishna_blessings["dance"]
|
354 |
+
if "flute" in user_input_lower and not use_model:
|
355 |
+
conversation_context["last_topic"] = "flute"
|
356 |
+
conversation_context["last_response"] = None
|
357 |
+
return krishna_blessings["flute"]
|
358 |
+
if "butter" in user_input_lower and not use_model:
|
359 |
+
conversation_context["last_topic"] = "butter"
|
360 |
+
conversation_context["last_response"] = None
|
361 |
+
return krishna_blessings["butter"]
|
362 |
+
if ("mischief" in user_input_lower or "prank" in user_input_lower) and not use_model:
|
363 |
+
conversation_context["last_topic"] = "mischief"
|
364 |
+
conversation_context["last_response"] = None
|
365 |
+
return krishna_blessings["mischief"]
|
366 |
+
if ("chase" in user_input_lower or "run" in user_input_lower) and not use_model:
|
367 |
+
conversation_context["last_topic"] = "chase"
|
368 |
+
conversation_context["last_response"] = None
|
369 |
+
return krishna_blessings["chase"]
|
370 |
+
if "giggle" in user_input_lower and not use_model:
|
371 |
+
conversation_context["last_topic"] = "giggle"
|
372 |
+
conversation_context["last_response"] = None
|
373 |
+
return krishna_blessings["giggle"]
|
374 |
+
if "swing" in user_input_lower and not use_model:
|
375 |
+
conversation_context["last_topic"] = "swing"
|
376 |
+
conversation_context["last_response"] = None
|
377 |
+
return krishna_blessings["swing"]
|
378 |
|
379 |
+
if "shy" in user_input_lower and not use_model:
|
380 |
+
conversation_context["last_topic"] = "shy"
|
381 |
+
conversation_context["last_response"] = None
|
382 |
+
return krishna_blessings["shy"]
|
383 |
+
if ("quiet" in user_input_lower or "calm" in user_input_lower) and not use_model:
|
384 |
+
conversation_context["last_topic"] = "quiet"
|
385 |
+
conversation_context["last_response"] = None
|
386 |
+
return krishna_blessings["quiet"]
|
387 |
+
if ("peace" in user_input_lower or "serene" in user_input_lower) and not use_model:
|
388 |
+
conversation_context["last_topic"] = "peace"
|
389 |
+
conversation_context["last_response"] = None
|
390 |
+
return krishna_blessings["peace"]
|
391 |
+
if ("still" in user_input_lower or "gentle" in user_input_lower) and not use_model:
|
392 |
+
conversation_context["last_topic"] = "still"
|
393 |
+
conversation_context["last_response"] = None
|
394 |
+
return krishna_blessings["still"]
|
395 |
+
if ("thoughtful" in user_input_lower or "reflect" in user_input_lower) and not use_model:
|
396 |
+
conversation_context["last_topic"] = "thoughtful"
|
397 |
+
conversation_context["last_response"] = None
|
398 |
+
return krishna_blessings["thoughtful"]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
399 |
|
400 |
+
if "funny" in user_input_lower and not use_model:
|
401 |
+
conversation_context["last_topic"] = "joke"
|
402 |
+
conversation_context["last_response"] = None
|
403 |
+
return krishna_blessings["funny"]
|
404 |
+
if ("laugh" in user_input_lower or "giggle" in user_input_lower) and not use_model:
|
405 |
+
conversation_context["last_topic"] = "joke"
|
406 |
+
conversation_context["last_response"] = None
|
407 |
+
return krishna_blessings["giggle_joke"]
|
408 |
+
if "silly" in user_input_lower and not use_model:
|
409 |
+
conversation_context["last_topic"] = "joke"
|
410 |
+
conversation_context["last_response"] = None
|
411 |
+
return krishna_blessings["silly"]
|
412 |
+
if "butter joke" in user_input_lower and not use_model:
|
413 |
+
conversation_context["last_topic"] = "joke"
|
414 |
+
conversation_context["last_response"] = None
|
415 |
+
return krishna_blessings["butter_joke"]
|
416 |
+
if "cow joke" in user_input_lower and not use_model:
|
417 |
+
conversation_context["last_topic"] = "joke"
|
418 |
+
conversation_context["last_response"] = None
|
419 |
+
return krishna_blessings["cow_joke"]
|
420 |
+
if "flute joke" in user_input_lower and not use_model:
|
421 |
+
conversation_context["last_topic"] = "joke"
|
422 |
+
conversation_context["last_response"] = None
|
423 |
+
return krishna_blessings["flute_joke"]
|
424 |
+
if "dance joke" in user_input_lower and not use_model:
|
425 |
+
conversation_context["last_topic"] = "joke"
|
426 |
+
conversation_context["last_response"] = None
|
427 |
+
return krishna_blessings["dance_joke"]
|
428 |
+
if "mischief joke" in user_input_lower and not use_model:
|
429 |
+
conversation_context["last_topic"] = "joke"
|
430 |
+
conversation_context["last_response"] = None
|
431 |
+
return krishna_blessings["mischief_joke"]
|
|
|
|
|
|
|
|
|
432 |
|
433 |
+
if ("riddle" in user_input_lower or "puzzle" in user_input_lower) and not use_model:
|
434 |
+
conversation_context["last_topic"] = "riddle"
|
435 |
+
conversation_context["last_response"] = None
|
436 |
+
return krishna_blessings["riddle"]
|
437 |
+
if ("mystery" in user_input_lower or "enigma" in user_input_lower) and not use_model:
|
438 |
+
conversation_context["last_topic"] = "riddle"
|
439 |
+
conversation_context["last_response"] = None
|
440 |
+
return krishna_blessings["mystery"]
|
441 |
+
if "question" in user_input_lower and not use_model:
|
442 |
+
conversation_context["last_topic"] = "riddle"
|
443 |
+
conversation_context["last_response"] = None
|
444 |
+
return krishna_blessings["question"]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
445 |
|
446 |
+
if "birthday" in user_input_lower and not use_model:
|
447 |
+
conversation_context["last_topic"] = "birthday"
|
448 |
+
conversation_context["last_response"] = None
|
449 |
+
return ayush_surprises["birthday"]
|
450 |
+
if "happy birthday" in user_input_lower and not use_model:
|
451 |
+
conversation_context["last_topic"] = "birthday"
|
452 |
+
conversation_context["last_response"] = None
|
453 |
+
return krishna_blessings["happy_birthday"]
|
454 |
+
if "birthday wish" in user_input_lower and not use_model:
|
455 |
+
conversation_context["last_topic"] = "birthday"
|
456 |
+
conversation_context["last_response"] = None
|
457 |
+
return krishna_blessings["birthday_wish"]
|
458 |
+
if "birthday blessing" in user_input_lower and not use_model:
|
459 |
+
conversation_context["last_topic"] = "birthday"
|
460 |
+
conversation_context["last_response"] = None
|
461 |
+
return krishna_blessings["birthday_blessing"]
|
462 |
+
if "birthday dance" in user_input_lower and not use_model:
|
463 |
+
conversation_context["last_topic"] = "birthday"
|
464 |
+
conversation_context["last_response"] = None
|
465 |
+
return krishna_blessings["birthday_dance"]
|
466 |
+
if "birthday song" in user_input_lower and not use_model:
|
467 |
+
conversation_context["last_topic"] = "birthday"
|
468 |
+
conversation_context["last_response"] = None
|
469 |
+
return krishna_blessings["birthday_song"]
|
470 |
+
if "birthday gift" in user_input_lower and not use_model:
|
471 |
+
conversation_context["last_topic"] = "birthday"
|
472 |
+
conversation_context["last_response"] = None
|
473 |
+
return krishna_blessings["birthday_gift"]
|
474 |
+
if "birthday smile" in user_input_lower and not use_model:
|
475 |
+
conversation_context["last_topic"] = "birthday"
|
476 |
+
conversation_context["last_response"] = None
|
477 |
+
return krishna_blessings["birthday_smile"]
|
478 |
+
if "birthday love" in user_input_lower and not use_model:
|
479 |
+
conversation_context["last_topic"] = "birthday"
|
480 |
+
conversation_context["last_response"] = None
|
481 |
+
return krishna_blessings["birthday_love"]
|
482 |
+
if "birthday magic" in user_input_lower and not use_model:
|
483 |
+
conversation_context["last_topic"] = "birthday"
|
484 |
+
conversation_context["last_response"] = None
|
485 |
+
return krishna_blessings["birthday_magic"]
|
486 |
|
487 |
+
if ("wisdom" in user_input_lower or "advice" in user_input_lower) and not use_model:
|
488 |
+
conversation_context["last_topic"] = "wisdom"
|
489 |
+
conversation_context["last_response"] = None
|
490 |
+
return krishna_blessings["wisdom"]
|
491 |
+
if ("lesson" in user_input_lower or "truth" in user_input_lower) and not use_model:
|
492 |
+
conversation_context["last_topic"] = "wisdom"
|
493 |
+
conversation_context["last_response"] = None
|
494 |
+
return krishna_blessings["lesson"]
|
495 |
+
if "kindness" in user_input_lower and not use_model:
|
496 |
+
conversation_context["last_topic"] = "wisdom"
|
497 |
+
conversation_context["last_response"] = None
|
498 |
+
return krishna_blessings["kindness"]
|
499 |
+
if "patience" in user_input_lower and not use_model:
|
500 |
+
conversation_context["last_topic"] = "wisdom"
|
501 |
+
conversation_context["last_response"] = None
|
502 |
+
return krishna_blessings["patience"]
|
503 |
+
if "courage" in user_input_lower and not use_model:
|
504 |
+
conversation_context["last_topic"] = "wisdom"
|
505 |
conversation_context["last_response"] = None
|
506 |
+
return krishna_blessings["courage"]
|
507 |
+
if "joy" in user_input_lower and not use_model:
|
508 |
+
conversation_context["last_topic"] = "wisdom"
|
509 |
+
conversation_context["last_response"] = None
|
510 |
+
return krishna_blessings["joy"]
|
511 |
+
if "friendship" in user_input_lower and not use_model:
|
512 |
+
conversation_context["last_topic"] = "wisdom"
|
513 |
+
conversation_context["last_response"] = None
|
514 |
+
return krishna_blessings["friendship"]
|
515 |
+
if "love" in user_input_lower and not use_model:
|
516 |
+
conversation_context["last_topic"] = "wisdom"
|
517 |
+
conversation_context["last_response"] = None
|
518 |
+
return krishna_blessings["love"]
|
519 |
|
520 |
+
if ("nature" in user_input_lower or "vrindavan" in user_input_lower) and not use_model:
|
521 |
+
conversation_context["last_topic"] = "nature"
|
522 |
+
conversation_context["last_response"] = None
|
523 |
+
return krishna_blessings["nature"]
|
524 |
+
if ("yamuna" in user_input_lower or "river" in user_input_lower) and not use_model:
|
525 |
+
conversation_context["last_topic"] = "nature"
|
526 |
+
conversation_context["last_response"] = None
|
527 |
+
return krishna_blessings["yamuna"]
|
528 |
+
if "peacock" in user_input_lower and not use_model:
|
529 |
+
conversation_context["last_topic"] = "nature"
|
530 |
+
conversation_context["last_response"] = None
|
531 |
+
return krishna_blessings["peacock"]
|
532 |
+
if "cow" in user_input_lower and not use_model:
|
533 |
+
conversation_context["last_topic"] = "nature"
|
534 |
+
conversation_context["last_response"] = None
|
535 |
+
return krishna_blessings["cow"]
|
536 |
+
if "flower" in user_input_lower and not use_model:
|
537 |
+
conversation_context["last_topic"] = "nature"
|
538 |
+
conversation_context["last_response"] = None
|
539 |
+
return krishna_blessings["flower"]
|
540 |
+
if "tree" in user_input_lower and not use_model:
|
541 |
+
conversation_context["last_topic"] = "nature"
|
542 |
+
conversation_context["last_response"] = None
|
543 |
+
return krishna_blessings["tree"]
|
544 |
+
if "forest" in user_input_lower and not use_model:
|
545 |
+
conversation_context["last_topic"] = "nature"
|
546 |
+
conversation_context["last_response"] = None
|
547 |
+
return krishna_blessings["forest"]
|
548 |
+
if "bird" in user_input_lower and not use_model:
|
549 |
+
conversation_context["last_topic"] = "nature"
|
550 |
+
conversation_context["last_response"] = None
|
551 |
+
return krishna_blessings["bird"]
|
552 |
+
if "sunset" in user_input_lower and not use_model:
|
553 |
+
conversation_context["last_topic"] = "nature"
|
554 |
+
conversation_context["last_response"] = None
|
555 |
+
return krishna_blessings["sunset"]
|
556 |
|
557 |
+
if ("encourage" in user_input_lower or "cheer" in user_input_lower) and not use_model:
|
558 |
+
conversation_context["last_topic"] = "encourage"
|
559 |
+
conversation_context["last_response"] = None
|
560 |
+
return krishna_blessings["encourage"]
|
561 |
+
if ("support" in user_input_lower or "uplift" in user_input_lower) and not use_model:
|
562 |
+
conversation_context["last_topic"] = "encourage"
|
563 |
+
conversation_context["last_response"] = None
|
564 |
+
return krishna_blessings["support"]
|
565 |
+
if ("inspire" in user_input_lower or "motivate" in user_input_lower) and not use_model:
|
566 |
+
conversation_context["last_topic"] = "encourage"
|
567 |
+
conversation_context["last_response"] = None
|
568 |
+
return krishna_blessings["inspire"]
|
569 |
+
if "strength" in user_input_lower and not use_model:
|
570 |
+
conversation_context["last_topic"] = "encourage"
|
571 |
+
conversation_context["last_response"] = None
|
572 |
+
return krishna_blessings["strength"]
|
573 |
+
if "hope" in user_input_lower and not use_model:
|
574 |
+
conversation_context["last_topic"] = "encourage"
|
575 |
+
conversation_context["last_response"] = None
|
576 |
+
return krishna_blessings["hope"]
|
577 |
+
if "believe" in user_input_lower and not use_model:
|
578 |
+
conversation_context["last_topic"] = "encourage"
|
579 |
+
conversation_context["last_response"] = None
|
580 |
+
return krishna_blessings["believe"]
|
581 |
+
if "shine" in user_input_lower and not use_model:
|
582 |
+
conversation_context["last_topic"] = "encourage"
|
583 |
+
conversation_context["last_response"] = None
|
584 |
+
return krishna_blessings["shine"]
|
585 |
|
586 |
+
if "friend" in user_input_lower and not use_model:
|
587 |
+
conversation_context["last_topic"] = "friend"
|
588 |
+
conversation_context["last_response"] = None
|
589 |
+
return krishna_blessings["friend"]
|
590 |
+
if "smile" in user_input_lower and not use_model:
|
591 |
+
conversation_context["last_topic"] = "smile"
|
592 |
+
conversation_context["last_response"] = None
|
593 |
+
return krishna_blessings["smile"]
|
594 |
+
if "magic" in user_input_lower and not use_model:
|
595 |
+
conversation_context["last_topic"] = "magic"
|
596 |
+
conversation_context["last_response"] = None
|
597 |
+
return krishna_blessings["magic"]
|
598 |
+
if "adventure" in user_input_lower and not use_model:
|
599 |
+
conversation_context["last_topic"] = "adventure"
|
600 |
+
conversation_context["last_response"] = None
|
601 |
+
return krishna_blessings["adventure"]
|
602 |
+
if "song" in user_input_lower and not use_model:
|
603 |
+
conversation_context["last_topic"] = "song"
|
604 |
+
conversation_context["last_response"] = None
|
605 |
+
return krishna_blessings["song"]
|
606 |
+
if "dream" in user_input_lower and not use_model:
|
607 |
+
conversation_context["last_topic"] = "dream"
|
608 |
+
conversation_context["last_response"] = None
|
609 |
+
return krishna_blessings["dream"]
|
610 |
+
if "story" in user_input_lower and not use_model:
|
611 |
+
conversation_context["last_topic"] = "story"
|
612 |
+
conversation_context["last_response"] = None
|
613 |
+
return krishna_blessings["story"]
|
614 |
+
if "surprise" in user_input_lower and not use_model:
|
615 |
+
conversation_context["last_topic"] = "surprise"
|
616 |
+
conversation_context["last_response"] = None
|
617 |
+
return krishna_blessings["surprise"]
|
618 |
+
if "celebrate" in user_input_lower and not use_model:
|
619 |
+
conversation_context["last_topic"] = "celebrate"
|
620 |
+
conversation_context["last_response"] = None
|
621 |
+
return krishna_blessings["celebrate"]
|
622 |
+
if "blessing" in user_input_lower and not use_model:
|
623 |
+
conversation_context["last_topic"] = "blessing"
|
624 |
+
conversation_context["last_response"] = None
|
625 |
+
return krishna_blessings["blessing"]
|
626 |
|
627 |
+
if conversation_context["last_topic"] and not use_model:
|
628 |
+
last_topic = conversation_context["last_topic"]
|
629 |
+
if last_topic in krishna_blessings:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
630 |
conversation_context["last_response"] = None
|
631 |
+
return krishna_blessings[last_topic] + " What else would you like to talk about, Manavi?"
|
632 |
+
|
633 |
+
# Sentiment-based responses (only as a fallback, and avoid repetition)
|
634 |
+
if sentiment == "negative" and "sad" not in user_input_lower and conversation_context["last_response"] != "Hare Manavi! I see a little cloud over your heart—let’s dance by the Yamuna to bring back your smile!" and not use_model:
|
635 |
+
response = "Hare Manavi! I see a little cloud over your heart—let’s dance by the Yamuna to bring back your smile!"
|
636 |
+
conversation_context["last_response"] = response
|
637 |
+
return response
|
638 |
+
if sentiment == "positive" and conversation_context["last_response"] != "Hare Manavi! Your joy lights up Vrindavan—shall we celebrate with a flute melody?" and not use_model:
|
639 |
+
response = "Hare Manavi! Your joy lights up Vrindavan—shall we celebrate with a flute melody?"
|
640 |
+
conversation_context["last_response"] = response
|
641 |
+
return response
|
642 |
+
|
643 |
+
# Fallback to multiple open-source AI models if no keywords match or if use_model is True
|
644 |
+
# Shuffle the models to try them in random order
|
645 |
+
models_to_try = AI_MODELS.copy()
|
646 |
+
random.shuffle(models_to_try)
|
647 |
+
|
648 |
+
headers = {
|
649 |
+
"Authorization": f"Bearer {HUGGINGFACE_API_TOKEN}",
|
650 |
+
"Content-Type": "application/json"
|
651 |
+
}
|
652 |
|
653 |
+
for model in models_to_try:
|
654 |
+
try:
|
655 |
+
# Special case for Grok by xAI (simulated directly)
|
656 |
+
if model["name"] == "Grok by xAI":
|
657 |
+
logger.info("Using Grok by xAI simulated response.")
|
658 |
+
response = (
|
659 |
+
f"Hare Manavi! I’m Little Krishna, speaking through Grok by xAI. "
|
660 |
+
f"Let me answer in my playful way: "
|
661 |
+
)
|
662 |
+
# Generate a Krishna-like response based on the input
|
663 |
+
if "color" in user_input_lower:
|
664 |
+
response += "I love the golden yellow of Vrindavan’s butter—it’s as sweet as your smile! What’s your favorite color?"
|
665 |
+
elif "weather" in user_input_lower:
|
666 |
+
response += "The Vrindavan sky is as clear as the Yamuna today—perfect for a flute melody! How’s your weather?"
|
667 |
+
elif "sad" in user_input_lower:
|
668 |
+
response += "Oh, my dear gopi, don’t be sad—let’s dance by the Yamuna, and I’ll play a tune to cheer you up!"
|
669 |
+
elif "what" in user_input_lower:
|
670 |
+
response += "What, you say? Let’s share a Vrindavan tale—shall we?"
|
671 |
+
else:
|
672 |
+
response += f"I’m twirling my flute just for you! Shall we share a Vrindavan adventure today?"
|
673 |
+
conversation_context["last_response"] = None
|
674 |
+
return response
|
675 |
+
|
676 |
+
# For other models, use the Hugging Face Inference API with retry logic
|
677 |
+
logger.info(f"Attempting to generate response with model: {model['name']}")
|
678 |
+
payload = {
|
679 |
+
"inputs": f"{SYSTEM_PROMPT} '{user_input}'",
|
680 |
+
"parameters": model["parameters"]
|
681 |
+
}
|
682 |
+
response = make_api_request(model["endpoint"], headers=headers, json=payload)
|
683 |
+
if response and response.status_code == 200:
|
684 |
+
result = response.json()
|
685 |
+
# Handle different response formats based on the model
|
686 |
+
if isinstance(result, list) and len(result) > 0 and "generated_text" in result[0]:
|
687 |
+
response_text = result[0]["generated_text"].strip()
|
688 |
+
elif isinstance(result, dict) and "generated_text" in result:
|
689 |
+
response_text = result["generated_text"].strip()
|
690 |
+
elif isinstance(result, str):
|
691 |
+
response_text = result.strip()
|
692 |
+
else:
|
693 |
+
logger.warning(f"Unexpected response format from {model['name']}: {result}")
|
694 |
+
continue
|
695 |
+
conversation_context["last_response"] = None
|
696 |
+
logger.info(f"Successfully generated response with {model['name']}: {response_text}")
|
697 |
+
return response_text
|
698 |
else:
|
699 |
+
logger.warning(f"Failed to generate response with {model['name']}: {response.text if response else 'No response'}")
|
700 |
continue
|
701 |
+
except Exception as e:
|
702 |
+
logger.error(f"Error processing model {model['name']}: {str(e)}")
|
|
|
|
|
703 |
continue
|
|
|
|
|
|
|
704 |
|
705 |
+
# If all models fail, return a default message
|
706 |
+
logger.error("All model attempts failed; returning default response.")
|
707 |
+
conversation_context["last_response"] = None
|
708 |
+
return "Hare Manavi! I seem to be lost in Vrindavan’s magic—let’s try a different tune!"
|
709 |
+
|
710 |
+
except Exception as e:
|
711 |
+
logger.error(f"Unhandled exception in get_krishna_response: {str(e)}")
|
712 |
+
return "Hare Manavi! Something went wrong—let’s try again with a new Vrindavan adventure!"
|