ayush2917 committed (verified)
Commit fea8846 · Parent(s): d3a2807

Update chatbot.py

Files changed (1):
  1. chatbot.py (+59, -25)
chatbot.py CHANGED
@@ -9,6 +9,25 @@ from ayush_messages import ayush_surprises
 load_dotenv()
 HUGGINGFACE_API_TOKEN = os.getenv("HUGGINGFACE_API_TOKEN")
 
+# List of open-source models for fallback responses
+AI_MODELS = [
+    {
+        "name": "google/gemma-2b",
+        "endpoint": "https://api-inference.huggingface.co/models/google/gemma-2b",
+        "parameters": {"max_length": 50, "temperature": 0.7}
+    },
+    {
+        "name": "mistralai/Mixtral-8x7B-Instruct-v0.1",
+        "endpoint": "https://api-inference.huggingface.co/models/mistralai/Mixtral-8x7B-Instruct-v0.1",
+        "parameters": {"max_length": 50, "temperature": 0.7}
+    },
+    {
+        "name": "facebook/blenderbot-400M-distill",
+        "endpoint": "https://api-inference.huggingface.co/models/facebook/blenderbot-400M-distill",
+        "parameters": {"max_length": 50, "temperature": 0.7}
+    }
+]
+
 # Simple context tracking (e.g., last topic discussed)
 conversation_context = {
     "last_topic": None, # Store the last keyword matched (e.g., "birthday", "riddle")
@@ -21,7 +40,7 @@ def get_krishna_response(user_input):
     - Match user input to predefined messages in krishna_blessings or ayush_surprises using keywords.
     - Use context to provide more coherent responses.
     - Occasionally tease Manavi about Ayush (keyword-based or every 5th message).
-    - Fall back to Hugging Face's Inference API for unmatched inputs.
+    - Fall back to multiple open-source AI models for unmatched inputs.
     """
     user_input_lower = user_input.lower().strip()
 
@@ -60,7 +79,7 @@ def get_krishna_response(user_input):
         conversation_context["last_topic"] = "ayush"
         return random.choice(ayush_teasing["ayush"])
 
-    # New trigger for "chat with you"
+    # Trigger for "chat with you"
     if "chat with you" in user_input_lower or "want to chat" in user_input_lower:
         conversation_context["last_topic"] = "chat_with_you"
         return krishna_blessings["chat_with_you"]
@@ -319,30 +338,45 @@ def get_krishna_response(user_input):
     if last_topic in krishna_blessings:
         return krishna_blessings[last_topic] + " What else would you like to talk about, Manavi?"
 
-    # Fallback to Hugging Face Inference API if no keywords match
+    # Fallback to multiple open-source AI models if no keywords match
+    # Shuffle the models to try them in random order
+    models_to_try = AI_MODELS.copy()
+    random.shuffle(models_to_try)
+
     headers = {
         "Authorization": f"Bearer {HUGGINGFACE_API_TOKEN}",
         "Content-Type": "application/json"
     }
-    payload = {
-        "inputs": f"You are Little Krishna, playful and wise. Respond to this in a fun, Krishna-like way: '{user_input}'",
-        "parameters": {
-            "max_length": 50,
-            "temperature": 0.7
-        }
-    }
-    try:
-        response = requests.post(
-            "https://api-inference.huggingface.co/models/google/gemma-2b",
-            headers=headers,
-            json=payload
-        )
-        if response.status_code == 200:
-            result = response.json()
-            return result[0]["generated_text"].strip()
-        else:
-            print(f"Error with Gemma: {response.text}")
-            return "Hare Manavi! My flute got a bit shy—let’s try something else, shall we?"
-    except Exception as e:
-        print(f"Error connecting to Hugging Face Inference API: {str(e)}")
-        return "Hare Manavi! I seem to be lost in Vrindavan’s magic—let’s try a different tune!"
+
+    for model in models_to_try:
+        try:
+            payload = {
+                "inputs": f"You are Little Krishna, playful and wise. Respond to this in a fun, Krishna-like way: '{user_input}'",
+                "parameters": model["parameters"]
+            }
+            response = requests.post(
+                model["endpoint"],
+                headers=headers,
+                json=payload
+            )
+            if response.status_code == 200:
+                result = response.json()
+                # Handle different response formats based on the model
+                if isinstance(result, list) and len(result) > 0 and "generated_text" in result[0]:
+                    return result[0]["generated_text"].strip()
+                elif isinstance(result, dict) and "generated_text" in result:
+                    return result["generated_text"].strip()
+                elif isinstance(result, str):
+                    return result.strip()
+                else:
+                    print(f"Unexpected response format from {model['name']}: {result}")
+                    continue
+            else:
+                print(f"Error with {model['name']}: {response.text}")
+                continue
+        except Exception as e:
+            print(f"Error connecting to {model['name']}: {str(e)}")
+            continue
+
+    # If all models fail, return a default message
+    return "Hare Manavi! I seem to be lost in Vrindavan’s magic—let’s try a different tune!"