ayush2917 committed
Commit 12e79cf (verified) · 1 Parent(s): 95c43a4

Update chatbot.py

Files changed (1)
  1. chatbot.py +46 -11
chatbot.py CHANGED
@@ -15,10 +15,10 @@ AI_MODELS = [
         "name": "google/gemma-2b",
         "endpoint": "https://api-inference.huggingface.co/models/google/gemma-2b",
         "parameters": {
-            "max_length": 60,  # Slightly increased for more expressive responses
-            "temperature": 0.9,  # Higher for creativity
-            "top_p": 0.9,  # Encourage diversity in responses
-            "top_k": 50  # Consider top 50 tokens for sampling
+            "max_length": 60,
+            "temperature": 0.9,
+            "top_p": 0.9,
+            "top_k": 50
         }
     },
     {
@@ -26,7 +26,7 @@ AI_MODELS = [
         "endpoint": "https://api-inference.huggingface.co/models/mistralai/Mixtral-8x7B-Instruct-v0.1",
         "parameters": {
             "max_length": 60,
-            "temperature": 0.8,  # Slightly lower for coherence
+            "temperature": 0.8,
             "top_p": 0.95,
             "top_k": 40
         }
@@ -36,7 +36,7 @@ AI_MODELS = [
         "endpoint": "https://api-inference.huggingface.co/models/facebook/blenderbot-400M-distill",
         "parameters": {
             "max_length": 50,
-            "temperature": 0.85,  # Balanced for conversational tone
+            "temperature": 0.85,
             "top_p": 0.9,
             "top_k": 50
         }
@@ -46,7 +46,7 @@ AI_MODELS = [
         "endpoint": "https://api-inference.huggingface.co/models/EleutherAI/gpt-neo-1.3B",
         "parameters": {
             "max_length": 50,
-            "temperature": 0.9,  # Higher for creativity
+            "temperature": 0.9,
             "top_p": 0.9,
             "top_k": 50
         }
@@ -56,7 +56,7 @@ AI_MODELS = [
         "endpoint": "https://api-inference.huggingface.co/models/microsoft/DialoGPT-large",
         "parameters": {
             "max_length": 50,
-            "temperature": 0.85,  # Balanced for conversational tone
+            "temperature": 0.85,
             "top_p": 0.9,
             "top_k": 40
         }
@@ -66,14 +66,14 @@ AI_MODELS = [
         "endpoint": "https://api-inference.huggingface.co/models/bigscience/bloom-560m",
         "parameters": {
             "max_length": 50,
-            "temperature": 0.9,  # Higher for creativity
+            "temperature": 0.9,
             "top_p": 0.95,
             "top_k": 50
         }
     },
     {
         "name": "Grok by xAI",
-        "endpoint": None,  # Special case: Grok will be simulated directly
+        "endpoint": None,
         "parameters": {
             "max_length": 50,
             "temperature": 0.8,
@@ -116,16 +116,45 @@ conversation_context = {
     "message_count": 0  # Track the number of messages to trigger Ayush-teasing every 5th message
 }
 
+def analyze_sentiment(user_input):
+    """Analyze the sentiment of the user's input using a sentiment analysis model."""
+    headers = {
+        "Authorization": f"Bearer {HUGGINGFACE_API_TOKEN}",
+        "Content-Type": "application/json"
+    }
+    payload = {
+        "inputs": user_input
+    }
+    try:
+        response = requests.post(
+            "https://api-inference.huggingface.co/models/distilbert-base-uncased-finetuned-sst-2-english",
+            headers=headers,
+            json=payload
+        )
+        if response.status_code == 200:
+            result = response.json()
+            if isinstance(result, list) and len(result) > 0:
+                sentiment = result[0]
+                label = sentiment[0]["label"]  # "POSITIVE" or "NEGATIVE"
+                return label.lower()
+        return "neutral"
+    except Exception as e:
+        print(f"Error analyzing sentiment: {str(e)}")
+        return "neutral"
+
 def get_krishna_response(user_input):
     """
     Generate a response from Little Krishna based on user input.
     - Match user input to predefined messages in krishna_blessings or ayush_surprises using keywords.
-    - Use context to provide more coherent responses.
+    - Use sentiment analysis to tailor responses based on Manavi's mood.
     - Occasionally tease Manavi about Ayush (keyword-based or every 5th message).
     - Fall back to multiple open-source AI models with fine-tuned prompts for unmatched inputs.
     """
     user_input_lower = user_input.lower().strip()
 
+    # Analyze the sentiment of the user's input
+    sentiment = analyze_sentiment(user_input)
+
     # Increment message count
     conversation_context["message_count"] += 1
 
@@ -161,6 +190,12 @@ def get_krishna_response(user_input):
         conversation_context["last_topic"] = "ayush"
         return random.choice(ayush_teasing["ayush"])
 
+    # Sentiment-based responses
+    if sentiment == "negative" and "sad" not in user_input_lower:  # Avoid overlap with keyword "sad"
+        return "Hare Manavi! I see a little cloud over your heart—let’s dance by the Yamuna to bring back your smile!"
+    if sentiment == "positive":
+        return "Hare Manavi! Your joy lights up Vrindavan—shall we celebrate with a flute melody?"
+
     # Trigger for "chat with you"
     if "chat with you" in user_input_lower or "want to chat" in user_input_lower:
         conversation_context["last_topic"] = "chat_with_you"