ayush2917 committed
Commit f03ea3f · verified · 1 Parent(s): cf05bfb

Update backend/chatbot.py

Files changed (1)
backend/chatbot.py  +31 -29
backend/chatbot.py CHANGED

@@ -4,10 +4,20 @@ from dotenv import load_dotenv
 from messages import krishna_blessings
 from ayush_messages import ayush_surprises
 
+# Load environment variables (Hugging Face Space secrets)
 load_dotenv()
-REPLICATE_API_TOKEN = os.getenv("REPLICATE_API_TOKEN")
+HUGGINGFACE_API_TOKEN = os.getenv("HUGGINGFACE_API_TOKEN")
 
 def get_krishna_response(user_input):
+    """
+    Generate a Krishna-themed response for the user input.
+
+    Args:
+        user_input (str): The user's message.
+
+    Returns:
+        str: Krishna's response.
+    """
     # Check for predefined responses
     if "birthday" in user_input.lower():
         return ayush_surprises["birthday"]
@@ -16,38 +26,30 @@ def get_krishna_response(user_input):
     if "hello" in user_input.lower() or "hi" in user_input.lower():
         return krishna_blessings["greeting"]
 
-    # Use LLaMA-3 via Replicate API
+    # Use Gemma via Hugging Face Inference API for dynamic responses
     headers = {
-        "Authorization": f"Token {REPLICATE_API_TOKEN}",
+        "Authorization": f"Bearer {HUGGINGFACE_API_TOKEN}",
         "Content-Type": "application/json"
     }
     payload = {
-        "input": {
-            "prompt": f"You are Little Krishna, playful and wise. Respond to this in a fun, Krishna-like way: '{user_input}'",
-            "max_tokens": 60,
+        "inputs": f"You are Little Krishna, playful and wise. Respond to this in a fun, Krishna-like way: '{user_input}'",
+        "parameters": {
+            "max_length": 60,
             "temperature": 0.9
         }
     }
-    response = requests.post(
-        "https://api.replicate.com/v1/predictions",
-        headers=headers,
-        json={
-            "version": "meta-llama/llama-3-8b",  # Replace with actual model version after access
-            "input": payload["input"]
-        }
-    )
-    if response.status_code == 200:
-        prediction = response.json()
-        # Wait for prediction to complete (Replicate uses async predictions)
-        prediction_id = prediction["id"]
-        while True:
-            status_response = requests.get(
-                f"https://api.replicate.com/v1/predictions/{prediction_id}",
-                headers=headers
-            )
-            status_data = status_response.json()
-            if status_data["status"] == "succeeded":
-                return status_data["output"]
-            elif status_data["status"] == "failed":
-                return "Sorry, I couldn’t generate a response. Let’s try something else!"
-    return "Error connecting to LLaMA-3. Please try again later."
+    try:
+        response = requests.post(
+            "https://api-inference.huggingface.co/models/google/gemma-2b",
+            headers=headers,
+            json=payload
+        )
+        if response.status_code == 200:
+            result = response.json()
+            return result[0]["generated_text"].strip()
+        else:
+            print(f"Error with Gemma: {response.text}")
+            return "Sorry, I couldn’t generate a response. Let’s try something else!"
+    except Exception as e:
+        print(f"Error connecting to Hugging Face Inference API: {str(e)}")
+        return "Hare Krishna! I seem to be lost in Vrindavan’s magic—let’s try again!"
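For local verification, a minimal sketch (not part of the commit) of the same Inference API request the updated get_krishna_response() sends. It assumes HUGGINGFACE_API_TOKEN is exported in the environment; the endpoint, prompt wording, and parameters mirror the committed payload, and the sample "hello" input is only illustrative.

# Minimal sketch: replicate the request from the updated backend/chatbot.py,
# assuming HUGGINGFACE_API_TOKEN is set (e.g. via a Space secret or .env file).
import os
import requests

API_URL = "https://api-inference.huggingface.co/models/google/gemma-2b"

headers = {
    "Authorization": f"Bearer {os.getenv('HUGGINGFACE_API_TOKEN')}",
    "Content-Type": "application/json",
}
payload = {
    "inputs": "You are Little Krishna, playful and wise. Respond to this in a fun, Krishna-like way: 'hello'",
    "parameters": {"max_length": 60, "temperature": 0.9},
}

response = requests.post(API_URL, headers=headers, json=payload)
if response.status_code == 200:
    # The text-generation route typically returns a list of dicts with a
    # "generated_text" field, which is what the committed code indexes into.
    print(response.json()[0]["generated_text"].strip())
else:
    print(f"Request failed ({response.status_code}): {response.text}")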