ayush2917 committed on
Commit
4584a37
·
verified ·
1 Parent(s): 7fbd607

Update backend/chatbot.py

Browse files
Files changed (1) hide show
  1. backend/chatbot.py +44 -13
backend/chatbot.py CHANGED
@@ -1,22 +1,53 @@
1
- import openai
 
 
2
  from messages import krishna_blessings
3
  from ayush_messages import ayush_surprises
4
 
5
- openai.api_key = os.getenv("OPENAI_API_KEY")
 
6
 
7
  def get_krishna_response(user_input):
 
8
  if "birthday" in user_input.lower():
9
  return ayush_surprises["birthday"]
10
  if "shy" in user_input.lower():
11
- return "Hare Manavi! I hid from the gopis too—quiet time is golden like my butter!"
12
- prompt = (
13
- "You are Little Krishna, playful and wise. Respond to this in a fun, Krishna-like way: "
14
- f"'{user_input}'"
15
- )
16
- response = openai.Completion.create(
17
- engine="text-davinci-003",
18
- prompt=prompt,
19
- max_tokens=60,
20
- temperature=0.9
 
 
 
 
 
 
 
 
 
 
 
 
 
21
  )
22
- return response.choices[0].text.strip()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import requests
3
+ from dotenv import load_dotenv
4
  from messages import krishna_blessings
5
  from ayush_messages import ayush_surprises
6
 
7
+ load_dotenv()
8
+ REPLICATE_API_TOKEN = os.getenv("REPLICATE_API_TOKEN")
9
 
10
def get_krishna_response(user_input):
    """Return a Krishna-style reply string for *user_input*.

    Keyword-triggered canned responses are checked first; otherwise the
    reply is generated with LLaMA-3 via the Replicate HTTP API.  Always
    returns a plain string (a friendly error message on failure).

    Parameters:
        user_input: the raw chat message from the user.
    """
    import time  # local import: only needed for the polling fallback path

    text = user_input.lower()

    # Check for predefined responses before calling the model.
    if "birthday" in text:
        return ayush_surprises["birthday"]
    if "shy" in text:
        return krishna_blessings["shy"]
    # NOTE(review): substring match — "hi" also fires on words like "think"
    # or "this"; confirm whether word-boundary matching is wanted.
    if "hello" in text or "hi" in text:
        return krishna_blessings["greeting"]

    # Use LLaMA-3 via Replicate API.
    headers = {
        "Authorization": f"Token {REPLICATE_API_TOKEN}",
        "Content-Type": "application/json",
    }
    model_input = {
        "prompt": (
            "You are Little Krishna, playful and wise. "
            f"Respond to this in a fun, Krishna-like way: '{user_input}'"
        ),
        "max_tokens": 60,
        "temperature": 0.9,
    }
    try:
        response = requests.post(
            "https://api.replicate.com/v1/predictions",
            headers=headers,
            json={
                "version": "meta-llama/llama-3-8b",  # Replace with actual model version after access
                "input": model_input,
            },
            timeout=30,  # never hang indefinitely on a dead connection
        )
        # Replicate answers 201 Created for a new prediction; accept 200 too
        # for backward compatibility with the previous check.
        if response.status_code not in (200, 201):
            return "Error connecting to LLaMA-3. Please try again later."

        prediction_id = response.json()["id"]
        # Replicate predictions are asynchronous: poll until a terminal
        # state, but bound the wait (~60s) instead of looping forever.
        for _ in range(60):
            status_response = requests.get(
                f"https://api.replicate.com/v1/predictions/{prediction_id}",
                headers=headers,
                timeout=30,
            )
            status_data = status_response.json()
            status = status_data["status"]
            if status == "succeeded":
                output = status_data["output"]
                # LLaMA output arrives as a list of string chunks; join it
                # so callers always get one string.
                return "".join(output) if isinstance(output, list) else output
            if status in ("failed", "canceled"):
                return "Sorry, I couldn’t generate a response. Let’s try something else!"
            time.sleep(1)  # don't busy-wait the API between polls
        # Timed out while the prediction was still running.
        return "Sorry, I couldn’t generate a response. Let’s try something else!"
    except requests.RequestException:
        # Network / DNS / timeout problems should degrade gracefully,
        # not crash the chatbot.
        return "Error connecting to LLaMA-3. Please try again later."