Vijish committed on
Commit
d4098bb
·
verified ·
1 Parent(s): f654f43

Update handler.py

Browse files
Files changed (1) hide show
  1. handler.py +170 -45
handler.py CHANGED
@@ -1,5 +1,10 @@
 
 
 
 
1
  import json
2
- from typing import Dict, Any
 
3
  import requests
4
 
5
  def download_env_file(url: str, local_path: str):
@@ -13,50 +18,170 @@ env_file_url = "https://www.dropbox.com/scl/fi/21ldek2cdsak2v3mhyy5x/openai.env?
13
  local_env_path = "openai.env"
14
  download_env_file(env_file_url, local_env_path)
15
 
16
- # Importing modules from both scripts
17
- from coresugg import ConversationPayload as ConversationPayloadSugg, create_conversation_starter_prompt, generate_conversation_starters, NUMBER_OF_MESSAGES_FOR_CONTEXT as NUMBER_OF_MESSAGES_FOR_CONTEXT_SUGG
18
- from corechat import ConversationPayload as ConversationPayloadChat, get_conversation_suggestions, NUMBER_OF_MESSAGES_FOR_CONTEXT as NUMBER_OF_MESSAGES_FOR_CONTEXT_CHAT
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19
 
20
  class EndpointHandler:
21
- def __init__(self, model_dir):
22
- self.model_dir = model_dir
23
- # Initialize any necessary objects or load models here
24
-
25
- def integration(self, data: Dict[str, Any]) -> Dict[str, Any]:
26
- payload = ConversationPayloadSugg(**data)
27
- from_user_questions = payload.FromUserKavasQuestions[-NUMBER_OF_MESSAGES_FOR_CONTEXT_SUGG:]
28
- to_user_questions = payload.ToUserKavasQuestions[-NUMBER_OF_MESSAGES_FOR_CONTEXT_SUGG:]
29
- ai_prompt = create_conversation_starter_prompt(from_user_questions + to_user_questions, payload.Chatmood)
30
- conversation_starters = generate_conversation_starters(ai_prompt)
31
- return {"conversation_starters": conversation_starters}
32
-
33
- def chat_integration(self, data: Dict[str, Any]) -> Dict[str, Any]:
34
- payload = ConversationPayloadChat(**data)
35
- last_chat_messages = payload.LastChatMessages[-NUMBER_OF_MESSAGES_FOR_CONTEXT_CHAT:]
36
- suggestions = get_conversation_suggestions(last_chat_messages)
37
- return {"version": "1.0.0-alpha", "suggested_responses": suggestions}
38
-
39
- def upload(self, data: Dict[str, Any]) -> Dict[str, Any]:
40
- if "file" not in data or "content" not in data["file"]:
41
- raise ValueError("No file provided")
42
- file_data = data["file"]["content"]
43
  try:
44
- json_data = json.loads(file_data)
45
- except json.JSONDecodeError:
46
- raise ValueError("Invalid JSON format.")
47
- if "FromUserKavasQuestions" in json_data and "Chatmood" in json_data:
48
- prompt = create_conversation_starter_prompt(
49
- json_data["FromUserKavasQuestions"],
50
- json_data["Chatmood"]
51
- )
52
- starter_suggestion = generate_conversation_starters(prompt)
53
- return {"conversation_starter": starter_suggestion}
54
- elif "LastChatMessages" in json_data:
55
- last_chat_messages = json_data["LastChatMessages"][-NUMBER_OF_MESSAGES_FOR_CONTEXT_CHAT:]
56
- response = {
57
- "version": "1.0.0-alpha",
58
- "suggested_responses": get_conversation_suggestions(last_chat_messages)
59
- }
60
- return response
61
- else:
62
- raise ValueError("Invalid JSON structure.")
 
1
+ from pydantic import BaseModel
2
+ import openai
3
+ from environs import Env
4
+ from typing import List, Dict, Any
5
  import json
6
+
7
+
8
  import requests
9
 
10
  def download_env_file(url: str, local_path: str):
 
18
  local_env_path = "openai.env"
19
  download_env_file(env_file_url, local_env_path)
20
 
21
# Load environment variables
# NOTE(review): assumes "openai.env" was downloaded by download_env_file above
# and defines OPENAI_API_KEY — env.str raises if the key is missing.
env = Env()
env.read_env("openai.env")
openai.api_key = env.str("OPENAI_API_KEY")

# Constants
# Prompts, model name, context-window sizes and timeout are all overridable
# via the env file; the message-context windows are capped at 10 regardless
# of configuration.
SYSTEM_PROMPT_SUGG = env.str("SYSTEM_PROMPT_SUGG", "generate 3 different friendly short conversation starter for a user to another unknown user.")
SYSTEM_PROMPT_CHAT = env.str("SYSTEM_PROMPT_CHAT", "Suggest a suitable reply for a user in a dating conversation context.")
MODEL = env.str("MODEL", "gpt-3.5-turbo")
NUMBER_OF_MESSAGES_FOR_CONTEXT_SUGG = min(env.int("NUMBER_OF_MESSAGES_FOR_CONTEXT_SUGG", 4), 10)
NUMBER_OF_MESSAGES_FOR_CONTEXT_CHAT = min(env.int("NUMBER_OF_MESSAGES_FOR_CONTEXT_CHAT", 4), 10)
AI_RESPONSE_TIMEOUT = env.int("AI_RESPONSE_TIMEOUT", 20)  # seconds, passed as request_timeout
33
+
34
class ConversationPayloadSugg(BaseModel):
    """Request body for conversation-starter generation."""
    fromusername: str
    tousername: str
    # Profile Q&A lists; items are dicts with 'Question'/'Answer' keys
    # (consumed by create_conversation_starter_prompt).
    FromUserKavasQuestions: list
    ToUserKavasQuestions: list
    Chatmood: str
40
+
41
class LastChatMessage(BaseModel):
    """One raw chat message entry.

    NOTE(review): declared but never referenced by the visible code —
    ConversationPayloadChat uses List[dict] instead; confirm before removing.
    """
    fromUser: str
    touser: str
44
+
45
class ConversationPayloadChat(BaseModel):
    """Request body for reply suggestion."""
    fromusername: str
    tousername: str
    zodiansign: str
    # Raw message dicts; their key layout is interpreted by transform_messages.
    LastChatMessages: List[dict]
    Chatmood: str
51
+
52
def create_conversation_starter_prompt(user_questions, chatmood):
    """Return the LLM prompt asking for three short conversation starters.

    user_questions: list of dicts with 'Question' and 'Answer' keys; only
    entries with a truthy 'Answer' contribute to the embedded profile summary.
    chatmood: free-text mood woven into the prompt.
    """
    profile_bits = [
        f"{item['Question']} - {item['Answer']}"
        for item in user_questions
        if item['Answer']
    ]
    profile_summary = " ".join(profile_bits)
    return (
        f"Based on user profile info and a {chatmood} mood, "
        "generate 3 subtle and very short conversation starters. "
        "Explore various topics like travel, hobbies, movies, and not just culinary tastes. "
        f"\nProfile Info: {profile_summary}"
    )
59
+
60
def generate_conversation_starters(prompt):
    """Ask the chat model for conversation starters.

    prompt: full system prompt (from create_conversation_starter_prompt).
    Returns the text content of the first completion choice.
    Raises Exception wrapping any OpenAI or unexpected failure; the original
    exception is chained as __cause__ so its type and traceback survive
    (the previous version re-raised without chaining, losing both).
    """
    try:
        response = openai.ChatCompletion.create(
            model=MODEL,
            messages=[{"role": "system", "content": prompt}],
            temperature=0.7,
            max_tokens=100,
            n=1,
            request_timeout=AI_RESPONSE_TIMEOUT
        )
        return response.choices[0].message["content"]
    except openai.error.OpenAIError as e:
        # Chain the cause so logs keep the concrete API error class.
        raise Exception(f"OpenAI API error: {str(e)}") from e
    except Exception as e:
        raise Exception(f"Unexpected error: {str(e)}") from e
75
+
76
def transform_messages(last_chat_messages):
    """Flatten raw chat dicts into "speaker: message" strings.

    A dict with a 'fromUser' key yields "fromUser-value: touser-value";
    otherwise a dict with a 'touser' key yields "touser-value: fromUser-value".
    If the most recent dict contains 'touser', the final line is prefixed
    with "Q: " to mark it as the message awaiting a reply.

    NOTE(review): in the elif branch 'fromUser' is necessarily absent, so
    the message part is always '' — looks suspicious; confirm intent.
    """
    lines = []
    for entry in last_chat_messages:
        if "fromUser" in entry:
            lines.append(f"{entry['fromUser']}: {entry.get('touser', '')}")
        elif "touser" in entry:
            lines.append(f"{entry['touser']}: {entry.get('fromUser', '')}")

    if lines and "touser" in last_chat_messages[-1]:
        lines[-1] = f"Q: {lines[-1]}"

    return lines
94
+
95
def generate_system_prompt(last_chat_messages, fromusername, tousername, zodiansign=None, chatmood=None):
    """Compose the system prompt for reply suggestion.

    Uses a "start a conversation" prompt when the history is empty or its
    latest entry has no 'touser' key, and a "reply to the last message"
    prompt otherwise. Truthy zodiansign/chatmood hints are appended.
    """
    has_incoming = bool(last_chat_messages) and "touser" in last_chat_messages[-1]
    if has_incoming:
        prompt = f"Suggest a warm and friendly reply for {fromusername} to respond to the last message from {tousername}, as if responding to a dear friend. Strictly avoid replying to messages from {fromusername} or answering their questions."
    else:
        prompt = f"Suggest a casual and friendly message for {fromusername} to start a conversation with {tousername} or continue naturally, as if talking to a good friend. Strictly avoid replying to messages from {fromusername} or answering their questions."

    extras = []
    if zodiansign:
        extras.append(f" Keep in mind {tousername}'s {zodiansign} zodiac sign.")
    if chatmood:
        extras.append(f" Consider the {chatmood} mood.")
    return prompt + "".join(extras)
108
+
109
def get_conversation_suggestions(last_chat_messages):
    """Generate up to 3 suggested replies for the given chat history.

    Request metadata (usernames, zodiac sign, mood) is read from the most
    recent message dict. Returns a list of reply dicts with keys
    type/body/title/confidence. On an OpenAI timeout a single canned error
    reply is returned instead of raising; other OpenAI errors propagate.
    """
    # Guard against an empty history: the previous version indexed [-1]
    # unconditionally and crashed with IndexError, even though the
    # default-message path below clearly intends to support this case.
    latest = last_chat_messages[-1] if last_chat_messages else {}
    fromusername = latest.get("fromusername", "")
    tousername = latest.get("tousername", "")
    zodiansign = latest.get("zodiansign", "")
    chatmood = latest.get("Chatmood", "")

    messages = transform_messages(last_chat_messages)

    system_prompt = generate_system_prompt(last_chat_messages, fromusername, tousername, zodiansign, chatmood)
    messages_final = [{"role": "system", "content": system_prompt}]

    if messages:
        messages_final.extend([{"role": "user", "content": m} for m in messages])
    else:
        # No usable history: seed with a greeting so the model still answers.
        default_message = f"{tousername}: Hi there!"
        messages_final.append({"role": "user", "content": default_message})

    try:
        response = openai.ChatCompletion.create(
            model=MODEL,
            messages=messages_final,
            temperature=0.7,
            max_tokens=150,
            n=3,
            request_timeout=AI_RESPONSE_TIMEOUT
        )

        return [
            {
                "type": "TEXT",
                "body": choice.message['content'],
                "title": f"AI Reply {idx + 1}",
                "confidence": 1,
            }
            for idx, choice in enumerate(response.choices)
        ]

    except openai.error.Timeout:
        # Timeouts degrade to a friendly error reply rather than a 500.
        return [{
            "type": "TEXT",
            "body": "Request to the AI response generator has timed out. Please try again later.",
            "title": "AI Response Error",
            "confidence": 1
        }]
156
+
157
def process_json_input(json_data):
    """Route a parsed request body to the right generator.

    Starter payloads carry 'FromUserKavasQuestions' and 'Chatmood'; chat
    payloads carry 'LastChatMessages' (only the newest
    NUMBER_OF_MESSAGES_FOR_CONTEXT_CHAT entries are used). Anything else
    raises ValueError.
    """
    is_starter_payload = "FromUserKavasQuestions" in json_data and "Chatmood" in json_data
    if is_starter_payload:
        starter_prompt = create_conversation_starter_prompt(
            json_data["FromUserKavasQuestions"],
            json_data["Chatmood"]
        )
        return {"conversation_starter": generate_conversation_starters(starter_prompt)}

    if "LastChatMessages" in json_data:
        recent = json_data["LastChatMessages"][-NUMBER_OF_MESSAGES_FOR_CONTEXT_CHAT:]
        return {
            "version": "1.0.0-alpha",
            "suggested_responses": get_conversation_suggestions(recent)
        }

    raise ValueError("Invalid JSON structure.")
174
 
175
class EndpointHandler:
    """Inference endpoint entry point; delegates routing to process_json_input."""

    def __init__(self):
        # No per-instance state: configuration lives at module level.
        pass

    def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """Handle one request dict, returning a result or an {'error': ...} dict."""
        try:
            return process_json_input(data)
        except ValueError as err:
            # Known validation failures surface verbatim.
            return {"error": str(err)}
        except Exception as err:
            # Last-resort guard so the endpoint never raises to the server.
            return {"error": f"Unexpected error: {str(err)}"}