eagle0504 committed on
Commit
b9f8e19
1 Parent(s): 0abb0c9

both app.py and helper.py updated

Browse files
Files changed (2) hide show
  1. app.py +8 -5
  2. utils/helper_functions.py +1 -24
app.py CHANGED
@@ -51,6 +51,7 @@ collection.add(
51
  metadatas=[{"type": "support"} for _ in range(0, L)],
52
  )
53
 
 
54
  st.title("Youth Homelessness Chatbot")
55
 
56
  # Initialize chat history
@@ -78,6 +79,9 @@ special_threshold = st.sidebar.number_input(
78
  "Insert a number", value=0.2, placeholder="Type a number..."
79
  ) # 0.3
80
  clear_button = st.sidebar.button("Clear Conversation", key="clear")
 
 
 
81
 
82
  if clear_button:
83
  st.session_state.messages = []
@@ -134,8 +138,10 @@ if prompt := st.chat_input("Tell me about YSA"):
134
  final_ref["ai_judge"] = independent_ai_judge_score
135
 
136
  engineered_prompt = f"""
137
- Based on the context: {ref_from_db_search},
138
- answer the user question: {question}.
 
 
139
  Answer the question directly (don't say "based on the context, ...")
140
  """
141
 
@@ -150,6 +156,3 @@ if prompt := st.chat_input("Tell me about YSA"):
150
  st.table(final_ref)
151
  # Add assistant response to chat history
152
  st.session_state.messages.append({"role": "assistant", "content": response})
153
- # st.session_state.messages.append(
154
- # {"role": "assistant", "content": final_ref.to_json()}
155
- # )
 
51
  metadatas=[{"type": "support"} for _ in range(0, L)],
52
  )
53
 
54
+ # Front-end Design
55
  st.title("Youth Homelessness Chatbot")
56
 
57
  # Initialize chat history
 
79
  "Insert a number", value=0.2, placeholder="Type a number..."
80
  ) # 0.3
81
  clear_button = st.sidebar.button("Clear Conversation", key="clear")
82
+ st.sidebar.warning(
83
+ "The 'distances' measures how close your question is to the questions in our database (lower the score the better). The 'ai_judge' measures independent similarity ranking of database answers and user's question (the higher the better)."
84
+ )
85
 
86
  if clear_button:
87
  st.session_state.messages = []
 
138
  final_ref["ai_judge"] = independent_ai_judge_score
139
 
140
  engineered_prompt = f"""
141
+ Based on the context: {ref_from_db_search}
142
+
143
+ answer the user question: {question}
144
+
145
  Answer the question directly (don't say "based on the context, ...")
146
  """
147
 
 
156
  st.table(final_ref)
157
  # Add assistant response to chat history
158
  st.session_state.messages.append({"role": "assistant", "content": response})
 
 
 
utils/helper_functions.py CHANGED
@@ -69,27 +69,6 @@ def calculate_sts_openai_score(sentence1: str, sentence2: str) -> float:
69
  return similarity_score
70
 
71
 
72
- def ai_judge(sentence1: str, sentence2: str) -> float:
73
- API_URL = "https://laazu6ral9w37pfb.us-east-1.aws.endpoints.huggingface.cloud"
74
- headers = {"Accept": "application/json", "Content-Type": "application/json"}
75
-
76
- def helper(payload):
77
- response = requests.post(API_URL, headers=headers, json=payload)
78
- return response.json()
79
-
80
- data = helper(
81
- {
82
- "source_sentence": sentence1,
83
- "sentences": [sentence2, sentence2],
84
- "parameters": {},
85
- }
86
- )
87
-
88
- # result = data['similarities']
89
-
90
- return data
91
-
92
-
93
  def query(payload: Dict[str, Any]) -> Dict[str, Any]:
94
  """
95
  Sends a JSON payload to a predefined API URL and returns the JSON response.
@@ -134,9 +113,7 @@ def llama2_7b_ysa(prompt: str) -> str:
134
  # Define the query payload with the prompt and any additional parameters
135
  query_payload: Dict[str, Any] = {
136
  "inputs": prompt,
137
- "parameters": {
138
- "max_new_tokens": 200
139
- }
140
  }
141
 
142
  # Send the query to the model and store the output response
 
69
  return similarity_score
70
 
71
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
72
  def query(payload: Dict[str, Any]) -> Dict[str, Any]:
73
  """
74
  Sends a JSON payload to a predefined API URL and returns the JSON response.
 
113
  # Define the query payload with the prompt and any additional parameters
114
  query_payload: Dict[str, Any] = {
115
  "inputs": prompt,
116
+ "parameters": {"max_new_tokens": 200},
 
 
117
  }
118
 
119
  # Send the query to the model and store the output response