Update app.py
app.py CHANGED
@@ -144,55 +144,37 @@ Here are some documents that are relevant to the question.
 ```
 """
 # Define the predict function
-def predict(user_input,company):
-
-
-    relevant_document_chunks = vectorstore_persisted.similarity_search(user_input, k=5, filter={"source":filter})
+def predict(user_input, company):
+    filter = f"dataset/{company}-10-k-2023.pdf"
+    relevant_document_chunks = vectorstore_persisted.similarity_search(user_input, k=5, filter={"source": filter})
 
-    # Create
+    # Create context for query
     context_list = [d.page_content for d in relevant_document_chunks]
-    context_for_query = ".".join(context_list)
+    context_for_query = ".".join(context_list)  # Ensure this is being assigned correctly
 
     # Create messages
     prompt = [
-        {'role':'system', 'content': qna_system_message},
+        {'role': 'system', 'content': qna_system_message},
         {'role': 'user', 'content': qna_user_message_template.format(
-            context=context_for_query,
+            context=context_for_query,  # Passing the correct context
             question=user_input
-
-        }
+        )}
     ]
 
-    # Get response from the LLM
     try:
-        response
-
+        # Get response from the LLM
+        response = openai.ChatCompletion.create(
+            model='gpt-3.5-turbo',
             messages=prompt,
             temperature=0
         )
-
-        prediction = response.choices[0].message.content
+        prediction = response['choices'][0]['message']['content']
 
     except Exception as e:
-        prediction = e
-
-    # While the prediction is made, log both the inputs and outputs to a local log file
-    # While writing to the log file, ensure that the commit scheduler is locked to avoid parallel
-    # access
-
-    with scheduler.lock:
-        with log_file.open("a") as f:
-            f.write(json.dumps(
-                {
-                    'user_input': user_input,
-                    'retrieved_context': context_for_query,
-                    'model_response': prediction
-                }
-            ))
-            f.write("\n")
+        prediction = f"Error: {str(e)}"
 
     return prediction
-
+
 
 examples = [
     ["What are the company's policies and frameworks regarding AI ethics, governance, and responsible AI use as detailed in their 10-K reports?", "AWS"],
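Note that the updated `predict` targets the legacy pre-1.0 `openai` Python SDK: `openai.ChatCompletion.create` and dict-style access like `response['choices'][0]['message']['content']` exist only there. As a minimal sketch, assuming the Space were instead pinned to `openai>=1.0` (which this commit does not do), the same call would read:

```python
# Illustrative sketch only, not part of this commit.
# Assumes openai>=1.0, where openai.ChatCompletion has been removed.
from openai import OpenAI

client = OpenAI()  # picks up OPENAI_API_KEY from the environment

response = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=prompt,  # the same system/user messages built in predict()
    temperature=0,
)
prediction = response.choices[0].message.content
```

The other fix is the previously undefined `filter`: with the new f-string line, a call such as `predict("What are the key risk factors?", "AWS")` restricts retrieval to chunks whose `source` metadata is `dataset/AWS-10-k-2023.pdf`.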