Pijush2023 committed
Commit 31711be · verified · 1 Parent(s): b27de52

Update app.py

Files changed (1):
  app.py +25 −25
app.py CHANGED
@@ -740,31 +740,31 @@ def generate_answer(message, choice, retrieval_mode, selected_model):
         logging.error(traceback.format_exc())
         return "Sorry, I encountered an error while processing your request.", []
 
-def generate_answer(message, choice, retrieval_mode, selected_model, selected_file):
-    # Ensure a file is selected
-    if not selected_file:
-        return "Please choose a file to proceed."
-
-    # Modify the Phi-3.5 prompt to include the selected file
-    if selected_model == phi_pipe:
-        retriever = phi_retriever
-        context_documents = retriever.get_relevant_documents(message)
-        context = "\n".join([doc.page_content for doc in context_documents])
-
-        prompt = phi_custom_template.format(context=context, question=message, document_name=selected_file)
-        response = selected_model(prompt, **{
-            "max_new_tokens": 250,
-            "return_full_text": True,
-            "temperature": 0.0,
-            "do_sample": False,
-        })
-
-        if response:
-            generated_text = response[0]['generated_text']
-            cleaned_response = clean_response(generated_text)
-            return cleaned_response
-        else:
-            return "No response generated.", []
+# def generate_answer(message, choice, retrieval_mode, selected_model, selected_file):
+#     # Ensure a file is selected
+#     if not selected_file:
+#         return "Please choose a file to proceed."
+
+#     # Modify the Phi-3.5 prompt to include the selected file
+#     if selected_model == phi_pipe:
+#         retriever = phi_retriever
+#         context_documents = retriever.get_relevant_documents(message)
+#         context = "\n".join([doc.page_content for doc in context_documents])
+
+#         prompt = phi_custom_template.format(context=context, question=message, document_name=selected_file)
+#         response = selected_model(prompt, **{
+#             "max_new_tokens": 250,
+#             "return_full_text": True,
+#             "temperature": 0.0,
+#             "do_sample": False,
+#         })
+
+#         if response:
+#             generated_text = response[0]['generated_text']
+#             cleaned_response = clean_response(generated_text)
+#             return cleaned_response
+#         else:
+#             return "No response generated.", []
 
 
 
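A note on the change for readers of the diff: the removed overload shadowed the earlier generate_answer(message, choice, retrieval_mode, selected_model) named in the hunk header, because Python binds a def name to its last definition. Below is a minimal sketch of how the file-aware branch could instead be folded into the surviving function, assuming the phi_pipe, phi_retriever, phi_custom_template, and clean_response objects that appear in the diff; the optional selected_file parameter, the "all documents" fallback, and the uniform (text, sources) return shape are illustrative assumptions, not part of this commit.

# Sketch only: one definition, with the file made optional rather than a
# second def that shadows the first. phi_pipe, phi_retriever,
# phi_custom_template, and clean_response are the names used in the diff.
def generate_answer(message, choice, retrieval_mode, selected_model, selected_file=None):
    if selected_model == phi_pipe:
        # Retrieve supporting passages and join them into a single context
        # string, exactly as the commented-out code did.
        context_documents = phi_retriever.get_relevant_documents(message)
        context = "\n".join(doc.page_content for doc in context_documents)

        # The "all documents" fallback is an assumption; the removed code
        # refused to answer when no file was selected.
        prompt = phi_custom_template.format(
            context=context,
            question=message,
            document_name=selected_file or "all documents",
        )
        response = selected_model(
            prompt,
            max_new_tokens=250,
            return_full_text=True,
            temperature=0.0,
            do_sample=False,
        )
        if response:
            # Normalize to the (text, sources) pair that the error path in
            # the surviving function already returns.
            return clean_response(response[0]["generated_text"]), []
    return "No response generated.", []

Returning a consistent (text, sources) tuple also avoids the mixed string/tuple returns visible in the removed code.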