Update app.py
app.py CHANGED
@@ -59,7 +59,7 @@ def index_docs(documents):
 def retrieve_docs(query):
     return vector_store.similarity_search(query)
 
-# Function to generate an answer based on retrieved documents
+# Function to generate an answer based on retrieved documents using text generation
 def answer_question(question, documents):
     context = "\n\n".join([doc.page_content for doc in documents])
     full_context = f"{context}"
@@ -68,9 +68,14 @@ def answer_question(question, documents):
     # Format the prompt with the user's question and context
     question_with_context = prompt.format(question=question, context=full_context)
 
-    # Use the Hugging Face InferenceClient
-
-
+    # Use the Hugging Face InferenceClient's text_generation method
+    generate_kwargs = {
+        "temperature": 0.7,  # Control the creativity of the generated response
+        "max_new_tokens": 150,  # Limit the length of the output
+        "top_p": 0.9  # Control diversity via nucleus sampling
+    }
+
+    response = client.text_generation(question_with_context, **generate_kwargs)
 
     # Assuming the response contains a 'generated_text' field with the model's output
     return response["generated_text"]
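For context, a minimal, self-contained sketch of how the updated answer_question function might be wired up end to end. The client, prompt template, and model name below are illustrative assumptions, since app.py defines its own elsewhere. One caveat worth noting: InferenceClient.text_generation returns the generated text as a plain string by default, and only exposes a generated_text field when details=True is passed, so the final return response["generated_text"] line may need adjusting.

# Minimal sketch; the model name and prompt template are illustrative assumptions.
from huggingface_hub import InferenceClient

client = InferenceClient(model="mistralai/Mistral-7B-Instruct-v0.2")  # assumed model

# Plain-string template; str.format fills {question} and {context},
# mirroring the prompt.format(...) call in app.py
prompt = (
    "Answer the question using only the context below.\n\n"
    "Context:\n{context}\n\n"
    "Question: {question}\n"
    "Answer:"
)

def answer_question(question, documents):
    # Concatenate the retrieved documents into one context block
    context = "\n\n".join(doc.page_content for doc in documents)
    question_with_context = prompt.format(question=question, context=context)

    generate_kwargs = {
        "temperature": 0.7,     # control the creativity of the response
        "max_new_tokens": 150,  # limit the length of the output
        "top_p": 0.9,           # nucleus sampling for diversity
    }

    # Default call: text_generation returns the generated text directly as a str
    response = client.text_generation(question_with_context, **generate_kwargs)

    # Alternative: with details=True the result carries a generated_text attribute
    # detailed = client.text_generation(question_with_context, details=True, **generate_kwargs)
    # return detailed.generated_text

    return response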