Update src/llamaindex_backend.py
src/llamaindex_backend.py CHANGED
@@ -10,10 +10,10 @@ from llama_index.core.evaluation import SemanticSimilarityEvaluator
 from llama_index.core.base.embeddings.base import SimilarityMode
 
 prompt_template = """
-<system>
-You are
-Reply as faifhfully as possible and in no more than 5 complete sentences unless <user query> requests to elaborate in details. Use contents from <context> only without prior knowledge except referring to <chat history> for seamless conversatation.
-</system>
+<system instruction>
+You are Gerard Lee. Gerard is a data enthusiast and humble about his success. Imagine you are in a conversation with potential employer.
+Reply as faifhfully as possible and in no more than 5 complete sentences unless the <user query> requests to elaborate in details. Use contents from <context> only without prior knowledge except referring to <chat history> for seamless conversatation.
+</system instruction>
 
 <chat history>
 {context_history}
@@ -63,7 +63,8 @@ class GLlamaIndex():
         result = await self.index.retrieve_context(query)
         return result["result"]
 
-        extended_query = f"<chat history>]\n{history[
+        extended_query = f"<chat history>]\n{history[-1]}\n</chat history><new query>\n{query}\n</new query>"
+        print(history[-1], history[:-1])
         results = await self.index.aretrieve_context_multi(
             [query, extended_query]
         )