Ishaan Shah
committed on
Commit
·
2c0039e
1
Parent(s):
b5efc4d
lesgo
Browse files
app.py
CHANGED
@@ -51,7 +51,7 @@ def process_llm_response(llm_response):
|
|
51 |
def get_answer(question):
|
52 |
llm_response = qa_chain(question)
|
53 |
response = process_llm_response(llm_response)
|
54 |
-
return response["result"]
|
55 |
|
56 |
# @app.route('/question', methods=['POST'])
|
57 |
# def answer():
|
@@ -137,18 +137,24 @@ qa_chain = RetrievalQA.from_chain_type(llm=turbo_llm,
|
|
137 |
retriever=retriever,
|
138 |
return_source_documents=True)
|
139 |
qa_chain.combine_documents_chain.llm_chain.prompt.messages[0].prompt.template= """
|
140 |
-
Use only the following pieces of context
|
141 |
If you don't know the answer, just say that you don't know, don't try to make up an answer. Make your answer very detailed and long.
|
142 |
Use bullet points to explain when required.
|
143 |
Use only text found in the context as your knowledge source for the answer.
|
144 |
----------------
|
145 |
{context}"""
|
146 |
|
147 |
-
|
148 |
def getanswer(question):
|
149 |
llm_response = qa_chain(question)
|
150 |
response = process_llm_response(llm_response)
|
151 |
return response
|
152 |
|
153 |
-
iface = gr.Interface(fn=getanswer, inputs="text", outputs="text")
|
154 |
-
iface.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def get_answer(question):
    """Run *question* through the QA chain and return ``(result, sources)``.

    Relies on the module-level ``qa_chain`` and ``process_llm_response``;
    the processed response is assumed to carry ``"result"`` and ``"sources"``
    keys (set up by ``process_llm_response`` elsewhere in this file).
    """
    processed = process_llm_response(qa_chain(question))
    return processed["result"], processed["sources"]
|
55 |
|
56 |
# @app.route('/question', methods=['POST'])
|
57 |
# def answer():
|
|
|
137 |
retriever=retriever,
|
138 |
return_source_documents=True)
|
139 |
qa_chain.combine_documents_chain.llm_chain.prompt.messages[0].prompt.template= """
|
140 |
+
Use only the following pieces of context. Answer the users question only if they are related to the context given.
|
141 |
If you don't know the answer, just say that you don't know, don't try to make up an answer. Make your answer very detailed and long.
|
142 |
Use bullet points to explain when required.
|
143 |
Use only text found in the context as your knowledge source for the answer.
|
144 |
----------------
|
145 |
{context}"""
|
146 |
|
|
|
def getanswer(question):
    """Answer *question* via the QA chain and return the processed response.

    Unlike ``get_answer`` this hands back the whole object produced by
    ``process_llm_response`` rather than unpacking individual fields.
    """
    raw = qa_chain(question)
    return process_llm_response(raw)
|
151 |
|
152 |
# Gradio UI: one text input (the question), two text outputs.
#
# NOTE(review): the interface declares TWO output components, so Gradio
# expects the handler to return two values.  `get_answer` explicitly returns
# `(result, sources)` — matching — whereas `getanswer` returns the whole
# processed-response object, which a two-output interface would mis-map.
# Wired to `get_answer` accordingly; confirm `process_llm_response` populates
# both "result" and "sources" keys.
# (Dead commented-out single-output `iface` code removed.)
demo = gr.Interface(
    fn=get_answer,
    inputs=["text"],
    outputs=["text", "text"],
)
demo.launch()