Ishaan Shah committed on
Commit 5b417ee
1 Parent(s): 467b7f2
Files changed (1)
  1. app.py +30 -59
app.py CHANGED
@@ -48,11 +48,7 @@ def process_llm_response(llm_response):
      # return json.dumps(response_data)
      return response_data

- def get_answer(question):
-     llm_response = qa_chain(question)
-     response = process_llm_response(llm_response)

-     return response["result"], response["sources"]

  # @app.route('/question', methods=['POST'])
  # def answer():
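The get_answer helper removed in this hunk indexes into process_llm_response's output as response["result"] and response["sources"]. A minimal sketch of that contract, assuming qa_chain is the LangChain RetrievalQA chain created with return_source_documents=True (as the deleted setup comments further down suggest); this illustrates the expected shape, not the file's actual implementation:

def process_llm_response(llm_response):
    # Assumption: RetrievalQA called with return_source_documents=True returns a dict
    # like {"query": ..., "result": ..., "source_documents": [Document, ...]}.
    sources = [doc.metadata.get("source", "") for doc in llm_response["source_documents"]]
    return {"result": llm_response["result"], "sources": sources}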
@@ -64,51 +60,7 @@ def get_answer(question):
  #         return response
  #     else:
  #         return 'Content-Type not supported!'
-
- # @app.route('/', methods=['GET'])
- # def default():
- #     return "Hello World!"
-
-
- # if __name__ == '__main__':
- #     ip=get_local_ip()
- #     os.environ["OPENAI_API_KEY"] = "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"
- #     # Embed and store the texts
- #     # if(torch.cuda.is_available() == False):
- #     #     print("No GPU available")
- #     #     exit(1)
-
- #     torch.cuda.empty_cache()
- #     torch.max_split_size_mb = 100
- #     instructor_embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-xl",
- #                                                            model_kwargs={"device": "cpu"})
- #     # Supplying a persist_directory will store the embeddings on disk
- #     persist_directory = 'db'
- #     vectordb2 = Chroma(persist_directory=persist_directory,
- #                        embedding_function=instructor_embeddings,
- #                        )
- #     retriever = vectordb2.as_retriever(search_kwargs={"k": 3})
- #     vectordb2.persist()
-
- #     # Set up the turbo LLM
- #     turbo_llm = ChatOpenAI(
- #         temperature=0,
- #         model_name='gpt-3.5-turbo'
- #     )
- #     qa_chain = RetrievalQA.from_chain_type(llm=turbo_llm,
- #                                            chain_type="stuff",
- #                                            retriever=retriever,
- #                                            return_source_documents=True)
- #     qa_chain.combine_documents_chain.llm_chain.prompt.messages[0].prompt.template = """
- #     Use only the following pieces of context and think step by step to answer. Answer the users question only if they are related to the context given.
- #     If you don't know the answer, just say that you don't know, don't try to make up an answer. Make your answer very detailed and long.
- #     Use bullet points to explain when required.
- #     Use only text found in the context as your knowledge source for the answer.
- #     ----------------
- #     {context}"""
- #     app.run(host=ip, port=5000)
- #
- #
+

  ip=get_local_ip()
  os.environ["OPENAI_API_KEY"] = "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"
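The deleted comment block above mirrors the live retrieval setup that the surviving code (ip=get_local_ip(), the API-key export, and the qa_chain used later in this diff) still relies on. A minimal sketch of that setup, assuming the LangChain-era import paths implied by those comments; names such as instructor_embeddings, vectordb2, and turbo_llm are taken from the deleted block, not verified against the unchanged parts of app.py:

from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.vectorstores import Chroma
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA

# Instructor embeddings on CPU, backed by the on-disk Chroma store in ./db
instructor_embeddings = HuggingFaceInstructEmbeddings(
    model_name="hkunlp/instructor-xl",
    model_kwargs={"device": "cpu"},
)
vectordb2 = Chroma(persist_directory="db", embedding_function=instructor_embeddings)
retriever = vectordb2.as_retriever(search_kwargs={"k": 3})

# gpt-3.5-turbo answers over the retrieved chunks; source documents are kept
# so process_llm_response can surface them.
turbo_llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo")
qa_chain = RetrievalQA.from_chain_type(
    llm=turbo_llm,
    chain_type="stuff",
    retriever=retriever,
    return_source_documents=True,
)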
@@ -151,18 +103,37 @@ def print_array(arr):
      arr_str = str(arr)
      return arr_str

- def getanswer(question):
-     llm_response = qa_chain(question)
-     response = process_llm_response(llm_response)
-     sources = print_array(response["sources"])
-     return response["result"], sources

  # iface = gr.Interface(fn=getanswer, inputs="text", outputs="text")
  # iface.launch()

- demo = gr.Interface(
-     fn=getanswer,
-     inputs=["text"],
-     outputs=["text", "text"],
- )
+ # def getanswer(question):
+ #     llm_response = qa_chain(question)
+ #     response = process_llm_response(llm_response)
+ #     sources = print_array(response["sources"])
+ #     return response["result"], sources
+
+ # demo = gr.Interface(
+ #     fn=getanswer,
+ #     inputs=["text"],
+ #     outputs=["text", "text"],
+ #     api="question"
+ # )
+ # demo.launch()
+
+ with gr.Blocks() as demo:
+     chatbot = gr.Chatbot()
+     msg = gr.Textbox()
+     clear = gr.Button("Clear")
+
+     def getanswer(question, chat_history):
+         llm_response = qa_chain(question)
+         response = process_llm_response(llm_response)
+         sources = print_array(response["sources"])
+         processed_response = response["result"] + " \n\nSources: " + sources
+         chat_history.append((question, processed_response))
+         return "", chat_history
+
+     msg.submit(getanswer, [msg, chatbot], [msg, chatbot])
+     clear.click(lambda: None, None, chatbot, queue=False)

  demo.launch()
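The new Blocks UI wires msg.submit so that getanswer both clears the textbox (first output) and appends the (question, answer) pair to the Chatbot history (second output). To try that wiring without the vector store or an API key, a stubbed sketch is below; fake_answer is a hypothetical stand-in for qa_chain plus process_llm_response and is not part of this commit (assumes the Gradio 3.x tuple-style chat history used here):

import gradio as gr

def fake_answer(question, chat_history):
    # Hypothetical stub: echoes the question instead of running RetrievalQA.
    chat_history.append((question, f"(stub) You asked: {question}"))
    return "", chat_history

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    clear = gr.Button("Clear")

    # Same wiring as the commit: submit updates both the textbox and the history;
    # the clear button resets the Chatbot without queueing.
    msg.submit(fake_answer, [msg, chatbot], [msg, chatbot])
    clear.click(lambda: None, None, chatbot, queue=False)

demo.launch()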