ADKU committed · verified
Commit 425d4bf · 1 Parent(s): 12e1b40

Update app.py


Corrected bugs in the "Select a Paper" dropdown so that it updates in real time after a search.
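For context, the real-time update works by routing the search results through a gr.State and a chained .then() step that pushes the choices into the dropdown with gr.update(). Below is a minimal, standalone sketch of that pattern; the fake_search function and the demo layout are illustrative stand-ins, not code from app.py.

import gradio as gr

def fake_search(query):
    # Stand-in for get_relevant_papers(): returns (choices, status message).
    if not query.strip():
        return [], "Please enter a search query."
    return [f"1. Paper about {query}", f"2. Survey of {query}"], "Search completed."

with gr.Blocks() as demo:
    query_input = gr.Textbox(label="Enter your search query")
    search_btn = gr.Button("Search")
    paper_dropdown = gr.Dropdown(label="Select a Paper", choices=[], interactive=True)
    search_status = gr.Textbox(label="Search Status", interactive=False)
    paper_choices_state = gr.State([])  # holds the latest result list

    # Step 1: run the search and store its results in state.
    # Step 2: push those results into the dropdown and clear any stale selection.
    search_btn.click(
        fn=fake_search,
        inputs=query_input,
        outputs=[paper_choices_state, search_status]
    ).then(
        fn=lambda choices: gr.update(choices=choices, value=None),
        inputs=paper_choices_state,
        outputs=paper_dropdown
    )

if __name__ == "__main__":
    demo.launch()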

Files changed (1)
  1. app.py +26 -13
app.py CHANGED
@@ -94,34 +94,34 @@ except Exception as e:
     raise
 
 # Hybrid search function
-def get_relevant_papers(query, top_k=5):
+def get_relevant_papers(query):
     if not query.strip():
-        return []
+        return [], "Please enter a search query."
     try:
         query_embedding = generate_embeddings_sci_bert([query])
-        distances, indices = faiss_index.search(query_embedding.astype(np.float32), top_k)
+        distances, indices = faiss_index.search(query_embedding.astype(np.float32), 5)
         tokenized_query = query.split()
         bm25_scores = bm25.get_scores(tokenized_query)
-        bm25_top_indices = np.argsort(bm25_scores)[::-1][:top_k]
+        bm25_top_indices = np.argsort(bm25_scores)[::-1][:5]
         combined_indices = list(set(indices[0]) | set(bm25_top_indices))
         ranked_results = sorted(combined_indices, key=lambda idx: -bm25_scores[idx])
         papers = []
-        for i, index in enumerate(ranked_results[:top_k]):
+        for i, index in enumerate(ranked_results[:5]):
             paper = df.iloc[index]
             papers.append(f"{i+1}. {paper['title']} - Abstract: {paper['cleaned_abstract'][:200]}...")
-        return papers
+        return papers, "Search completed."
     except Exception as e:
         logger.error(f"Search failed: {e}")
-        return ["Search failed. Please try again."]
+        return [], "Search failed. Please try again."
 
 # GPT-2 QA function
 def answer_question(paper, question, history):
     if not paper:
-        return [("Please select a paper first!", "")], history
+        return [(question, "Please select a paper first!")], history
     if not question.strip():
         return [(question, "Please ask a question!")], history
     if question.lower() in ["exit", "done"]:
-        return [("Conversation ended. Select a new paper or search again!", "")], []
+        return [("Conversation ended.", "Select a new paper or search again!")], []
 
     try:
         # Extract title and abstract
@@ -150,7 +150,7 @@ def answer_question(paper, question, history):
         response = response[len(context):].strip()
 
         history.append((question, response))
-        return history, history  # Return updated history for Chatbot
+        return history, history
     except Exception as e:
         logger.error(f"QA failed: {e}")
         history.append((question, "Sorry, I couldn’t process that. Try again!"))
@@ -173,9 +173,18 @@ with gr.Blocks(
     query_input = gr.Textbox(label="Enter your search query", placeholder="e.g., machine learning in healthcare")
     search_btn = gr.Button("Search")
     paper_dropdown = gr.Dropdown(label="Select a Paper", choices=[], interactive=True)
+    search_status = gr.Textbox(label="Search Status", interactive=False)
+
+    # State to store paper choices
+    paper_choices_state = gr.State([])
+
     search_btn.click(
         fn=get_relevant_papers,
         inputs=query_input,
+        outputs=[paper_choices_state, search_status]
+    ).then(
+        fn=lambda choices: gr.update(choices=choices, value=None),
+        inputs=paper_choices_state,
         outputs=paper_dropdown
     )
 
@@ -190,11 +199,15 @@ with gr.Blocks(
     # State to store conversation history
     history_state = gr.State([])
 
-    # Update selected paper
+    # Update selected paper and reset history
     paper_dropdown.change(
-        fn=lambda x: (x, []),  # Reset history when new paper selected
+        fn=lambda x: (x, []),
         inputs=paper_dropdown,
         outputs=[selected_paper, history_state]
+    ).then(
+        fn=lambda: [],
+        inputs=None,
+        outputs=chatbot
     )
 
     # Handle chat
@@ -205,7 +218,7 @@ with gr.Blocks(
     ).then(
         fn=lambda: "",
        inputs=None,
-        outputs=question_input  # Clear question input after sending
+        outputs=question_input  # Clear input
     )
 
     # Launch the app
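For reference, here is a minimal, self-contained sketch of the FAISS + BM25 hybrid ranking that get_relevant_papers() performs: dense retrieval proposes candidates by embedding distance, BM25 proposes candidates by term overlap, and the union of the two candidate sets is reordered by BM25 score. The toy corpus and the random vectors below are illustrative stand-ins for the real dataset and the SciBERT embeddings.

import numpy as np
import faiss
from rank_bm25 import BM25Okapi

corpus = [
    "machine learning in healthcare",
    "deep learning for protein structure prediction",
    "graph neural networks for drug discovery",
]
tokenized_corpus = [doc.split() for doc in corpus]
bm25 = BM25Okapi(tokenized_corpus)

dim = 8  # toy embedding size; the real app uses SciBERT vectors
doc_embeddings = np.random.rand(len(corpus), dim).astype(np.float32)
faiss_index = faiss.IndexFlatL2(dim)
faiss_index.add(doc_embeddings)

query = "machine learning for healthcare"
query_embedding = np.random.rand(1, dim).astype(np.float32)

# Dense candidates from FAISS, sparse candidates from BM25.
_, indices = faiss_index.search(query_embedding, 2)
bm25_scores = bm25.get_scores(query.split())
bm25_top_indices = np.argsort(bm25_scores)[::-1][:2]

# Union the two candidate sets, then rank by BM25 score (highest first).
combined = set(indices[0]) | set(bm25_top_indices)
ranked = sorted(combined, key=lambda idx: -bm25_scores[idx])
print([corpus[i] for i in ranked])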