Files changed (8)
  1. .github/workflows +0 -20
  2. README.md +0 -1
  3. app.py +204 -63
  4. arxiv_decode.png +0 -0
  5. requirements.txt +1 -3
  6. test/__init__.py +0 -0
  7. test/test.py +0 -39
  8. utils.py +0 -228
.github/workflows DELETED
@@ -1,20 +0,0 @@
1
- name: Sync to Hugging Face hub
2
- on:
3
- push:
4
- branches: [main]
5
-
6
- # to run this workflow manually from the Actions tab
7
- workflow_dispatch:
8
-
9
- jobs:
10
- sync-to-hub:
11
- runs-on: ubuntu-latest
12
- steps:
13
- - uses: actions/checkout@v3
14
- with:
15
- fetch-depth: 0
16
- lfs: true
17
- - name: Push to hub
18
- env:
19
- HF_TOKEN: ${{ secrets.token }}
20
- run: git push https://mehradans92:[email protected]/spaces/mehradans92/SPACE_NAME main
README.md CHANGED
@@ -1,6 +1,5 @@
1
  ---
2
  title: Decode Elm
3
- python_version: 3.8.16
4
  emoji: 📚
5
  colorFrom: green
6
  colorTo: pink
 
1
  ---
2
  title: Decode Elm
 
3
  emoji: 📚
4
  colorFrom: green
5
  colorTo: pink
app.py CHANGED
@@ -1,14 +1,18 @@
1
- import streamlit as st # Web App
2
  import os
3
  from PIL import Image
4
- from utils import *
5
- import asyncio
6
 
7
  import pickle
8
  docs = None
9
  api_key = ' '
10
 
11
-
12
  st.set_page_config(layout="wide")
13
 
14
  image = Image.open('arxiv_decode.png')
@@ -16,114 +20,251 @@ st.image(image, width=1000)
16
 
17
  #title
18
  st.title("Answering questions from scientific papers")
19
- st.markdown("##### This tool will allow you to ask questions and get answers based on scientific papers. It uses OpenAI's GPT models, and you must have your own API key. Each query is about 10k tokens, which costs about only $0.20 on your own API key, charged by OpenAI.")
20
- st.markdown("##### Current version searches on different pre-print servers including [arXiv](https://arxiv.org), [chemRxiv](https://chemrxiv.org/engage/chemrxiv/public-dashboard), [bioRxiv](https://www.biorxiv.org/) and [medRxiv](https://www.medrxiv.org/). 🚧Under development🚧")
21
  st.markdown("Used libraries:\n * [PaperQA](https://github.com/whitead/paper-qa) \n* [langchain](https://github.com/hwchase17/langchain)")
22
- st.markdown("See this [tweet](https://twitter.com/MehradAnsari/status/1627649959204888576) for a demo.")
23
-
24
 
25
  api_key_url = 'https://help.openai.com/en/articles/4936850-where-do-i-find-my-secret-api-key'
26
 
27
  api_key = st.text_input('OpenAI API Key',
28
  placeholder='sk-...',
29
  help=f"['What is that?']({api_key_url})",
30
- type="password",
31
- value = '')
32
 
33
  os.environ["OPENAI_API_KEY"] = f"{api_key}" #
34
- # if len(api_key) != 51:
35
- # st.warning('Please enter a valid OpenAI API key.', icon="⚠️")
36
 
37
 
38
  max_results_current = 5
39
  max_results = max_results_current
40
- def search_click_callback(search_query, max_results, XRxiv_servers=[]):
41
  global pdf_info, pdf_citation
42
- search_engines = XRxivQuery(search_query, max_results, XRxiv_servers=XRxiv_servers)
43
- pdf_info = search_engines.call_API()
44
- search_engines.download_pdf()
45
-
46
  return pdf_info
47
 
48
  with st.form(key='columns_in_form', clear_on_submit = False):
49
- c1, c2 = st.columns([5, 0.8])
50
  with c1:
51
  search_query = st.text_input("Input search query here:", placeholder='Keywords for most relevant search...', value=''
52
- )
53
 
54
  with c2:
55
- max_results = st.number_input("Max papers", value=max_results_current)
56
  max_results_current = max_results_current
57
- st.markdown('Pre-print server')
58
- checks = st.columns(4)
59
- with checks[0]:
60
- ArXiv_check = st.checkbox('arXiv')
61
- with checks[1]:
62
- ChemArXiv_check = st.checkbox('chemRxiv')
63
- with checks[2]:
64
- BioArXiv_check = st.checkbox('bioRxiv')
65
- with checks[3]:
66
- MedrXiv_check = st.checkbox('medRxiv')
67
-
68
  searchButton = st.form_submit_button(label = 'Search')
 
69
 
70
  if searchButton:
71
- # checking which pre-print servers selected
72
- XRxiv_servers = []
73
- if ArXiv_check:
74
- XRxiv_servers.append('rxiv')
75
- if ChemArXiv_check:
76
- XRxiv_servers.append('chemrxiv')
77
- if BioArXiv_check:
78
- XRxiv_servers.append('biorxiv')
79
- if MedrXiv_check:
80
- XRxiv_servers.append('medrxiv')
81
  global pdf_info
82
- pdf_info = search_click_callback(search_query, max_results, XRxiv_servers=XRxiv_servers)
83
  if 'pdf_info' not in st.session_state:
84
  st.session_state.key = 'pdf_info'
85
  st.session_state['pdf_info'] = pdf_info
86
 
87
 
88
- def answer_callback(question_query, word_count):
89
  import paperqa
90
  global docs
91
  if docs is None:
 
92
  pdf_info = st.session_state['pdf_info']
 
93
  docs = paperqa.Docs()
94
- pdf_paths = [f"{p[4]}/{p[0].replace(':','').replace('/','').replace('.','')}.pdf" for p in pdf_info]
95
  pdf_citations = [p[5] for p in pdf_info]
96
  print(list(zip(pdf_paths, pdf_citations)))
 
97
  for d, c in zip(pdf_paths, pdf_citations):
 
98
  docs.add(d, c)
99
- docs._build_texts_index()
100
- answer = docs.query(question_query, length_prompt=f'use {word_count:d} words')
101
- st.success('Voila! 😃')
102
  return answer.formatted_answer
103
 
104
- with st.form(key='question_form', clear_on_submit = False):
105
- c1, c2 = st.columns([6, 2])
106
- with c1:
107
- question_query = st.text_input("What do you wanna know from these papers?", placeholder='Input questions here...',
108
  value='')
109
- with c2:
110
- word_count = st.slider("Suggested number of words in your answer?", 30, 300, 100)
111
- submitButton = st.form_submit_button('Submit')
112
 
113
  if submitButton:
114
  with st.expander("Found papers:", expanded=True):
115
  st.write(f"{st.session_state['all_reference_text']}")
116
- with st.spinner('⏳ Please wait...'):
117
- start = time.time()
118
- final_answer = answer_callback(question_query, word_count)
119
- length_answer = len(final_answer)
120
- st.text_area("Answer:", final_answer, height=max(length_answer//4, 100))
121
- end = time.time()
122
- clock_time = end - start
123
- with st.empty():
124
- st.write(f"✔️ Task completed in {clock_time:.2f} seconds.")
 
125
 
 
126
 
127
 
128
 
129
 
 
 
 
1
+ import streamlit as st #Web App
2
+ import urllib
3
+ from lxml import html
4
+ import requests
5
+ import re
6
  import os
7
+ from stqdm import stqdm
8
+ import time
9
+ import shutil
10
  from PIL import Image
11
 
12
  import pickle
13
  docs = None
14
  api_key = ' '
15
 
 
16
  st.set_page_config(layout="wide")
17
 
18
  image = Image.open('arxiv_decode.png')
 
20
 
21
  #title
22
  st.title("Answering questions from scientific papers")
23
+ st.markdown("##### This tool will allow you to ask questions and get answers based on scientific papers. It uses OpenAI's GPT models, and you must have your own API key. Each query is about 10k tokens, which costs about only $0.20 on your own API key which is charged by OpenAI.")
24
+ st.markdown("##### Current version searches on [ArXiv](https://arxiv.org) papers only. 🚧Under development🚧")
25
  st.markdown("Used libraries:\n * [PaperQA](https://github.com/whitead/paper-qa) \n* [langchain](https://github.com/hwchase17/langchain)")
26
 
27
  api_key_url = 'https://help.openai.com/en/articles/4936850-where-do-i-find-my-secret-api-key'
28
 
29
  api_key = st.text_input('OpenAI API Key',
30
  placeholder='sk-...',
31
  help=f"['What is that?']({api_key_url})",
32
+ type="password")
 
33
 
34
  os.environ["OPENAI_API_KEY"] = f"{api_key}" #
35
+ if len(api_key) != 51:
36
+ st.warning('Please enter a valid OpenAI API key.', icon="⚠️")
37
+
38
+
39
+
40
+ def call_arXiv_API(search_query, search_by='all', sort_by='relevance', max_results='10', folder_name='arxiv-dl'):
41
+ '''
42
+ Scrapes the arXiv's HTML to get data from each entry in a search. Entries have the following formatting:
43
+ <entry>\n
44
+ <id>http://arxiv.org/abs/2008.04584v2</id>\n
45
+ <updated>2021-05-11T12:00:24Z</updated>\n
46
+ <published>2020-08-11T08:47:06Z</published>\n
47
+ <title>Bayesian Selective Inference: Non-informative Priors</title>\n
48
+ <summary> We discuss Bayesian inference for parameters selected using the data. First,\nwe provide a critical analysis of the existing positions in the literature\nregarding the correct Bayesian approach under selection. Second, we propose two\ntypes of non-informative priors for selection models. These priors may be\nemployed to produce a posterior distribution in the absence of prior\ninformation as well as to provide well-calibrated frequentist inference for the\nselected parameter. We test the proposed priors empirically in several\nscenarios.\n</summary>\n
49
+ <author>\n <name>Daniel G. Rasines</name>\n </author>\n <author>\n <name>G. Alastair Young</name>\n </author>\n
50
+ <arxiv:comment xmlns:arxiv="http://arxiv.org/schemas/atom">24 pages, 7 figures</arxiv:comment>\n
51
+ <link href="http://arxiv.org/abs/2008.04584v2" rel="alternate" type="text/html"/>\n
52
+ <link title="pdf" href="http://arxiv.org/pdf/2008.04584v2" rel="related" type="application/pdf"/>\n
53
+ <arxiv:primary_category xmlns:arxiv="http://arxiv.org/schemas/atom" term="math.ST" scheme="http://arxiv.org/schemas/atom"/>\n
54
+ <category term="math.ST" scheme="http://arxiv.org/schemas/atom"/>\n
55
+ <category term="stat.TH" scheme="http://arxiv.org/schemas/atom"/>\n
56
+ </entry>\n
57
+ '''
58
+
59
+ # Remove spaces in search query
60
+ search_query=search_query.strip().replace(" ", "+")
61
+ # Call arXiv API
62
+ arXiv_url=f'http://export.arxiv.org/api/query?search_query={search_by}:{search_query}&sortBy={sort_by}&start=0&max_results={max_results}'
63
+ with urllib.request.urlopen(arXiv_url) as url:
64
+ s = url.read()
65
+
66
+ # Parse the xml data
67
+ root = html.fromstring(s)
68
+ # Fetch relevant pdf information
69
+ pdf_entries = root.xpath("entry")
70
+
71
+ pdf_titles = []
72
+ pdf_authors = []
73
+ pdf_urls = []
74
+ pdf_categories = []
75
+ folder_names = []
76
+ pdf_citation = []
77
+ pdf_years = []
78
+
79
+ for i, pdf in enumerate(pdf_entries):
80
+ # print(pdf.xpath('updated/text()')[0][:4])
81
+ # xpath returns a list with every occurrence of the html path. Since we're getting each entry individually, we take the first element to avoid an unnecessary list
82
+ pdf_titles.append(re.sub('[^a-zA-Z0-9]', ' ', pdf.xpath("title/text()")[0]))
83
+ pdf_authors.append(pdf.xpath("author/name/text()"))
84
+ pdf_urls.append(pdf.xpath("link[@title='pdf']/@href")[0])
85
+ pdf_categories.append(pdf.xpath("category/@term"))
86
+ folder_names.append(folder_name)
87
+ pdf_years.append(pdf.xpath('updated/text()')[0][:4])
88
+ pdf_citation.append(f"{', '.join(pdf_authors[i])}, {pdf_titles[i]}. arXiv [{pdf_categories[i][0]}] ({pdf_years[i]}), (available at {pdf_urls[i]}).")
89
+
90
+
91
+
92
+ pdf_info=list(zip(pdf_titles, pdf_urls, pdf_authors, pdf_categories, folder_names, pdf_citation))
93
+
94
+ # Check number of available files
95
+ # print('Requesting {max_results} files'.format(max_results=max_results))
96
+ if len(pdf_urls)<int(max_results):
97
+ matching_pdf_num=len(pdf_urls)
98
+ # print('Only {matching_pdf_num} files available'.format(matching_pdf_num=matching_pdf_num))
99
+ return pdf_info, pdf_citation
100
+
101
+
102
+ def download_pdf(pdf_info):
103
+
104
+ # if len(os.listdir(f'./{folder_name}') ) != 0:
105
+ # check folder is empty to avoid using papers from old runs:
106
+ # os.remove(f'./{folder_name}/*')
107
+ all_reference_text = []
108
+ for i,p in enumerate(stqdm(pdf_info, desc='Searching and downloading papers')):
109
+
110
+ pdf_title=p[0]
111
+ pdf_url=p[1]
112
+ pdf_author=p[2]
113
+ pdf_category=p[3]
114
+ folder_name=p[4]
115
+ pdf_citation=p[5]
116
+ r = requests.get(pdf_url, allow_redirects=True)
117
+ if i == 0:
118
+ if not os.path.exists(f'{folder_name}'):
119
+ os.makedirs(f"{folder_name}")
120
+ else:
121
+ shutil.rmtree(f'{folder_name}')
122
+ os.makedirs(f"{folder_name}")
123
+ with open(f'{folder_name}/{pdf_title}.pdf', 'wb') as currP:
124
+ currP.write(r.content)
125
+ if i == 0:
126
+ st.markdown("###### Papers found:")
127
+ st.markdown(f"{i+1}. {pdf_citation}")
128
+ time.sleep(0.15)
129
+ all_reference_text.append(f"{i+1}. {pdf_citation}\n")
130
+ if 'all_reference_text' not in st.session_state:
131
+ st.session_state.key = 'all_reference_text'
132
+ st.session_state['all_reference_text'] = ' '.join(all_reference_text)
133
+
134
+ # print(all_reference_text)
135
+
136
 
137
 
138
  max_results_current = 5
139
  max_results = max_results_current
140
+ # pdf_info = ''
141
+ # pdf_citation = ''
142
+ def search_click_callback(search_query, max_results):
143
  global pdf_info, pdf_citation
144
+ pdf_info, pdf_citation = call_arXiv_API(f'{search_query}', max_results=max_results)
145
+ download_pdf(pdf_info)
146
  return pdf_info
147
 
148
+
149
+
150
+
151
  with st.form(key='columns_in_form', clear_on_submit = False):
152
+ c1, c2 = st.columns([8,1])
153
  with c1:
154
  search_query = st.text_input("Input search query here:", placeholder='Keywords for most relevant search...', value=''
155
+ )#search_query, max_results_current))
156
 
157
  with c2:
158
+ max_results = st.text_input("Max papers", value=max_results_current)
159
  max_results_current = max_results_current
160
  searchButton = st.form_submit_button(label = 'Search')
161
+ # search_click(search_query, max_results_default)
162
 
163
  if searchButton:
164
  global pdf_info
165
+ pdf_info = search_click_callback(search_query, max_results)
166
  if 'pdf_info' not in st.session_state:
167
  st.session_state.key = 'pdf_info'
168
  st.session_state['pdf_info'] = pdf_info
169
+ # print(f'This is PDF info from search:{pdf_info}')
170
+
171
+
172
+ # def tokenize_callback():
173
+
174
+ # return docs
175
 
176
+ # tokenization_form = st.form(key='tokenization-form')
177
+ # tokenization_form.markdown(f"Happy with your paper search results? ")
178
+ # toknizeButton = tokenization_form.form_submit_button(label = "Yes! Let's tokenize.", on_click=tokenize_callback())
179
+ # tokenization_form.markdown("If not, change keywords and search again. [This step costs!](https://openai.com/api/pricing/)")
180
 
181
+
182
+
183
+ # submitButton = form.form_submit_button('Submit')
184
+ # with st.form(key='tokenization_form', clear_on_submit = False):
185
+ # st.markdown(f"Happy with your paper search results? If not, change keywords and search again. [This step costs!](https://openai.com/api/pricing/)")
186
+ # # st.text_input("Input search query here:", placeholder='Keywords for most relevant search...'
187
+ # # )#search_query, max_results_current))
188
+ # toknizeButton = st.form_submit_button(label = "Yes! Let's tokenize.")
189
+
190
+ # if toknizeButton:
191
+ # tokenize_callback()
192
+
193
+ # tokenize_callback()
194
+
195
+
196
+
197
+
198
+ def answer_callback(question_query):
199
  import paperqa
200
  global docs
201
+ # global pdf_info
202
+ progress_text = "Please wait..."
203
+ # my_bar = st.progress(0, text = progress_text)
204
+ st.info('Please wait...', icon="🔥")
205
  if docs is None:
206
+ # my_bar.progress(0.2, "Please wait...")
207
  pdf_info = st.session_state['pdf_info']
208
+ # print('building docs')
209
  docs = paperqa.Docs()
210
+ pdf_paths = [f"{p[4]}/{p[0]}.pdf" for p in pdf_info]
211
  pdf_citations = [p[5] for p in pdf_info]
212
  print(list(zip(pdf_paths, pdf_citations)))
213
+
214
  for d, c in zip(pdf_paths, pdf_citations):
215
+ # print(d,c)
216
  docs.add(d, c)
217
+ # docs._build_faiss_index()
218
+ answer = docs.query(question_query)
219
+ # print(answer.formatted_answer)
220
+ # my_bar.progress(1.0, "Done!")
221
+ st.success('Voila!')
222
  return answer.formatted_answer
223
 
224
+
225
+
226
+ form = st.form(key='question_form')
227
+ question_query = form.text_input("What do you wanna know from these papers?", placeholder='Input questions here...',
228
  value='')
229
+ submitButton = form.form_submit_button('Submit')
230
 
231
  if submitButton:
232
  with st.expander("Found papers:", expanded=True):
233
  st.write(f"{st.session_state['all_reference_text']}")
234
+ st.text_area("Answer:", answer_callback(question_query), height=600)
235
+
236
+ # with st.form(key='question_form', clear_on_submit = False):
237
+ # question_query = st.text_input("What do you wanna know from these papers?", placeholder='Input questions here')
238
+ # # st.text_input("Input search query here:", placeholder='Keywords for most relevant search...'
239
+ # # )#search_query, max_results_current))
240
+ # submitButton = form.form_submit_button(label = "Submit", on_click=answer_callback(question_query))
241
+
242
+
243
+ # Simulation-based inference bayesian model selection
244
+
245
+
246
+
247
+
248
+
249
+ # test = "<ul> \
250
+ # <li>List item here</li> \
251
+ # <li>List item here</li> \
252
+ # <li>List item here</li> \
253
+ # <li>List item here</li> \
254
+ # </ul>"
255
+ # test = "'''It was the best of times, it was the worst of times, it was \
256
+ # the age of wisdom, it was the age of foolishness, it was \
257
+ # the epoch of belief, it was the epoch of incredulity, it \
258
+ # was the season of Light, it was the season of Darkness, it\
259
+ # was the spring of hope, it was the winter of despair, (...)'''"
260
 
261
+ # citation_text = st.text_area('Papers found:',test, height=300) # f'{pdf_citation}'
262
 
263
 
264
+ # for i, cite in enumerate(pdf_citation):
265
+ # st.markdown(f'{i+1}. {cite}')
266
+ # time.sleep(1)
267
 
268
 
269
+ # def make_clickable('link',text):
270
+ # return f'<a target="_blank" href="{link}">{text}'
arxiv_decode.png CHANGED
requirements.txt CHANGED
@@ -2,6 +2,4 @@ streamlit
2
  urllib3
3
  lxml
4
  stqdm
5
- paper-qa
6
- bs4
7
- altair<5
 
2
  urllib3
3
  lxml
4
  stqdm
5
+ paper-qa
 
 
test/__init__.py DELETED
File without changes
test/test.py DELETED
@@ -1,39 +0,0 @@
1
- import unittest
2
- import sys
3
- sys.path.append('../')
4
- from utils import *
5
- import os
6
- import shutil
7
-
8
- class Utils(unittest.TestCase):
9
- def test_arXiv_API(self):
10
- search_query = 'Tools for Landscape Analysis of Optimisation Problems in Procedural Content Generation for Games'
11
- pdf_info = "('Tools for Landscape Analysis of Optimisation Problems in Procedural Content Generation for Games', 'http://arxiv.org/pdf/2302.08479v1', ['Vanessa Volz', 'Boris Naujoks', 'Pascal Kerschke', 'Tea Tusar'], ['cs.AI'], 'docs', 'Vanessa Volz, Boris Naujoks, Pascal Kerschke, Tea Tusar, Tools for Landscape Analysis of Optimisation Problems in Procedural Content Generation for Games. arXiv [cs.AI] (2023), (available at http://arxiv.org/pdf/2302.08479v1).')"
12
- max_results = 1
13
- XRxiv_servers = ['rxiv']
14
- search_engines = XRxivQuery(search_query, max_results, XRxiv_servers=XRxiv_servers)
15
- test_pdf_info = search_engines.call_API()
16
- self.assertEqual(pdf_info, str(test_pdf_info[0]))
17
-
18
- def test_download_pdf(self):
19
- search_query = 'Serverless Applications: Why, When, and How?'
20
- max_results = 1
21
- XRxiv_servers = ['rxiv']
22
- search_engines = XRxivQuery(search_query, max_results, XRxiv_servers=XRxiv_servers)
23
- test_pdf_info = search_engines.call_API()
24
- search_engines.download_pdf()
25
- dowloaded_dir = 'docs/Serverless Applications Why When and How .pdf'
26
- self.assertTrue(os.path.exists(dowloaded_dir))
27
- shutil.rmtree(f'docs/')
28
-
29
- def test_distibute_max_papers(self):
30
- XRxiv_servers = ['rxiv', 'medrxiv']
31
- max_results = 10
32
- max_papers_in_server = distibute_max_papers(max_results, XRxiv_servers)
33
- self.assertEqual(max_results, np.sum(max_papers_in_server))
34
- self.assertEqual(max_papers_in_server[2], 0)
35
- self.assertGreater(max_papers_in_server[0],0)
36
- self.assertGreater(max_papers_in_server[3],0)
37
-
38
- if __name__ == '__main__':
39
- unittest.main()
utils.py DELETED
@@ -1,228 +0,0 @@
1
- import urllib
2
- import streamlit as st
3
- import requests
4
- import re
5
- from stqdm import stqdm
6
- import os
7
- import shutil
8
- import time
9
- from bs4 import BeautifulSoup as bs
10
- from datetime import datetime
11
- from random import uniform as rand
12
- import json
13
- import numpy as np
14
-
15
-
16
-
17
-
18
- class XRxivQuery:
19
- def __init__(self, search_query, max_results, folder_name='docs', XRxiv_servers = [], search_by='all', sort_by='relevance'):
20
- self.search_query = search_query
21
- self.max_results = max_results
22
- self.folder_name = folder_name
23
- self.XRxiv_servers = XRxiv_servers
24
- self.search_by = search_by
25
- self.sort_by = sort_by
26
- self.all_pdf_info = []
27
- self.all_pdf_citation = []
28
-
29
- def call_API(self):
30
- search_query = self.search_query.strip().replace(" ", "+").split('+')#.replace(", ", "+").replace(",", "+")#.split('+')
31
- max_papers_in_server = distibute_max_papers(self.max_results, self.XRxiv_servers)
32
- if 'rxiv' in self.XRxiv_servers:
33
- '''
34
- Scraps the arXiv's html to get data from each entry in a search. Entries has the following formatting:
35
- <entry>\n
36
- <id>http://arxiv.org/abs/2008.04584v2</id>\n
37
- <updated>2021-05-11T12:00:24Z</updated>\n
38
- <published>2020-08-11T08:47:06Z</published>\n
39
- <title>Bayesian Selective Inference: Non-informative Priors</title>\n
40
- <summary> We discuss Bayesian inference for parameters selected using the data. First,\nwe provide a critical analysis of the existing positions in the literature\nregarding the correct Bayesian approach under selection. Second, we propose two\ntypes of non-informative priors for selection models. These priors may be\nemployed to produce a posterior distribution in the absence of prior\ninformation as well as to provide well-calibrated frequentist inference for the\nselected parameter. We test the proposed priors empirically in several\nscenarios.\n</summary>\n
41
- <author>\n <name>Daniel G. Rasines</name>\n </author>\n <author>\n <name>G. Alastair Young</name>\n </author>\n
42
- <arxiv:comment xmlns:arxiv="http://arxiv.org/schemas/atom">24 pages, 7 figures</arxiv:comment>\n
43
- <link href="http://arxiv.org/abs/2008.04584v2" rel="alternate" type="text/html"/>\n
44
- <link title="pdf" href="http://arxiv.org/pdf/2008.04584v2" rel="related" type="application/pdf"/>\n
45
- <arxiv:primary_category xmlns:arxiv="http://arxiv.org/schemas/atom" term="math.ST" scheme="http://arxiv.org/schemas/atom"/>\n
46
- <category term="math.ST" scheme="http://arxiv.org/schemas/atom"/>\n
47
- <category term="stat.TH" scheme="http://arxiv.org/schemas/atom"/>\n
48
- </entry>\n
49
- '''
50
- # Call arXiv API
51
- journal = 'arXiv'
52
- max_rxiv_papers = max_papers_in_server[0]
53
- arXiv_url=f'http://export.arxiv.org/api/query?search_query={self.search_by}:{"+".join(search_query)}&sortBy={self.sort_by}&start=0&max_results={max_rxiv_papers}'
54
- with urllib.request.urlopen(arXiv_url) as url:
55
- s = url.read()
56
-
57
- # Parse the xml data
58
- from lxml import html
59
- root = html.fromstring(s)
60
- # Fetch relevant pdf information
61
- pdf_entries = root.xpath("entry")
62
- pdf_titles = []
63
- pdf_authors = []
64
- pdf_urls = []
65
- pdf_categories = []
66
- folder_names = []
67
- pdf_citation = []
68
- pdf_years = []
69
- for i, pdf in enumerate(pdf_entries):
70
- pdf_titles.append(re.sub('[^a-zA-Z0-9]', ' ', pdf.xpath("title/text()")[0]))
71
- pdf_authors.append(pdf.xpath("author/name/text()"))
72
- pdf_urls.append(pdf.xpath("link[@title='pdf']/@href")[0])
73
- pdf_categories.append(pdf.xpath("category/@term"))
74
- folder_names.append(self.folder_name)
75
- pdf_years.append(pdf.xpath('updated/text()')[0][:4])
76
- pdf_citation.append(f"{', '.join(pdf_authors[i])}, {pdf_titles[i]}. {journal} [{pdf_categories[i][0]}] ({pdf_years[i]}), (available at {pdf_urls[i]}).")
77
- pdf_info = list(zip(pdf_titles, pdf_urls, pdf_authors, pdf_categories, folder_names, pdf_citation))
78
- self.all_pdf_info.append(pdf_info)
79
-
80
- if 'chemrxiv' in self.XRxiv_servers:
81
- '''
82
- See https://chemrxiv.org/engage/chemrxiv/public-api/documentation#tag/public-apiv1items/operation/getPublicapiV1Items
83
-
84
- '''
85
- # Call chemrxiv API
86
- journal = 'chemRxiv'
87
- max_chemrxiv_papers = max_papers_in_server[1]
88
- chemrxiv_url = f'https://chemrxiv.org/engage/chemrxiv/public-api/v1/items?term="{"%20".join(search_query)}"&sort=RELEVANT_DESC&limit={max_chemrxiv_papers}'
89
- req = urllib.request.Request(
90
- url=chemrxiv_url,
91
- headers={'User-Agent': 'Mozilla/5.0'}
92
- )
93
- s = urllib.request.urlopen(req).read()
94
- jsonResponse = json.loads(s.decode('utf-8'))
95
- pdf_titles = []
96
- pdf_authors = []
97
- pdf_urls = []
98
- pdf_categories = []
99
- folder_names = []
100
- pdf_citation = []
101
- pdf_years = []
102
- for i,d in enumerate(jsonResponse['itemHits']):
103
- pdf_titles.append(d['item']['title'].replace("\n", ""))
104
- authors_dict = d['item']['authors']
105
- pdf_authors.append([n['firstName']+' '+ n['lastName'] for n in authors_dict])
106
- pdf_urls.append('https://chemrxiv.org/engage/chemrxiv/article-details/'+ str(d['item']['id']))
107
- pdf_categories.append(journal)
108
- folder_names.append(self.folder_name)
109
- pdf_years.append(d['item']['statusDate'][:4])
110
- pdf_citation.append(f"{', '.join(pdf_authors[i])}, {pdf_titles[i]}. {journal} [{pdf_categories[i][0]}] ({pdf_years[i]}), (available at {pdf_urls[i]}).")
111
- # overwriting url cause chermRxiv sucks!
112
- pdf_urls[i] = d['item']['asset']['original']['url']
113
- pdf_info = list(zip(pdf_titles, pdf_urls, pdf_authors, pdf_categories, folder_names, pdf_citation))
114
- self.all_pdf_info.append(pdf_info)
115
-
116
-
117
- if 'biorxiv' in self.XRxiv_servers or 'medrxiv' in self.XRxiv_servers:
118
- '''
119
- Scraps the biorxiv and medrxiv's html to get data from each entry in a search. Entries has the following formatting:
120
- <li class="first last odd search-result result-jcode-medrxiv search-result-highwire-citation">
121
- <div class="highwire-article-citation highwire-citation-type-highwire-article node" data-apath="/medrxiv/early/2021/02/18/2021.02.12.21251663.atom" data-pisa="medrxiv;2021.02.12.21251663v1" data-pisa-master="medrxiv;2021.02.12.21251663" id="node-medrxivearly202102182021021221251663atom1512875027"><div class="highwire-cite highwire-cite-highwire-article highwire-citation-biorxiv-article-pap-list clearfix">
122
- <span class="highwire-cite-title">
123
- <a class="highwire-cite-linked-title" data-hide-link-title="0" data-icon-position="" href="http://medrxiv.org/content/early/2021/02/18/2021.02.12.21251663">
124
- <span class="highwire-cite-title">ClinGen Variant Curation Interface: A Variant Classification Platform for the Application of Evidence Criteria from ACMG/AMP Guidelines</span></a> </span>
125
- <div class="highwire-cite-authors"><span class="highwire-citation-authors">
126
- <span class="highwire-citation-author first" data-delta="0"><span class="nlm-given-names">Christine G.</span> <span class="nlm-surname">Preston</span></span>,
127
- <span class="highwire-citation-author" data-delta="1"><span class="nlm-given-names">Matt W.</span> <span class="nlm-surname">Wright</span></span>,
128
- <span class="highwire-citation-author" data-delta="2"><span class="nlm-given-names">Rao</span> <span class="nlm-surname">Madhavrao</span></span>,
129
- <div class="highwire-cite-metadata"><span class="highwire-cite-metadata-journal highwire-cite-metadata">medRxiv </span>
130
- <span class="highwire-cite-metadata-pages highwire-cite-metadata">2021.02.12.21251663; </span><span class="highwire-cite-metadata-doi highwire-cite-metadata">
131
- <span class="doi_label">doi:</span> https://doi.org/10.1101/2021.02.12.21251663 </span></div>
132
- <div class="highwire-cite-extras"><div class="hw-make-citation" data-encoded-apath=";medrxiv;early;2021;02;18;2021.02.12.21251663.atom" data-seqnum="0" id="hw-make-citation-0">
133
- <a class="link-save-citation-save use-ajax hw-link-save-unsave-catation link-icon" href="/highwire-save-citation/saveapath/%3Bmedrxiv%3Bearly%3B2021%3B02%3B18%3B2021.02.12.21251663.atom/nojs/0" id="link-save-citation-toggle-0" title="Save">
134
- <span class="icon-plus"></span> <span class="title">Add to Selected Citations</span></a></div></div>
135
- </div>
136
- </div></li>
137
- </entry>\n
138
- '''
139
- if 'biorxiv' in self.XRxiv_servers and 'medrxiv' not in self.XRxiv_servers:
140
- # print('Searching biorxiv\n')
141
- max_biorxiv_papers = max_papers_in_server[2]
142
- journals_str = f'%20jcode%3Abiorxiv'
143
- if 'biorxiv' not in self.XRxiv_servers and 'medrxiv' in self.XRxiv_servers:
144
- # print('Searching medrxiv\n')
145
- max_biorxiv_papers = max_papers_in_server[3]
146
- journals_str = f'%20jcode%3Amedrxiv'
147
- if 'biorxiv' in self.XRxiv_servers and 'medrxiv' in self.XRxiv_servers:
148
- # print('Searching both biorxiv and medrxiv\n')
149
- max_biorxiv_papers = max_papers_in_server[3]+ max_papers_in_server[2] # birxiv and medrxiv are together.
150
- journals_str = f'%20jcode%3Abiorxiv%7C%7Cmedrxiv'
151
-
152
- subject_str = ('%20').join(self.search_query[0].split())
153
- for subject in search_query[1:]:
154
- subject_str = subject_str + '%252B' + ('%20').join(subject.split())
155
-
156
- current_dateTime = datetime.now()
157
- today = str(current_dateTime)[:10]
158
- start_day = '2013-01-01'
159
- arXiv_url = f'https://www.biorxiv.org/search/'
160
- arXiv_url += subject_str + journals_str + f'%20limit_from%3A2{start_day}%20limit_to%3A{today}%20numresults%3A{max_biorxiv_papers}%20sort%3Arelevance-rank%20format_result%3Astandard'
161
-
162
- url_response = requests.post(arXiv_url)
163
- html = bs(url_response.text, features='html.parser')
164
- pdf_entries = html.find_all(attrs={'class': 'search-result'})
165
- pdf_titles = []
166
- pdf_authors = []
167
- pdf_urls = []
168
- pdf_categories = []
169
- folder_names = []
170
- pdf_citation = []
171
- pdf_years = []
172
- for i, pdf in enumerate(pdf_entries):
173
- pdf_titles.append(pdf.find('span', attrs={'class': 'highwire-cite-title'}).text.strip())
174
- pdf_authors.append(pdf.find('span', attrs={'class': 'highwire-citation-authors'}).text.strip().split(', '))
175
- pdf_url = pdf.find('a', href=True)['href']
176
- if pdf_url[:4] != 'http':
177
- pdf_url = f'http://www.biorxiv.org'+ pdf_url
178
- pdf_urls.append(pdf_url)
179
- pdf_categories.append(pdf.find('span', attrs={'class': 'highwire-cite-metadata-journal highwire-cite-metadata'}).text.strip())
180
- folder_names.append(self.folder_name)
181
- pdf_years.append(pdf.find('span', attrs={'class': 'highwire-cite-metadata-pages highwire-cite-metadata'}).text.strip()[:4])
182
- pdf_citation.append(f"{', '.join(pdf_authors[i])}, {pdf_titles[i]}. {pdf_categories[i]} ({pdf_years[i]}), (available at {pdf_urls[i]}).")
183
-
184
- pdf_info = list(zip(pdf_titles, pdf_urls, pdf_authors, pdf_categories, folder_names, pdf_citation))
185
- self.all_pdf_info.append(pdf_info)
186
-
187
- self.all_pdf_info = [item for sublist in self.all_pdf_info for item in sublist]
188
- return self.all_pdf_info
189
-
190
- def download_pdf(self):
191
- all_reference_text = []
192
- for i,p in enumerate(stqdm(self.all_pdf_info, desc='🔍 Searching and downloading papers')):
193
- pdf_title=p[0]
194
- pdf_category=p[3]
195
- pdf_url=p[1]
196
- if pdf_category in ['medRxiv', 'bioRxiv']:
197
- pdf_url += '.full.pdf'
198
- pdf_file_name=p[0].replace(':','').replace('/','').replace('.','').replace('\n','')
199
- folder_name=p[4]
200
- pdf_citation=p[5]
201
- r = requests.get(pdf_url, allow_redirects=True)
202
- if i == 0:
203
- if not os.path.exists(f'{folder_name}'):
204
- os.makedirs(f"{folder_name}")
205
- else:
206
- shutil.rmtree(f'{folder_name}')
207
- os.makedirs(f"{folder_name}")
208
- with open(f'{folder_name}/{pdf_file_name}.pdf', 'wb') as f:
209
- f.write(r.content)
210
- if i == 0:
211
- st.markdown("###### Papers found:")
212
- st.markdown(f"{i+1}. {pdf_citation}")
213
- time.sleep(0.15)
214
- all_reference_text.append(f"{i+1}. {pdf_citation}\n")
215
- if 'all_reference_text' not in st.session_state:
216
- st.session_state.key = 'all_reference_text'
217
- st.session_state['all_reference_text'] = ' '.join(all_reference_text)
218
-
219
-
220
-
221
- def distibute_max_papers(max_results, XRxiv_servers):
222
- fixed_length = len(XRxiv_servers)
223
- sample = np.random.multinomial(max_results - fixed_length, np.ones(fixed_length)/fixed_length, size=1)[0] + 1
224
- max_papers_in_server = np.zeros(4, dtype=int)
225
- all_servers = ['rxiv', 'chemrxiv', 'biorxiv', 'medrxiv']
226
- for i,s in enumerate(XRxiv_servers):
227
- max_papers_in_server[all_servers.index(s)] = int(sample[i])
228
- return max_papers_in_server
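For context on the removed module, the deleted utils.py exposed XRxivQuery, which the old test suite drove end to end. A minimal usage sketch, assuming the removed utils.py (and its streamlit/stqdm dependencies) are still importable; the query string is taken from the old test, the other values are placeholders:

```python
# Mirrors how test/test.py exercised the now-deleted XRxivQuery class.
from utils import XRxivQuery

search_query = 'Serverless Applications: Why, When, and How?'  # query used in the old test
XRxiv_servers = ['rxiv', 'medrxiv']  # any of: 'rxiv', 'chemrxiv', 'biorxiv', 'medrxiv'

engine = XRxivQuery(search_query, max_results=5, XRxiv_servers=XRxiv_servers)
pdf_info = engine.call_API()   # list of (title, url, authors, categories, folder, citation) tuples
engine.download_pdf()          # saves PDFs under ./docs and records citations in st.session_state
```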