awacke1 committed
Commit f65e87d · verified · 1 Parent(s): c2fee75

Update app.py

Files changed (1): app.py +18 -225
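The change strips app.py from the multi-feature build back to a minimal bulk downloader: the curated URLS preset list, history.json URL tracking with per-URL hashed subdirectories, the file editor and per-file Edit/Save/Delete controls, the zip helpers (zip_subdirs, create_zip_of_files, get_zip_download_link), and the Delete All / Download All sidebar actions are all removed. What remains is a sidebar URL box, download_html_and_files to mirror a page and its linked files into the working directory, and base64 download links for the results.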
app.py CHANGED
@@ -4,74 +4,8 @@ import os
  import urllib
  import base64
  from bs4 import BeautifulSoup
- import hashlib
- import json
- import uuid
- import glob
- import zipfile

  EXCLUDED_FILES = ['app.py', 'requirements.txt', 'pre-requirements.txt', 'packages.txt', 'README.md','.gitattributes', "backup.py","Dockerfile"]
- URLS = {
-     "National Library of Medicine": "https://www.nlm.nih.gov/",
-     "World Health Organization": "https://www.who.int/",
-     "UHCProvider - United Health and Optum": "https://www.uhcprovider.com/",
-     "CMS - Centers for Medicare & Medicaid Services": "https://www.cms.gov/",
-     "Mayo Clinic": "https://www.mayoclinic.org/",
-     "WebMD": "https://www.webmd.com/",
-     "MedlinePlus": "https://medlineplus.gov/",
-     "Healthline": "https://www.healthline.com/",
-     "CDC - Centers for Disease Control and Prevention": "https://www.cdc.gov/",
-     "Johns Hopkins Medicine": "https://www.hopkinsmedicine.org/"
- }
-
- if not os.path.exists("history.json"):
-     with open("history.json", "w") as f:
-         json.dump({}, f)
-
- import os
- import base64
- import zipfile
- import streamlit as st
-
- def zip_subdirs(start_dir):
-     for subdir, dirs, files in os.walk(start_dir):
-         if subdir != start_dir: # Skip the root directory
-             zip_filename = os.path.join(start_dir, subdir.split(os.sep)[-1] + '.zip')
-             allFileSummary = ""
-             with zipfile.ZipFile(zip_filename, 'w') as zipf:
-                 for file in files:
-                     file_path = os.path.join(subdir, file)
-                     zipf.write(file_path, os.path.relpath(file_path, start_dir))
-                     allFileSummary=allFileSummary+(f"Added: {file_path}")
-                 st.write(allFileSummary)
-             yield zip_filename
-
- def get_zip_download_link(zip_file):
-     with open(zip_file, 'rb') as f:
-         bytes = f.read()
-     b64 = base64.b64encode(bytes).decode()
-     link_name = os.path.basename(zip_file)
-     href = f'<a href="data:file/zip;base64,{b64}" download="{link_name}">Download: {link_name}</a>'
-     return href
-
-
- @st.cache_resource
- def create_zip_of_files(files):
-     zip_name = "all_files.zip"
-     with zipfile.ZipFile(zip_name, 'w') as zipf:
-         for file in files:
-             zipf.write(file)
-     return zip_name
-
- @st.cache_resource
- def get_zip_download_link(zip_file):
-     with open(zip_file, 'rb') as f:
-         data = f.read()
-     b64 = base64.b64encode(data).decode()
-     href = f'<a href="data:application/zip;base64,{b64}" download="{zip_file}">Download All</a>'
-     return href
-
-

  def download_file(url, local_filename):
      if url.startswith('http://') or url.startswith('https://'):
@@ -79,190 +13,49 @@ def download_file(url, local_filename):
              with requests.get(url, stream=True) as r:
                  r.raise_for_status()
                  with open(local_filename, 'wb') as f:
-                     for chunk in r.iter_content(chunk_size=8192):
+                     for chunk in r.iter_content(chunk_size=8192):
                          f.write(chunk)
              return local_filename
          except requests.exceptions.HTTPError as err:
              print(f"HTTP error occurred: {err}")

- def download_html_and_files(url, subdir):
+ def download_html_and_files(url):
      html_content = requests.get(url).text
      soup = BeautifulSoup(html_content, 'html.parser')
      base_url = urllib.parse.urlunparse(urllib.parse.urlparse(url)._replace(path='', params='', query='', fragment=''))
-
      for link in soup.find_all('a'):
          file_url = urllib.parse.urljoin(base_url, link.get('href'))
-         local_filename = os.path.join(subdir, urllib.parse.urlparse(file_url).path.split('/')[-1])
-
-         if not local_filename.endswith('/') and local_filename != subdir:
+         local_filename = urllib.parse.urlparse(file_url).path.split('/')[-1]
+         if local_filename:
              link['href'] = local_filename
              download_file(file_url, local_filename)
-
-     with open(os.path.join(subdir, "index.html"), "w") as file:
+     with open("index.html", "w") as file:
          file.write(str(soup))

  def list_files(directory_path='.'):
      files = [f for f in os.listdir(directory_path) if os.path.isfile(os.path.join(directory_path, f))]
      return [f for f in files if f not in EXCLUDED_FILES]

- def file_editor(file_path):
-     st.write(f"Editing File: {os.path.basename(file_path)}")
-     file_content = ""
-
-     with open(file_path, "r") as f:
-         file_content = f.read()
-
-     file_content = st.text_area("Edit the file content:", value=file_content, height=250)
-
-     if st.button("💾 Save"):
-         with open(file_path, "w") as f:
-             f.write(file_content)
-         st.success(f"File '{os.path.basename(file_path)}' saved!")
-
-
- def show_file_operations(file_path, sequence_number):
-     #st.write(f"File: {os.path.basename(file_path)}")
-     unique_key = hashlib.md5(file_path.encode()).hexdigest()
-     file_content = ""
-
-     col01, col02, col1, col2, col3 = st.columns(5)
-     with col01:
-         st.write(os.path.basename(file_path))
-     #with col02:
-         #st.write(file_path)
-     with col1:
-         edit_key = f"edit_{unique_key}_{sequence_number}"
-         if st.button(f"✏️ Edit", key=edit_key):
-             file_editor(file_path)
-             #with open(file_path, "r") as f:
-             #    file_content = f.read()
-             #text_area_key = f"text_area_{unique_key}_{sequence_number}"
-             #file_content = st.text_area("Edit the file content:", value=file_content, height=250, key=text_area_key)
-
-     with col2:
-         save_key = f"save_{unique_key}_{sequence_number}"
-         if st.button(f"💾 Save", key=save_key):
-             if file_content: # Ensure file_content is not empty
-                 with open(file_path, "w") as f:
-                     f.write(file_content)
-                 st.success(f"File saved!")
-
-     with col3:
-         delete_key = f"delete_{unique_key}_{sequence_number}"
-         if st.button(f"🗑️ Delete", key=delete_key):
-             os.remove(file_path)
-             st.markdown(f"File deleted!")
-
-
- file_sequence_numbers = {}
-
- def show_download_links(subdir):
-     global file_sequence_numbers
-     for file in list_files(subdir):
-         file_path = os.path.join(subdir, file)
-         if file_path not in file_sequence_numbers:
-             file_sequence_numbers[file_path] = 1
-         else:
-             file_sequence_numbers[file_path] += 1
-         sequence_number = file_sequence_numbers[file_path]
-
-         if os.path.isfile(file_path):
-             #st.markdown(get_download_link(file_path), unsafe_allow_html=True)
-             st.markdown(file_path, unsafe_allow_html=True) # faster than encapsulating file into base64 download link
-             show_file_operations(file_path, sequence_number)
-         else:
-             st.write(f"File not found: {file}")
-
  def get_download_link(file):
      with open(file, "rb") as f:
          bytes = f.read()
      b64 = base64.b64encode(bytes).decode()
-     href = f'<a href="data:file/octet-stream;base64,{b64}" download=\'{os.path.basename(file)}\'>Download: {os.path.basename(file)}</a>'
+     href = f'<a href="data:file/octet-stream;base64,{b64}" download=\'{file}\'>Click to download {file}</a>'
      return href
-
- def main():
-     st.sidebar.title('📥Web Data Downloader📂')
-
-     # Check for query parameters for file editing
-     query_params = st.experimental_get_query_params()
-     file_to_edit = query_params.get('file_to_edit', [None])[0]
-
-     if file_to_edit and os.path.exists(file_to_edit):
-         file_editor(file_to_edit)
-     else:
-         # Selecting URL input method
-         url_input_method = st.sidebar.radio("Choose URL Input Method", ["Enter URL", "Select from List"])
-         url = ""
-         if url_input_method == "Enter URL":
-             url = st.sidebar.text_input('Please enter a Web URL to bulk download text and files')
-         else:
-             selected_site = st.sidebar.selectbox("Select a Website", list(URLS.keys()))
-             url = URLS[selected_site]

-         # Reading or creating history.json
-         if not os.path.exists("history.json"):
-             with open("history.json", "w") as f:
-                 json.dump({}, f)
-
-         with open("history.json", "r") as f:
-             try:
-                 history = json.load(f)
-             except:
-                 print('error')
-
-         # Handling URL submission
-         if url:
-             subdir = hashlib.md5(url.encode()).hexdigest()
-             if not os.path.exists(subdir):
-                 os.makedirs(subdir)
-             if url not in history:
-                 history[url] = subdir
-                 with open("history.json", "w") as f:
-                     json.dump(history, f)
-
-         # Button for downloading content
-         if st.sidebar.button('📥 Get All the Content'):
-             download_html_and_files(url, history[url])
-             show_download_links(history[url])
-
-         # Button for showing download links
-         if st.sidebar.button('📂 Show Download Links'):
-             for subdir in history.values():
-                 show_download_links(subdir)
+ def show_download_links():
+     st.sidebar.write('Here are the files you can download:')
+     for file in list_files():
+         st.sidebar.markdown(get_download_link(file), unsafe_allow_html=True)

+ def main():
+     st.sidebar.title('Web Datasets Bulk Downloader')
+     url = st.sidebar.text_input('Please enter a Web URL to bulk download text and files')
+     if st.sidebar.button('📥 Get All the Content'):
+         download_html_and_files(url)
+         show_download_links()
+     if st.sidebar.button('📂 Show Download Links'):
+         show_download_links()

-         if st.sidebar.button("🗑 Delete All"):
-             # Clear history file
-             with open("history.json", "w") as f:
-                 json.dump({}, f)
-
-             # Delete all files in subdirectories
-             for subdir in glob.glob('*'):
-                 if os.path.isdir(subdir) and subdir not in EXCLUDED_FILES:
-                     for file in os.listdir(subdir):
-                         file_path = os.path.join(subdir, file)
-                         os.remove(file_path)
-                         st.write(f"Deleted: {file_path}")
-                     os.rmdir(subdir) # Remove the empty directory
-
-             st.experimental_rerun()
-
-         if st.sidebar.button("⬇️ Download All"):
-             start_directory = '.' # Current directory
-             for zip_file in zip_subdirs(start_directory):
-                 st.sidebar.markdown(zip_file, unsafe_allow_html=True)
-                 st.sidebar.markdown(get_zip_download_link(zip_file), unsafe_allow_html=True)
-
-         # Expander for showing URL history and download links
-         with st.expander("URL History and Downloaded Files"):
-             try:
-                 for url, subdir in history.items():
-                     st.markdown(f"#### {url}")
-                     # show_download_links(subdir)
-             except:
-                 print('url history is empty')
-         # Update each time to show files we have
-         #for subdir in history.values():
-         #    show_download_links(subdir)
  if __name__ == "__main__":
      main()
 
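For reference, the full post-commit app.py can be read off the diff above: every surviving line is either context or a + line. It is assembled below with indentation restored. The file's first lines and the try: inside download_file fall outside both hunks, so import streamlit as st and import requests are assumptions (both modules are clearly used), while import os comes from the hunk header context. This is a sketch of the resulting file, not an authoritative copy of the commit:

import streamlit as st  # assumed: not shown in the diff, but st.* is used throughout
import requests         # assumed: not shown in the diff, but requests.* is used below
import os
import urllib
import base64
from bs4 import BeautifulSoup

EXCLUDED_FILES = ['app.py', 'requirements.txt', 'pre-requirements.txt', 'packages.txt', 'README.md','.gitattributes', "backup.py","Dockerfile"]

def download_file(url, local_filename):
    # Stream a remote file to disk in 8 KB chunks; non-HTTP(S) links are ignored.
    if url.startswith('http://') or url.startswith('https://'):
        try:  # inferred: this line sits between the two hunks and is not shown in the diff
            with requests.get(url, stream=True) as r:
                r.raise_for_status()
                with open(local_filename, 'wb') as f:
                    for chunk in r.iter_content(chunk_size=8192):
                        f.write(chunk)
            return local_filename
        except requests.exceptions.HTTPError as err:
            print(f"HTTP error occurred: {err}")

def download_html_and_files(url):
    # Fetch the page, download every linked file, rewrite each href to the
    # local copy, and save the rewritten page as index.html.
    html_content = requests.get(url).text
    soup = BeautifulSoup(html_content, 'html.parser')
    base_url = urllib.parse.urlunparse(urllib.parse.urlparse(url)._replace(path='', params='', query='', fragment=''))
    for link in soup.find_all('a'):
        file_url = urllib.parse.urljoin(base_url, link.get('href'))
        local_filename = urllib.parse.urlparse(file_url).path.split('/')[-1]
        if local_filename:
            link['href'] = local_filename
            download_file(file_url, local_filename)
    with open("index.html", "w") as file:
        file.write(str(soup))

def list_files(directory_path='.'):
    files = [f for f in os.listdir(directory_path) if os.path.isfile(os.path.join(directory_path, f))]
    return [f for f in files if f not in EXCLUDED_FILES]

def get_download_link(file):
    # Embed the file as a base64 data URI so the link works inside Streamlit markdown.
    with open(file, "rb") as f:
        bytes = f.read()
    b64 = base64.b64encode(bytes).decode()
    href = f'<a href="data:file/octet-stream;base64,{b64}" download=\'{file}\'>Click to download {file}</a>'
    return href

def show_download_links():
    st.sidebar.write('Here are the files you can download:')
    for file in list_files():
        st.sidebar.markdown(get_download_link(file), unsafe_allow_html=True)

def main():
    st.sidebar.title('Web Datasets Bulk Downloader')
    url = st.sidebar.text_input('Please enter a Web URL to bulk download text and files')
    if st.sidebar.button('📥 Get All the Content'):
        download_html_and_files(url)
        show_download_links()
    if st.sidebar.button('📂 Show Download Links'):
        show_download_links()

if __name__ == "__main__":
    main()

With Streamlit installed, streamlit run app.py serves the sidebar UI. One caveat: urllib.parse resolves here only because requests imports that submodule as a side effect; import urllib.parse would be the more robust spelling.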