import streamlit as st
import requests
import os
import urllib.parse
import base64
from bs4 import BeautifulSoup

# Files that belong to the app itself and should never be listed for download.
EXCLUDED_FILES = ['app.py', 'requirements.txt', 'pre-requirements.txt',
                  'packages.txt', 'README.md', '.gitattributes', 'backup.py',
                  'Dockerfile']

def download_file(url, local_filename):
    """Stream a remote file to local_filename; return the name on success."""
    if url.startswith('http://') or url.startswith('https://'):
        try:
            with requests.get(url, stream=True) as r:
                r.raise_for_status()
                with open(local_filename, 'wb') as f:
                    for chunk in r.iter_content(chunk_size=8192):
                        f.write(chunk)
            return local_filename
        except requests.exceptions.HTTPError as err:
            print(f"HTTP error occurred: {err}")
    return None
def download_html_and_files(url):
    """Fetch a page, download every file it links to, and save a rewritten
    index.html whose links point at the local copies."""
    html_content = requests.get(url).text
    soup = BeautifulSoup(html_content, 'html.parser')
    # Strip path/params/query/fragment so only scheme://host remains.
    base_url = urllib.parse.urlunparse(
        urllib.parse.urlparse(url)._replace(path='', params='', query='', fragment=''))
    for link in soup.find_all('a'):
        href = link.get('href')
        if not href:
            continue  # skip anchors without an href attribute
        file_url = urllib.parse.urljoin(base_url, href)
        local_filename = urllib.parse.urlparse(file_url).path.split('/')[-1]
        if local_filename:
            link['href'] = local_filename
            download_file(file_url, local_filename)
    with open("index.html", "w") as file:
        file.write(str(soup))
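
# A quick sketch of what the URL handling above produces, using a hypothetical
# page URL:
#
#   parts = urllib.parse.urlparse('https://example.com/docs/index.html?q=1')
#   urllib.parse.urlunparse(parts._replace(path='', params='', query='', fragment=''))
#   # -> 'https://example.com'
#   urllib.parse.urljoin('https://example.com', 'files/a.txt')
#   # -> 'https://example.com/files/a.txt'
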
def list_files(directory_path='.'):
    files = [f for f in os.listdir(directory_path)
             if os.path.isfile(os.path.join(directory_path, f))]
    return [f for f in files if f not in EXCLUDED_FILES]

def get_download_link(file):
    """Return an <a> tag that embeds the file as a base64 data URI."""
    with open(file, "rb") as f:
        data = f.read()  # avoid shadowing the built-in name `bytes`
    b64 = base64.b64encode(data).decode()
    return f'<a href="data:file/octet-stream;base64,{b64}" download="{file}">{file}</a>'
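
# Alternative sketch, assuming a Streamlit version (>= 0.88) that provides
# st.download_button; it avoids embedding base64 HTML in the page:
#
#   with open(file, 'rb') as f:
#       st.sidebar.download_button(label=file, data=f.read(), file_name=file)
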
def show_download_links():
    st.sidebar.write('Here are the files you can download:')
    for file in list_files():
        st.sidebar.markdown(get_download_link(file), unsafe_allow_html=True)

def main():
    st.sidebar.title('Web Datasets Bulk Downloader')
    url = st.sidebar.text_input('Please enter a Web URL to bulk download text and files')
    if st.sidebar.button('📥 Get All the Content') and url:
        download_html_and_files(url)
        show_download_links()
    if st.sidebar.button('📂 Show Download Links'):
        show_download_links()

if __name__ == "__main__":
    main()
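
# To run locally (assuming Streamlit and the other imports are installed):
#   streamlit run app.py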