import streamlit as st
import requests
import os
import urllib.parse
import base64
from bs4 import BeautifulSoup
import hashlib
import json
import uuid
import glob
import zipfile
EXCLUDED_FILES = ['app.py', 'requirements.txt', 'pre-requirements.txt', 'packages.txt', 'README.md', '.gitattributes', 'backup.py', 'Dockerfile']
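# Preset sites offered by the sidebar's "Select from List" option; each value is the page that gets scraped.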
URLS = {
    "Chordify - Play Along Chords": "https://chordify.net/",
    "National Guitar Academy - Guitar Learning": "https://www.guitaracademy.com/",
    "Ultimate Guitar - Massive Song Database": "https://www.ultimate-guitar.com/",
    "Wolf Alice": "https://www.chordie.com/song.php/songartist/Wolf+Alice/index.html",
    "Everclear": "https://www.chordie.com/song.php/songartist/Everclear/index.html",
    "Jungle": "https://www.ultimate-guitar.com/artist/jungle_47745",
    "Miley Cyrus": "https://www.ultimate-guitar.com/search.php?title=mile+cyrus&spelling=Mylie+cyrus",
    "Kanye": "https://www.ultimate-guitar.com/search.php?search_type=title&value=Kanye%20west",
    "Cat Stevens": "https://www.ultimate-guitar.com/search.php?search_type=title&value=cat%20stevens",
    "Metric": "https://www.ultimate-guitar.com/search.php?search_type=title&value=Metric",
    "John Lennon": "https://www.ultimate-guitar.com/search.php?search_type=title&value=John%20Lennon",
}
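# Bootstrap an empty URL-to-subdirectory history file at import time; main() re-reads it on every rerun.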
if not os.path.exists("history.json"):
    with open("history.json", "w") as f:
        json.dump({}, f)
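# Walk every subdirectory under start_dir, zip its files into <name>.zip at the top level,
# and yield each archive path as it is written.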
def zip_subdirs(start_dir):
    for subdir, dirs, files in os.walk(start_dir):
        if subdir != start_dir:  # Skip the root directory
            zip_filename = os.path.join(start_dir, subdir.split(os.sep)[-1] + '.zip')
            with zipfile.ZipFile(zip_filename, 'w') as zipf:
                for file in files:
                    file_path = os.path.join(subdir, file)
                    zipf.write(file_path, os.path.relpath(file_path, start_dir))
                    st.write(f"Added: {file_path}")
            yield zip_filename
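# Build a base64 data-URI anchor so the browser can download a zip straight from the page
# (rendered with st.markdown(..., unsafe_allow_html=True)).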
def get_zip_download_link(zip_file):
    with open(zip_file, 'rb') as f:
        data = f.read()
    b64 = base64.b64encode(data).decode()
    link_name = os.path.basename(zip_file)
    href = f'<a href="data:application/zip;base64,{b64}" download="{link_name}">Download: {link_name}</a>'
    return href
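# Bundle an arbitrary list of files into a single all_files.zip; cached so repeated reruns reuse the same archive.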
@st.cache_resource
def create_zip_of_files(files):
    zip_name = "all_files.zip"
    with zipfile.ZipFile(zip_name, 'w') as zipf:
        for file in files:
            zipf.write(file)
    return zip_name
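# Cached variant that overrides the definition above: same data-URI link, but with a fixed
# "Download All" label used by the sidebar's bulk-download button.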
@st.cache_resource
def get_zip_download_link(zip_file):
    with open(zip_file, 'rb') as f:
        data = f.read()
    b64 = base64.b64encode(data).decode()
    href = f'<a href="data:application/zip;base64,{b64}" download="{os.path.basename(zip_file)}">Download All</a>'
    return href
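# Stream a remote file to disk in 8 KB chunks; non-HTTP(S) URLs are ignored and HTTP errors are logged to stdout.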
def download_file(url, local_filename):
    if url.startswith('http://') or url.startswith('https://'):
        try:
            with requests.get(url, stream=True) as r:
                r.raise_for_status()
                with open(local_filename, 'wb') as f:
                    for chunk in r.iter_content(chunk_size=8192):
                        f.write(chunk)
            return local_filename
        except requests.exceptions.HTTPError as err:
            print(f"HTTP error occurred: {err}")
def download_html_and_files(url, subdir):
    html_content = requests.get(url).text
    soup = BeautifulSoup(html_content, 'html.parser')
    base_url = urllib.parse.urlunparse(urllib.parse.urlparse(url)._replace(path='', params='', query='', fragment=''))
    for link in soup.find_all('a', href=True):  # skip anchors without an href to avoid joining None
        file_url = urllib.parse.urljoin(base_url, link.get('href'))
        local_filename = os.path.join(subdir, urllib.parse.urlparse(file_url).path.split('/')[-1])
        if not local_filename.endswith('/') and local_filename != subdir:
            link['href'] = local_filename
            download_file(file_url, local_filename)
    with open(os.path.join(subdir, "index.html"), "w") as file:
        file.write(str(soup))
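# List plain files in a directory, skipping the app's own source and config files.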
def list_files(directory_path='.'):
    files = [f for f in os.listdir(directory_path) if os.path.isfile(os.path.join(directory_path, f))]
    return [f for f in files if f not in EXCLUDED_FILES]
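# Stand-alone editor view used when the page is opened with a ?file_to_edit= query parameter.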
def file_editor(file_path):
    st.write(f"Editing File: {os.path.basename(file_path)}")
    file_content = ""
    with open(file_path, "r") as f:
        file_content = f.read()
    file_content = st.text_area("Edit the file content:", value=file_content, height=250)
    if st.button("💾 Save"):
        with open(file_path, "w") as f:
            f.write(file_content)
        st.success(f"File '{os.path.basename(file_path)}' saved!")
def show_file_operations(file_path, sequence_number):
    #st.write(f"File: {os.path.basename(file_path)}")
    unique_key = hashlib.md5(file_path.encode()).hexdigest()
    file_content = ""
    col01, col02, col1, col2, col3 = st.columns(5)
    with col01:
        st.write(os.path.basename(file_path))
    #with col02:
    #    st.write(file_path)
    with col1:
        edit_key = f"edit_{unique_key}_{sequence_number}"
        if st.button("✏️ Edit", key=edit_key):
            with open(file_path, "r") as f:
                file_content = f.read()
            text_area_key = f"text_area_{unique_key}_{sequence_number}"
            file_content = st.text_area("Edit the file content:", value=file_content, height=250, key=text_area_key)
    with col2:
        save_key = f"save_{unique_key}_{sequence_number}"
        if st.button("💾 Save", key=save_key):
            if file_content:  # Ensure file_content is not empty
                with open(file_path, "w") as f:
                    f.write(file_content)
                st.success("File saved!")
    with col3:
        delete_key = f"delete_{unique_key}_{sequence_number}"
        if st.button("🗑️ Delete", key=delete_key):
            os.remove(file_path)
            st.markdown("File deleted!")
file_sequence_numbers = {}
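# Render a download link plus the Edit/Save/Delete controls for every file in a subdirectory.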
def show_download_links(subdir):
    global file_sequence_numbers
    for file in list_files(subdir):
        file_path = os.path.join(subdir, file)
        if file_path not in file_sequence_numbers:
            file_sequence_numbers[file_path] = 1
        else:
            file_sequence_numbers[file_path] += 1
        sequence_number = file_sequence_numbers[file_path]
        if os.path.isfile(file_path):
            st.markdown(get_download_link(file_path), unsafe_allow_html=True)
            show_file_operations(file_path, sequence_number)
        else:
            st.write(f"File not found: {file}")
def get_download_link(file):
    with open(file, "rb") as f:
        data = f.read()
    b64 = base64.b64encode(data).decode()
    href = f'<a href="data:application/octet-stream;base64,{b64}" download="{os.path.basename(file)}">Download: {os.path.basename(file)}</a>'
    return href
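# Streamlit entry point: pick or enter a URL, keep a history.json of scraped sites, and expose
# download / show / delete / zip-everything actions in the sidebar.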
def main():
    st.sidebar.title('Web Datasets Bulk Downloader')

    # Check for query parameters for file editing
    query_params = st.experimental_get_query_params()
    file_to_edit = query_params.get('file_to_edit', [None])[0]
    if file_to_edit and os.path.exists(file_to_edit):
        file_editor(file_to_edit)
    else:
        # Selecting URL input method
        url_input_method = st.sidebar.radio("Choose URL Input Method", ["Enter URL", "Select from List"])
        url = ""
        if url_input_method == "Enter URL":
            url = st.sidebar.text_input('Please enter a Web URL to bulk download text and files')
        else:
            selected_site = st.sidebar.selectbox("Select a Website", list(URLS.keys()))
            url = URLS[selected_site]

        # Reading or creating history.json
        if not os.path.exists("history.json"):
            with open("history.json", "w") as f:
                json.dump({}, f)
        with open("history.json", "r") as f:
            try:
                history = json.load(f)
            except json.JSONDecodeError:
                history = {}  # Fall back to an empty history if the file is corrupt

        # Handling URL submission
        if url:
            subdir = hashlib.md5(url.encode()).hexdigest()
            if not os.path.exists(subdir):
                os.makedirs(subdir)
            if url not in history:
                history[url] = subdir
                with open("history.json", "w") as f:
                    json.dump(history, f)

        # Button for downloading content
        if st.sidebar.button('📥 Get All the Content'):
            download_html_and_files(url, history[url])
            show_download_links(history[url])

        # Button for showing download links
        if st.sidebar.button('📂 Show Download Links'):
            for subdir in history.values():
                show_download_links(subdir)

        if st.sidebar.button("🗑 Delete All"):
            # Clear history file
            with open("history.json", "w") as f:
                json.dump({}, f)
            # Delete all files in subdirectories
            for subdir in glob.glob('*'):
                if os.path.isdir(subdir) and subdir not in EXCLUDED_FILES:
                    for file in os.listdir(subdir):
                        file_path = os.path.join(subdir, file)
                        os.remove(file_path)
                        st.write(f"Deleted: {file_path}")
                    os.rmdir(subdir)  # Remove the empty directory
            st.experimental_rerun()

        if st.sidebar.button("⬇️ Download All"):
            start_directory = '.'  # Current directory
            for zip_file in zip_subdirs(start_directory):
                st.sidebar.markdown(get_zip_download_link(zip_file), unsafe_allow_html=True)

        # Expander for showing URL history and download links
        with st.expander("URL History and Downloaded Files"):
            try:
                for url, subdir in history.items():
                    st.markdown(f"#### {url}")
                    show_download_links(subdir)
            except Exception:
                print('url history is empty')

        # Update each time to show files we have
        for subdir in history.values():
            show_download_links(subdir)
if __name__ == "__main__":
    main()