{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "see: http://millionsongdataset.com/pages/getting-dataset/#subset" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# !wget http://labrosa.ee.columbia.edu/~dpwe/tmp/millionsongsubset.tar.gz\n", "# !tar -xvzf millionsongsubset.tar.gz" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# !pip install pandas h5py pyarrow fastparquet" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "import os\n", "import h5py\n", "import pandas as pd\n", "from tqdm.auto import tqdm\n", "\n", "import unibox as ub" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "a7418816c46f4f5b95a8c7e307b6e569", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Listing local files: 0files [00:00, ?files/s]" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/plain": [ "10000" ] }, "execution_count": 4, "metadata": {}, "output_type": "execute_result" } ], "source": [ "len(ub.ls(\"../data/MillionSongSubset\", [\".h5\"]))" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "100%|██████████| 10000/10000 [00:39<00:00, 250.36it/s]\n" ] } ], "source": [ "import os\n", "import pandas as pd\n", "import numpy as np\n", "import hdf5_getters\n", "import h5py\n", "from tqdm import tqdm\n", "from concurrent.futures import ProcessPoolExecutor\n", "\n", "# Define dataset path\n", "dataset_path = \"/lv0/yada/dataproc5/data/MillionSongSubset\"\n", "\n", "# Function to extract all available fields from an HDF5 file\n", "def extract_song_data(file_path):\n", " \"\"\"Extracts all available fields from an HDF5 song file using hdf5_getters.\"\"\"\n", " song_data = {}\n", "\n", " try:\n", " with hdf5_getters.open_h5_file_read(file_path) as h5:\n", " # Get all getter functions from hdf5_getters\n", " getters = [func for func in dir(hdf5_getters) if func.startswith(\"get_\")]\n", "\n", " for getter in getters:\n", " try:\n", " # Dynamically call each getter function\n", " value = getattr(hdf5_getters, getter)(h5)\n", "\n", " # Optimize conversions\n", " if isinstance(value, np.ndarray):\n", " value = value.tolist()\n", " elif isinstance(value, bytes):\n", " value = value.decode()\n", "\n", " # Store in dictionary with a cleaned-up key name\n", " song_data[getter[4:]] = value\n", "\n", " except Exception:\n", " continue # Skip errors but don't slow down\n", "\n", " except Exception as e:\n", " print(f\"Error processing {file_path}: {e}\")\n", " \n", " return song_data\n", "\n", "# Function to process multiple files in parallel\n", "def process_files_in_parallel(h5_files, num_workers=8):\n", " \"\"\"Processes multiple .h5 files in parallel.\"\"\"\n", " all_songs = []\n", "\n", " with ProcessPoolExecutor(max_workers=num_workers) as executor:\n", " for song_data in tqdm(executor.map(extract_song_data, h5_files), total=len(h5_files)):\n", " if song_data:\n", " all_songs.append(song_data)\n", " \n", " return all_songs\n", "\n", "# Find all .h5 files\n", "h5_files = [os.path.join(root, file) for root, _, files in os.walk(dataset_path) for file in files if file.endswith(\".h5\")]\n", "\n", "# Process files in parallel\n", "all_songs = process_files_in_parallel(h5_files, num_workers=24)\n", "\n", "# Convert to Pandas DataFrame\n", "df = 
{ "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "(10000, 55)\n", "Index(['analysis_sample_rate', 'artist_7digitalid', 'artist_familiarity',\n", " 'artist_hotttnesss', 'artist_id', 'artist_latitude', 'artist_location',\n", " 'artist_longitude', 'artist_mbid', 'artist_mbtags',\n", " 'artist_mbtags_count', 'artist_name', 'artist_playmeid', 'artist_terms',\n", " 'artist_terms_freq', 'artist_terms_weight', 'audio_md5',\n", " 'bars_confidence', 'bars_start', 'beats_confidence', 'beats_start',\n", " 'danceability', 'duration', 'end_of_fade_in', 'energy', 'key',\n", " 'key_confidence', 'loudness', 'mode', 'mode_confidence', 'num_songs',\n", " 'release', 'release_7digitalid', 'sections_confidence',\n", " 'sections_start', 'segments_confidence', 'segments_loudness_max',\n", " 'segments_loudness_max_time', 'segments_loudness_start',\n", " 'segments_pitches', 'segments_start', 'segments_timbre',\n", " 'similar_artists', 'song_hotttnesss', 'song_id', 'start_of_fade_out',\n", " 'tatums_confidence', 'tatums_start', 'tempo', 'time_signature',\n", " 'time_signature_confidence', 'title', 'track_7digitalid', 'track_id',\n", " 'year'],\n", " dtype='object')\n" ] }, { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
analysis_sample_rateartist_7digitalidartist_familiarityartist_hotttnesssartist_idartist_latitudeartist_locationartist_longitudeartist_mbidartist_mbtags...start_of_fade_outtatums_confidencetatums_starttempotime_signaturetime_signature_confidencetitletrack_7digitalidtrack_idyear
0220501747170.4507430.331215AR1DGSO1187FB59B15NaNNaNfe4e71a9-ddb9-47b5-9e2e-ec53862a91c6[]...266.879[0.0, 0.0, 0.896, 0.819, 0.664, 0.693, 0.67, 0...[0.16738, 0.44887, 0.73036, 1.09072, 1.44407, ...107.05340.657Jody2555900TRAHHUN128F42270290
12205071730.3927100.311789ARO6WZY1187FB3A86ENaNNaN23f7ad3f-a189-4a1c-9991-4763ded495a7[]...321.300[0.451, 0.426, 0.396, 0.32, 0.255, 0.204, 0.15...[0.05024, 0.25641, 0.46357, 0.66974, 0.87691, ...149.85331.000Turntable Terrorist5591259TRAHHMM128F932D5D91995
22205027590.6027670.463193ARH1LE01187B98D68DNaNNaN3df3a779-a7b1-4362-a8b4-9ae6c7eb623d[b'american', b'soundtrack']...67.895[0.056, 0.058, 0.056, 0.059, 0.097, 0.093, 0.0...[0.54095, 0.86496, 1.20205, 1.52933, 1.85662, ...91.24940.568Porcelain Man7341937TRAHHJY12903CA73BD1999
\n", "

3 rows × 55 columns

\n", "
" ], "text/plain": [ " analysis_sample_rate artist_7digitalid artist_familiarity \\\n", "0 22050 174717 0.450743 \n", "1 22050 7173 0.392710 \n", "2 22050 2759 0.602767 \n", "\n", " artist_hotttnesss artist_id artist_latitude artist_location \\\n", "0 0.331215 AR1DGSO1187FB59B15 NaN \n", "1 0.311789 ARO6WZY1187FB3A86E NaN \n", "2 0.463193 ARH1LE01187B98D68D NaN \n", "\n", " artist_longitude artist_mbid \\\n", "0 NaN fe4e71a9-ddb9-47b5-9e2e-ec53862a91c6 \n", "1 NaN 23f7ad3f-a189-4a1c-9991-4763ded495a7 \n", "2 NaN 3df3a779-a7b1-4362-a8b4-9ae6c7eb623d \n", "\n", " artist_mbtags ... start_of_fade_out \\\n", "0 [] ... 266.879 \n", "1 [] ... 321.300 \n", "2 [b'american', b'soundtrack'] ... 67.895 \n", "\n", " tatums_confidence \\\n", "0 [0.0, 0.0, 0.896, 0.819, 0.664, 0.693, 0.67, 0... \n", "1 [0.451, 0.426, 0.396, 0.32, 0.255, 0.204, 0.15... \n", "2 [0.056, 0.058, 0.056, 0.059, 0.097, 0.093, 0.0... \n", "\n", " tatums_start tempo time_signature \\\n", "0 [0.16738, 0.44887, 0.73036, 1.09072, 1.44407, ... 107.053 4 \n", "1 [0.05024, 0.25641, 0.46357, 0.66974, 0.87691, ... 149.853 3 \n", "2 [0.54095, 0.86496, 1.20205, 1.52933, 1.85662, ... 91.249 4 \n", "\n", " time_signature_confidence title track_7digitalid \\\n", "0 0.657 Jody 2555900 \n", "1 1.000 Turntable Terrorist 5591259 \n", "2 0.568 Porcelain Man 7341937 \n", "\n", " track_id year \n", "0 TRAHHUN128F4227029 0 \n", "1 TRAHHMM128F932D5D9 1995 \n", "2 TRAHHJY12903CA73BD 1999 \n", "\n", "[3 rows x 55 columns]" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "ub.peeks(df)" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\u001b[37m2025-02-19 14:01:45 [INFO] HuggingFaceDatasetsBackend.data_to_hub: Uploading dataset to HF repo trojblue/million-song-subset\u001b[0m\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "c6e47a2259e54cb19dc37e6762883cbc", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Uploading the dataset shards: 0%| | 0/5 [00:00