trojblue committed · Commit 7914e2a · verified · 1 parent: facdff9

Upload convert_million_songs_dataset.ipynb

Files changed (1):
convert_million_songs_dataset.ipynb  +489 -0
convert_million_songs_dataset.ipynb ADDED
@@ -0,0 +1,489 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "see: http://millionsongdataset.com/pages/getting-dataset/#subset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# !wget http://labrosa.ee.columbia.edu/~dpwe/tmp/millionsongsubset.tar.gz\n",
    "# !tar -xvzf millionsongsubset.tar.gz"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# !pip install pandas h5py pyarrow fastparquet"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import h5py\n",
    "import pandas as pd\n",
    "from tqdm.auto import tqdm\n",
    "\n",
    "import unibox as ub"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Listing local files: 0files [00:00, ?files/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "10000"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "len(ub.ls(\"../data/MillionSongSubset\", [\".h5\"]))"
   ]
  },
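  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "`ub.ls` comes from the third-party `unibox` helper. As a rough cross-check, a stdlib-only sketch (assuming the same `../data/MillionSongSubset` layout) counts the `.h5` files directly:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Illustrative stdlib equivalent of the unibox listing above (not part of the original run).\n",
    "from pathlib import Path\n",
    "\n",
    "# rglob walks the nested shard directories of the subset recursively\n",
    "len(list(Path(\"../data/MillionSongSubset\").rglob(\"*.h5\")))"
   ]
  },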
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 10000/10000 [00:39<00:00, 250.36it/s]\n"
     ]
    }
   ],
   "source": [
    "import os\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "import hdf5_getters\n",
    "import h5py\n",
    "from tqdm import tqdm\n",
    "from concurrent.futures import ProcessPoolExecutor\n",
    "\n",
    "# Define dataset path\n",
    "dataset_path = \"/lv0/yada/dataproc5/data/MillionSongSubset\"\n",
    "\n",
    "# Function to extract all available fields from an HDF5 file\n",
    "def extract_song_data(file_path):\n",
    "    \"\"\"Extracts all available fields from an HDF5 song file using hdf5_getters.\"\"\"\n",
    "    song_data = {}\n",
    "\n",
    "    try:\n",
    "        with hdf5_getters.open_h5_file_read(file_path) as h5:\n",
    "            # Get all getter functions from hdf5_getters\n",
    "            getters = [func for func in dir(hdf5_getters) if func.startswith(\"get_\")]\n",
    "\n",
    "            for getter in getters:\n",
    "                try:\n",
    "                    # Dynamically call each getter function\n",
    "                    value = getattr(hdf5_getters, getter)(h5)\n",
    "\n",
    "                    # Optimize conversions\n",
    "                    if isinstance(value, np.ndarray):\n",
    "                        value = value.tolist()\n",
    "                    elif isinstance(value, bytes):\n",
    "                        value = value.decode()\n",
    "\n",
    "                    # Store in dictionary with a cleaned-up key name\n",
    "                    song_data[getter[4:]] = value\n",
    "\n",
    "                except Exception:\n",
    "                    continue  # Skip errors but don't slow down\n",
    "\n",
    "    except Exception as e:\n",
    "        print(f\"Error processing {file_path}: {e}\")\n",
    "\n",
    "    return song_data\n",
    "\n",
    "# Function to process multiple files in parallel\n",
    "def process_files_in_parallel(h5_files, num_workers=8):\n",
    "    \"\"\"Processes multiple .h5 files in parallel.\"\"\"\n",
    "    all_songs = []\n",
    "\n",
    "    with ProcessPoolExecutor(max_workers=num_workers) as executor:\n",
    "        for song_data in tqdm(executor.map(extract_song_data, h5_files), total=len(h5_files)):\n",
    "            if song_data:\n",
    "                all_songs.append(song_data)\n",
    "\n",
    "    return all_songs\n",
    "\n",
    "# Find all .h5 files\n",
    "h5_files = [os.path.join(root, file) for root, _, files in os.walk(dataset_path) for file in files if file.endswith(\".h5\")]\n",
    "\n",
    "# Process files in parallel\n",
    "all_songs = process_files_in_parallel(h5_files, num_workers=24)\n",
    "\n",
    "# Convert to Pandas DataFrame\n",
    "df = pd.DataFrame(all_songs)"
   ]
  },
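  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Before trusting the full parallel pass, a single-file spot check helps catch path or getter problems early. This is an illustrative sketch, not part of the original run; the keys follow the `get_` prefix stripping above:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Illustrative spot check (not part of the original run):\n",
    "# extract a single file and peek at a few well-known fields.\n",
    "sample = extract_song_data(h5_files[0])\n",
    "print(len(sample), \"fields extracted\")\n",
    "print({k: sample[k] for k in (\"title\", \"artist_name\", \"year\") if k in sample})"
   ]
  },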
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(10000, 55)\n",
      "Index(['analysis_sample_rate', 'artist_7digitalid', 'artist_familiarity',\n",
      "       'artist_hotttnesss', 'artist_id', 'artist_latitude', 'artist_location',\n",
      "       'artist_longitude', 'artist_mbid', 'artist_mbtags',\n",
      "       'artist_mbtags_count', 'artist_name', 'artist_playmeid', 'artist_terms',\n",
      "       'artist_terms_freq', 'artist_terms_weight', 'audio_md5',\n",
      "       'bars_confidence', 'bars_start', 'beats_confidence', 'beats_start',\n",
      "       'danceability', 'duration', 'end_of_fade_in', 'energy', 'key',\n",
      "       'key_confidence', 'loudness', 'mode', 'mode_confidence', 'num_songs',\n",
      "       'release', 'release_7digitalid', 'sections_confidence',\n",
      "       'sections_start', 'segments_confidence', 'segments_loudness_max',\n",
      "       'segments_loudness_max_time', 'segments_loudness_start',\n",
      "       'segments_pitches', 'segments_start', 'segments_timbre',\n",
      "       'similar_artists', 'song_hotttnesss', 'song_id', 'start_of_fade_out',\n",
      "       'tatums_confidence', 'tatums_start', 'tempo', 'time_signature',\n",
      "       'time_signature_confidence', 'title', 'track_7digitalid', 'track_id',\n",
      "       'year'],\n",
      "      dtype='object')\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "   analysis_sample_rate  artist_7digitalid  artist_familiarity  \\\n",
       "0                 22050             174717            0.450743   \n",
       "1                 22050               7173            0.392710   \n",
       "2                 22050               2759            0.602767   \n",
       "\n",
       "   artist_hotttnesss           artist_id  artist_latitude artist_location  \\\n",
       "0           0.331215  AR1DGSO1187FB59B15              NaN                  \n",
       "1           0.311789  ARO6WZY1187FB3A86E              NaN                  \n",
       "2           0.463193  ARH1LE01187B98D68D              NaN                  \n",
       "\n",
       "   artist_longitude                           artist_mbid  \\\n",
       "0               NaN  fe4e71a9-ddb9-47b5-9e2e-ec53862a91c6   \n",
       "1               NaN  23f7ad3f-a189-4a1c-9991-4763ded495a7   \n",
       "2               NaN  3df3a779-a7b1-4362-a8b4-9ae6c7eb623d   \n",
       "\n",
       "                  artist_mbtags  ...  start_of_fade_out  \\\n",
       "0                            []  ...            266.879   \n",
       "1                            []  ...            321.300   \n",
       "2  [b'american', b'soundtrack']  ...             67.895   \n",
       "\n",
       "                                   tatums_confidence  \\\n",
       "0  [0.0, 0.0, 0.896, 0.819, 0.664, 0.693, 0.67, 0...   \n",
       "1  [0.451, 0.426, 0.396, 0.32, 0.255, 0.204, 0.15...   \n",
       "2  [0.056, 0.058, 0.056, 0.059, 0.097, 0.093, 0.0...   \n",
       "\n",
       "                                        tatums_start    tempo  time_signature  \\\n",
       "0  [0.16738, 0.44887, 0.73036, 1.09072, 1.44407, ...  107.053               4   \n",
       "1  [0.05024, 0.25641, 0.46357, 0.66974, 0.87691, ...  149.853               3   \n",
       "2  [0.54095, 0.86496, 1.20205, 1.52933, 1.85662, ...   91.249               4   \n",
       "\n",
       "   time_signature_confidence                title  track_7digitalid  \\\n",
       "0                      0.657                 Jody           2555900   \n",
       "1                      1.000  Turntable Terrorist           5591259   \n",
       "2                      0.568        Porcelain Man           7341937   \n",
       "\n",
       "             track_id  year  \n",
       "0  TRAHHUN128F4227029     0  \n",
       "1  TRAHHMM128F932D5D9  1995  \n",
       "2  TRAHHJY12903CA73BD  1999  \n",
       "\n",
       "[3 rows x 55 columns]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "ub.peeks(df)"
   ]
  },
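  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Note the byte strings that survive inside list columns in the preview above (e.g. `artist_mbtags` holds `[b'american', b'soundtrack']`): the scalar `bytes` decode in `extract_song_data` does not reach `bytes` elements nested in lists produced by `ndarray.tolist()`. A cleanup sketch (illustrative, not part of the original run) that can be mapped over affected columns:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Illustrative cleanup sketch (not part of the original run):\n",
    "# recursively decode bytes nested inside list cells, e.g. artist_mbtags.\n",
    "def decode_cell(value):\n",
    "    if isinstance(value, bytes):\n",
    "        return value.decode()\n",
    "    if isinstance(value, list):\n",
    "        return [decode_cell(v) for v in value]\n",
    "    return value\n",
    "\n",
    "df[\"artist_mbtags\"] = df[\"artist_mbtags\"].map(decode_cell)"
   ]
  },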
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2025-02-19 14:01:45 [INFO] HuggingFaceDatasetsBackend.data_to_hub: Uploading dataset to HF repo trojblue/million-song-subset\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "Uploading the dataset shards:   0%|          | 0/5 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "Creating parquet from Arrow format:   0%|          | 0/20 [00:00<?, ?ba/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "Creating parquet from Arrow format:   0%|          | 0/20 [00:00<?, ?ba/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "Creating parquet from Arrow format:   0%|          | 0/20 [00:00<?, ?ba/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "Creating parquet from Arrow format:   0%|          | 0/20 [00:00<?, ?ba/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "Creating parquet from Arrow format:   0%|          | 0/20 [00:00<?, ?ba/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2025-02-19 14:02:47 [INFO] saves: DataFrame saved (HF dataset) to \"hf://trojblue/million-song-subset\" in 62.75s\n"
     ]
    }
   ],
   "source": [
    "ub.saves(df, \"hf://trojblue/million-song-subset\", private=False)"
   ]
  }
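  ,
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "As a round-trip check, the uploaded dataset should load back with the `datasets` library (an illustrative sketch, not part of the original run; the repo id is the one used above):"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Illustrative round-trip check (not part of the original run).\n",
    "from datasets import load_dataset\n",
    "\n",
    "ds = load_dataset(\"trojblue/million-song-subset\", split=\"train\")\n",
    "print(ds)"
   ]
  }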
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "base",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.16"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}