{
 "cells": [
  { "cell_type": "markdown", "metadata": {}, "source": [
   "# Install dependencies"
  ] },
  { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
   "%pip install pandas pyarrow tqdm requests ipywidgets huggingface_hub"
  ] },
  { "cell_type": "markdown", "metadata": {}, "source": [
   "# Import dependencies"
  ] },
  { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
   "import os\n",
   "from pathlib import Path\n",
   "from datetime import datetime\n",
   "import requests\n",
   "import zipfile\n",
   "import concurrent.futures\n",
   "import pandas as pd\n",
   "from functools import partial\n",
   "from tqdm.auto import tqdm\n",
   "import ipywidgets as widgets\n",
   "from IPython.display import display\n",
   "import time"
  ] },
  { "cell_type": "markdown", "metadata": {}, "source": [
   "# Download the Binance dataset"
  ] },
  { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
   "# Generate every month between the start and end dates\n",
   "def generate_months(start_date, end_date):\n",
   "    current = start_date\n",
   "    while current <= end_date:\n",
   "        yield current\n",
   "        # Advance to the next month\n",
   "        if current.month == 12:\n",
   "            current = datetime(current.year + 1, 1, 1)\n",
   "        else:\n",
   "            current = datetime(current.year, current.month + 1, 1)\n",
   "\n",
   "# Download a single file, updating its progress bar and speed label\n",
   "def download_file(url, dest_path, progress_bar, speed_label):\n",
   "    try:\n",
   "        with requests.get(url, stream=True) as response:\n",
   "            response.raise_for_status()  # Raise an exception for HTTP errors\n",
   "            total_size = int(response.headers.get('content-length', 0))\n",
   "            block_size = 1024  # 1 kibibyte\n",
   "            downloaded = 0\n",
   "            start_time = time.time()\n",
   "            with open(dest_path, 'wb') as f:\n",
   "                for data in response.iter_content(block_size):\n",
   "                    if data:\n",
   "                        f.write(data)\n",
   "                        downloaded += len(data)\n",
   "                        progress_bar.value = downloaded\n",
   "                        elapsed_time = time.time() - start_time\n",
   "                        speed = downloaded / elapsed_time if elapsed_time > 0 else 0\n",
   "                        speed_label.value = f\"Speed: {speed/1024:.2f} KB/s\"\n",
   "        progress_bar.description = f\"✅ {dest_path.name}\"\n",
   "    except requests.exceptions.HTTPError as http_err:\n",
   "        progress_bar.description = f\"❌ {dest_path.name}\"\n",
   "        speed_label.value = f\"HTTP error: {http_err}\"\n",
   "    except Exception as err:\n",
   "        progress_bar.description = f\"❌ {dest_path.name}\"\n",
   "        speed_label.value = f\"Error: {err}\"\n",
   "\n",
   "# Start and end dates\n",
   "start_date = datetime(2017, 8, 1)  # August 2017\n",
   "end_date = datetime(2024, 9, 1)    # September 2024\n",
   "\n",
   "# Base URL pattern\n",
   "base_url = \"https://data.binance.vision/data/spot/monthly/trades/BTCUSDT/BTCUSDT-trades-{year}-{month:02d}.zip\"\n",
   "\n",
   "# Download directory\n",
   "download_dir = Path(\"./dataset-raw\")\n",
   "download_dir.mkdir(parents=True, exist_ok=True)\n",
   "\n",
   "# List of download tasks\n",
   "download_tasks = []\n",
   "for single_date in generate_months(start_date, end_date):\n",
   "    year = single_date.year\n",
   "    month = single_date.month\n",
   "    file_suffix = f\"{year}-{month:02d}\"\n",
   "\n",
   "    # File paths\n",
   "    csv_file = download_dir / f\"BTCUSDT-trades-{file_suffix}.csv\"\n",
   "    zip_file = download_dir / f\"BTCUSDT-trades-{file_suffix}.zip\"\n",
   "\n",
   "    # Skip if the CSV already exists\n",
   "    if csv_file.exists():\n",
   "        print(f\"📄 CSV already exists: {csv_file.name}. Skipping download.\")\n",
   "        continue\n",
   "    # Skip if the ZIP already exists\n",
   "    elif zip_file.exists():\n",
   "        print(f\"📦 ZIP already exists: {zip_file.name}. Skipping download.\")\n",
   "        continue\n",
   "    else:\n",
   "        # Build the download URL\n",
   "        url = base_url.format(year=year, month=month)\n",
   "        download_tasks.append((url, zip_file))\n",
   "\n",
   "# Maximum number of worker threads\n",
   "max_workers = 1  # Adjust according to your system and network capacity\n",
   "\n",
   "# Main function that manages the downloads\n",
   "def main_download(download_tasks, max_workers=5):\n",
   "    if not download_tasks:\n",
   "        print(\"✅ No download tasks to run.\")\n",
   "        return\n",
   "\n",
   "    # Create widgets for each download\n",
   "    download_widgets = []\n",
   "    for url, dest_path in download_tasks:\n",
   "        speed_label = widgets.Label(value=\"Speed: 0 KB/s\")\n",
   "        progress_bar = widgets.IntProgress(\n",
   "            value=0,\n",
   "            min=0,\n",
   "            max=1,  # Updated once the total size is known\n",
   "            description=dest_path.name,\n",
   "            bar_style='',  # 'success', 'info', 'warning', 'danger' or ''\n",
   "            orientation='horizontal'\n",
   "        )\n",
   "        download_widgets.append(widgets.VBox([progress_bar, speed_label]))\n",
   "\n",
   "    # Display all download widgets\n",
   "    container = widgets.VBox(download_widgets)\n",
   "    display(container)\n",
   "\n",
   "    # Initialize the ThreadPoolExecutor\n",
   "    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:\n",
   "        # Submit every download task\n",
   "        futures = []\n",
   "        for i, (url, dest_path) in enumerate(download_tasks):\n",
   "            # Update the progress bar maximum once the total size is known\n",
   "            try:\n",
   "                head = requests.head(url, allow_redirects=True)\n",
   "                total_size = int(head.headers.get('content-length', 0))\n",
   "                if total_size == 0:\n",
   "                    # Fall back to a GET request to obtain the size\n",
   "                    with requests.get(url, stream=True) as response:\n",
   "                        response.raise_for_status()\n",
   "                        total_size = int(response.headers.get('content-length', 0))\n",
   "            except Exception as e:\n",
   "                print(f\"❌ Could not determine the size of {dest_path.name}: {e}\")\n",
   "                continue\n",
   "\n",
   "            progress_bar = download_widgets[i].children[0]\n",
   "            speed_label = download_widgets[i].children[1]\n",
   "            progress_bar.max = total_size\n",
   "\n",
   "            future = executor.submit(download_file, url, dest_path, progress_bar, speed_label)\n",
   "            futures.append(future)\n",
   "\n",
   "        # Wait for every task to finish\n",
   "        for future in concurrent.futures.as_completed(futures):\n",
   "            pass  # All progress updates happen inside download_file\n",
   "\n",
   "    print(\"🎉 All downloads have finished.\")\n",
   "\n",
   "# Run the downloads\n",
   "main_download(download_tasks, max_workers)\n"
  ] },
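  { "cell_type": "markdown", "metadata": {}, "source": [
   "Optional sanity check (a minimal sketch, not part of the original pipeline): before extracting, confirm that each downloaded archive is a readable ZIP using the standard-library `zipfile.is_zipfile`. It assumes `download_dir` from the download cell above; a corrupted archive can simply be deleted and re-downloaded by re-running that cell."
  ] },
  { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
   "# Optional check: flag downloaded archives that are not valid ZIP files.\n",
   "# Assumes `download_dir` from the download cell above.\n",
   "import zipfile\n",
   "\n",
   "zip_paths = sorted(download_dir.glob('*.zip'))\n",
   "bad_archives = [p for p in zip_paths if not zipfile.is_zipfile(p)]\n",
   "\n",
   "for p in bad_archives:\n",
   "    print(f\"❌ Not a valid ZIP archive: {p.name}\")\n",
   "\n",
   "if not bad_archives:\n",
   "    print(f\"✅ All {len(zip_paths)} ZIP archives look valid.\")\n"
  ] },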
  { "cell_type": "markdown", "metadata": {}, "source": [
   "# Process the dataset from .zip to .parquet"
  ] },
  { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
   "import os\n",
   "import zipfile\n",
   "import pandas as pd\n",
   "import pyarrow as pa\n",
   "import pyarrow.parquet as pq\n",
   "\n",
   "# Column names based on the structure of the data\n",
   "COLUMN_NAMES = [\n",
   "    'trade_id',\n",
   "    'price',\n",
   "    'qty',\n",
   "    'quoteQty',\n",
   "    'time',\n",
   "    'isBuyerMaker',\n",
   "    'isBestMatch'\n",
   "]\n",
   "\n",
   "def extract_zip(file_path, extract_to):\n",
   "    \"\"\"\n",
   "    Extract a ZIP file and remove it after a successful extraction.\n",
   "\n",
   "    :param file_path: Full path to the ZIP file.\n",
   "    :param extract_to: Directory where the files will be extracted.\n",
   "    \"\"\"\n",
   "    try:\n",
   "        with zipfile.ZipFile(file_path, 'r') as zip_ref:\n",
   "            zip_ref.extractall(extract_to)\n",
   "        os.remove(file_path)\n",
   "        print(f\"Extraction finished: {os.path.basename(file_path)}\")\n",
   "    except zipfile.BadZipFile:\n",
   "        print(f\"Corrupted file: {os.path.basename(file_path)}\")\n",
   "    except Exception as e:\n",
   "        print(f\"Error while processing {os.path.basename(file_path)}: {e}\")\n",
   "\n",
   "def extract_and_delete(zip_dir_path):\n",
   "    \"\"\"\n",
   "    Extract every ZIP file in a directory and remove the archives after extraction.\n",
   "\n",
   "    :param zip_dir_path: Directory containing the ZIP files.\n",
   "    \"\"\"\n",
   "    # List all ZIP files in the given directory\n",
   "    zip_files = [\n",
   "        os.path.join(zip_dir_path, f)\n",
   "        for f in os.listdir(zip_dir_path)\n",
   "        if f.lower().endswith('.zip')\n",
   "    ]\n",
   "\n",
   "    if not zip_files:\n",
   "        print(\"No ZIP files found to extract.\")\n",
   "        return\n",
   "\n",
   "    print(f\"Starting extraction of {len(zip_files)} ZIP files...\")\n",
   "\n",
   "    for zip_file in zip_files:\n",
   "        extract_zip(zip_file, zip_dir_path)\n",
   "\n",
   "    print(\"ZIP extraction finished.\")\n",
   "\n",
   "def process_csv_directory(directory_path, output_parquet_path):\n",
   "    \"\"\"\n",
   "    Process every CSV file in a directory and save the combined data to a single Parquet file,\n",
   "    reading in chunks to respect the available memory.\n",
   "\n",
   "    :param directory_path: Directory containing the CSV files.\n",
   "    :param output_parquet_path: Path where the Parquet file will be saved.\n",
   "    \"\"\"\n",
   "    csv_files = [\n",
   "        os.path.join(directory_path, f)\n",
   "        for f in sorted(os.listdir(directory_path))\n",
   "        if f.lower().endswith('.csv')\n",
   "    ]\n",
   "\n",
   "    if not csv_files:\n",
   "        print(\"No CSV files found to process.\")\n",
   "        return\n",
   "\n",
   "    print(f\"Starting to process {len(csv_files)} CSV files...\")\n",
   "\n",
   "    # The ParquetWriter is created lazily, on the first chunk\n",
   "    writer = None\n",
   "\n",
   "    for idx, file_path in enumerate(csv_files):\n",
   "        try:\n",
   "            # Read the CSV in chunks to save memory\n",
   "            for df_chunk in pd.read_csv(file_path, header=None, names=COLUMN_NAMES, chunksize=100000):\n",
   "                # Convert 'time' from milliseconds to datetime\n",
   "                df_chunk['time'] = pd.to_datetime(df_chunk['time'], unit='ms')\n",
   "                table = pa.Table.from_pandas(df_chunk)\n",
   "                if writer is None:\n",
   "                    # Create the ParquetWriter on the first chunk\n",
   "                    writer = pq.ParquetWriter(output_parquet_path, table.schema, compression='snappy')\n",
   "                writer.write_table(table)\n",
   "            print(f\"Data from {os.path.basename(file_path)} processed.\")\n",
   "        except Exception as e:\n",
   "            print(f\"Error while processing {os.path.basename(file_path)}: {e}\")\n",
   "\n",
   "    # Close the ParquetWriter\n",
   "    if writer:\n",
   "        writer.close()\n",
   "        print(f\"Combined data successfully saved to {output_parquet_path}\")\n",
   "    else:\n",
   "        print(\"No data was written to the Parquet file.\")\n",
   "\n",
   "    print(\"Processing finished.\")\n"
  ] },
  { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
   "# Directory containing the ZIP and CSV files\n",
   "zip_dir_path = './dataset-raw'  # Replace with your directory path\n",
   "\n",
   "# Path of the combined output Parquet file\n",
   "output_parquet_path = './BTCUSDT-Dataset.parquet'  # Replace with your desired output path\n",
   "\n",
   "# Step 1: Extract the ZIP files and delete the archives after extraction\n",
"extract_and_delete(zip_dir_path)\n", "\n", "# Etapa 2: Processar arquivos CSV de forma paralela e salvar como Parquet\n", "process_csv_directory(zip_dir_path, output_parquet_path)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Particionamento do dataset em arquivos menores de no máximo 4GB" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import pyarrow.parquet as pq\n", "import pyarrow as pa\n", "\n", "# Caminho do arquivo original\n", "input_file = \"/Users/lordramos/Desktop/binance spot data/BTCUSDT-Dataset.parquet\"\n", "\n", "# Caminho para salvar os arquivos divididos\n", "output_dir = \"/Users/lordramos/Desktop/binance spot data/\"\n", "\n", "# Tamanho máximo de cada arquivo em bytes (4 GB = 4 * 1024^3 bytes)\n", "max_size = 4 * 1024**3\n", "\n", "# Inicialize as variáveis\n", "part_number = 1\n", "current_size = 0\n", "output_file = f\"{output_dir}BTCUSDT-Dataset-part-{part_number}.parquet\"\n", "writer = None\n", "\n", "# Lê o arquivo original em pedaços\n", "for batch in pq.ParquetFile(input_file).iter_batches(batch_size=10000):\n", " table = pa.Table.from_batches([batch])\n", "\n", " # Calcula o tamanho do batch atual\n", " batch_size = table.nbytes\n", "\n", " # Checa se o tamanho atual mais o novo batch excede o limite de 4 GB\n", " if current_size + batch_size > max_size:\n", " # Fecha o arquivo atual e inicia um novo arquivo\n", " if writer:\n", " writer.close()\n", " part_number += 1\n", " output_file = f\"{output_dir}BTCUSDT-Dataset-part-{part_number}.parquet\"\n", " current_size = 0 # Redefine o tamanho atual para o novo arquivo\n", " writer = None\n", "\n", " # Se o writer ainda não está definido, inicia um novo writer\n", " if writer is None:\n", " writer = pq.ParquetWriter(output_file, table.schema)\n", "\n", " # Escreve o batch no arquivo atual e atualiza o tamanho\n", " writer.write_table(table)\n", " current_size += batch_size\n", "\n", "# Fecha o último arquivo\n", "if writer:\n", " writer.close()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Upload para HuggingFace" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from huggingface_hub import login, HfApi, HfFolder\n", "import os\n", "\n", "login(token=\"\")\n", "\n", "# Configurações de autenticação e detalhes do dataset\n", "token = HfFolder.get_token() # Assume que o token já está salvo localmente\n", "api = HfApi()\n", "dataset_id = \"orion-research/btcusdt-spot-dataset\" # Substitua pelo nome do seu dataset\n", "\n", "# Caminho da pasta onde os arquivos particionados estão salvos\n", "output_dir = \"/Users/lordramos/Desktop/binance spot data/\"\n", "\n", "# Cria o repositório no Hugging Face Hub (caso ainda não tenha sido criado)\n", "api.create_repo(repo_id=dataset_id, repo_type=\"dataset\", token=token, private=False)\n", "\n", "# Loop para fazer o upload de cada arquivo particionado\n", "for file_name in os.listdir(output_dir):\n", " if file_name.startswith(\"BTCUSDT-Dataset-part-\") and file_name.endswith(\".parquet\"):\n", " file_path = os.path.join(output_dir, file_name)\n", " \n", " # Faz o upload do arquivo\n", " api.upload_file(\n", " path_or_fileobj=file_path,\n", " path_in_repo=file_name,\n", " repo_id=dataset_id,\n", " repo_type=\"dataset\",\n", " token=token,\n", " )\n", " print(f\"{file_name} upload completed.\")\n" ] } ], "metadata": { "kernelspec": { "display_name": "mlfinlab", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { 
"name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.20" } }, "nbformat": 4, "nbformat_minor": 2 }