# hacker-news / sql2parquet.py
# anantn: "Add 20230429 db snapshot as parquet files" (commit be1c51e)
import duckdb
import os
import sqlite3
import sys
def convert_sqlite_to_parquet_with_duckdb(sqlite_db_path, output_dir, tables_to_convert=None):
    """
    Convert tables from a SQLite database to Parquet files using DuckDB.

    Args:
        sqlite_db_path (str): The path to the SQLite database file.
        output_dir (str): The directory where the Parquet files will be saved.
        tables_to_convert (list, optional): A list of table names to convert.
            If None, all tables will be converted.
    """
    # --- Step 1: Create the output directory if it doesn't exist ---
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
        print(f"Created output directory: {output_dir}")

    # --- Step 2: Connect to the SQLite database and get table names ---
    # We use the built-in sqlite3 library just to get the list of tables.
    # DuckDB will do the heavy lifting of reading the data.
    try:
        conn_sqlite = sqlite3.connect(sqlite_db_path)
        try:
            cursor = conn_sqlite.cursor()
            # Get a list of all tables. We exclude internal SQLite tables.
            cursor.execute(
                "SELECT name FROM sqlite_master "
                "WHERE type='table' AND name NOT LIKE 'sqlite_%';"
            )
            all_tables = [row[0] for row in cursor.fetchall()]
        finally:
            # Always release the connection, even if the query above raises.
            conn_sqlite.close()
    except sqlite3.Error as e:
        print(f"Error connecting to SQLite database: {e}")
        return

    # --- Step 3: Use DuckDB to attach the SQLite database and convert tables ---
    # Connect to a DuckDB in-memory database.
    # The 'read_only=False' is important for writing the Parquet files.
    con_duckdb = duckdb.connect(database=':memory:', read_only=False)
    attached = False  # guards the DETACH in the finally block below
    try:
        # Load the SQLite extension. It's often autoloaded, but this ensures it.
        con_duckdb.execute("INSTALL sqlite; LOAD sqlite;")
        # Attach the SQLite database. This makes all its tables available to
        # DuckDB. Single quotes in the path are doubled so a path containing
        # ' cannot break (or inject into) the SQL statement.
        escaped_db_path = sqlite_db_path.replace("'", "''")
        con_duckdb.execute(f"ATTACH '{escaped_db_path}' AS sqlite_db (TYPE sqlite);")
        attached = True

        # Determine which tables to convert.
        if tables_to_convert is None:
            tables_to_process = all_tables
        else:
            # Filter to ensure the requested tables actually exist in the database.
            tables_to_process = [table for table in tables_to_convert if table in all_tables]

        if not tables_to_process:
            print("No tables found to convert.")
            return

        print(f"Found {len(tables_to_process)} tables to convert: {', '.join(tables_to_process)}")

        # Loop through the tables and use the efficient COPY command.
        for table_name in tables_to_process:
            print(f"\nConverting table: '{table_name}'...")
            # Define the output path for the Parquet file.
            parquet_file_path = os.path.join(output_dir, f'{table_name}.parquet')
            # COPY reads directly from the attached SQLite table and writes
            # directly to the Parquet file; 'FORMAT parquet' selects the
            # output format (compression could be added, e.g. COMPRESSION 'zstd').
            # Embedded double quotes in the identifier and single quotes in the
            # path are doubled so unusual names cannot break the SQL.
            quoted_table = table_name.replace('"', '""')
            escaped_out_path = parquet_file_path.replace("'", "''")
            copy_query = (
                f'COPY (SELECT * FROM sqlite_db."{quoted_table}") '
                f"TO '{escaped_out_path}' (FORMAT parquet);"
            )
            try:
                con_duckdb.execute(copy_query)
                print(f" -> Successfully saved to '{parquet_file_path}'")
            except duckdb.Error as copy_err:
                print(f" -> ERROR converting table '{table_name}': {copy_err}")
                print(" This may be due to unsupported data types or other schema issues.")
    finally:
        # --- Step 4: Clean up (detach the database and close the connection) ---
        # Only DETACH when the ATTACH actually succeeded; otherwise the
        # original code raised a secondary error from inside finally.
        if attached:
            con_duckdb.execute("DETACH sqlite_db;")
        con_duckdb.close()
        print("\nConversion process finished.")
# --- How to use the function ---
if __name__ == '__main__':
    # Require the SQLite database path; exit with a usage message instead of
    # crashing with an IndexError when it is missing.
    if len(sys.argv) < 2:
        print(f"Usage: {sys.argv[0]} <sqlite_db_path> [output_dir]", file=sys.stderr)
        sys.exit(1)
    database_file = sys.argv[1]
    # Optional second argument selects the output directory (default: cwd).
    output_folder = sys.argv[2] if len(sys.argv) > 2 else '.'
    # Call the function to perform the conversion.
    # You can also specify a list of tables if you don't want to convert all
    # of them, e.g. tables_to_convert=['table_1', 'another_large_table'].
    convert_sqlite_to_parquet_with_duckdb(database_file, output_folder)