{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "### Load from raw data (geotif)" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "import torch\n", "import kornia\n", "import numpy as np\n", "from kornia.augmentation import AugmentationSequential\n", "from torch.utils.data import Dataset, DataLoader\n", "from ssl4eo_s_dataset import SSL4EO_S\n", "import time\n", "import os\n", "from tqdm import tqdm" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "fnames_path = '../data/example_100_grids/fnames_sampled_union.json.gz'\n", "root_dir = '../data/example_100_grids/'" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Grid ID: ('0913063_-154.25_68.50',)\n", "dict_keys(['s1_grd', 's2_toa', 's3_olci', 's5p_co', 's5p_no2', 's5p_o3', 's5p_so2', 'dem'])\n", "dict_keys(['s1_grd', 's2_toa', 's3_olci', 's5p_co', 's5p_no2', 's5p_o3', 's5p_so2', 'dem'])\n", "### S1 GRD ###\n", "Number of s1 local patches: 1 Number of time stamps for first local patch: 1\n", "Example for one image: torch.Size([1, 2, 224, 224]) torch.float32 ('0913063_-154.25_68.50/1092252_-154.25_68.50/20210530',)\n", "### S2 TOA ###\n", "Number of s2 local patches: 1 Number of time stamps for first local patch: 4\n", "Example for one image: torch.Size([1, 13, 224, 224]) torch.int16 ('0913063_-154.25_68.50/1092252_-154.25_68.50/20191002',)\n", "### S3 OLCI ###\n", "Number of s3 time stamps: 7\n", "Example for one image: torch.Size([1, 21, 96, 96]) torch.float32 ('0913063_-154.25_68.50/20210411',)\n", "### S5P ###\n", "Number of s5p time stamps for CO/NO2/O3/SO2: 7 5 11 4\n", "Example for one CO image: torch.Size([1, 1, 28, 28]) torch.float32 ('0913063_-154.25_68.50/20210401',)\n", "Example for one NO2 image: torch.Size([1, 1, 28, 28]) torch.float32 ('0913063_-154.25_68.50/20210401',)\n", "Example for one O3 image: torch.Size([1, 1, 28, 28]) torch.float32 ('0913063_-154.25_68.50/20210101',)\n", "Example for one SO2 image: torch.Size([1, 1, 28, 28]) torch.float32 ('0913063_-154.25_68.50/20210501',)\n", "### DEM ###\n", "One DEM image for the grid: torch.Size([1, 1, 960, 960]) torch.float32 ('0913063_-154.25_68.50',)\n", "Time: 94.70435643196106\n" ] } ], "source": [ "transform_s1 = AugmentationSequential(\n", " #kornia.augmentation.SmallestMaxSize(264),\n", " kornia.augmentation.CenterCrop(224),\n", ")\n", "transform_s2 = AugmentationSequential(\n", " #kornia.augmentation.SmallestMaxSize(264),\n", " kornia.augmentation.CenterCrop(224),\n", ")\n", "transform_s3 = AugmentationSequential(\n", " kornia.augmentation.SmallestMaxSize(96),\n", " kornia.augmentation.CenterCrop(96),\n", ")\n", "transform_s5p = AugmentationSequential(\n", " kornia.augmentation.SmallestMaxSize(28),\n", " kornia.augmentation.CenterCrop(28),\n", ")\n", "transform_dem = AugmentationSequential(\n", " kornia.augmentation.SmallestMaxSize(960),\n", " kornia.augmentation.CenterCrop(960),\n", ")\n", "\n", "ssl4eo_s = SSL4EO_S(fnames_path, root_dir, transform_s1=transform_s1, transform_s2=transform_s2, transform_s3=transform_s3, transform_s5p=transform_s5p, transform_dem=transform_dem)\n", "dataloader = DataLoader(ssl4eo_s, batch_size=1, shuffle=True, num_workers=4) # batch size can only be 1 because of varying number of images per grid\n", "\n", "start_time = time.time()\n", "\n", "for i, (sample, meta_data) in enumerate(dataloader):\n", " if i == 0:\n", " print('Grid ID:', meta_data['dem'][0])\n", " print(sample.keys())\n", " print(meta_data.keys())\n", "\n", " print('### S1 GRD ###')\n", " print('Number of s1 local patches:', len(meta_data['s1_grd']), ' ', 'Number of time stamps for first local patch:', len(meta_data['s1_grd'][0]))\n", " print('Example for one image:', sample['s1_grd'][0][0].shape, sample['s1_grd'][0][0].dtype, meta_data['s1_grd'][0][0])\n", " print('### S2 TOA ###')\n", " print('Number of s2 local patches:', len(meta_data['s2_toa']), ' ', 'Number of time stamps for first local patch:', len(meta_data['s2_toa'][0]))\n", " print('Example for one image:', sample['s2_toa'][0][0].shape, sample['s2_toa'][0][0].dtype, meta_data['s2_toa'][0][0])\n", " print('### S3 OLCI ###')\n", " print('Number of s3 time stamps:', len(meta_data['s3_olci']))\n", " print('Example for one image:', sample['s3_olci'][0].shape, sample['s3_olci'][0].dtype, meta_data['s3_olci'][0])\n", " print('### S5P ###')\n", " print('Number of s5p time stamps for CO/NO2/O3/SO2:', len(meta_data['s5p_co']), len(meta_data['s5p_no2']), len(meta_data['s5p_o3']), len(meta_data['s5p_so2']))\n", " print('Example for one CO image:', sample['s5p_co'][0].shape, sample['s5p_co'][0].dtype, meta_data['s5p_co'][0])\n", " print('Example for one NO2 image:', sample['s5p_no2'][0].shape, sample['s5p_no2'][0].dtype, meta_data['s5p_no2'][0])\n", " print('Example for one O3 image:', sample['s5p_o3'][0].shape, sample['s5p_o3'][0].dtype, meta_data['s5p_o3'][0])\n", " print('Example for one SO2 image:', sample['s5p_so2'][0].shape, sample['s5p_so2'][0].dtype, meta_data['s5p_so2'][0])\n", " print('### DEM ###')\n", " print('One DEM image for the grid:', sample['dem'].shape, sample['dem'].dtype, meta_data['dem'][0])\n", " else:\n", " pass\n", "\n", "print('Time:', time.time()-start_time)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Load from webdataset (npy)" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [], "source": [ "import webdataset as wds\n", "\n", "## 85 grids version\n", "webdataset_npy_dir = '../data/example_100_grids_cleaned/webdataset_obj_grid_pad_npy/'\n", "# shards path\n", "shards_path = os.path.join(webdataset_npy_dir, 'example-{000000..000008}.tar')\n", "batch_size = 1\n", "#shuffle = 8\n", "train_transform = None\n", "\n", "# pytorch dataset\n", "dataset = (\n", " wds.WebDataset(shards_path,shardshuffle=True)\n", " #.shuffle(shuffle)\n", " .decode()\n", " #.to_tuple(\"json\")\n", " .to_tuple(\"s1_grd.npy\", \n", " \"s2_toa.npy\", \n", " \"s3_olci.npy\", \n", " \"s5p.npy\", \n", " \"dem.npy\",\n", " \"json\") # also possible to only extract part of the data\n", " #.map(sample_one_grid)\n", " #.map_tuple(train_transform, identity)\n", " ).batched(batch_size, partial=False)\n", "\n", "# pytorch dataloader\n", "dataloader = torch.utils.data.DataLoader(dataset, num_workers=4, batch_size=None)" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "85it [00:36, 2.30it/s]" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Time: 36.96s\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "\n" ] } ], "source": [ "start_time = time.time()\n", "for i, data in tqdm(enumerate(dataloader)):\n", " #key,img = data\n", " pass\n", "print(f\"Time: {time.time()-start_time:.2f}s\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Load from webdataset (pth)" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "ename": "NameError", "evalue": "name 'wds' is not defined", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", "Cell \u001b[0;32mIn[8], line 44\u001b[0m\n\u001b[1;32m 39\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m sample\n\u001b[1;32m 42\u001b[0m \u001b[38;5;66;03m# pytorch dataset\u001b[39;00m\n\u001b[1;32m 43\u001b[0m dataset \u001b[38;5;241m=\u001b[39m (\n\u001b[0;32m---> 44\u001b[0m \u001b[43mwds\u001b[49m\u001b[38;5;241m.\u001b[39mWebDataset(shards_path,shardshuffle\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m)\n\u001b[1;32m 45\u001b[0m \u001b[38;5;66;03m#.shuffle(shuffle)\u001b[39;00m\n\u001b[1;32m 46\u001b[0m \u001b[38;5;241m.\u001b[39mdecode()\n\u001b[1;32m 47\u001b[0m \u001b[38;5;241m.\u001b[39mselect(has_all_modalities) \u001b[38;5;66;03m# only keep samples with all modalities\u001b[39;00m\n\u001b[1;32m 48\u001b[0m \u001b[38;5;241m.\u001b[39mmap(sample_one_local_patch) \u001b[38;5;66;03m# sample one local patch for S1 and S2\u001b[39;00m\n\u001b[1;32m 49\u001b[0m \u001b[38;5;66;03m#.to_tuple(\"json\")\u001b[39;00m\n\u001b[1;32m 50\u001b[0m \u001b[38;5;241m.\u001b[39mto_tuple(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124ms1_grd.pth\u001b[39m\u001b[38;5;124m\"\u001b[39m, \n\u001b[1;32m 51\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124ms2_toa.pth\u001b[39m\u001b[38;5;124m\"\u001b[39m, \n\u001b[1;32m 52\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124ms3_olci.pth\u001b[39m\u001b[38;5;124m\"\u001b[39m, \n\u001b[1;32m 53\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124ms5p_co.pth\u001b[39m\u001b[38;5;124m\"\u001b[39m, \n\u001b[1;32m 54\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124ms5p_no2.pth\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 55\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124ms5p_o3.pth\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 56\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124ms5p_so2.pth\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 57\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mdem.pth\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 58\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mjson\u001b[39m\u001b[38;5;124m\"\u001b[39m) \u001b[38;5;66;03m# also possible to only extract part of the data\u001b[39;00m\n\u001b[1;32m 59\u001b[0m \u001b[38;5;66;03m#.map(sample_one_grid)\u001b[39;00m\n\u001b[1;32m 60\u001b[0m \u001b[38;5;66;03m#.map_tuple(train_transform, identity)\u001b[39;00m\n\u001b[1;32m 61\u001b[0m )\u001b[38;5;241m.\u001b[39mbatched(batch_size, partial\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m)\n\u001b[1;32m 63\u001b[0m \u001b[38;5;66;03m# pytorch dataloader\u001b[39;00m\n\u001b[1;32m 64\u001b[0m dataloader \u001b[38;5;241m=\u001b[39m torch\u001b[38;5;241m.\u001b[39mutils\u001b[38;5;241m.\u001b[39mdata\u001b[38;5;241m.\u001b[39mDataLoader(dataset, num_workers\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m4\u001b[39m, batch_size\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m)\n", "\u001b[0;31mNameError\u001b[0m: name 'wds' is not defined" ] } ], "source": [ "import random\n", "\n", "webdataset_pth_dir = '../data/example_100_grids/reformat_webdataset_pth/'\n", "# shards path\n", "shards_path = os.path.join(webdataset_pth_dir, 'example-{000000..000009}.tar')\n", "batch_size = 1\n", "#shuffle = 8\n", "#train_transform = None\n", "\n", "def has_all_modalities(sample):\n", " required_keys = [\n", " \"s1_grd.pth\", \n", " \"s2_toa.pth\", \n", " \"s3_olci.pth\", \n", " \"s5p_co.pth\", \n", " \"s5p_no2.pth\",\n", " \"s5p_o3.pth\",\n", " \"s5p_so2.pth\",\n", " \"dem.pth\",\n", " \"json\"\n", " ]\n", " return all(key in sample for key in required_keys)\n", "\n", "def sample_one_local_patch(sample):\n", " s1 = sample[\"s1_grd.pth\"]\n", " s2 = sample[\"s2_toa.pth\"]\n", " meta_s1 = sample[\"json\"][\"s1_grd\"]\n", " meta_s2 = sample[\"json\"][\"s2_toa\"]\n", " #idx = torch.randint(0, s1.shape[0], (1,))\n", " idx = random.randint(0, s1.shape[0]-1)\n", " s1_new = s1[idx]\n", " s2_new = s2[idx]\n", " meta_s1_new = meta_s1[idx]\n", " meta_s2_new = meta_s2[idx]\n", " sample[\"s1_grd.pth\"] = s1_new\n", " sample[\"s2_toa.pth\"] = s2_new\n", " sample[\"json\"][\"s1_grd\"] = meta_s1_new\n", " sample[\"json\"][\"s2_toa\"] = meta_s2_new\n", " return sample\n", "\n", "\n", "# pytorch dataset\n", "dataset = (\n", " wds.WebDataset(shards_path,shardshuffle=True)\n", " #.shuffle(shuffle)\n", " .decode()\n", " .select(has_all_modalities) # only keep samples with all modalities\n", " .map(sample_one_local_patch) # sample one local patch for S1 and S2\n", " #.to_tuple(\"json\")\n", " .to_tuple(\"s1_grd.pth\", \n", " \"s2_toa.pth\", \n", " \"s3_olci.pth\", \n", " \"s5p_co.pth\", \n", " \"s5p_no2.pth\",\n", " \"s5p_o3.pth\",\n", " \"s5p_so2.pth\",\n", " \"dem.pth\",\n", " \"json\") # also possible to only extract part of the data\n", " #.map(sample_one_grid)\n", " #.map_tuple(train_transform, identity)\n", " ).batched(batch_size, partial=False)\n", "\n", "# pytorch dataloader\n", "dataloader = torch.utils.data.DataLoader(dataset, num_workers=4, batch_size=None)" ] }, { "cell_type": "code", "execution_count": 24, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "0it [00:00, ?it/s]" ] }, { "name": "stderr", "output_type": "stream", "text": [ "100it [00:51, 1.94it/s]" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Time: 51.68s\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "\n" ] } ], "source": [ "start_time = time.time()\n", "for i, data in tqdm(enumerate(dataloader)):\n", " #key,img = data\n", " pass\n", "print(f\"Time: {time.time()-start_time:.2f}s\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Load from lmdb" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [], "source": [ "from ssl4eo_s_lmdb_dataset import SSL4EO_S_lmdb\n", "\n", "root_dir = '../data/example_100_grids/'\n", "lmdb_path = root_dir + 'reformat_lmdb/' + 'ssl4eo_s_data.lmdb'\n", "key_path = root_dir + 'reformat_lmdb/' + 'ssl4eo_s_key.csv'\n", "mode = ['s1_grd', 's2_toa', 's3_olci', 's5p_co', 's5p_no2', 's5p_so2', 's5p_o3', 'dem']\n", "\n", "dataset = SSL4EO_S_lmdb(lmdb_path=lmdb_path, key_path=key_path, mode=mode)\n", "dataloader = DataLoader(dataset, batch_size=1, shuffle=True, num_workers=4)" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/home/wangyi111/miniconda3/envs/pytorch/lib/python3.10/site-packages/torch/utils/data/_utils/collate.py:171: UserWarning: The given NumPy array is not writable, and PyTorch does not support non-writable tensors. This means writing to this tensor will result in undefined behavior. You may want to copy the array to protect its data or make it writable before converting it to a tensor. This type of warning will be suppressed for the rest of this program. (Triggered internally at ../torch/csrc/utils/tensor_numpy.cpp:206.)\n", " return collate([torch.as_tensor(b) for b in batch], collate_fn_map=collate_fn_map)\n", "0it [00:00, ?it/s]/home/wangyi111/miniconda3/envs/pytorch/lib/python3.10/site-packages/torch/utils/data/_utils/collate.py:171: UserWarning: The given NumPy array is not writable, and PyTorch does not support non-writable tensors. This means writing to this tensor will result in undefined behavior. You may want to copy the array to protect its data or make it writable before converting it to a tensor. This type of warning will be suppressed for the rest of this program. (Triggered internally at ../torch/csrc/utils/tensor_numpy.cpp:206.)\n", " return collate([torch.as_tensor(b) for b in batch], collate_fn_map=collate_fn_map)\n", "/home/wangyi111/miniconda3/envs/pytorch/lib/python3.10/site-packages/torch/utils/data/_utils/collate.py:171: UserWarning: The given NumPy array is not writable, and PyTorch does not support non-writable tensors. This means writing to this tensor will result in undefined behavior. You may want to copy the array to protect its data or make it writable before converting it to a tensor. This type of warning will be suppressed for the rest of this program. (Triggered internally at ../torch/csrc/utils/tensor_numpy.cpp:206.)\n", " return collate([torch.as_tensor(b) for b in batch], collate_fn_map=collate_fn_map)\n", "/home/wangyi111/miniconda3/envs/pytorch/lib/python3.10/site-packages/torch/utils/data/_utils/collate.py:171: UserWarning: The given NumPy array is not writable, and PyTorch does not support non-writable tensors. This means writing to this tensor will result in undefined behavior. You may want to copy the array to protect its data or make it writable before converting it to a tensor. This type of warning will be suppressed for the rest of this program. (Triggered internally at ../torch/csrc/utils/tensor_numpy.cpp:206.)\n", " return collate([torch.as_tensor(b) for b in batch], collate_fn_map=collate_fn_map)\n", "100it [00:04, 23.99it/s]" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Time: 4.21s\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "\n" ] } ], "source": [ "start_time = time.time()\n", "for i, data in tqdm(enumerate(dataloader)):\n", " #key,img = data\n", " pass\n", "print(f\"Time: {time.time()-start_time:.2f}s\")" ] } ], "metadata": { "kernelspec": { "display_name": "pytorch", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.13" } }, "nbformat": 4, "nbformat_minor": 2 }