{ "cells": [ { "cell_type": "code", "execution_count": null, "id": "07b57859", "metadata": {}, "outputs": [], "source": [ "\"\"\"\n", "\n", "Contains the necessary scripts to actually download the FITS files that are in your JWST csv.\n", "\n", "\n", "\"\"\"" ] }, { "cell_type": "code", "execution_count": null, "id": "240cc56f-e1b6-47f7-93ee-dc1a0918f5af", "metadata": {}, "outputs": [], "source": [ "import pandas as pd\n", "import numpy as np\n", "from astropy.coordinates import SkyCoord\n", "from astropy import units as u\n", "from sklearn.cluster import AgglomerativeClustering\n", "import matplotlib.pyplot as plt\n", "import matplotlib.patches as patches\n", "import os\n", "import numpy as np\n", "from astropy.io import fits\n", "from astropy.wcs import WCS\n", "from tqdm import tqdm\n", "\n", "df = pd.read_csv(\"jwst_FINAL.csv\")\n", "\n", "df = df.rename(columns={'sci_data_set_name': 'obs_id'})\n", "\n", "# Effective integration time should be more than 30 seconds\n", "df = df[df['effinttm'] > 30]\n", "\n", "df = df[df['exp_type'] == \"NRC_IMAGE\"]\n", "\n", "\"\"\"\n", "The data downloading process looks like the following:\n", "\n", "1. Use MastMissions to query the list of observations and their metadata, like ra/dec\n", "\n", "2. Filtering process to make sure there are no overlapping observations.\n", "\n", "3. Use Observations to pull the names of the data files associated with each observation.\n", "\n", "4. Pull the data by wget all those file links.\n", "\n", "5. Preprocess.\n", "\n", "Note that the data file names use the first 6 chars of obs_id from this observations array\n", "that we have created. That's why we create the shortened identifier, to match\n", "observations to product file names. This will be used later.\n", "\"\"\"\n", "\n", "df['obs_id_short'] = df['obs_id'].str[:6]\n", "\n", "RA_NAME = 'targ_ra'\n", "DEC_NAME = 'targ_dec'\n", "\n", "assert df[RA_NAME].isna().sum() < 10\n", "assert df[DEC_NAME].isna().sum() < 10\n", "\n", "df = df.dropna(subset=[RA_NAME, DEC_NAME])\n", "\n", "df = df.groupby([RA_NAME, DEC_NAME]).apply(lambda x: x.drop_duplicates(subset='detector', keep='first'))\n", "\n", "multi_index_df = df.index.to_frame().groupby(level=0).first().reset_index(drop=True)\n", "multi_index_df = multi_index_df.drop(columns=[2])\n", "\n", "df = df.reset_index(drop=True)" ] }, { "cell_type": "code", "execution_count": 173, "id": "cd89a849-6ef4-493a-910f-0b385e254eb2", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "100%|█████████████████████████████████████████| 117/117 [13:48<00:00, 7.08s/it]\n" ] } ], "source": [ "import requests\n", "import csv\n", "\n", "# Function to search datasets using the given endpoint\n", "def search_datasets(dataset_ids):\n", " # Base URL for the search API\n", " base_url = 'https://mast.stsci.edu/search/jwst/api/v0.1/list_products'\n", " \n", " # List to store search results\n", " search_results = []\n", " \n", " ids_str = ','.join(dataset_ids)\n", "\n", " # Construct the search URL\n", " search_url = f\"{base_url}?dataset_ids={ids_str}\"\n", "\n", " # Make the API request\n", " response = requests.get(search_url)\n", "\n", " # Check if the request was successful\n", " if response.status_code == 200:\n", " # Parse the JSON response\n", " data = response.json()\n", " search_results.append(data)\n", " else:\n", " # Handle errors\n", " print(f\"Error: Unable to fetch data for dataset ID {dataset_id}\")\n", " \n", " return search_results\n", "\n", "# Example usage\n", "dataset_ids_csv = 
{ "cell_type": "code", "execution_count": 179, "id": "b6aad3d4-fdd0-4799-a84c-692d1c94463d", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "100%|███████████████████████████████████████| 117/117 [00:00<00:00, 3803.87it/s]\n" ] } ], "source": [ "# Flatten the per-chunk responses into a single list of product records\n", "new_all_results = []\n", "\n", "for result in tqdm(all_results):\n", "    products = result[0]['products']\n", "    new_all_results.extend(products)" ] },
{ "cell_type": "code", "execution_count": 181, "id": "0c2caafa-d649-4e2a-92a6-24680ee06cb7", "metadata": {}, "outputs": [], "source": [ "new_all_results_df = pd.DataFrame(new_all_results)" ] },
{ "cell_type": "code", "execution_count": 184, "id": "e189e901-dbad-4689-8454-ee9e1ccab09a", "metadata": {}, "outputs": [], "source": [ "# For these raw NIRCam products, the 'filters' field holds the detector aperture name\n", "detectors = ['NRCA1_FULL', 'NRCA2_FULL', 'NRCA3_FULL', 'NRCA4_FULL', 'NRCB1_FULL', 'NRCB2_FULL', 'NRCB3_FULL', 'NRCB4_FULL']\n", "\n", "# Keep only stage-1b (uncalibrated) products from the full-frame detectors\n", "resultsdf = new_all_results_df[new_all_results_df['category'] == '1b']\n", "resultsdf = resultsdf[resultsdf['filters'].isin(detectors)]" ] },
{ "cell_type": "code", "execution_count": 189, "id": "1f46cbf4-5b53-437d-8e9b-f7d126839ccc", "metadata": {}, "outputs": [ { "data": { "text/html": [ "<div>7537 rows × 13 columns</div>
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
product_keyaccessdatasetinstrument_namefiltersfilenameuriauthz_primary_identifierauthz_secondary_identifierfile_suffixcategorysizetype
28jw02561001002_06101_00001_jw02561001002_06101_...PUBLICjw02561001002_06101_00001NIRCAMNRCA3_FULLjw02561001002_06101_00001_nrca3_uncal.fitsjw02561001002_06101_00001/jw02561001002_06101_...jw02561001002_06101_00001_nrca3_uncal.fitsjw02561001002_06101_00001_nrca3_uncal.fits_uncal1b75553920science
50jw02561001002_06101_00001_jw02561001002_06101_...PUBLICjw02561001002_06101_00001NIRCAMNRCB3_FULLjw02561001002_06101_00001_nrcb3_uncal.fitsjw02561001002_06101_00001/jw02561001002_06101_...jw02561001002_06101_00001_nrcb3_uncal.fitsjw02561001002_06101_00001_nrcb3_uncal.fits_uncal1b75553920science
72jw02561001002_06101_00001_jw02561001002_06101_...PUBLICjw02561001002_06101_00001NIRCAMNRCA2_FULLjw02561001002_06101_00001_nrca2_uncal.fitsjw02561001002_06101_00001/jw02561001002_06101_...jw02561001002_06101_00001_nrca2_uncal.fitsjw02561001002_06101_00001_nrca2_uncal.fits_uncal1b75553920science
93jw02561001002_06101_00001_jw02561001002_06101_...PUBLICjw02561001002_06101_00001NIRCAMNRCB2_FULLjw02561001002_06101_00001_nrcb2_uncal.fitsjw02561001002_06101_00001/jw02561001002_06101_...jw02561001002_06101_00001_nrcb2_uncal.fitsjw02561001002_06101_00001_nrcb2_uncal.fits_uncal1b75553920science
114jw02561001002_06101_00001_jw02561001002_06101_...PUBLICjw02561001002_06101_00001NIRCAMNRCA4_FULLjw02561001002_06101_00001_nrca4_uncal.fitsjw02561001002_06101_00001/jw02561001002_06101_...jw02561001002_06101_00001_nrca4_uncal.fitsjw02561001002_06101_00001_nrca4_uncal.fits_uncal1b75553920science
..........................................
216718jw02130007001_03101_00002_jw02130007001_03101_...PUBLICjw02130007001_03101_00002NIRCAMNRCB1_FULLjw02130007001_03101_00002_nrcb1_uncal.fitsjw02130007001_03101_00002/jw02130007001_03101_...jw02130007001_03101_00002_nrcb1_uncal.fitsjw02130007001_03101_00002_nrcb1_uncal.fits_uncal1b75553920science
216740jw02130007001_03101_00002_jw02130007001_03101_...PUBLICjw02130007001_03101_00002NIRCAMNRCA4_FULLjw02130007001_03101_00002_nrca4_uncal.fitsjw02130007001_03101_00002/jw02130007001_03101_...jw02130007001_03101_00002_nrca4_uncal.fitsjw02130007001_03101_00002_nrca4_uncal.fits_uncal1b75553920science
216786jw02130007001_03101_00002_jw02130007001_03101_...PUBLICjw02130007001_03101_00002NIRCAMNRCA1_FULLjw02130007001_03101_00002_nrca1_uncal.fitsjw02130007001_03101_00002/jw02130007001_03101_...jw02130007001_03101_00002_nrca1_uncal.fitsjw02130007001_03101_00002_nrca1_uncal.fits_uncal1b75553920science
216808jw02130007001_03101_00002_jw02130007001_03101_...PUBLICjw02130007001_03101_00002NIRCAMNRCA3_FULLjw02130007001_03101_00002_nrca3_uncal.fitsjw02130007001_03101_00002/jw02130007001_03101_...jw02130007001_03101_00002_nrca3_uncal.fitsjw02130007001_03101_00002_nrca3_uncal.fits_uncal1b75553920science
216830jw02130007001_03101_00002_jw02130007001_03101_...PUBLICjw02130007001_03101_00002NIRCAMNRCA2_FULLjw02130007001_03101_00002_nrca2_uncal.fitsjw02130007001_03101_00002/jw02130007001_03101_...jw02130007001_03101_00002_nrca2_uncal.fitsjw02130007001_03101_00002_nrca2_uncal.fits_uncal1b75553920science
\n", "

7537 rows × 13 columns

\n", "
" ], "text/plain": [ " product_key access \\\n", "28 jw02561001002_06101_00001_jw02561001002_06101_... PUBLIC \n", "50 jw02561001002_06101_00001_jw02561001002_06101_... PUBLIC \n", "72 jw02561001002_06101_00001_jw02561001002_06101_... PUBLIC \n", "93 jw02561001002_06101_00001_jw02561001002_06101_... PUBLIC \n", "114 jw02561001002_06101_00001_jw02561001002_06101_... PUBLIC \n", "... ... ... \n", "216718 jw02130007001_03101_00002_jw02130007001_03101_... PUBLIC \n", "216740 jw02130007001_03101_00002_jw02130007001_03101_... PUBLIC \n", "216786 jw02130007001_03101_00002_jw02130007001_03101_... PUBLIC \n", "216808 jw02130007001_03101_00002_jw02130007001_03101_... PUBLIC \n", "216830 jw02130007001_03101_00002_jw02130007001_03101_... PUBLIC \n", "\n", " dataset instrument_name filters \\\n", "28 jw02561001002_06101_00001 NIRCAM NRCA3_FULL \n", "50 jw02561001002_06101_00001 NIRCAM NRCB3_FULL \n", "72 jw02561001002_06101_00001 NIRCAM NRCA2_FULL \n", "93 jw02561001002_06101_00001 NIRCAM NRCB2_FULL \n", "114 jw02561001002_06101_00001 NIRCAM NRCA4_FULL \n", "... ... ... ... \n", "216718 jw02130007001_03101_00002 NIRCAM NRCB1_FULL \n", "216740 jw02130007001_03101_00002 NIRCAM NRCA4_FULL \n", "216786 jw02130007001_03101_00002 NIRCAM NRCA1_FULL \n", "216808 jw02130007001_03101_00002 NIRCAM NRCA3_FULL \n", "216830 jw02130007001_03101_00002 NIRCAM NRCA2_FULL \n", "\n", " filename \\\n", "28 jw02561001002_06101_00001_nrca3_uncal.fits \n", "50 jw02561001002_06101_00001_nrcb3_uncal.fits \n", "72 jw02561001002_06101_00001_nrca2_uncal.fits \n", "93 jw02561001002_06101_00001_nrcb2_uncal.fits \n", "114 jw02561001002_06101_00001_nrca4_uncal.fits \n", "... ... \n", "216718 jw02130007001_03101_00002_nrcb1_uncal.fits \n", "216740 jw02130007001_03101_00002_nrca4_uncal.fits \n", "216786 jw02130007001_03101_00002_nrca1_uncal.fits \n", "216808 jw02130007001_03101_00002_nrca3_uncal.fits \n", "216830 jw02130007001_03101_00002_nrca2_uncal.fits \n", "\n", " uri \\\n", "28 jw02561001002_06101_00001/jw02561001002_06101_... \n", "50 jw02561001002_06101_00001/jw02561001002_06101_... \n", "72 jw02561001002_06101_00001/jw02561001002_06101_... \n", "93 jw02561001002_06101_00001/jw02561001002_06101_... \n", "114 jw02561001002_06101_00001/jw02561001002_06101_... \n", "... ... \n", "216718 jw02130007001_03101_00002/jw02130007001_03101_... \n", "216740 jw02130007001_03101_00002/jw02130007001_03101_... \n", "216786 jw02130007001_03101_00002/jw02130007001_03101_... \n", "216808 jw02130007001_03101_00002/jw02130007001_03101_... \n", "216830 jw02130007001_03101_00002/jw02130007001_03101_... \n", "\n", " authz_primary_identifier \\\n", "28 jw02561001002_06101_00001_nrca3_uncal.fits \n", "50 jw02561001002_06101_00001_nrcb3_uncal.fits \n", "72 jw02561001002_06101_00001_nrca2_uncal.fits \n", "93 jw02561001002_06101_00001_nrcb2_uncal.fits \n", "114 jw02561001002_06101_00001_nrca4_uncal.fits \n", "... ... 
\n", "216718 jw02130007001_03101_00002_nrcb1_uncal.fits \n", "216740 jw02130007001_03101_00002_nrca4_uncal.fits \n", "216786 jw02130007001_03101_00002_nrca1_uncal.fits \n", "216808 jw02130007001_03101_00002_nrca3_uncal.fits \n", "216830 jw02130007001_03101_00002_nrca2_uncal.fits \n", "\n", " authz_secondary_identifier file_suffix category \\\n", "28 jw02561001002_06101_00001_nrca3_uncal.fits _uncal 1b \n", "50 jw02561001002_06101_00001_nrcb3_uncal.fits _uncal 1b \n", "72 jw02561001002_06101_00001_nrca2_uncal.fits _uncal 1b \n", "93 jw02561001002_06101_00001_nrcb2_uncal.fits _uncal 1b \n", "114 jw02561001002_06101_00001_nrca4_uncal.fits _uncal 1b \n", "... ... ... ... \n", "216718 jw02130007001_03101_00002_nrcb1_uncal.fits _uncal 1b \n", "216740 jw02130007001_03101_00002_nrca4_uncal.fits _uncal 1b \n", "216786 jw02130007001_03101_00002_nrca1_uncal.fits _uncal 1b \n", "216808 jw02130007001_03101_00002_nrca3_uncal.fits _uncal 1b \n", "216830 jw02130007001_03101_00002_nrca2_uncal.fits _uncal 1b \n", "\n", " size type \n", "28 75553920 science \n", "50 75553920 science \n", "72 75553920 science \n", "93 75553920 science \n", "114 75553920 science \n", "... ... ... \n", "216718 75553920 science \n", "216740 75553920 science \n", "216786 75553920 science \n", "216808 75553920 science \n", "216830 75553920 science \n", "\n", "[7537 rows x 13 columns]" ] }, "execution_count": 189, "metadata": {}, "output_type": "execute_result" } ], "source": [ "resultsdf" ] }, { "cell_type": "code", "execution_count": 188, "id": "f086c0f9-9ef6-4945-a53a-f3cf051b4dee", "metadata": {}, "outputs": [], "source": [ "resultsdf.to_csv(\"all_jwst_uris.csv\")" ] }, { "cell_type": "code", "execution_count": 145, "id": "09ca516e-3bbb-4a2b-9c6e-984c9a7e801a", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Symmetric?\n", "True\n", "(475, 475)\n" ] } ], "source": [ "# Array of latitudes and longitudes\n", "# MAKE SURE TO PUT RA=LON, DEC=LAT\n", "latitudes = np.array(multi_index_df.groupby(level=0).first()[DEC_NAME]) # Example latitudes\n", "longitudes = np.array(multi_index_df.groupby(level=0).first()[RA_NAME]) # Example longitudes\n", "\n", "n_points = len(latitudes)\n", "\n", "# Repeat each point n_points times for lat1, lon1\n", "lat1 = np.repeat(latitudes, n_points)\n", "lon1 = np.repeat(longitudes, n_points)\n", "\n", "# Tile the whole array n_points times for lat2, lon2\n", "lat2 = np.tile(latitudes, n_points)\n", "lon2 = np.tile(longitudes, n_points)\n", "\n", "# Calculates angular separation between two spherical coords\n", "# This can be lat/lon or ra/dec\n", "# Taken from astropy\n", "def angular_separation_deg(lon1, lat1, lon2, lat2):\n", " lon1 = np.deg2rad(lon1)\n", " lon2 = np.deg2rad(lon2)\n", " lat1 = np.deg2rad(lat1)\n", " lat2 = np.deg2rad(lat2)\n", " \n", " sdlon = np.sin(lon2 - lon1)\n", " cdlon = np.cos(lon2 - lon1)\n", " slat1 = np.sin(lat1)\n", " slat2 = np.sin(lat2)\n", " clat1 = np.cos(lat1)\n", " clat2 = np.cos(lat2)\n", "\n", " num1 = clat2 * sdlon\n", " num2 = clat1 * slat2 - slat1 * clat2 * cdlon\n", " denominator = slat1 * slat2 + clat1 * clat2 * cdlon\n", "\n", " return np.rad2deg(np.arctan2(np.hypot(num1, num2), denominator))\n", "\n", "# Compute the pairwise angular separations\n", "angular_separations = angular_separation_deg(lon1, lat1, lon2, lat2)\n", "\n", "# Reshape the result into a matrix form\n", "angular_separations_matrix = angular_separations.reshape(n_points, n_points)\n", "\n", "def check_symmetric(a, rtol=1e-05, atol=1e-07):\n", " return 
np.allclose(a, a.T, rtol=rtol, atol=atol)\n", "\n", "print(\"Symmetric?\")\n", "print(check_symmetric(angular_separations_matrix))\n", "print(angular_separations_matrix.shape)" ] }, { "cell_type": "code", "execution_count": 84, "id": "85edd94e-5591-4c7c-8251-b8c976962f72", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "24 40\n", "12 40\n", "76 36\n", "34 30\n", "7 24\n", " ..\n", "221 1\n", "166 1\n", "139 1\n", "176 1\n", "20 1\n", "Length: 311, dtype: int64\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "100%|███████████████████████████████████████| 311/311 [00:00<00:00, 3456.86it/s]" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Max subset with minimum distance: 321\n", "1110\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "\n" ] } ], "source": [ "#HUBBLE_FOV = 0.057\n", "JWST_FOV = 0.0366667\n", "\n", "THRESH = JWST_FOV\n", "\n", "\"\"\"\n", "Initial clustering and filtering using single RA DEC value.\n", "\n", "\"\"\"\n", "\n", "clustering = AgglomerativeClustering(n_clusters=None, metric='precomputed', linkage='single', distance_threshold=THRESH)\n", "labels = clustering.fit_predict(angular_separations_matrix)\n", "\n", "multi_index_df['label'] = labels\n", "\n", "print(pd.Series(labels).value_counts())\n", "\n", "def max_subset_with_min_distance(points, min_distance):\n", " subset = []\n", " for i, row in points.iterrows():\n", " if all(angular_separation_deg(row[RA_NAME], row[DEC_NAME], existing_point[RA_NAME], existing_point[DEC_NAME]) >= min_distance for existing_point in subset):\n", " subset.append(row)\n", " return subset\n", "\n", "all_subsets = []\n", "\n", "for label in tqdm(np.unique(labels)):\n", " cds = multi_index_df[multi_index_df['label'] == label]\n", " subset = max_subset_with_min_distance(cds, THRESH)\n", " all_subsets.extend(subset)\n", "\n", "print(\"Max subset with minimum distance:\", len(all_subsets))\n", "\n", "locations = pd.DataFrame(all_subsets)\n", "\n", "df = pd.merge(df, locations, on=[RA_NAME, DEC_NAME], how='right')\n", "\n", "print(len(df))" ] }, { "cell_type": "code", "execution_count": 107, "id": "55a9d951-3210-4a7f-aca8-a0a80628a8ac", "metadata": {}, "outputs": [], "source": [ "detectors = ['nrca1', 'nrca2', 'nrca3', 'nrca4', 'nrcb1', 'nrcb2', 'nrcb3', 'nrcb4']\n", "obsids_search = []\n", "\n", "for fileset in df['fileSetName']:\n", " for detector in detectors:\n", " obsids_search.append(fileset + \"_\" + detector)" ] }, { "cell_type": "code", "execution_count": 108, "id": "55ae58fc-5c2f-46ec-8fa7-d7229fd61b5f", "metadata": {}, "outputs": [], "source": [ "from astroquery.mast import Observations\n", "\n", "# Query for data with the specified obs_id\n", "result = Observations.query_criteria(obs_id=obsids_search)" ] }, { "cell_type": "code", "execution_count": 120, "id": "5a29aaf9-dd68-4fa2-910a-8d399f4580cd", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "100%|█████████████████████████████████████████| 108/108 [01:07<00:00, 1.60it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "There are 0 unique files, which are 0.0 GB in size.\n" ] }, { "ename": "ValueError", "evalue": "no values provided to stack.", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)", "Input \u001b[0;32mIn [120]\u001b[0m, in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 21\u001b[0m 
files[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mobs_id_short\u001b[39m\u001b[38;5;124m'\u001b[39m] \u001b[38;5;241m=\u001b[39m files[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mobs_id\u001b[39m\u001b[38;5;124m'\u001b[39m]\u001b[38;5;241m.\u001b[39mstr[:\u001b[38;5;241m6\u001b[39m]\n\u001b[1;32m 23\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mThere are \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mlen\u001b[39m(files)\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m unique files, which are \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28msum\u001b[39m(files[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124msize\u001b[39m\u001b[38;5;124m'\u001b[39m])\u001b[38;5;241m/\u001b[39m\u001b[38;5;241m10\u001b[39m\u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39m\u001b[38;5;241m9\u001b[39m\u001b[38;5;132;01m:\u001b[39;00m\u001b[38;5;124m.1f\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m GB in size.\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m---> 25\u001b[0m manifest \u001b[38;5;241m=\u001b[39m \u001b[43mObservations\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdownload_products\u001b[49m\u001b[43m(\u001b[49m\u001b[43mfiles\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mobsID\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcurl_flag\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m)\u001b[49m\n", "File \u001b[0;32m~/opt/anaconda3/lib/python3.9/site-packages/astroquery/mast/observations.py:715\u001b[0m, in \u001b[0;36mObservationsClass.download_products\u001b[0;34m(self, products, download_dir, cache, curl_flag, mrp_only, cloud_only, **filters)\u001b[0m\n\u001b[1;32m 712\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m oid \u001b[38;5;129;01min\u001b[39;00m products:\n\u001b[1;32m 713\u001b[0m product_lists\u001b[38;5;241m.\u001b[39mappend(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mget_product_list(oid))\n\u001b[0;32m--> 715\u001b[0m products \u001b[38;5;241m=\u001b[39m \u001b[43mvstack\u001b[49m\u001b[43m(\u001b[49m\u001b[43mproduct_lists\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 717\u001b[0m \u001b[38;5;66;03m# apply filters\u001b[39;00m\n\u001b[1;32m 718\u001b[0m products \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mfilter_products(products, mrp_only\u001b[38;5;241m=\u001b[39mmrp_only, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mfilters)\n", "File \u001b[0;32m~/opt/anaconda3/lib/python3.9/site-packages/astropy/table/operations.py:677\u001b[0m, in \u001b[0;36mvstack\u001b[0;34m(tables, join_type, metadata_conflicts)\u001b[0m\n\u001b[1;32m 623\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 624\u001b[0m \u001b[38;5;124;03mStack tables vertically (along rows).\u001b[39;00m\n\u001b[1;32m 625\u001b[0m \n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 673\u001b[0m \u001b[38;5;124;03m 6 8\u001b[39;00m\n\u001b[1;32m 674\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 675\u001b[0m _check_join_type(join_type, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mvstack\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m--> 677\u001b[0m tables \u001b[38;5;241m=\u001b[39m \u001b[43m_get_list_of_tables\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtables\u001b[49m\u001b[43m)\u001b[49m \u001b[38;5;66;03m# validates input\u001b[39;00m\n\u001b[1;32m 678\u001b[0m 
\u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(tables) \u001b[38;5;241m==\u001b[39m \u001b[38;5;241m1\u001b[39m:\n\u001b[1;32m 679\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m tables[\u001b[38;5;241m0\u001b[39m] \u001b[38;5;66;03m# no point in stacking a single table\u001b[39;00m\n", "File \u001b[0;32m~/opt/anaconda3/lib/python3.9/site-packages/astropy/table/operations.py:60\u001b[0m, in \u001b[0;36m_get_list_of_tables\u001b[0;34m(tables)\u001b[0m\n\u001b[1;32m 58\u001b[0m \u001b[38;5;66;03m# Make sure there is something to stack\u001b[39;00m\n\u001b[1;32m 59\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(tables) \u001b[38;5;241m==\u001b[39m \u001b[38;5;241m0\u001b[39m:\n\u001b[0;32m---> 60\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mno values provided to stack.\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 62\u001b[0m \u001b[38;5;66;03m# Convert inputs (Table, Row, or anything column-like) to Tables.\u001b[39;00m\n\u001b[1;32m 63\u001b[0m \u001b[38;5;66;03m# Special case that Quantity converts to a QTable.\u001b[39;00m\n\u001b[1;32m 64\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m ii, val \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28menumerate\u001b[39m(tables):\n", "\u001b[0;31mValueError\u001b[0m: no values provided to stack." ] } ], "source": [ "from astropy.table import unique, vstack, Table\n", "\n", "\"\"\"\n", "Call API that gives the file URLs for each observations.\n", "\n", "\"\"\"\n", "\n", "matched_obs = result\n", "\n", "# Split the observations into \"chunks\" of size five\n", "sz_chunk = 5\n", "chunks = [matched_obs[i:i+sz_chunk] for i in range(0,len(matched_obs), sz_chunk)]\n", "\n", "# Get the list of products for each chunk\n", "t = []\n", "for chunk in tqdm(chunks):\n", " t.append(Observations.get_product_list(chunk))\n", "\n", "files = unique(vstack(t), keys='productFilename')\n", "files = files.to_pandas()\n", "\n", "# Ensure we only keep raw data files\n", "files = files[files['productSubGroupDescription'] == 'UNCAL']\n", "\n", "# Create a shortened identified\n", "files['obs_id_short'] = files['obs_id'].str[:6]\n", "\n", "print(f\"There are {len(files)} unique files, which are {sum(files['size'])/10**9:.1f} GB in size.\")\n", "\n", "manifest = Observations.download_products(files['obsID'], curl_flag=True)" ] }, { "cell_type": "code", "execution_count": null, "id": "b2307af8-d6ea-4969-8b8a-4d42f21d674f", "metadata": {}, "outputs": [], "source": [ "files[['obsID']].to_csv('list_of_hubble_filenames.csv')" ] }, { "cell_type": "code", "execution_count": null, "id": "ab5d1fd5-502f-4b94-a8f1-23636043f779", "metadata": {}, "outputs": [], "source": [ "manifest = Observations.download_products(files['obsID'], curl_flag=True)\n", "# Creates .sh scripts that have to then be run to actually download data" ] }, { "cell_type": "code", "execution_count": null, "id": "556fdab2-517e-466c-a9ee-c7b629df5276", "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "from astropy.io import fits\n", "from astropy.table import Table\n", "import glob\n", "\n", "\"\"\"\n", "This code allows us to combine multiple exposures of the same observation into one FITS file.\n", "\n", "This code was NOT used; we only used a single exposure for our dataset.\n", "\"\"\"\n", "\n", "def create_combined_hubble_file(short_obs_id):\n", " \n", " file_list = list(files[files['obs_id_short'] == short_obs_id]['productFilename'])\n", " \n", " ccd1_data = []\n", " 
{ "cell_type": "code", "execution_count": null, "id": "556fdab2-517e-466c-a9ee-c7b629df5276", "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "from astropy.io import fits\n", "from astropy.table import Table\n", "import glob\n", "\n", "\"\"\"\n", "Combines multiple exposures of the same observation into one FITS file.\n", "\n", "This code was NOT used; we only kept a single exposure for our dataset. It also\n", "assumes the HST-style layout (CCDCHIP keyword, science HDUs at indices 1 and 4),\n", "hence the 'hubble' in the name.\n", "\"\"\"\n", "\n", "def create_combined_hubble_file(short_obs_id):\n", "\n", "    file_list = list(files[files['obs_id_short'] == short_obs_id]['productFilename'])\n", "\n", "    ccd1_data = []\n", "    ccd2_data = []\n", "    ccd1_times = []\n", "    ccd2_times = []\n", "    ccd1_headers = []\n", "    ccd2_headers = []\n", "\n", "    for file in file_list:\n", "        with fits.open(file) as hdul:\n", "            # Extract TIME-OBS from the primary header\n", "            time_obs = hdul[0].header['TIME-OBS']\n", "\n", "            # Extract data and headers from the science HDUs\n", "            for hdu in [hdul[1], hdul[4]]:\n", "                if hdu.header['CCDCHIP'] == 1:\n", "                    ccd1_data.append(hdu.data)\n", "                    ccd1_times.append(time_obs)\n", "                    ccd1_headers.append(hdul[0].header.copy())\n", "                elif hdu.header['CCDCHIP'] == 2:\n", "                    ccd2_data.append(hdu.data)\n", "                    ccd2_times.append(time_obs)\n", "                    ccd2_headers.append(hdul[0].header.copy())\n", "\n", "    # Sort the data by TIME-OBS (sort on the key only, so ties never compare numpy arrays)\n", "    ccd1_times, ccd1_data, ccd1_headers = zip(*sorted(zip(ccd1_times, ccd1_data, ccd1_headers), key=lambda t: t[0]))\n", "    ccd2_times, ccd2_data, ccd2_headers = zip(*sorted(zip(ccd2_times, ccd2_data, ccd2_headers), key=lambda t: t[0]))\n", "\n", "    # Stack the exposures for each CCDCHIP\n", "    ccd1_concat = np.stack(ccd1_data)\n", "    ccd2_concat = np.stack(ccd2_data)\n", "\n", "    # Create a new FITS file for a given CCDCHIP\n", "    def create_fits_file(output_file, ccd_data, ccd_headers, ccd_chip):\n", "\n", "        primary_hdu = fits.PrimaryHDU()\n", "        primary_hdu.header['EXTEND'] = True\n", "        primary_hdu.header['CCDCHIP'] = ccd_chip\n", "\n", "        metadata_hdus = [fits.ImageHDU(header=header) for header in ccd_headers]\n", "\n", "        # Create an ImageHDU with the stacked data\n", "        image_hdu = fits.ImageHDU(data=ccd_data, header=fits.Header([('CCDCHIP', ccd_chip)]))\n", "\n", "        # Create an HDUList and write it to a new FITS file\n", "        hdulist = fits.HDUList([primary_hdu] + [image_hdu] + metadata_hdus)\n", "        hdulist.writeto(output_file, overwrite=True)\n", "\n", "    # Create FITS files for CCDCHIP 1 and 2\n", "    create_fits_file(f'{short_obs_id}_ccd1.fits', ccd1_concat, ccd1_headers, 1)\n", "    create_fits_file(f'{short_obs_id}_ccd2.fits', ccd2_concat, ccd2_headers, 2)\n", "\n", "    print(\"New FITS files created successfully.\")" ] },
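{ "cell_type": "code", "execution_count": null, "id": "f2b6e9d3", "metadata": {}, "outputs": [], "source": [ "# Hypothetical driver for the (unused) combiner above: one call per shortened\n", "# observation ID present in the product table. Left commented out, matching the\n", "# note that we only kept a single exposure per observation.\n", "# for short_id in files['obs_id_short'].unique():\n", "#     create_combined_hubble_file(short_id)" ] },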
{ "cell_type": "code", "execution_count": null, "id": "d09be044-ce2e-4cb5-ae1f-9562f9ac6fa3", "metadata": {}, "outputs": [], "source": [ "def visualize_label(df, LABEL=None):\n", "\n", "    cds = df\n", "\n", "    if LABEL is not None:\n", "        cds = cds[labels == LABEL]\n", "\n", "    ras, decs = [], []\n", "\n", "    print(len(cds))\n", "\n", "    for i, cd in cds.iterrows():\n", "        ras.append(cd[RA_NAME])\n", "        decs.append(cd[DEC_NAME])\n", "\n", "    if LABEL is None:\n", "        # No gridlines; they would be overwhelming for the full sky\n", "\n", "        # Number of bins for x and y; adjust these based on your dataset\n", "        bins = (30, 30)\n", "\n", "        # Compute the 2D histogram\n", "        hist, xedges, yedges = np.histogram2d(ras, decs, bins=bins)\n", "\n", "        N_cutoff = 50\n", "        hist = np.clip(hist, 0, N_cutoff)\n", "\n", "        # Generate a 2D histogram plot\n", "        plt.figure(figsize=(8, 6))\n", "        plt.imshow(hist, interpolation='nearest', origin='lower',\n", "                   extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]],\n", "                   cmap='viridis')  # Choose a colormap (e.g., 'viridis', 'plasma', 'inferno')\n", "\n", "        # Add labels and a title\n", "        plt.colorbar(label='Number of points in bin')\n", "        plt.xlabel('RA')\n", "        plt.ylabel('DEC')\n", "        plt.title(f'2D Histogram of Point Density, clipped to {N_cutoff} points')\n", "\n", "        # Show the plot\n", "        plt.show()\n", "\n", "        return\n", "    else:\n", "        # One grid cell per FOV-sized threshold, to eyeball potential overlaps\n", "        fig = plt.figure()\n", "        ax = fig.gca()\n", "        ax.set_xticks(np.arange(np.min(ras), np.max(ras), THRESH))\n", "        ax.set_yticks(np.arange(np.min(decs), np.max(decs), THRESH))\n", "        plt.scatter(ras, decs, alpha=0.1)\n", "        plt.grid()\n", "        plt.show()\n", "\n", "visualize_label(df, 7)" ] },
{ "cell_type": "code", "execution_count": null, "id": "35a648ab-07cd-409e-be32-125ead927bdf", "metadata": {}, "outputs": [], "source": [ "from sklearn.model_selection import train_test_split\n", "\n", "data = list(range(len(labels)))\n", "\n", "# Perform the 80-20 train-test split at the cluster level, so that overlapping\n", "# images from the same sky region never end up in both train and test\n", "train_indices, test_indices = train_test_split(data, test_size=0.2, random_state=42)" ] },
{ "cell_type": "code", "execution_count": null, "id": "8f6a0472-b8fc-4989-a506-39019914c853", "metadata": {}, "outputs": [], "source": [ "# all_sdss_data is the per-image table with a 'cluster_label' column\n", "# (the name is a holdover from the SDSS version of this pipeline)\n", "sdss_test = all_sdss_data[all_sdss_data['cluster_label'].isin(test_indices)]\n", "sdss_train = all_sdss_data[all_sdss_data['cluster_label'].isin(train_indices)]" ] },
{ "cell_type": "code", "execution_count": null, "id": "bfbe8686-e580-4285-bd8f-43154d18182b", "metadata": {}, "outputs": [], "source": [ "len(sdss_test)" ] },
{ "cell_type": "code", "execution_count": null, "id": "cacb74dd-0d54-43d7-a301-ea4eb7b3e07e", "metadata": {}, "outputs": [], "source": [ "len(sdss_train)" ] },
{ "cell_type": "code", "execution_count": null, "id": "f0fb157d-1307-42d9-ac57-7e6aa3cecd84", "metadata": {}, "outputs": [], "source": [ "sdss_test" ] },
{ "cell_type": "code", "execution_count": null, "id": "62fb160e-74e5-4f50-96ee-109a6261370f", "metadata": {}, "outputs": [], "source": [ "test_data = pd.DataFrame(test_data)\n", "train_data = pd.DataFrame(train_data)" ] },
{ "cell_type": "code", "execution_count": null, "id": "e7b1379c-d5a9-4f3a-ac68-e8b5a46f4c8a", "metadata": {}, "outputs": [], "source": [ "\"\"\"\n", "Code to check for train/test contamination.\n", "Prints pairs of test and train images that are close enough on the sky to overlap.\n", "\"\"\"\n", "\n", "import json\n", "import numpy as np\n", "import matplotlib.pyplot as plt\n", "\n", "\n", "# Function to load a JSONL split from disk\n", "def load_data(file_path):\n", "    data = []\n", "    with open(file_path, 'r') as f:\n", "        for line in f:\n", "            data.append(json.loads(line))\n", "    return data\n", "\n", "# Load data (or use the in-memory splits from above)\n", "train_file_path = '/Users/rithwik/Desktop/full_train.jsonl.txt'\n", "test_file_path = '/Users/rithwik/Desktop/full_test.jsonl.txt'\n", "#train_data = sdss_train\n", "#test_data = sdss_test\n", "\n", "# Define the separation threshold (degrees)\n", "threshold = 0.09 * 3\n", "\n", "# Find test rows whose minimum great-circle distance to any train row is below the threshold\n", "close_pairs = []\n", "\n", "for i, test_point in test_data.iterrows():\n", "    ra_test, dec_test = test_point['ra'], test_point['dec']\n", "    distances = [(train_point, angular_separation_deg(ra_test, dec_test, train_point['ra'], train_point['dec'])) for _, train_point in train_data.iterrows()]\n", "    closest_train_point, min_distance = min(distances, key=lambda x: x[1])\n", "    if min_distance < threshold:\n", "        close_pairs.append((test_point, closest_train_point, min_distance))\n", "\n", "close_pairs_summary = [\n", "    {\n", "        \"test_image_id\": test_point['image_id'],\n", "        \"test_ra\": test_point['ra'],\n", "        \"test_dec\": test_point['dec'],\n", "        \"train_image_id\": closest_train_point['image_id'],\n", "        \"train_ra\": closest_train_point['ra'],\n", "        \"train_dec\": closest_train_point['dec'],\n", "        \"min_distance_deg\": min_distance\n", "    }\n", "    for test_point, closest_train_point, min_distance in close_pairs\n", "]\n", "\n", "# Print the results\n", "result = \"Success\"\n",
"for pair in close_pairs_summary:\n", " print(pair)\n", " result = \"FAIL\"\n", " \n", "print(f\"Done. result is {result}\")" ] }, { "cell_type": "code", "execution_count": null, "id": "93cf2978-0799-4d6c-93ec-921f4f41e10c", "metadata": {}, "outputs": [], "source": [ "sdss_test.to_json('full_test1.jsonl', orient='records', lines=True)\n", "sdss_train.to_json('full_train1.jsonl', orient='records', lines=True)" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.13" } }, "nbformat": 4, "nbformat_minor": 5 }