{ "cells": [ { "cell_type": "code", "execution_count": 16, "metadata": {}, "outputs": [], "source": [ "import pandas as pd\n", "import wandb\n", "\n", "api = wandb.Api()\n", "\n", "# Project is specified by \n", "runs = api.runs(\"jameswburton18/OrientalMuesumImage\")\n", "\n", "summary_list, config_list, name_list = [], [], []\n", "for run in runs:\n", " # .summary contains the output keys/values for metrics like accuracy.\n", " # We call ._json_dict to omit large files\n", " summary_list.append(run.summary._json_dict)\n", "\n", " # .config contains the hyperparameters.\n", " # We remove special values that start with _.\n", " config_list.append({k: v for k, v in run.config.items() if not k.startswith(\"_\")})\n", "\n", " # .name is the human-readable name of the run.\n", " name_list.append(run.name)\n", "\n", "runs_df = pd.DataFrame({\"summary\": summary_list, \"config\": config_list, \"name\": name_list})" ] }, { "cell_type": "code", "execution_count": 17, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
summaryconfigname
0{'test_avg_plus_3Dx4_w0.0/f1_micro': 0.5605381...{'my_args/lr': 5e-05, 'my_args/fp16': True, 'm...valiant-sky-376
1{'test_steps_per_second': 0.804, 'test_avg_plu...{'my_args/lr': 5e-05, 'my_args/fp16': True, 'm...sweet-fog-375
2{'test_avg_plus_3Dx4_w0.75/f1_weighted': 0.537...{'my_args/lr': 5e-05, 'my_args/fp16': False, '...lively-lion-374
3{'eval/accuracy': 0.4373990306946688, 'test_av...{'bf16': False, 'fp16': False, 'fsdp': [], 'se...peachy-tree-373
4{'eval/accuracy': 0.33037156704361875, 'eval/f...{'bf16': False, 'fp16': True, 'fsdp': [], 'see...upbeat-sound-372
............
303{'test_precision_macro': 0.1243382563150882, '...{'bf16': False, 'fp16': False, 'fsdp': [], 'se...crimson-surf-33
304{'_runtime': 4879.275132894516, 'eval/accuracy...{'bf16': False, 'fp16': False, 'fsdp': [], 'se...eternal-silence-32
305{'eval/recall_macro': 0.6267291174075081, 'tes...{'bf16': False, 'fp16': False, 'fsdp': [], 'se...leafy-dawn-31
306{'eval/accuracy': 0.11090675004598124, 'train/...{'bf16': False, 'fp16': False, 'fsdp': [], 'se...peach-morning-30
307{'eval/samples_per_second': 116.434, 'train/tr...{'bf16': False, 'fp16': False, 'fsdp': [], 'se...generous-breeze-29
\n", "

308 rows × 3 columns

\n", "
" ], "text/plain": [ " summary \\\n", "0 {'test_avg_plus_3Dx4_w0.0/f1_micro': 0.5605381... \n", "1 {'test_steps_per_second': 0.804, 'test_avg_plu... \n", "2 {'test_avg_plus_3Dx4_w0.75/f1_weighted': 0.537... \n", "3 {'eval/accuracy': 0.4373990306946688, 'test_av... \n", "4 {'eval/accuracy': 0.33037156704361875, 'eval/f... \n", ".. ... \n", "303 {'test_precision_macro': 0.1243382563150882, '... \n", "304 {'_runtime': 4879.275132894516, 'eval/accuracy... \n", "305 {'eval/recall_macro': 0.6267291174075081, 'tes... \n", "306 {'eval/accuracy': 0.11090675004598124, 'train/... \n", "307 {'eval/samples_per_second': 116.434, 'train/tr... \n", "\n", " config name \n", "0 {'my_args/lr': 5e-05, 'my_args/fp16': True, 'm... valiant-sky-376 \n", "1 {'my_args/lr': 5e-05, 'my_args/fp16': True, 'm... sweet-fog-375 \n", "2 {'my_args/lr': 5e-05, 'my_args/fp16': False, '... lively-lion-374 \n", "3 {'bf16': False, 'fp16': False, 'fsdp': [], 'se... peachy-tree-373 \n", "4 {'bf16': False, 'fp16': True, 'fsdp': [], 'see... upbeat-sound-372 \n", ".. ... ... \n", "303 {'bf16': False, 'fp16': False, 'fsdp': [], 'se... crimson-surf-33 \n", "304 {'bf16': False, 'fp16': False, 'fsdp': [], 'se... eternal-silence-32 \n", "305 {'bf16': False, 'fp16': False, 'fsdp': [], 'se... leafy-dawn-31 \n", "306 {'bf16': False, 'fp16': False, 'fsdp': [], 'se... peach-morning-30 \n", "307 {'bf16': False, 'fp16': False, 'fsdp': [], 'se... generous-breeze-29 \n", "\n", "[308 rows x 3 columns]" ] }, "execution_count": 17, "metadata": {}, "output_type": "execute_result" } ], "source": [ "runs_df" ] }, { "cell_type": "code", "execution_count": 18, "metadata": {}, "outputs": [], "source": [ "ds_to_technique = {\n", " \"james-burton/OrientalMuseum-3Dwhite-1frame\": \"white+3Dx1\",\n", " \"james-burton/OrientalMuseum-3Dwhite\": \"white+3Dx4\",\n", " \"james-burton/OrientalMuseum-white\": \"white\",\n", " # \"james-burton/OrientalMuseum\": \"normal\",\n", "}\n", "\n", "\n", "metrics = [\n", " \"accuracy\",\n", " \"top3_accuracy\",\n", " \"top5_accuracy\",\n", " \"top10_accuracy\",\n", " \"f1_weighted\",\n", " \"precision_weighted\",\n", " \"recall_weighted\",\n", " \"f1_macro\",\n", " \"precision_macro\",\n", " \"recall_macro\",\n", " \"f1_micro\",\n", " \"precision_micro\",\n", " \"recall_micro\",\n", " # \"avg_images_per_item\",\n", "]\n", "\n", "\n", "def get_pretrained(x):\n", " if \"pretrn\" in x:\n", " return \"Yes\"\n", " else:\n", " return \"No\"\n", "\n", "\n", "def get_filtered_df(\n", " runs_df,\n", " exp_type,\n", " exclude_classes_less_than_x_items=None,\n", " label_col=None,\n", " dataset=None,\n", " metrics=metrics,\n", "):\n", " new_df = pd.DataFrame()\n", " new_df[\"run_num\"] = runs_df[\"name\"].apply(lambda x: int(x.split(\"-\")[-1]))\n", " new_df[\"config\"] = runs_df[\"config\"].apply(lambda x: x.get(\"my_args/config\", None))\n", " new_df[\"exclude classes less than x items\"] = runs_df[\"config\"].apply(\n", " lambda x: x.get(\"my_args/lower_lim\", None)\n", " )\n", " new_df[\"label_col\"] = runs_df[\"config\"].apply(lambda x: x.get(\"my_args/label_col\", None))\n", " new_df[\"BM pretrain\"] = new_df[\"config\"].map(get_pretrained)\n", " new_df[\"Train data\"] = runs_df[\"config\"].apply(lambda x: x.get(\"my_args/dataset\", None))\n", " new_df[\"Train data\"] = new_df[\"Train data\"].apply(lambda x: ds_to_technique.get(x, None))\n", " for metric in metrics:\n", " if exp_type == \"test_\" and metric in [\"avg_images_per_item\"]:\n", " continue\n", " new_df[metric] = runs_df[\"summary\"].apply(lambda x: x.get(exp_type + metric, None))\n", " # set to 2 decimal places\n", " new_df[metric] = new_df[metric].apply(lambda x: round(x, 3) if x is not None else None)\n", " new_df = new_df.dropna()\n", " max_run_nums = new_df.groupby(\"config\")[\"run_num\"].idxmax()\n", " filtered_df = new_df.loc[max_run_nums]\n", " if exclude_classes_less_than_x_items is not None:\n", " filtered_df = filtered_df[\n", " filtered_df[\"exclude classes less than x items\"] == exclude_classes_less_than_x_items\n", " ]\n", " filtered_df = filtered_df.drop(columns=[\"exclude classes less than x items\"])\n", " if dataset is not None:\n", " filtered_df = filtered_df[filtered_df[\"Train data\"] == dataset]\n", " filtered_df = filtered_df.drop(columns=[\"Train data\"])\n", " if label_col is not None:\n", " filtered_df = filtered_df[filtered_df[\"label_col\"] == label_col]\n", " filtered_df = filtered_df.drop(columns=[\"label_col\"])\n", " filtered_df.drop(columns=[\"run_num\"], inplace=True)\n", " filtered_df.drop(\n", " columns=[\n", " \"f1_macro\",\n", " \"precision_macro\",\n", " \"recall_macro\",\n", " \"f1_micro\",\n", " \"precision_micro\",\n", " \"recall_micro\",\n", " ],\n", " inplace=True,\n", " )\n", " filtered_df.rename(\n", " columns={\n", " \"accuracy\": \"Acc.\",\n", " \"top3_accuracy\": \"Top 3 Acc.\",\n", " \"top5_accuracy\": \"Top 5 Acc.\",\n", " \"top10_accuracy\": \"Top 10 Acc.\",\n", " \"f1_weighted\": \"F1\",\n", " \"precision_weighted\": \"Precision\",\n", " \"recall_weighted\": \"Recall\",\n", " },\n", " inplace=True,\n", " )\n", " return filtered_df" ] }, { "cell_type": "code", "execution_count": 19, "metadata": {}, "outputs": [], "source": [ "exp_types = (\n", " [\n", " # \"test_\", # Normal\n", " \"test_avg/\", # Averaged over each item\n", " ]\n", " + [f\"test_avg_max{i}/\" for i in range(1, 6)] # Averaged over each item, max i images per item\n", " + [\n", " f\"test_avg_plus_3Dx4_w{w}/\" for w in [0.0, 0.25, 0.5, 0.75, 1.0]\n", " ] # Averaged over each item, plus weighted 3D predictions\n", " + [\n", " f\"test_avg_max1_plus_3Dx4_w{w}/\" for w in [0.0, 0.25, 0.5, 0.75, 1.0]\n", " ] # Averaged over each item, max 1 images per item, plus weighted 3D predictions\n", ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Predict object\n", "Given an image of an object we have seen before, but from a different angle, can we predict what that object is?" ] }, { "cell_type": "code", "execution_count": 20, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
configBM pretrainTrain dataAcc.Top 3 Acc.Top 5 Acc.Top 10 Acc.F1PrecisionRecall
146om6-3Dwhite-1frame_numNowhite+3Dx10.8200.8930.9140.9360.7940.8030.820
48om6-3Dwhite-1frame_num_bm-pretrnYeswhite+3Dx10.8010.8800.9000.9250.7740.7870.801
217om6-3Dwhite_numNowhite+3Dx40.8260.8980.9180.9340.8000.8110.826
51om6-3Dwhite_num_bm-pretrnYeswhite+3Dx40.8220.8840.9110.9370.7980.8120.822
239om6-white_numNowhite0.8280.8980.9180.9410.8040.8130.828
54om6-white_num_bm-pretrnYeswhite0.8190.8810.9010.9250.7910.8000.819
\n", "
" ], "text/plain": [ " config BM pretrain Train data Acc. \\\n", "146 om6-3Dwhite-1frame_num No white+3Dx1 0.820 \n", "48 om6-3Dwhite-1frame_num_bm-pretrn Yes white+3Dx1 0.801 \n", "217 om6-3Dwhite_num No white+3Dx4 0.826 \n", "51 om6-3Dwhite_num_bm-pretrn Yes white+3Dx4 0.822 \n", "239 om6-white_num No white 0.828 \n", "54 om6-white_num_bm-pretrn Yes white 0.819 \n", "\n", " Top 3 Acc. Top 5 Acc. Top 10 Acc. F1 Precision Recall \n", "146 0.893 0.914 0.936 0.794 0.803 0.820 \n", "48 0.880 0.900 0.925 0.774 0.787 0.801 \n", "217 0.898 0.918 0.934 0.800 0.811 0.826 \n", "51 0.884 0.911 0.937 0.798 0.812 0.822 \n", "239 0.898 0.918 0.941 0.804 0.813 0.828 \n", "54 0.881 0.901 0.925 0.791 0.800 0.819 " ] }, "execution_count": 20, "metadata": {}, "output_type": "execute_result" } ], "source": [ "get_filtered_df(runs_df, \"test_\", exclude_classes_less_than_x_items=6, label_col=\"obj_num\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Predict material\n", "Given one or more images from an object we have never seen before, can we predict what that object is made from?" ] }, { "cell_type": "code", "execution_count": 21, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
BM pretrainTrain dataTest time methodAcc.Top 3 Acc.Top 5 Acc.Top 10 Acc.F1PrecisionRecall
117Nowhiteavg0.6320.8170.8780.9270.6060.5980.632
85avg+3D0.6320.8110.8690.9240.6020.5930.632
114white+3Dx1avg0.6150.8170.8780.9290.5930.6170.615
81avg+3D0.6200.8180.8800.9280.5970.6230.620
115white+3Dx4avg0.6280.8190.8790.9330.6040.6110.628
83avg+3D0.6240.8190.8800.9330.5970.5980.624
53Yeswhiteavg0.6410.8240.8860.9340.6140.6100.641
26avg+3D0.6440.8340.8850.9380.6170.6180.644
47white+3Dx1avg0.6220.8180.8800.9350.5930.5900.622
21avg+3D0.6170.8130.8790.9320.5860.5840.617
50white+3Dx4avg0.6400.8240.8790.9360.6150.6150.640
23avg+3D0.6480.8270.8830.9400.6220.6220.648
\n", "
" ], "text/plain": [ " BM pretrain Train data Test time method Acc. Top 3 Acc. Top 5 Acc. \\\n", "117 No white avg 0.632 0.817 0.878 \n", "85 avg+3D 0.632 0.811 0.869 \n", "114 white+3Dx1 avg 0.615 0.817 0.878 \n", "81 avg+3D 0.620 0.818 0.880 \n", "115 white+3Dx4 avg 0.628 0.819 0.879 \n", "83 avg+3D 0.624 0.819 0.880 \n", "53 Yes white avg 0.641 0.824 0.886 \n", "26 avg+3D 0.644 0.834 0.885 \n", "47 white+3Dx1 avg 0.622 0.818 0.880 \n", "21 avg+3D 0.617 0.813 0.879 \n", "50 white+3Dx4 avg 0.640 0.824 0.879 \n", "23 avg+3D 0.648 0.827 0.883 \n", "\n", " Top 10 Acc. F1 Precision Recall \n", "117 0.927 0.606 0.598 0.632 \n", "85 0.924 0.602 0.593 0.632 \n", "114 0.929 0.593 0.617 0.615 \n", "81 0.928 0.597 0.623 0.620 \n", "115 0.933 0.604 0.611 0.628 \n", "83 0.933 0.597 0.598 0.624 \n", "53 0.934 0.614 0.610 0.641 \n", "26 0.938 0.617 0.618 0.644 \n", "47 0.935 0.593 0.590 0.622 \n", "21 0.932 0.586 0.584 0.617 \n", "50 0.936 0.615 0.615 0.640 \n", "23 0.940 0.622 0.622 0.648 " ] }, "execution_count": 21, "metadata": {}, "output_type": "execute_result" } ], "source": [ "for max1 in [\"_max1\", \"\"]:\n", " for x in [3, 4, 5, 6]:\n", "\n", " material_x = get_filtered_df(\n", " runs_df, f\"test_avg{max1}/\", exclude_classes_less_than_x_items=x, label_col=\"material\"\n", " )\n", " material_x[\"Test time method\"] = \"avg\"\n", " material_x_plus3Ds = []\n", " for w in [0.75]: # [0.25, 0.5, 0.75]:\n", " df = get_filtered_df(\n", " runs_df,\n", " f\"test_avg{max1}_plus_3Dx4_w{w}/\",\n", " exclude_classes_less_than_x_items=x,\n", " label_col=\"material\",\n", " )\n", " # df[\"Test time method\"] = f\"average_w{w}_plus_3D_w{1-w}\"\n", " df[\"Test time method\"] = f\"avg+3D\"\n", " material_x_plus3Ds.append(df)\n", "\n", " material_x_plus3Ds = pd.concat([material_x, pd.concat(material_x_plus3Ds)])\n", "\n", " cols = [\"BM pretrain\", \"Train data\", \"Test time method\"]\n", " other_cols = [col for col in material_x_plus3Ds.columns if col not in cols]\n", " material_x_plus3Ds = material_x_plus3Ds[cols + other_cols]\n", "\n", " material_x_plus3Ds.sort_values(\n", " by=[\"BM pretrain\", \"Train data\", \"Test time method\"], inplace=True\n", " )\n", "\n", " # # Assuming your DataFrame is named 'df'\n", " # # Create a copy to avoid modifying the original DataFrame\n", " df_new = material_x_plus3Ds.copy()\n", "\n", " # For column 'BM Pretrain'\n", " df_new[\"BM pretrain\"] = df_new[\"BM pretrain\"].where(\n", " df_new[\"BM pretrain\"] != df_new[\"BM pretrain\"].shift(1), \"\"\n", " )\n", "\n", " # For column 'Train data'\n", " df_new[\"Train data\"] = df_new[\"Train data\"].where(\n", " df_new[\"Train data\"] != df_new[\"Train data\"].shift(1), \"\"\n", " )\n", " df_new.drop(columns=[\"config\"], inplace=True)\n", " df_new.to_csv(f\"material_min{x}{max1}.csv\", index=False)\n", "df_new" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### If we have only have one/two/three/four images" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "x = 3\n", "material_x = get_filtered_df(\n", " runs_df, \"test_avg_max1/\", exclude_classes_less_than_x_items=x, label_col=\"material\"\n", ")\n", "material_x[\"test_time_technique\"] = \"average\"\n", "material_x_plus3Ds = []\n", "for w in [0.25, 0.5, 0.75]:\n", " df = get_filtered_df(\n", " runs_df,\n", " f\"test_avg_max1_plus_3Dx4_w{w}/\",\n", " exclude_classes_less_than_x_items=x,\n", " label_col=\"material\",\n", " )\n", " df[\"test_time_technique\"] = f\"average_w{w}_plus_3D_w{1-w}\"\n", " material_x_plus3Ds.append(df)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### If we have only have one/two/three/four images" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "One" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
datasetaccuracytop3_accuracytop5_accuracytop10_accuracyf1_weightedprecision_weightedrecall_weightedf1_macroprecision_macrorecall_macrof1_microprecision_microrecall_microavg_images_per_item
43white3D-1frame0.6040.7740.8400.9150.5800.5810.6040.3210.3450.3260.6040.6040.6041.0
45white3D-4frames0.5930.7920.8490.9110.5650.5630.5930.2980.3120.3120.5930.5930.5931.0
47white0.5650.7420.8130.8870.5360.5690.5650.3280.3430.3490.5650.5650.5651.0
49normal0.6830.8140.8740.9260.6680.6680.6830.3850.4060.3960.6830.6830.6831.0
\n", "
" ], "text/plain": [ " dataset accuracy top3_accuracy top5_accuracy top10_accuracy \\\n", "43 white3D-1frame 0.604 0.774 0.840 0.915 \n", "45 white3D-4frames 0.593 0.792 0.849 0.911 \n", "47 white 0.565 0.742 0.813 0.887 \n", "49 normal 0.683 0.814 0.874 0.926 \n", "\n", " f1_weighted precision_weighted recall_weighted f1_macro \\\n", "43 0.580 0.581 0.604 0.321 \n", "45 0.565 0.563 0.593 0.298 \n", "47 0.536 0.569 0.565 0.328 \n", "49 0.668 0.668 0.683 0.385 \n", "\n", " precision_macro recall_macro f1_micro precision_micro recall_micro \\\n", "43 0.345 0.326 0.604 0.604 0.604 \n", "45 0.312 0.312 0.593 0.593 0.593 \n", "47 0.343 0.349 0.565 0.565 0.565 \n", "49 0.406 0.396 0.683 0.683 0.683 \n", "\n", " avg_images_per_item \n", "43 1.0 \n", "45 1.0 \n", "47 1.0 \n", "49 1.0 " ] }, "execution_count": 8, "metadata": {}, "output_type": "execute_result" } ], "source": [ "get_filtered_df(\n", " runs_df, \"test_avg_max1/\", exclude_classes_less_than_x_items=5, label_col=\"material\"\n", ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Two" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
datasetaccuracytop3_accuracytop5_accuracytop10_accuracyf1_weightedprecision_weightedrecall_weightedf1_macroprecision_macrorecall_macrof1_microprecision_microrecall_microavg_images_per_item
43white3D-1frame0.6300.7980.8620.9190.6020.6030.6300.3370.3640.3400.6300.6300.6301.534
45white3D-4frames0.6240.8110.8650.9210.5930.5970.6240.3200.3470.3330.6240.6240.6241.534
47white0.5970.7670.8350.9060.5630.5890.5970.3590.3790.3810.5970.5970.5971.534
49normal0.7070.8350.8920.9400.6920.6960.7070.4280.4640.4360.7070.7070.7071.534
\n", "
" ], "text/plain": [ " dataset accuracy top3_accuracy top5_accuracy top10_accuracy \\\n", "43 white3D-1frame 0.630 0.798 0.862 0.919 \n", "45 white3D-4frames 0.624 0.811 0.865 0.921 \n", "47 white 0.597 0.767 0.835 0.906 \n", "49 normal 0.707 0.835 0.892 0.940 \n", "\n", " f1_weighted precision_weighted recall_weighted f1_macro \\\n", "43 0.602 0.603 0.630 0.337 \n", "45 0.593 0.597 0.624 0.320 \n", "47 0.563 0.589 0.597 0.359 \n", "49 0.692 0.696 0.707 0.428 \n", "\n", " precision_macro recall_macro f1_micro precision_micro recall_micro \\\n", "43 0.364 0.340 0.630 0.630 0.630 \n", "45 0.347 0.333 0.624 0.624 0.624 \n", "47 0.379 0.381 0.597 0.597 0.597 \n", "49 0.464 0.436 0.707 0.707 0.707 \n", "\n", " avg_images_per_item \n", "43 1.534 \n", "45 1.534 \n", "47 1.534 \n", "49 1.534 " ] }, "execution_count": 9, "metadata": {}, "output_type": "execute_result" } ], "source": [ "get_filtered_df(\n", " runs_df, \"test_avg_max2/\", exclude_classes_less_than_x_items=5, label_col=\"material\"\n", ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Three" ] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
datasetaccuracytop3_accuracytop5_accuracytop10_accuracyf1_weightedprecision_weightedrecall_weightedf1_macroprecision_macrorecall_macrof1_microprecision_microrecall_microavg_images_per_item
43white3D-1frame0.6290.8030.8660.9220.6010.6030.6290.3370.3620.3410.6290.6290.6291.838
45white3D-4frames0.6310.8140.8700.9230.5980.6020.6310.3140.3430.3230.6310.6310.6311.838
47white0.6030.7710.8390.9100.5680.6100.6030.3650.3800.3850.6030.6030.6031.838
49normal0.7070.8360.8920.9440.6910.6940.7070.4280.4610.4390.7070.7070.7071.838
\n", "
" ], "text/plain": [ " dataset accuracy top3_accuracy top5_accuracy top10_accuracy \\\n", "43 white3D-1frame 0.629 0.803 0.866 0.922 \n", "45 white3D-4frames 0.631 0.814 0.870 0.923 \n", "47 white 0.603 0.771 0.839 0.910 \n", "49 normal 0.707 0.836 0.892 0.944 \n", "\n", " f1_weighted precision_weighted recall_weighted f1_macro \\\n", "43 0.601 0.603 0.629 0.337 \n", "45 0.598 0.602 0.631 0.314 \n", "47 0.568 0.610 0.603 0.365 \n", "49 0.691 0.694 0.707 0.428 \n", "\n", " precision_macro recall_macro f1_micro precision_micro recall_micro \\\n", "43 0.362 0.341 0.629 0.629 0.629 \n", "45 0.343 0.323 0.631 0.631 0.631 \n", "47 0.380 0.385 0.603 0.603 0.603 \n", "49 0.461 0.439 0.707 0.707 0.707 \n", "\n", " avg_images_per_item \n", "43 1.838 \n", "45 1.838 \n", "47 1.838 \n", "49 1.838 " ] }, "execution_count": 10, "metadata": {}, "output_type": "execute_result" } ], "source": [ "get_filtered_df(\n", " runs_df, \"test_avg_max3/\", exclude_classes_less_than_x_items=5, label_col=\"material\"\n", ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Four" ] }, { "cell_type": "code", "execution_count": 11, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
datasetaccuracytop3_accuracytop5_accuracytop10_accuracyf1_weightedprecision_weightedrecall_weightedf1_macroprecision_macrorecall_macrof1_microprecision_microrecall_microavg_images_per_item
43white3D-1frame0.6320.8030.8680.9240.6040.6060.6320.3390.3660.3420.6320.6320.6322.083
45white3D-4frames0.6320.8120.8720.9210.6000.6030.6320.3190.3470.3240.6320.6320.6322.083
47white0.6030.7760.8400.9090.5680.6040.6030.3690.3860.3890.6030.6030.6032.083
49normal0.7060.8400.8920.9460.6910.6940.7060.4240.4550.4360.7060.7060.7062.083
\n", "
" ], "text/plain": [ " dataset accuracy top3_accuracy top5_accuracy top10_accuracy \\\n", "43 white3D-1frame 0.632 0.803 0.868 0.924 \n", "45 white3D-4frames 0.632 0.812 0.872 0.921 \n", "47 white 0.603 0.776 0.840 0.909 \n", "49 normal 0.706 0.840 0.892 0.946 \n", "\n", " f1_weighted precision_weighted recall_weighted f1_macro \\\n", "43 0.604 0.606 0.632 0.339 \n", "45 0.600 0.603 0.632 0.319 \n", "47 0.568 0.604 0.603 0.369 \n", "49 0.691 0.694 0.706 0.424 \n", "\n", " precision_macro recall_macro f1_micro precision_micro recall_micro \\\n", "43 0.366 0.342 0.632 0.632 0.632 \n", "45 0.347 0.324 0.632 0.632 0.632 \n", "47 0.386 0.389 0.603 0.603 0.603 \n", "49 0.455 0.436 0.706 0.706 0.706 \n", "\n", " avg_images_per_item \n", "43 2.083 \n", "45 2.083 \n", "47 2.083 \n", "49 2.083 " ] }, "execution_count": 11, "metadata": {}, "output_type": "execute_result" } ], "source": [ "get_filtered_df(\n", " runs_df, \"test_avg_max4/\", exclude_classes_less_than_x_items=5, label_col=\"material\"\n", ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### If we have only have one images, but we augment at test time with 4 generated 3D views" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "One" ] }, { "cell_type": "code", "execution_count": 12, "metadata": {}, "outputs": [], "source": [ "# df = get_filtered_df(\n", "# runs_df, \"test_avg_max1/\", exclude_classes_less_than_x_items=5, label_col=\"material\"\n", "# )\n", "# df[\"genuine_img_w\"] = \"none\"\n", "\n", "df_25 = get_filtered_df(\n", " runs_df,\n", " \"test_avg_max1_plus_3Dx4_w0.25/\",\n", " exclude_classes_less_than_x_items=5,\n", " label_col=\"material\",\n", ")\n", "df_25[\"genuine_img_w\"] = 0.25\n", "df_50 = get_filtered_df(\n", " runs_df,\n", " \"test_avg_max1_plus_3Dx4_w0.5/\",\n", " exclude_classes_less_than_x_items=5,\n", " label_col=\"material\",\n", ")\n", "df_50[\"genuine_img_w\"] = 0.5\n", "df_75 = get_filtered_df(\n", " runs_df,\n", " \"test_avg_max1_plus_3Dx4_w0.75/\",\n", " exclude_classes_less_than_x_items=5,\n", " label_col=\"material\",\n", ")\n", "df_75[\"genuine_img_w\"] = 0.75\n", "df_1 = get_filtered_df(\n", " runs_df,\n", " \"test_avg_max1_plus_3Dx4_w1.0/\",\n", " exclude_classes_less_than_x_items=5,\n", " label_col=\"material\",\n", ")\n", "df_1[\"genuine_img_w\"] = 1.0\n", "\n", "df = pd.concat([df_25, df_50, df_75, df_1])\n", "\n", "# put the genuine_img_w column first\n", "cols = df.columns.tolist()\n", "cols = cols[-1:] + cols[:-1]\n", "df = df[cols]" ] }, { "cell_type": "code", "execution_count": 13, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
genuine_img_wdatasetaccuracytop3_accuracytop5_accuracytop10_accuracyf1_weightedprecision_weightedrecall_weightedf1_macroprecision_macrorecall_macrof1_microprecision_microrecall_microavg_images_per_item
91.00white3D-1frame0.6030.7750.8400.9150.5790.5800.6030.3200.3450.3260.6030.6030.6031.0
90.75white3D-1frame0.5990.7770.8460.9190.5730.5740.5990.3050.3280.3080.5990.5990.5991.0
110.50white3D-4frames0.5950.7870.8510.9110.5670.5700.5950.2960.3270.3030.5950.5950.5951.0
111.00white3D-4frames0.5930.7920.8490.9110.5650.5630.5930.2980.3120.3120.5930.5930.5931.0
110.75white3D-4frames0.5910.7900.8500.9170.5620.5670.5910.2870.3160.2990.5910.5910.5911.0
90.50white3D-1frame0.5870.7710.8500.9170.5560.5620.5870.2870.3130.2890.5870.5870.5871.0
110.25white3D-4frames0.5810.7690.8450.9130.5530.5600.5810.2740.2990.2850.5810.5810.5811.0
130.75white0.5680.7340.8220.8990.5320.5760.5680.3190.3540.3330.5680.5680.5681.0
131.00white0.5650.7420.8130.8870.5360.5690.5650.3300.3450.3500.5650.5650.5651.0
90.25white3D-1frame0.5640.7570.8370.9110.5290.5430.5640.2620.2920.2690.5640.5640.5641.0
130.50white0.5450.7260.8150.9000.5040.5600.5450.2750.3250.2850.5450.5450.5451.0
130.25white0.4600.6640.7610.8660.4270.5030.4600.1990.2690.2060.4600.4600.4601.0
\n", "
" ], "text/plain": [ " genuine_img_w dataset accuracy top3_accuracy top5_accuracy \\\n", "9 1.00 white3D-1frame 0.603 0.775 0.840 \n", "9 0.75 white3D-1frame 0.599 0.777 0.846 \n", "11 0.50 white3D-4frames 0.595 0.787 0.851 \n", "11 1.00 white3D-4frames 0.593 0.792 0.849 \n", "11 0.75 white3D-4frames 0.591 0.790 0.850 \n", "9 0.50 white3D-1frame 0.587 0.771 0.850 \n", "11 0.25 white3D-4frames 0.581 0.769 0.845 \n", "13 0.75 white 0.568 0.734 0.822 \n", "13 1.00 white 0.565 0.742 0.813 \n", "9 0.25 white3D-1frame 0.564 0.757 0.837 \n", "13 0.50 white 0.545 0.726 0.815 \n", "13 0.25 white 0.460 0.664 0.761 \n", "\n", " top10_accuracy f1_weighted precision_weighted recall_weighted \\\n", "9 0.915 0.579 0.580 0.603 \n", "9 0.919 0.573 0.574 0.599 \n", "11 0.911 0.567 0.570 0.595 \n", "11 0.911 0.565 0.563 0.593 \n", "11 0.917 0.562 0.567 0.591 \n", "9 0.917 0.556 0.562 0.587 \n", "11 0.913 0.553 0.560 0.581 \n", "13 0.899 0.532 0.576 0.568 \n", "13 0.887 0.536 0.569 0.565 \n", "9 0.911 0.529 0.543 0.564 \n", "13 0.900 0.504 0.560 0.545 \n", "13 0.866 0.427 0.503 0.460 \n", "\n", " f1_macro precision_macro recall_macro f1_micro precision_micro \\\n", "9 0.320 0.345 0.326 0.603 0.603 \n", "9 0.305 0.328 0.308 0.599 0.599 \n", "11 0.296 0.327 0.303 0.595 0.595 \n", "11 0.298 0.312 0.312 0.593 0.593 \n", "11 0.287 0.316 0.299 0.591 0.591 \n", "9 0.287 0.313 0.289 0.587 0.587 \n", "11 0.274 0.299 0.285 0.581 0.581 \n", "13 0.319 0.354 0.333 0.568 0.568 \n", "13 0.330 0.345 0.350 0.565 0.565 \n", "9 0.262 0.292 0.269 0.564 0.564 \n", "13 0.275 0.325 0.285 0.545 0.545 \n", "13 0.199 0.269 0.206 0.460 0.460 \n", "\n", " recall_micro avg_images_per_item \n", "9 0.603 1.0 \n", "9 0.599 1.0 \n", "11 0.595 1.0 \n", "11 0.593 1.0 \n", "11 0.591 1.0 \n", "9 0.587 1.0 \n", "11 0.581 1.0 \n", "13 0.568 1.0 \n", "13 0.565 1.0 \n", "9 0.564 1.0 \n", "13 0.545 1.0 \n", "13 0.460 1.0 " ] }, "execution_count": 13, "metadata": {}, "output_type": "execute_result" } ], "source": [ "df[df[\"dataset\"] != \"normal\"].sort_values(\"accuracy\", ascending=False)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Predict category\n", "Given one or more images from an object we have never seen before, can we predict what category that object is in?" ] }, { "cell_type": "code", "execution_count": 12, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
configBM pretrainTrain dataAcc.Top 3 Acc.Top 5 Acc.Top 10 Acc.F1PrecisionRecall
117om5-3Dwhite-1frame_nameNowhite+3Dx10.5860.7430.8110.8750.5460.5390.586
52om5-3Dwhite-1frame_name_bm-pretrnYeswhite+3Dx10.5920.7570.8140.8710.5550.5540.592
119om5-3Dwhite_nameNowhite+3Dx40.6010.7430.8040.8740.5690.5690.601
55om5-3Dwhite_name_bm-pretrnYeswhite+3Dx40.5840.7510.8050.8700.5530.5560.584
121om5-white_nameNowhite0.6180.7640.8190.8910.6010.6050.618
58om5-white_name_bm-pretrnYeswhite0.6000.7650.8110.8710.5720.5730.600
\n", "
" ], "text/plain": [ " config BM pretrain Train data Acc. \\\n", "117 om5-3Dwhite-1frame_name No white+3Dx1 0.586 \n", "52 om5-3Dwhite-1frame_name_bm-pretrn Yes white+3Dx1 0.592 \n", "119 om5-3Dwhite_name No white+3Dx4 0.601 \n", "55 om5-3Dwhite_name_bm-pretrn Yes white+3Dx4 0.584 \n", "121 om5-white_name No white 0.618 \n", "58 om5-white_name_bm-pretrn Yes white 0.600 \n", "\n", " Top 3 Acc. Top 5 Acc. Top 10 Acc. F1 Precision Recall \n", "117 0.743 0.811 0.875 0.546 0.539 0.586 \n", "52 0.757 0.814 0.871 0.555 0.554 0.592 \n", "119 0.743 0.804 0.874 0.569 0.569 0.601 \n", "55 0.751 0.805 0.870 0.553 0.556 0.584 \n", "121 0.764 0.819 0.891 0.601 0.605 0.618 \n", "58 0.765 0.811 0.871 0.572 0.573 0.600 " ] }, "execution_count": 12, "metadata": {}, "output_type": "execute_result" } ], "source": [ "get_filtered_df(runs_df, \"test_avg/\", exclude_classes_less_than_x_items=5, label_col=\"object_name\")" ] }, { "cell_type": "code", "execution_count": 22, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
BM pretrainTrain dataTest time methodAcc.Top 3 Acc.Top 5 Acc.Top 10 Acc.F1PrecisionRecall
116Nowhiteavg0.6240.7720.8310.8880.6090.6210.624
84avg+3D0.6130.7720.8240.8860.5980.6140.613
112white+3Dx1avg0.6160.7650.8210.8850.6000.6070.616
80avg+3D0.6170.7600.8270.8820.6000.6050.617
113white+3Dx4avg0.6000.7610.8110.8820.5770.5900.600
82avg+3D0.6040.7590.8160.8830.5800.5930.604
52Yeswhiteavg0.6110.7720.8220.8760.5820.5780.611
25avg+3D0.6130.7730.8290.8760.5920.5980.613
46white+3Dx1avg0.6160.7690.8270.8800.5940.6020.616
19avg+3D0.6220.7680.8240.8860.6060.6120.622
49white+3Dx4avg0.6020.7590.8230.8800.5750.5840.602
22avg+3D0.6090.7540.8180.8770.5920.6080.609
\n", "
" ], "text/plain": [ " BM pretrain Train data Test time method Acc. Top 3 Acc. Top 5 Acc. \\\n", "116 No white avg 0.624 0.772 0.831 \n", "84 avg+3D 0.613 0.772 0.824 \n", "112 white+3Dx1 avg 0.616 0.765 0.821 \n", "80 avg+3D 0.617 0.760 0.827 \n", "113 white+3Dx4 avg 0.600 0.761 0.811 \n", "82 avg+3D 0.604 0.759 0.816 \n", "52 Yes white avg 0.611 0.772 0.822 \n", "25 avg+3D 0.613 0.773 0.829 \n", "46 white+3Dx1 avg 0.616 0.769 0.827 \n", "19 avg+3D 0.622 0.768 0.824 \n", "49 white+3Dx4 avg 0.602 0.759 0.823 \n", "22 avg+3D 0.609 0.754 0.818 \n", "\n", " Top 10 Acc. F1 Precision Recall \n", "116 0.888 0.609 0.621 0.624 \n", "84 0.886 0.598 0.614 0.613 \n", "112 0.885 0.600 0.607 0.616 \n", "80 0.882 0.600 0.605 0.617 \n", "113 0.882 0.577 0.590 0.600 \n", "82 0.883 0.580 0.593 0.604 \n", "52 0.876 0.582 0.578 0.611 \n", "25 0.876 0.592 0.598 0.613 \n", "46 0.880 0.594 0.602 0.616 \n", "19 0.886 0.606 0.612 0.622 \n", "49 0.880 0.575 0.584 0.602 \n", "22 0.877 0.592 0.608 0.609 " ] }, "execution_count": 22, "metadata": {}, "output_type": "execute_result" } ], "source": [ "for max1 in ['_max1', '']:\n", " for x in [3, 4, 5, 6]:\n", "\n", " category_x = get_filtered_df(\n", " runs_df, f\"test_avg{max1}/\", exclude_classes_less_than_x_items=x, label_col=\"object_name\"\n", " )\n", " category_x[\"Test time method\"] = \"avg\"\n", " category_x_plus3Ds = []\n", " for w in [0.75]: # [0.25, 0.5, 0.75]:\n", " df = get_filtered_df(\n", " runs_df,\n", " f\"test_avg{max1}_plus_3Dx4_w{w}/\",\n", " exclude_classes_less_than_x_items=x,\n", " label_col=\"object_name\",\n", " )\n", " # df[\"Test time method\"] = f\"average_w{w}_plus_3D_w{1-w}\"\n", " df[\"Test time method\"] = f\"avg+3D\"\n", " category_x_plus3Ds.append(df)\n", "\n", " category_x_plus3Ds = pd.concat([\n", " category_x, pd.concat(category_x_plus3Ds)\n", " ])\n", "\n", " cols = [\"BM pretrain\", \"Train data\", \"Test time method\"]\n", " other_cols = [col for col in category_x_plus3Ds.columns if col not in cols]\n", " category_x_plus3Ds = category_x_plus3Ds[cols + other_cols]\n", "\n", " category_x_plus3Ds.sort_values(by=[\"BM pretrain\", \"Train data\", \"Test time method\"], inplace=True)\n", "\n", " # # Assuming your DataFrame is named 'df'\n", " # # Create a copy to avoid modifying the original DataFrame\n", " df_new = category_x_plus3Ds.copy()\n", "\n", " # For column 'BM Pretrain'\n", " df_new[\"BM pretrain\"] = df_new[\"BM pretrain\"].where(\n", " df_new[\"BM pretrain\"] != df_new[\"BM pretrain\"].shift(1), \"\"\n", " )\n", "\n", " # For column 'Train data'\n", " df_new[\"Train data\"] = df_new[\"Train data\"].where(\n", " df_new[\"Train data\"] != df_new[\"Train data\"].shift(1), \"\"\n", " )\n", " df_new.drop(columns=['config'], inplace=True)\n", " df_new.to_csv(f\"object_name_min{x}{max1}.csv\", index=False)\n", "df_new" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### If we have only have one/two/three/four images" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "One" ] }, { "cell_type": "code", "execution_count": 15, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
datasetaccuracytop3_accuracytop5_accuracytop10_accuracyf1_weightedprecision_weightedrecall_weightedf1_macroprecision_macrorecall_macrof1_microprecision_microrecall_microavg_images_per_item
42white3D-1frame0.5510.7190.7830.8570.5140.5070.5510.2490.2630.2650.5510.5510.5511.0
44white3D-4frames0.5620.7160.7810.8530.5330.5330.5620.2860.3040.3010.5620.5620.5621.0
46white0.5780.7240.7890.8750.5690.5790.5780.3460.3640.3590.5780.5780.5781.0
48normal0.6510.7940.8460.8950.6300.6340.6510.3680.3950.3770.6510.6510.6511.0
\n", "
" ], "text/plain": [ " dataset accuracy top3_accuracy top5_accuracy top10_accuracy \\\n", "42 white3D-1frame 0.551 0.719 0.783 0.857 \n", "44 white3D-4frames 0.562 0.716 0.781 0.853 \n", "46 white 0.578 0.724 0.789 0.875 \n", "48 normal 0.651 0.794 0.846 0.895 \n", "\n", " f1_weighted precision_weighted recall_weighted f1_macro \\\n", "42 0.514 0.507 0.551 0.249 \n", "44 0.533 0.533 0.562 0.286 \n", "46 0.569 0.579 0.578 0.346 \n", "48 0.630 0.634 0.651 0.368 \n", "\n", " precision_macro recall_macro f1_micro precision_micro recall_micro \\\n", "42 0.263 0.265 0.551 0.551 0.551 \n", "44 0.304 0.301 0.562 0.562 0.562 \n", "46 0.364 0.359 0.578 0.578 0.578 \n", "48 0.395 0.377 0.651 0.651 0.651 \n", "\n", " avg_images_per_item \n", "42 1.0 \n", "44 1.0 \n", "46 1.0 \n", "48 1.0 " ] }, "execution_count": 15, "metadata": {}, "output_type": "execute_result" } ], "source": [ "get_filtered_df(\n", " runs_df, \"test_avg_max1/\", exclude_classes_less_than_x_items=5, label_col=\"object_name\"\n", ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Two" ] }, { "cell_type": "code", "execution_count": 16, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
datasetaccuracytop3_accuracytop5_accuracytop10_accuracyf1_weightedprecision_weightedrecall_weightedf1_macroprecision_macrorecall_macrof1_microprecision_microrecall_microavg_images_per_item
42white3D-1frame0.5770.7380.8040.8710.5380.5310.5770.2730.2910.2900.5770.5770.5771.504
44white3D-4frames0.5860.7380.7930.8680.5540.5520.5860.3110.3320.3210.5860.5860.5861.504
46white0.6000.7490.8090.8850.5850.5890.6000.3620.3710.3850.6000.6000.6001.504
48normal0.6790.8080.8570.9060.6570.6600.6790.3960.4170.4100.6790.6790.6791.504
\n", "
" ], "text/plain": [ " dataset accuracy top3_accuracy top5_accuracy top10_accuracy \\\n", "42 white3D-1frame 0.577 0.738 0.804 0.871 \n", "44 white3D-4frames 0.586 0.738 0.793 0.868 \n", "46 white 0.600 0.749 0.809 0.885 \n", "48 normal 0.679 0.808 0.857 0.906 \n", "\n", " f1_weighted precision_weighted recall_weighted f1_macro \\\n", "42 0.538 0.531 0.577 0.273 \n", "44 0.554 0.552 0.586 0.311 \n", "46 0.585 0.589 0.600 0.362 \n", "48 0.657 0.660 0.679 0.396 \n", "\n", " precision_macro recall_macro f1_micro precision_micro recall_micro \\\n", "42 0.291 0.290 0.577 0.577 0.577 \n", "44 0.332 0.321 0.586 0.586 0.586 \n", "46 0.371 0.385 0.600 0.600 0.600 \n", "48 0.417 0.410 0.679 0.679 0.679 \n", "\n", " avg_images_per_item \n", "42 1.504 \n", "44 1.504 \n", "46 1.504 \n", "48 1.504 " ] }, "execution_count": 16, "metadata": {}, "output_type": "execute_result" } ], "source": [ "get_filtered_df(\n", " runs_df, \"test_avg_max2/\", exclude_classes_less_than_x_items=5, label_col=\"object_name\"\n", ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Three" ] }, { "cell_type": "code", "execution_count": 17, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
datasetaccuracytop3_accuracytop5_accuracytop10_accuracyf1_weightedprecision_weightedrecall_weightedf1_macroprecision_macrorecall_macrof1_microprecision_microrecall_microavg_images_per_item
42white3D-1frame0.5840.7450.8100.8740.5430.5340.5840.2780.2950.2940.5840.5840.5841.776
44white3D-4frames0.5950.7430.8030.8710.5630.5600.5950.3220.3460.3300.5950.5950.5951.776
46white0.6130.7610.8160.8890.5970.6030.6130.3790.3920.3970.6130.6130.6131.776
48normal0.6820.8120.8600.9090.6580.6610.6820.3880.4150.4020.6820.6820.6821.776
\n", "
" ], "text/plain": [ " dataset accuracy top3_accuracy top5_accuracy top10_accuracy \\\n", "42 white3D-1frame 0.584 0.745 0.810 0.874 \n", "44 white3D-4frames 0.595 0.743 0.803 0.871 \n", "46 white 0.613 0.761 0.816 0.889 \n", "48 normal 0.682 0.812 0.860 0.909 \n", "\n", " f1_weighted precision_weighted recall_weighted f1_macro \\\n", "42 0.543 0.534 0.584 0.278 \n", "44 0.563 0.560 0.595 0.322 \n", "46 0.597 0.603 0.613 0.379 \n", "48 0.658 0.661 0.682 0.388 \n", "\n", " precision_macro recall_macro f1_micro precision_micro recall_micro \\\n", "42 0.295 0.294 0.584 0.584 0.584 \n", "44 0.346 0.330 0.595 0.595 0.595 \n", "46 0.392 0.397 0.613 0.613 0.613 \n", "48 0.415 0.402 0.682 0.682 0.682 \n", "\n", " avg_images_per_item \n", "42 1.776 \n", "44 1.776 \n", "46 1.776 \n", "48 1.776 " ] }, "execution_count": 17, "metadata": {}, "output_type": "execute_result" } ], "source": [ "get_filtered_df(\n", " runs_df, \"test_avg_max3/\", exclude_classes_less_than_x_items=5, label_col=\"object_name\"\n", ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Four" ] }, { "cell_type": "code", "execution_count": 18, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
datasetaccuracytop3_accuracytop5_accuracytop10_accuracyf1_weightedprecision_weightedrecall_weightedf1_macroprecision_macrorecall_macrof1_microprecision_microrecall_microavg_images_per_item
42white3D-1frame0.5860.7460.8090.8740.5450.5360.5860.2840.3000.3000.5860.5860.5862.001
44white3D-4frames0.5980.7430.8030.8710.5650.5640.5980.3210.3480.3280.5980.5980.5982.001
46white0.6150.7590.8150.8910.5990.6050.6150.3790.3930.3990.6150.6150.6152.001
48normal0.6810.8130.8600.9100.6560.6580.6810.3880.4120.4040.6810.6810.6812.001
\n", "
" ], "text/plain": [ " dataset accuracy top3_accuracy top5_accuracy top10_accuracy \\\n", "42 white3D-1frame 0.586 0.746 0.809 0.874 \n", "44 white3D-4frames 0.598 0.743 0.803 0.871 \n", "46 white 0.615 0.759 0.815 0.891 \n", "48 normal 0.681 0.813 0.860 0.910 \n", "\n", " f1_weighted precision_weighted recall_weighted f1_macro \\\n", "42 0.545 0.536 0.586 0.284 \n", "44 0.565 0.564 0.598 0.321 \n", "46 0.599 0.605 0.615 0.379 \n", "48 0.656 0.658 0.681 0.388 \n", "\n", " precision_macro recall_macro f1_micro precision_micro recall_micro \\\n", "42 0.300 0.300 0.586 0.586 0.586 \n", "44 0.348 0.328 0.598 0.598 0.598 \n", "46 0.393 0.399 0.615 0.615 0.615 \n", "48 0.412 0.404 0.681 0.681 0.681 \n", "\n", " avg_images_per_item \n", "42 2.001 \n", "44 2.001 \n", "46 2.001 \n", "48 2.001 " ] }, "execution_count": 18, "metadata": {}, "output_type": "execute_result" } ], "source": [ "get_filtered_df(\n", " runs_df, \"test_avg_max4/\", exclude_classes_less_than_x_items=5, label_col=\"object_name\"\n", ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### If we have only have one images, but we augment at test time with 4 generated 3D views" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "One" ] }, { "cell_type": "code", "execution_count": 19, "metadata": {}, "outputs": [], "source": [ "# df = get_filtered_df(\n", "# runs_df, \"test_avg_max1/\", exclude_classes_less_than_x_items=5, label_col=\"material\"\n", "# )\n", "# df[\"genuine_img_w\"] = \"none\"\n", "\n", "df_25 = get_filtered_df(\n", " runs_df,\n", " \"test_avg_max1_plus_3Dx4_w0.25/\",\n", " exclude_classes_less_than_x_items=5,\n", " label_col=\"object_name\",\n", ")\n", "df_25[\"genuine_img_w\"] = 0.25\n", "df_50 = get_filtered_df(\n", " runs_df,\n", " \"test_avg_max1_plus_3Dx4_w0.5/\",\n", " exclude_classes_less_than_x_items=5,\n", " label_col=\"object_name\",\n", ")\n", "df_50[\"genuine_img_w\"] = 0.5\n", "df_75 = get_filtered_df(\n", " runs_df,\n", " \"test_avg_max1_plus_3Dx4_w0.75/\",\n", " exclude_classes_less_than_x_items=5,\n", " label_col=\"object_name\",\n", ")\n", "df_75[\"genuine_img_w\"] = 0.75\n", "df_1 = get_filtered_df(\n", " runs_df,\n", " \"test_avg_max1_plus_3Dx4_w1.0/\",\n", " exclude_classes_less_than_x_items=5,\n", " label_col=\"object_name\",\n", ")\n", "df_1[\"genuine_img_w\"] = 1.0\n", "\n", "df = pd.concat([df_25, df_50, df_75, df_1])\n", "\n", "# put the genuine_img_w column first\n", "cols = df.columns.tolist()\n", "cols = cols[-1:] + cols[:-1]\n", "df = df[cols]" ] }, { "cell_type": "code", "execution_count": 20, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
genuine_img_wdatasetaccuracytop3_accuracytop5_accuracytop10_accuracyf1_weightedprecision_weightedrecall_weightedf1_macroprecision_macrorecall_macrof1_microprecision_microrecall_microavg_images_per_item
121.00white0.5780.7240.7890.8750.5690.5790.5780.3460.3640.3590.5780.5780.5781.0
120.75white0.5730.7200.7850.8690.5610.5730.5730.3300.3570.3350.5730.5730.5731.0
101.00white3D-4frames0.5620.7170.7810.8530.5330.5330.5620.2860.3040.3010.5620.5620.5621.0
100.75white3D-4frames0.5590.7140.7850.8510.5310.5350.5590.2880.3140.3000.5590.5590.5591.0
100.50white3D-4frames0.5560.7160.7800.8480.5280.5310.5560.2830.3100.2940.5560.5560.5561.0
80.75white3D-1frame0.5520.7230.7850.8540.5130.5060.5520.2530.2710.2660.5520.5520.5521.0
81.00white3D-1frame0.5510.7190.7830.8570.5140.5070.5510.2490.2630.2650.5510.5510.5511.0
80.50white3D-1frame0.5430.7120.7750.8460.5060.5030.5430.2490.2700.2600.5430.5430.5431.0
100.25white3D-4frames0.5390.7090.7690.8450.5100.5120.5390.2590.2810.2710.5390.5390.5391.0
120.50white0.5290.7010.7720.8500.5180.5390.5290.2920.3370.2880.5290.5290.5291.0
80.25white3D-1frame0.5200.6950.7600.8350.4840.4840.5200.2290.2510.2370.5200.5200.5201.0
120.25white0.4220.6140.6920.7910.4080.4550.4220.1870.2310.1880.4220.4220.4221.0
\n", "
" ], "text/plain": [ " genuine_img_w dataset accuracy top3_accuracy top5_accuracy \\\n", "12 1.00 white 0.578 0.724 0.789 \n", "12 0.75 white 0.573 0.720 0.785 \n", "10 1.00 white3D-4frames 0.562 0.717 0.781 \n", "10 0.75 white3D-4frames 0.559 0.714 0.785 \n", "10 0.50 white3D-4frames 0.556 0.716 0.780 \n", "8 0.75 white3D-1frame 0.552 0.723 0.785 \n", "8 1.00 white3D-1frame 0.551 0.719 0.783 \n", "8 0.50 white3D-1frame 0.543 0.712 0.775 \n", "10 0.25 white3D-4frames 0.539 0.709 0.769 \n", "12 0.50 white 0.529 0.701 0.772 \n", "8 0.25 white3D-1frame 0.520 0.695 0.760 \n", "12 0.25 white 0.422 0.614 0.692 \n", "\n", " top10_accuracy f1_weighted precision_weighted recall_weighted \\\n", "12 0.875 0.569 0.579 0.578 \n", "12 0.869 0.561 0.573 0.573 \n", "10 0.853 0.533 0.533 0.562 \n", "10 0.851 0.531 0.535 0.559 \n", "10 0.848 0.528 0.531 0.556 \n", "8 0.854 0.513 0.506 0.552 \n", "8 0.857 0.514 0.507 0.551 \n", "8 0.846 0.506 0.503 0.543 \n", "10 0.845 0.510 0.512 0.539 \n", "12 0.850 0.518 0.539 0.529 \n", "8 0.835 0.484 0.484 0.520 \n", "12 0.791 0.408 0.455 0.422 \n", "\n", " f1_macro precision_macro recall_macro f1_micro precision_micro \\\n", "12 0.346 0.364 0.359 0.578 0.578 \n", "12 0.330 0.357 0.335 0.573 0.573 \n", "10 0.286 0.304 0.301 0.562 0.562 \n", "10 0.288 0.314 0.300 0.559 0.559 \n", "10 0.283 0.310 0.294 0.556 0.556 \n", "8 0.253 0.271 0.266 0.552 0.552 \n", "8 0.249 0.263 0.265 0.551 0.551 \n", "8 0.249 0.270 0.260 0.543 0.543 \n", "10 0.259 0.281 0.271 0.539 0.539 \n", "12 0.292 0.337 0.288 0.529 0.529 \n", "8 0.229 0.251 0.237 0.520 0.520 \n", "12 0.187 0.231 0.188 0.422 0.422 \n", "\n", " recall_micro avg_images_per_item \n", "12 0.578 1.0 \n", "12 0.573 1.0 \n", "10 0.562 1.0 \n", "10 0.559 1.0 \n", "10 0.556 1.0 \n", "8 0.552 1.0 \n", "8 0.551 1.0 \n", "8 0.543 1.0 \n", "10 0.539 1.0 \n", "12 0.529 1.0 \n", "8 0.520 1.0 \n", "12 0.422 1.0 " ] }, "execution_count": 20, "metadata": {}, "output_type": "execute_result" } ], "source": [ "df[df[\"dataset\"] != \"normal\"].sort_values(\"accuracy\", ascending=False)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "ArtifactClassification", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.12" } }, "nbformat": 4, "nbformat_minor": 2 }