{ "cells": [ { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import os, sys, shutil\n", "from tqdm import tqdm\n", "import numpy as np\n", "import pandas as pd\n", "import matplotlib as plt\n", "from PIL import Image\n", "from matplotlib.lines import Line2D\n", "import matplotlib as mpl\n", "import math\n", "import matplotlib.image as mpimg\n", "import random\n", "from datetime import datetime\n", "from torchvision import transforms\n", "import torch\n", "# os.chdir(\"..\")\n", "experiment_version = 4\n", "os.makedirs(f\"stimuli_v{experiment_version}\", exist_ok=True)\n", "os.makedirs(f\"responses_v{experiment_version}\", exist_ok=True)\n", "os.makedirs(f\"dataframes_v{experiment_version}\", exist_ok=True)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# CREATE EXPERIMENT DATAFRAME AND TRIAL FILES FOR MEADOWS" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "#Experiment column key:\n", "# 1: Experiment 1, mindeye vs second sight\n", "# 2: Experiment 2, second sight two way identification\n", "# 3: Experiment 3, mental imagery two way identification\n", "df_exp = pd.DataFrame(columns=[\"experiment\", \"stim1\", \"stim2\", \"stim3\", \"sample\", \"subject\", \"target_on_left\", \"catch_trial\", \"rep\"])\n", "i=0\n", "random_count = 0\n", "gt_tensor_block = torch.load(\"raw_stimuli/all_images_425.pt\")\n", "for subj in [1,2,5,7]: #1,2,5,7\n", " subject_enhanced_recons_40 = torch.load(f\"raw_stimuli/final_subj0{subj}_pretrained_40sess_24bs_all_enhancedrecons.pt\")\n", " subject_unclip_recons_40 = torch.load(f\"raw_stimuli/final_subj0{subj}_pretrained_40sess_24bs_all_recons.pt\")\n", " subject_enhanced_recons_1 = torch.load(f\"raw_stimuli/final_subj0{subj}_pretrained_1sess_24bs_all_enhancedrecons.pt\")\n", " subject_braindiffuser_recons_1 = torch.load(f\"raw_stimuli/subj0{subj}_brain_diffuser_750_all_recons.pt\")\n", " #Experiment 1, mindeye two way identification\n", " random_indices = random.sample(range(1000), 300)\n", " for sample in tqdm(random_indices):\n", " \n", " # Get random sample to compare against\n", " random_number = random.choice([x for x in range(1000) if x != sample])\n", " # Extract the stimulus images from tensor blocks and save as pngs to stimuli folder\n", " gt_sample = transforms.ToPILImage()(gt_tensor_block[sample])\n", " sample_enhanced_recons_40 = transforms.ToPILImage()(subject_enhanced_recons_40[sample]).resize((425,425))\n", " random_enhanced_recons_40 = transforms.ToPILImage()(subject_enhanced_recons_40[random_number]).resize((425,425))\n", " sample_enhanced_recons_40.save(f\"stimuli_v{experiment_version}/{sample}_subject{subj}_mindeye_enhanced_40.png\")\n", " random_enhanced_recons_40.save(f\"stimuli_v{experiment_version}/{random_number}_subject{subj}_mindeye_enhanced_40.png\")\n", " gt_sample.save(f\"stimuli_v{experiment_version}/{sample}_ground_truth.png\")\n", " \n", " # Configure stimuli names and order in experiment dataframe\n", " sample_names = [f\"{random_number}_subject{subj}_mindeye_enhanced_40\", f\"{sample}_subject{subj}_mindeye_enhanced_40\"]\n", " order = random.randrange(2)\n", " left_sample = sample_names.pop(order)\n", " right_sample = sample_names.pop()\n", " gt_sample = f\"{sample}_ground_truth\"\n", " df_exp.loc[i] = {\"experiment\" : 1, \"stim1\" : gt_sample, \"stim2\" : left_sample, \"stim3\" : right_sample, \"sample\" : sample, \"subject\" : subj, \n", " \"target_on_left\" : order == 1, \"catch_trial\" : None, \"rep\" : 0}\n", " 
i+=1\n", " \n", " #Experiment 2, refined vs unrefined\n", " random_indices = random.sample(range(1000), 300)\n", " for sample in tqdm(random_indices):\n", " \n", " # Extract the stimulus images from tensor blocks and save as pngs to stimuli folder\n", " gt_sample = transforms.ToPILImage()(gt_tensor_block[sample])\n", " sample_enhanced_recons_40 = transforms.ToPILImage()(subject_enhanced_recons_40[sample]).resize((425,425))\n", " sample_unclip_recons_40 = transforms.ToPILImage()(subject_unclip_recons_40[sample]).resize((425,425))\n", " sample_enhanced_recons_40.save(f\"stimuli_v{experiment_version}/{sample}_subject{subj}_mindeye_enhanced_40.png\")\n", " sample_unclip_recons_40.save(f\"stimuli_v{experiment_version}/{sample}_subject{subj}_mindeye_unclip_40.png\")\n", " gt_sample.save(f\"stimuli_v{experiment_version}/{sample}_ground_truth.png\")\n", " \n", " # Configure stimuli names and order in experiment dataframe\n", " sample_names = [f\"{sample}_subject{subj}_mindeye_unclip_40\", f\"{sample}_subject{subj}_mindeye_enhanced_40\"]\n", " order = random.randrange(2)\n", " left_sample = sample_names.pop(order)\n", " right_sample = sample_names.pop()\n", " gt_sample = f\"{sample}_ground_truth\"\n", " df_exp.loc[i] = {\"experiment\" : 2, \"stim1\" : gt_sample, \"stim2\" : left_sample, \"stim3\" : right_sample, \"sample\" : sample, \"subject\" : subj, \n", " \"target_on_left\" : order == 1, \"catch_trial\" : None, \"rep\" : 0}\n", " i+=1\n", " \n", " #Experiment 3, refined 1 session vs brain diffuser 1 session\n", " random_indices = random.sample(range(1000), 300)\n", " for sample in tqdm(random_indices):\n", " \n", " # Extract the stimulus images from tensor blocks and save as pngs to stimuli folder\n", " gt_sample = transforms.ToPILImage()(gt_tensor_block[sample])\n", " sample_enhanced_recons_1 = transforms.ToPILImage()(subject_enhanced_recons_1[sample]).resize((425,425))\n", " sample_braindiffuser_1 = transforms.ToPILImage()(subject_braindiffuser_recons_1[sample]).resize((425,425))\n", " sample_enhanced_recons_1.save(f\"stimuli_v{experiment_version}/{sample}_subject{subj}_mindeye_enhanced_1.png\")\n", " sample_braindiffuser_1.save(f\"stimuli_v{experiment_version}/{sample}_subject{subj}_braindiffuser_1.png\")\n", " gt_sample.save(f\"stimuli_v{experiment_version}/{sample}_ground_truth.png\")\n", " \n", " # Configure stimuli names and order in experiment dataframe\n", " sample_names = [f\"{sample}_subject{subj}_braindiffuser_1\", f\"{sample}_subject{subj}_mindeye_enhanced_1\"]\n", " order = random.randrange(2)\n", " left_sample = sample_names.pop(order)\n", " right_sample = sample_names.pop()\n", " gt_sample = f\"{sample}_ground_truth\"\n", " df_exp.loc[i] = {\"experiment\" : 3, \"stim1\" : gt_sample, \"stim2\" : left_sample, \"stim3\" : right_sample, \"sample\" : sample, \"subject\" : subj, \n", " \"target_on_left\" : order == 1, \"catch_trial\" : None, \"rep\" : 0}\n", " i+=1\n", "df_exp = df_exp.sample(frac=1)\n", "print(len(df_exp))\n", "print(df_exp)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Check if all images are present in final stimuli folder\n", "count_not_found = 0\n", "stim_path = f\"stimuli_v{experiment_version}/\"\n", "for index, row in df_exp.iterrows():\n", " if not (os.path.exists(f\"{stim_path}{row['stim1']}.png\")):\n", " print(f\"{row['stim1']}.png\")\n", " count_not_found += 1\n", " if not (os.path.exists(f\"{stim_path}{row['stim2']}.png\")):\n", " print(f\"{row['stim2']}.png\")\n", " count_not_found += 1\n", " if not 
(os.path.exists(f\"{stim_path}{row['stim3']}.png\")):\n", " print(f\"{row['stim3']}.png\")\n", " count_not_found += 1\n", "print(count_not_found)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "#Add participant ID column\n", "pIDs = []\n", "for i in range(len(df_exp)):\n", " pIDs.append(i // 60)\n", "df_exp.insert(0, \"pID\", pIDs)\n", "print(len(df_exp[(df_exp['pID'] == 0)]))\n", "#Add catch trials within each pID section\n", "for pID in range(max(pIDs)):\n", " df_pid = df_exp[(df_exp['experiment'] == 1) & (df_exp['pID'] == pID)]\n", " \n", " # Ground truth catch trials\n", " gt_catch_trials = df_pid.sample(n=9)\n", " gt_catch_trials['catch_trial'] = \"ground_truth\"\n", " for index, row in gt_catch_trials.iterrows():\n", " \n", " order = random.randrange(2)\n", " ground_truth = row['stim1']\n", " stims = [row['stim2'], ground_truth]\n", " \n", " gt_catch_trials.at[index, 'stim2'] = stims.pop(order)\n", " gt_catch_trials.at[index, 'stim3'] = stims.pop()\n", " # Target on left here means the ground truth repeat is on the left\n", " gt_catch_trials.at[index, 'target_on_left'] = (order == 1)\n", " \n", " # repeated trial catch trials, first sample indices\n", " sampled_indices = df_pid.sample(n=9).index\n", " #mark the trials at these indices as catch trials\n", " df_exp.loc[sampled_indices]['catch_trial'] = \"repeat\"\n", " #create duplicate trials for these samples to repeat\n", " repeat_catch_trials_rep1 = df_exp.loc[sampled_indices].copy()\n", " repeat_catch_trials_rep2 = df_exp.loc[sampled_indices].copy()\n", " repeat_catch_trials_rep1['rep'] = 1\n", " repeat_catch_trials_rep2['rep'] = 2\n", " \n", " \n", " df_exp = pd.concat([df_exp, gt_catch_trials, repeat_catch_trials_rep1, repeat_catch_trials_rep2])\n", " \n", "df_exp = df_exp.sample(frac=1).sort_values(by='pID', kind='mergesort')\n", "print(len(df_exp))\n", "print(len(df_exp[(df_exp['pID'] == 0)]))" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "\n", "df_exp.to_csv(f'dataframes_v{experiment_version}/experiment_v{experiment_version}.csv', index=False)\n", "\n", "df_exp_tsv = df_exp[['pID', 'stim1', 'stim2', 'stim3']].copy()\n", "df_exp_tsv.to_csv(f\"dataframes_v{experiment_version}/meadow_trials_v{experiment_version}.tsv\", sep=\"\\t\", index=False, header=False) " ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# THE FOLLOWING CELLS ARE FOR PROCESSING RESPONSES" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "response_path = f\"responses_v{experiment_version}/\"\n", "dataframe_path = f\"dataframes_v{experiment_version}/\"\n", "df_experiment = pd.read_csv(dataframe_path + f\"experiment_v{experiment_version}.csv\")\n", "response_version = \"2\"\n", "df_responses = pd.read_csv(f\"{response_path}deployment_v{response_version}.csv\")\n", "print(df_responses)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "df_responses.head()\n", "df_trial = pd.DataFrame(columns=[\"experiment\", \"stim1\", \"stim2\", \"stim3\", \"sample\", \"subject\", \"target_on_left\", \"method\", \"catch_trial\", \"rep\", \"picked_left\", \"participant\"])\n", "df_experiment['picked_left'] = None\n", "for index, row in tqdm(df_responses.iterrows()):\n", " if row['label'] == row['stim2_id']:\n", " picked_left = True\n", " elif row['label'] == row['stim3_id']:\n", " picked_left = False\n", " else:\n", " print(\"Error\")\n", " break\n", " start_timestamp = 
row['time_trial_start']\n", " end_timestamp = row['time_trial_response']\n", " start = datetime.fromisoformat(start_timestamp.replace(\"Z\", \"+00:00\"))\n", " end = datetime.fromisoformat(end_timestamp.replace(\"Z\", \"+00:00\"))\n", " # Calculate the difference in seconds\n", " time_difference_seconds = (end - start).total_seconds()\n", " \n", " df_trial.loc[index] = df_experiment[(df_experiment['stim1'] == row['stim1_name']) & (df_experiment['stim2'] == row['stim2_name']) & (df_experiment['stim3'] == row['stim3_name'])].iloc[0]\n", " df_trial.loc[index, 'picked_left'] = picked_left\n", " df_trial.loc[index, 'participant'] = row['participation']\n", " df_trial.loc[index, 'response_time'] = time_difference_seconds\n", " \n", "df_trial[\"picked_target\"] = df_trial[\"picked_left\"] == df_trial[\"target_on_left\"]\n", "print(df_trial)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# number of participants\n", "print(\"Total participants:\", len(df_trial[\"participant\"].unique()))\n", "# print(df_trial)\n", "\n", "# Remove participants who failed any ground truth catch trial (no tolerance)\n", "participants_to_remove_rule1 = df_trial[(df_trial['catch_trial'] == 'ground_truth') & (df_trial['picked_target'] == False)]['participant'].unique()\n", "print(\"Participants to remove 1:\", participants_to_remove_rule1)\n", "# Remove participants who failed the repeat catch trials, i.e. gave inconsistent responses to identical repeated trials\n", "repeat_trials = df_trial[df_trial['rep'] > 0]\n", "\n", "# Group by the 3 stimuli presented to identify unique sets of trials\n", "grouped_repeat_trials = repeat_trials.groupby(['stim1', 'stim2', 'stim3'])\n", "\n", "# Track participant failures\n", "participant_failures = {}\n", "\n", "# Iterate through groups to check consistency in \"picked_target\" across repetitions\n", "for _, group in grouped_repeat_trials:\n", " if group['picked_target'].nunique() != 1: # Inconsistent \"picked_target\" within the group\n", " print(group['picked_target'])\n", " for participant in group['participant'].unique(): \n", " participant_failures[participant] = participant_failures.get(participant, 0) + 1\n", "\n", "# Identify participants who were inconsistent on more than one set of repeated trials\n", "participants_to_remove_rule2 = [participant for participant, failures in participant_failures.items() if failures > 1]\n", "print(\"Participants to remove 2:\", participants_to_remove_rule2)\n", "participants_to_remove = set(participants_to_remove_rule1).union(set(participants_to_remove_rule2))\n", "filtered_df = df_trial[~df_trial['participant'].isin(participants_to_remove)]\n", "print(\"Clean participants:\", len(filtered_df[\"participant\"].unique()))\n", "print(len(df_trial), len(filtered_df))\n", "print(participants_to_remove)\n", "filtered_df.to_csv(f'{dataframe_path}filtered_responses_v{response_version}.csv', index=False)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Load filtered responses\n", "filtered_df = pd.read_csv(f'{dataframe_path}filtered_responses_v{response_version}.csv')\n", "# Filter out catch trials\n", "df_trial_exp = filtered_df[(filtered_df['catch_trial'].isnull() & (filtered_df['rep'] == 0))]\n", "\n", "# Grab results from an individual experiment and print them out\n", "df_trial_exp1 = df_trial_exp[df_trial_exp['experiment'] == 1]\n", "print(\"Number of experiment trials:\", len(df_trial_exp1))\n", "print(\"Success rate: \", 
len(df_trial_exp1[df_trial_exp1[\"picked_target\"]]) / len(df_trial_exp1))\n" ] } ], "metadata": { "kernelspec": { "display_name": "SS", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.12" } }, "nbformat": 4, "nbformat_minor": 2 }