{ "cells": [ { "cell_type": "markdown", "id": "91b21cf6", "metadata": {}, "source": [ "## Generate the datasets for uploading" ] }, { "cell_type": "code", "execution_count": null, "id": "e1a3d25b", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": 14, "id": "aa925968", "metadata": { "scrolled": true }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "[]\n", "[]\n", "['kotiria000263.wav', 'kotiria000265.wav', 'kotiria000273.wav', 'kotiria000285.wav', 'kotiria000289.wav', 'kotiria000291.wav', 'kotiria000294.wav', 'kotiria000295.wav', 'kotiria000297.wav', 'kotiria000300.wav', 'kotiria000306.wav', 'kotiria000308.wav']\n", "[]\n", "['waikhana000740.wav', 'waikhana000745.wav', 'waikhana000746.wav']\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "15adf9d48a44440dac871ce9f432294c", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Uploading the dataset shards: 0%| | 0/3 [00:00\u001b[0;34m()\u001b[0m\n\u001b[1;32m 31\u001b[0m a \u001b[38;5;241m=\u001b[39m flatten(a)\n\u001b[1;32m 32\u001b[0m audio_dataset \u001b[38;5;241m=\u001b[39m Dataset\u001b[38;5;241m.\u001b[39mfrom_dict({\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124maudio\u001b[39m\u001b[38;5;124m\"\u001b[39m: flatten(df[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mfile_name\u001b[39m\u001b[38;5;124m\"\u001b[39m]\u001b[38;5;241m.\u001b[39mvalues\u001b[38;5;241m.\u001b[39mtolist()),\n\u001b[1;32m 33\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124msource_processed\u001b[39m\u001b[38;5;124m\"\u001b[39m: flatten(df[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124msource_processed\u001b[39m\u001b[38;5;124m\"\u001b[39m]\u001b[38;5;241m.\u001b[39mvalues\u001b[38;5;241m.\u001b[39mtolist()),\n\u001b[1;32m 34\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124msource_raw\u001b[39m\u001b[38;5;124m\"\u001b[39m: flatten(df[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124msource_raw\u001b[39m\u001b[38;5;124m\"\u001b[39m]\u001b[38;5;241m.\u001b[39mvalues\u001b[38;5;241m.\u001b[39mtolist()),\n\u001b[1;32m 35\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtarget_raw\u001b[39m\u001b[38;5;124m\"\u001b[39m: flatten(df[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtarget_raw\u001b[39m\u001b[38;5;124m\"\u001b[39m]\u001b[38;5;241m.\u001b[39mvalues\u001b[38;5;241m.\u001b[39mtolist()),\n\u001b[1;32m 36\u001b[0m },\n\u001b[1;32m 37\u001b[0m )\u001b[38;5;241m.\u001b[39mcast_column(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124maudio\u001b[39m\u001b[38;5;124m\"\u001b[39m, Audio())\n\u001b[0;32m---> 38\u001b[0m \u001b[43maudio_dataset\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpush_to_hub\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mivangtorre/second_americas_nlp_2022\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43msplit\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mtrain\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m 40\u001b[0m df\u001b[38;5;241m.\u001b[39mto_csv(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtrain.csv\u001b[39m\u001b[38;5;124m\"\u001b[39m, sep\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;130;01m\\t\u001b[39;00m\u001b[38;5;124m'\u001b[39m, index\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m)\n\u001b[1;32m 42\u001b[0m df \u001b[38;5;241m=\u001b[39m generate_df(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mquechua\u001b[39m\u001b[38;5;124m\"\u001b[39m, 
\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mdev\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n", "File \u001b[0;32m~/.local/lib/python3.10/site-packages/datasets/arrow_dataset.py:5707\u001b[0m, in \u001b[0;36mDataset.push_to_hub\u001b[0;34m(self, repo_id, config_name, set_default, split, data_dir, commit_message, commit_description, private, token, revision, branch, create_pr, max_shard_size, num_shards, embed_external_files)\u001b[0m\n\u001b[1;32m 5705\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m repo_info\u001b[38;5;241m.\u001b[39msplits \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;28mlist\u001b[39m(repo_info\u001b[38;5;241m.\u001b[39msplits) \u001b[38;5;241m!=\u001b[39m [split]:\n\u001b[1;32m 5706\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_info\u001b[38;5;241m.\u001b[39mfeatures \u001b[38;5;241m!=\u001b[39m repo_info\u001b[38;5;241m.\u001b[39mfeatures:\n\u001b[0;32m-> 5707\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[1;32m 5708\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mFeatures of the new split don\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mt match the features of the existing splits on the hub: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_info\u001b[38;5;241m.\u001b[39mfeatures\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m != \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mrepo_info\u001b[38;5;241m.\u001b[39mfeatures\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 5709\u001b[0m )\n\u001b[1;32m 5711\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m split \u001b[38;5;129;01min\u001b[39;00m repo_info\u001b[38;5;241m.\u001b[39msplits:\n\u001b[1;32m 5712\u001b[0m repo_info\u001b[38;5;241m.\u001b[39mdownload_size \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m=\u001b[39m deleted_size\n", "\u001b[0;31mValueError\u001b[0m: Features of the new split don't match the features of the existing splits on the hub: {'audio': Audio(sampling_rate=None, mono=True, decode=True, id=None), 'source_processed': Value(dtype='string', id=None), 'source_raw': Value(dtype='string', id=None), 'target_raw': Value(dtype='string', id=None)} != {'audio': Audio(sampling_rate=None, mono=True, decode=True, id=None)}" ] } ], "source": [ "import pandas as pd\n", "from datasets import Dataset, Audio\n", "\n", "def generate_df(language, split):\n", " # QUECHUA TRAIN\n", " with open(\"./../\"+language +\"_\"+split+\".tsv\") as f:\n", " lines = f.read().splitlines()\n", " lines2 = [l.split(\"\\t\") for l in lines if len(l.split(\"\\t\"))==4]\n", " asd = [l.split(\"\\t\")[0] for l in lines if len(l.split(\"\\t\"))>4]\n", " print(asd)\n", " df1 = pd.DataFrame(lines2[1::], columns =lines2[0:1])\n", " df1 = df1.assign(split=[split]*df1.shape[0])\n", " df1 = df1.assign(subset=[language]*df1.shape[0])\n", " df1 = df1.rename(columns={'wav': 'file_name'})\n", " df1['file_name'] = 'data/' + language + '/' + split +'/' + df1['file_name'].astype(str)\n", " return df1\n", "\n", "df = generate_df(\"quechua\", \"train\")\n", "df = pd.concat([df, generate_df(\"guarani\", \"train\")])\n", "df = pd.concat([df, generate_df(\"kotiria\", \"train\")])\n", "df = pd.concat([df, generate_df(\"bribri\", \"train\")])\n", "df = pd.concat([df, generate_df(\"waikhana\", \"train\")])\n", "cols = df.columns.tolist()\n", "cols = cols[-1:] + cols[:-1]\n", "df = df[cols]\n", "\n", "def flatten(xss):\n", " return [x for xs in xss for x in 
xs]\n", "\n", "a = flatten(df[\"file_name\"].values.tolist())\n", "a = flatten(a)\n", "audio_dataset = Dataset.from_dict({\"audio\": flatten(df[\"file_name\"].values.tolist()),\n", " \"source_processed\": flatten(df[\"source_processed\"].values.tolist()),\n", " \"source_raw\": flatten(df[\"source_raw\"].values.tolist()),\n", " \"target_raw\": flatten(df[\"target_raw\"].values.tolist()),\n", " },\n", " ).cast_column(\"audio\", Audio())\n", "audio_dataset.push_to_hub(\"ivangtorre/second_americas_nlp_2022\", split=\"train\")\n", "\n", "df.to_csv(\"train.csv\", sep='\\t', index=None)\n", "\n", "df = generate_df(\"quechua\", \"dev\")\n", "df = pd.concat([df, generate_df(\"guarani\", \"dev\")])\n", "df = pd.concat([df, generate_df(\"kotiria\", \"dev\")])\n", "df = pd.concat([df, generate_df(\"bribri\", \"dev\")])\n", "df = pd.concat([df, generate_df(\"waikhana\", \"dev\")])\n", "cols = df.columns.tolist()\n", "cols = cols[-1:] + cols[:-1]\n", "df = df[cols]\n", "df.to_csv(\"dev.csv\", sep='\\t', index=None)\n", "\n", "a = df[\"file_name\"].values.tolist()\n", "a = flatten(a)\n", "#audio_dataset = Dataset.from_dict({\"audio\": a}).cast_column(\"audio\", Audio())\n", "#audio_dataset.push_to_hub(\"ivangtorre/second_americas_nlp_2022\", split=\"dev\")\n", "\n" ] }, { "cell_type": "code", "execution_count": 6, "id": "4ce2eeb3", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "{'audio': {'path': 'data/quechua/train/quechua000000.wav',\n", " 'array': array([0.00045776, 0.00042725, 0.00018311, ..., 0.00286865, 0.00186157,\n", " 0.00253296]),\n", " 'sampling_rate': 16000}}" ] }, "execution_count": 6, "metadata": {}, "output_type": "execute_result" } ], "source": [ "audio_dataset[0]" ] }, { "cell_type": "code", "execution_count": 10, "id": "bd39f2f4", "metadata": {}, "outputs": [ { "data": { "text/html": [ "
4749 rows × 6 columns\n
" ], "text/plain": [ " subset file_name \\\n", "0 quechua data/quechua/train/quechua000000.wav \n", "1 quechua data/quechua/train/quechua000001.wav \n", "2 quechua data/quechua/train/quechua000002.wav \n", "3 quechua data/quechua/train/quechua000003.wav \n", "4 quechua data/quechua/train/quechua000004.wav \n", "... ... ... \n", "1411 waikhana data/waikhana/train/waikhana001414.wav \n", "1412 waikhana data/waikhana/train/waikhana001415.wav \n", "1413 waikhana data/waikhana/train/waikhana001416.wav \n", "1414 waikhana data/waikhana/train/waikhana001417.wav \n", "1415 waikhana data/waikhana/train/waikhana001418.wav \n", "\n", " source_processed \\\n", "0 wañuchisunchu kay suwakunata \n", "1 imaninkichikmi qamkuna \n", "2 hatun urqukunapi kunturkunapas uyarirqan \n", "3 ninsi winsislaw maqtaqa tumpa machasqaña \n", "4 huk qilli chuspi chuspi misapi kimsantin suwak... \n", "... ... \n", "1411 masiaha malia masinapea \n", "1412 a'lide mu:sale ya'uaha yu:'u: \n", "1413 a'lide tina a'likodo pekasonoko a'li gravaka'a... \n", "1414 sayeotha ninokata mipe \n", "1415 yu:'u:le ~o'o ihide yu:'u: akaye \n", "\n", " source_raw \\\n", "0 wañuchisunchu kay suwakunata \n", "1 imaninkichikmi qamkuna \n", "2 hatun urqukunapi kunturkunapas uyarirqan \n", "3 ninsi winsislaw maqtaqa tumpa machasqaña \n", "4 huk qilli chuspi chuspi misapi kimsantin suwak... \n", "... ... \n", "1411 masiaha malia masinapea, () \n", "1412 a'lide mu:sale ya'uaha yu:'u: \n", "1413 a'lide tina a'likodo pekasonoko a'li gravaka'a... \n", "1414 sayeotha ninokata mipe \n", "1415 yu:'u:le ~o'o ihide yu:'u: akaye \n", "\n", " target_raw split \n", "0 matemos a esos ladrones train \n", "1 que dicen ustedes train \n", "2 en grandes montañas hasta los condores escuchaban train \n", "3 dice el joven wessceslao cuando ya estaba borr... train \n", "4 una sucia mosca en la mesa con los tres ladron... train \n", "... ... ... \n", "1411 Nos tambem sabemos (as historias antigas) train \n", "1412 Tudo isso estou explicando para voces. train \n", "1413 Tudo isso essa branca vai gravar. train \n", "1414 Ela disse que vai fazer tudo isso, train \n", "1415 Para mim, e' ate aqui, meus irmaos. 
train \n", "\n", "[4749 rows x 6 columns]" ] }, "execution_count": 10, "metadata": {}, "output_type": "execute_result" } ], "source": [ "df" ] }, { "cell_type": "code", "execution_count": 2, "id": "a1f02703", "metadata": { "scrolled": true }, "outputs": [], "source": [ "#from datasets import load_dataset\n", "#dataset = load_dataset(\"audiofolder\", data_dir=\"second_americas_nlp_2022\")\n" ] }, { "cell_type": "markdown", "id": "5eaa7c93", "metadata": {}, "source": [ "# EVALUATE MODELS\n" ] }, { "cell_type": "markdown", "id": "2e4e15c9", "metadata": {}, "source": [ "## QUECHUA" ] }, { "cell_type": "code", "execution_count": 8, "id": "e165f4bf", "metadata": { "scrolled": true }, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "9c96f2ce38474bc990e57387acd56fc8", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Map: 0%| | 0/250 [00:00\u001b[0;34m()\u001b[0m\n\u001b[1;32m 22\u001b[0m batch[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtranscription\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m processor\u001b[38;5;241m.\u001b[39mbatch_decode(predicted_ids)\n\u001b[1;32m 23\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m batch\n\u001b[0;32m---> 25\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[43mquechua\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mmap\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmap_to_pred\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mbatched\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mbatch_size\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m 27\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mCER:\u001b[39m\u001b[38;5;124m\"\u001b[39m, cer(result[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124msource_processed\u001b[39m\u001b[38;5;124m\"\u001b[39m], result[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtranscription\u001b[39m\u001b[38;5;124m\"\u001b[39m]))\n", "File \u001b[0;32m~/.local/lib/python3.10/site-packages/datasets/arrow_dataset.py:602\u001b[0m, in \u001b[0;36mtransmit_tasks..wrapper\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 600\u001b[0m \u001b[38;5;28mself\u001b[39m: \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mDataset\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;241m=\u001b[39m kwargs\u001b[38;5;241m.\u001b[39mpop(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mself\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 601\u001b[0m \u001b[38;5;66;03m# apply actual function\u001b[39;00m\n\u001b[0;32m--> 602\u001b[0m out: Union[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mDataset\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mDatasetDict\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m \u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 603\u001b[0m datasets: List[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mDataset\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mlist\u001b[39m(out\u001b[38;5;241m.\u001b[39mvalues()) \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(out, 
\u001b[38;5;28mdict\u001b[39m) \u001b[38;5;28;01melse\u001b[39;00m [out]\n\u001b[1;32m 604\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m dataset \u001b[38;5;129;01min\u001b[39;00m datasets:\n\u001b[1;32m 605\u001b[0m \u001b[38;5;66;03m# Remove task templates if a column mapping of the template is no longer valid\u001b[39;00m\n", "File \u001b[0;32m~/.local/lib/python3.10/site-packages/datasets/arrow_dataset.py:567\u001b[0m, in \u001b[0;36mtransmit_format..wrapper\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 560\u001b[0m self_format \u001b[38;5;241m=\u001b[39m {\n\u001b[1;32m 561\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtype\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_format_type,\n\u001b[1;32m 562\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mformat_kwargs\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_format_kwargs,\n\u001b[1;32m 563\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcolumns\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_format_columns,\n\u001b[1;32m 564\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124moutput_all_columns\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_output_all_columns,\n\u001b[1;32m 565\u001b[0m }\n\u001b[1;32m 566\u001b[0m \u001b[38;5;66;03m# apply actual function\u001b[39;00m\n\u001b[0;32m--> 567\u001b[0m out: Union[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mDataset\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mDatasetDict\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m \u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 568\u001b[0m datasets: List[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mDataset\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mlist\u001b[39m(out\u001b[38;5;241m.\u001b[39mvalues()) \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(out, \u001b[38;5;28mdict\u001b[39m) \u001b[38;5;28;01melse\u001b[39;00m [out]\n\u001b[1;32m 569\u001b[0m \u001b[38;5;66;03m# re-apply format to the output\u001b[39;00m\n", "File \u001b[0;32m~/.local/lib/python3.10/site-packages/datasets/arrow_dataset.py:3156\u001b[0m, in \u001b[0;36mDataset.map\u001b[0;34m(self, function, with_indices, with_rank, input_columns, batched, batch_size, drop_last_batch, remove_columns, keep_in_memory, load_from_cache_file, cache_file_name, writer_batch_size, features, disable_nullable, fn_kwargs, num_proc, suffix_template, new_fingerprint, desc)\u001b[0m\n\u001b[1;32m 3150\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m transformed_dataset \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 3151\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m hf_tqdm(\n\u001b[1;32m 3152\u001b[0m unit\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m examples\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 3153\u001b[0m total\u001b[38;5;241m=\u001b[39mpbar_total,\n\u001b[1;32m 3154\u001b[0m desc\u001b[38;5;241m=\u001b[39mdesc \u001b[38;5;129;01mor\u001b[39;00m 
\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mMap\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 3155\u001b[0m ) \u001b[38;5;28;01mas\u001b[39;00m pbar:\n\u001b[0;32m-> 3156\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m rank, done, content \u001b[38;5;129;01min\u001b[39;00m Dataset\u001b[38;5;241m.\u001b[39m_map_single(\u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mdataset_kwargs):\n\u001b[1;32m 3157\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m done:\n\u001b[1;32m 3158\u001b[0m shards_done \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m \u001b[38;5;241m1\u001b[39m\n", "File \u001b[0;32m~/.local/lib/python3.10/site-packages/datasets/arrow_dataset.py:3547\u001b[0m, in \u001b[0;36mDataset._map_single\u001b[0;34m(shard, function, with_indices, with_rank, input_columns, batched, batch_size, drop_last_batch, remove_columns, keep_in_memory, cache_file_name, writer_batch_size, features, disable_nullable, fn_kwargs, new_fingerprint, rank, offset)\u001b[0m\n\u001b[1;32m 3543\u001b[0m indices \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mlist\u001b[39m(\n\u001b[1;32m 3544\u001b[0m \u001b[38;5;28mrange\u001b[39m(\u001b[38;5;241m*\u001b[39m(\u001b[38;5;28mslice\u001b[39m(i, i \u001b[38;5;241m+\u001b[39m batch_size)\u001b[38;5;241m.\u001b[39mindices(shard\u001b[38;5;241m.\u001b[39mnum_rows)))\n\u001b[1;32m 3545\u001b[0m ) \u001b[38;5;66;03m# Something simpler?\u001b[39;00m\n\u001b[1;32m 3546\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m-> 3547\u001b[0m batch \u001b[38;5;241m=\u001b[39m \u001b[43mapply_function_on_filtered_inputs\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 3548\u001b[0m \u001b[43m \u001b[49m\u001b[43mbatch\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3549\u001b[0m \u001b[43m \u001b[49m\u001b[43mindices\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3550\u001b[0m \u001b[43m \u001b[49m\u001b[43mcheck_same_num_examples\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mlen\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mshard\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mlist_indexes\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m>\u001b[39;49m\u001b[43m \u001b[49m\u001b[38;5;241;43m0\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3551\u001b[0m \u001b[43m \u001b[49m\u001b[43moffset\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moffset\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3552\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 3553\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m NumExamplesMismatchError:\n\u001b[1;32m 3554\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m DatasetTransformationNotAllowedError(\n\u001b[1;32m 3555\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mUsing `.map` in batched mode on a dataset with attached indexes is allowed only if it doesn\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mt create or remove existing examples. 
You can first run `.drop_index() to remove your index and then re-add it.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 3556\u001b[0m ) \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m\n", "File \u001b[0;32m~/.local/lib/python3.10/site-packages/datasets/arrow_dataset.py:3416\u001b[0m, in \u001b[0;36mDataset._map_single..apply_function_on_filtered_inputs\u001b[0;34m(pa_inputs, indices, check_same_num_examples, offset)\u001b[0m\n\u001b[1;32m 3414\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m with_rank:\n\u001b[1;32m 3415\u001b[0m additional_args \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m (rank,)\n\u001b[0;32m-> 3416\u001b[0m processed_inputs \u001b[38;5;241m=\u001b[39m \u001b[43mfunction\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mfn_args\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43madditional_args\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mfn_kwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 3417\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(processed_inputs, LazyDict):\n\u001b[1;32m 3418\u001b[0m processed_inputs \u001b[38;5;241m=\u001b[39m {\n\u001b[1;32m 3419\u001b[0m k: v \u001b[38;5;28;01mfor\u001b[39;00m k, v \u001b[38;5;129;01min\u001b[39;00m processed_inputs\u001b[38;5;241m.\u001b[39mdata\u001b[38;5;241m.\u001b[39mitems() \u001b[38;5;28;01mif\u001b[39;00m k \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;129;01min\u001b[39;00m processed_inputs\u001b[38;5;241m.\u001b[39mkeys_to_format\n\u001b[1;32m 3420\u001b[0m }\n", "Input \u001b[0;32mIn [8]\u001b[0m, in \u001b[0;36mmap_to_pred\u001b[0;34m(batch)\u001b[0m\n\u001b[1;32m 15\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mmap_to_pred\u001b[39m(batch):\n\u001b[0;32m---> 16\u001b[0m wav, curr_sample_rate \u001b[38;5;241m=\u001b[39m \u001b[43msf\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mread\u001b[49m\u001b[43m(\u001b[49m\u001b[43mbatch\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mfile_name\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;241;43m0\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdtype\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mfloat32\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m 17\u001b[0m feats \u001b[38;5;241m=\u001b[39m torch\u001b[38;5;241m.\u001b[39mfrom_numpy(wav)\u001b[38;5;241m.\u001b[39mfloat()\n\u001b[1;32m 18\u001b[0m feats \u001b[38;5;241m=\u001b[39m F\u001b[38;5;241m.\u001b[39mlayer_norm(feats, feats\u001b[38;5;241m.\u001b[39mshape) \u001b[38;5;66;03m# Normalization performed during finetuning\u001b[39;00m\n", "File \u001b[0;32m~/.local/lib/python3.10/site-packages/soundfile.py:285\u001b[0m, in \u001b[0;36mread\u001b[0;34m(file, frames, start, stop, dtype, always_2d, fill_value, out, samplerate, channels, format, subtype, endian, closefd)\u001b[0m\n\u001b[1;32m 199\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mread\u001b[39m(file, frames\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m-\u001b[39m\u001b[38;5;241m1\u001b[39m, start\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m0\u001b[39m, stop\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m, 
dtype\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mfloat64\u001b[39m\u001b[38;5;124m'\u001b[39m, always_2d\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m,\n\u001b[1;32m 200\u001b[0m fill_value\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m, out\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m, samplerate\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m, channels\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[1;32m 201\u001b[0m \u001b[38;5;28mformat\u001b[39m\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m, subtype\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m, endian\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m, closefd\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m):\n\u001b[1;32m 202\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"Provide audio data from a sound file as NumPy array.\u001b[39;00m\n\u001b[1;32m 203\u001b[0m \n\u001b[1;32m 204\u001b[0m \u001b[38;5;124;03m By default, the whole file is read from the beginning, but the\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 283\u001b[0m \n\u001b[1;32m 284\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[0;32m--> 285\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m \u001b[43mSoundFile\u001b[49m\u001b[43m(\u001b[49m\u001b[43mfile\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mr\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43msamplerate\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mchannels\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 286\u001b[0m \u001b[43m \u001b[49m\u001b[43msubtype\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mendian\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mformat\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mclosefd\u001b[49m\u001b[43m)\u001b[49m \u001b[38;5;28;01mas\u001b[39;00m f:\n\u001b[1;32m 287\u001b[0m frames \u001b[38;5;241m=\u001b[39m f\u001b[38;5;241m.\u001b[39m_prepare_read(start, stop, frames)\n\u001b[1;32m 288\u001b[0m data \u001b[38;5;241m=\u001b[39m f\u001b[38;5;241m.\u001b[39mread(frames, dtype, always_2d, fill_value, out)\n", "File \u001b[0;32m~/.local/lib/python3.10/site-packages/soundfile.py:658\u001b[0m, in \u001b[0;36mSoundFile.__init__\u001b[0;34m(self, file, mode, samplerate, channels, subtype, endian, format, closefd)\u001b[0m\n\u001b[1;32m 655\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_mode \u001b[38;5;241m=\u001b[39m mode\n\u001b[1;32m 656\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_info \u001b[38;5;241m=\u001b[39m _create_info_struct(file, mode, samplerate, channels,\n\u001b[1;32m 657\u001b[0m \u001b[38;5;28mformat\u001b[39m, subtype, endian)\n\u001b[0;32m--> 658\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_file \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_open\u001b[49m\u001b[43m(\u001b[49m\u001b[43mfile\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmode_int\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mclosefd\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 659\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mset\u001b[39m(mode)\u001b[38;5;241m.\u001b[39missuperset(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mr+\u001b[39m\u001b[38;5;124m'\u001b[39m) 
\u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mseekable():\n\u001b[1;32m 660\u001b[0m \u001b[38;5;66;03m# Move write position to 0 (like in Python file objects)\u001b[39;00m\n\u001b[1;32m 661\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mseek(\u001b[38;5;241m0\u001b[39m)\n", "File \u001b[0;32m~/.local/lib/python3.10/site-packages/soundfile.py:1216\u001b[0m, in \u001b[0;36mSoundFile._open\u001b[0;34m(self, file, mode_int, closefd)\u001b[0m\n\u001b[1;32m 1213\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m file_ptr \u001b[38;5;241m==\u001b[39m _ffi\u001b[38;5;241m.\u001b[39mNULL:\n\u001b[1;32m 1214\u001b[0m \u001b[38;5;66;03m# get the actual error code\u001b[39;00m\n\u001b[1;32m 1215\u001b[0m err \u001b[38;5;241m=\u001b[39m _snd\u001b[38;5;241m.\u001b[39msf_error(file_ptr)\n\u001b[0;32m-> 1216\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m LibsndfileError(err, prefix\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mError opening \u001b[39m\u001b[38;5;132;01m{0!r}\u001b[39;00m\u001b[38;5;124m: \u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;241m.\u001b[39mformat(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mname))\n\u001b[1;32m 1217\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m mode_int \u001b[38;5;241m==\u001b[39m _snd\u001b[38;5;241m.\u001b[39mSFM_WRITE:\n\u001b[1;32m 1218\u001b[0m \u001b[38;5;66;03m# Due to a bug in libsndfile version <= 1.0.25, frames != 0\u001b[39;00m\n\u001b[1;32m 1219\u001b[0m \u001b[38;5;66;03m# when opening a named pipe in SFM_WRITE mode.\u001b[39;00m\n\u001b[1;32m 1220\u001b[0m \u001b[38;5;66;03m# See http://github.com/erikd/libsndfile/issues/77.\u001b[39;00m\n\u001b[1;32m 1221\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_info\u001b[38;5;241m.\u001b[39mframes \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m0\u001b[39m\n", "\u001b[0;31mLibsndfileError\u001b[0m: Error opening 'data/quechua/dev/quechua000573.wav': System error." 
] } ], "source": [ "from datasets import load_dataset\n", "from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor\n", "import torch\n", "from jiwer import cer\n", "import torch.nn.functional as F\n", "import soundfile as sf\n", "\n", "# Load the dev split and keep only the Quechua subset\n", "americasnlp = load_dataset(\"ivangtorre/second_americas_nlp_2022\", split=\"dev\")\n", "quechua = americasnlp.filter(lambda example: example['subset']=='quechua')\n", "\n", "model = Wav2Vec2ForCTC.from_pretrained(\"ivangtorre/wav2vec2-xlsr-300m-quechua\")\n", "processor = Wav2Vec2Processor.from_pretrained(\"ivangtorre/wav2vec2-xlsr-300m-quechua\")\n", "\n", "def map_to_pred(batch):\n", " # file_name is a relative path, e.g. data/quechua/dev/..., and must exist locally\n", " wav, curr_sample_rate = sf.read(batch[\"file_name\"][0], dtype=\"float32\")\n", " feats = torch.from_numpy(wav).float()\n", " feats = F.layer_norm(feats, feats.shape) # Normalization performed during finetuning\n", " feats = torch.unsqueeze(feats, 0)\n", " logits = model(feats).logits\n", " predicted_ids = torch.argmax(logits, dim=-1) # greedy CTC decoding\n", " batch[\"transcription\"] = processor.batch_decode(predicted_ids)\n", " return batch\n", "\n", "result = quechua.map(map_to_pred, batched=True, batch_size=1)\n", "\n", "print(\"CER:\", cer(result[\"source_processed\"], result[\"transcription\"]))\n" ] }, { "cell_type": "markdown", "id": "8e29bc13", "metadata": {}, "source": [ "## BRIBRI\n" ] }, { "cell_type": "code", "execution_count": 7, "id": "7cdec414", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "'data/quechua/dev/quechua000573.wav'" ] }, "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], "source": [ "quechua[0:1][\"file_name\"][0]" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.12" } }, "nbformat": 4, "nbformat_minor": 5 }