{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "import datasets"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Downloading readme: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 12.5k/12.5k [00:00<00:00, 12.5MB/s]\n",
      "Downloading data: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 2.27M/2.27M [00:00<00:00, 6.70MB/s]\n",
      "Generating train split: 0 examples [00:00, ? examples/s]c:\\Users\\ericr\\miniconda3\\envs\\sta663C\\Lib\\site-packages\\datasets\\download\\streaming_download_manager.py:778: FutureWarning: The 'verbose' keyword in pd.read_csv is deprecated and will be removed in a future version.\n",
      "  return pd.read_csv(xopen(filepath_or_buffer, \"rb\", download_config=download_config), **kwargs)\n",
      "Generating train split: 0 examples [00:00, ? examples/s]\n"
     ]
    },
    {
     "ename": "DatasetGenerationCastError",
     "evalue": "An error occurred while generating the dataset\n\nAll the data files must have the same columns, but at some point there are 1 new columns (id) and 1 missing columns (post_id).\n\nThis happened while the csv dataset builder was generating data using\n\nhf://datasets/steamcyclone/Pill_Ideologies-Post_Titles/reddit_posts_fm.csv (at revision dac12a4af65322040e2e6c00c346ffc49cc8eccf)\n\nPlease either edit the data files to have matching columns, or separate them into different configurations (see docs at https://hf.co/docs/hub/datasets-manual-configuration#multiple-configurations)",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mCastError\u001b[0m                                 Traceback (most recent call last)",
      "File \u001b[1;32mc:\\Users\\ericr\\miniconda3\\envs\\sta663C\\Lib\\site-packages\\datasets\\builder.py:1989\u001b[0m, in \u001b[0;36mArrowBasedBuilder._prepare_split_single\u001b[1;34m(self, gen_kwargs, fpath, file_format, max_shard_size, job_id)\u001b[0m\n\u001b[0;32m   1988\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m-> 1989\u001b[0m     \u001b[43mwriter\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mwrite_table\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtable\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m   1990\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m CastError \u001b[38;5;28;01mas\u001b[39;00m cast_error:\n",
      "File \u001b[1;32mc:\\Users\\ericr\\miniconda3\\envs\\sta663C\\Lib\\site-packages\\datasets\\arrow_writer.py:574\u001b[0m, in \u001b[0;36mArrowWriter.write_table\u001b[1;34m(self, pa_table, writer_batch_size)\u001b[0m\n\u001b[0;32m    573\u001b[0m pa_table \u001b[38;5;241m=\u001b[39m pa_table\u001b[38;5;241m.\u001b[39mcombine_chunks()\n\u001b[1;32m--> 574\u001b[0m pa_table \u001b[38;5;241m=\u001b[39m \u001b[43mtable_cast\u001b[49m\u001b[43m(\u001b[49m\u001b[43mpa_table\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_schema\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    575\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39membed_local_files:\n",
      "File \u001b[1;32mc:\\Users\\ericr\\miniconda3\\envs\\sta663C\\Lib\\site-packages\\datasets\\table.py:2322\u001b[0m, in \u001b[0;36mtable_cast\u001b[1;34m(table, schema)\u001b[0m\n\u001b[0;32m   2321\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m table\u001b[38;5;241m.\u001b[39mschema \u001b[38;5;241m!=\u001b[39m schema:\n\u001b[1;32m-> 2322\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mcast_table_to_schema\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtable\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mschema\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m   2323\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m table\u001b[38;5;241m.\u001b[39mschema\u001b[38;5;241m.\u001b[39mmetadata \u001b[38;5;241m!=\u001b[39m schema\u001b[38;5;241m.\u001b[39mmetadata:\n",
      "File \u001b[1;32mc:\\Users\\ericr\\miniconda3\\envs\\sta663C\\Lib\\site-packages\\datasets\\table.py:2276\u001b[0m, in \u001b[0;36mcast_table_to_schema\u001b[1;34m(table, schema)\u001b[0m\n\u001b[0;32m   2275\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28msorted\u001b[39m(table\u001b[38;5;241m.\u001b[39mcolumn_names) \u001b[38;5;241m!=\u001b[39m \u001b[38;5;28msorted\u001b[39m(features):\n\u001b[1;32m-> 2276\u001b[0m     \u001b[38;5;28;01mraise\u001b[39;00m CastError(\n\u001b[0;32m   2277\u001b[0m         \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mCouldn\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mt cast\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;132;01m{\u001b[39;00mtable\u001b[38;5;241m.\u001b[39mschema\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124mto\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;132;01m{\u001b[39;00mfeatures\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124mbecause column names don\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mt match\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[0;32m   2278\u001b[0m         table_column_names\u001b[38;5;241m=\u001b[39mtable\u001b[38;5;241m.\u001b[39mcolumn_names,\n\u001b[0;32m   2279\u001b[0m         requested_column_names\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mlist\u001b[39m(features),\n\u001b[0;32m   2280\u001b[0m     )\n\u001b[0;32m   2281\u001b[0m arrays \u001b[38;5;241m=\u001b[39m [cast_array_to_feature(table[name], feature) \u001b[38;5;28;01mfor\u001b[39;00m name, feature \u001b[38;5;129;01min\u001b[39;00m features\u001b[38;5;241m.\u001b[39mitems()]\n",
      "\u001b[1;31mCastError\u001b[0m: Couldn't cast\nsubreddit: string\nid: string\ntitle: string\ntext: string\nurl: string\nscore: int64\nauthor: string\ndate: double\n-- schema metadata --\npandas: '{\"index_columns\": [{\"kind\": \"range\", \"name\": null, \"start\": 0, \"' + 1136\nto\n{'subreddit': Value(dtype='string', id=None), 'post_id': Value(dtype='string', id=None), 'title': Value(dtype='string', id=None), 'text': Value(dtype='string', id=None), 'url': Value(dtype='string', id=None), 'score': Value(dtype='int32', id=None), 'author': Value(dtype='string', id=None), 'date': Value(dtype='int64', id=None)}\nbecause column names don't match",
      "\nDuring handling of the above exception, another exception occurred:\n",
      "\u001b[1;31mDatasetGenerationCastError\u001b[0m                Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[4], line 1\u001b[0m\n\u001b[1;32m----> 1\u001b[0m test \u001b[38;5;241m=\u001b[39m \u001b[43mdatasets\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mload_dataset\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43msteamcyclone/Pill_Ideologies-Post_Titles\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n",
      "File \u001b[1;32mc:\\Users\\ericr\\miniconda3\\envs\\sta663C\\Lib\\site-packages\\datasets\\load.py:2549\u001b[0m, in \u001b[0;36mload_dataset\u001b[1;34m(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, verification_mode, ignore_verifications, keep_in_memory, save_infos, revision, token, use_auth_token, task, streaming, num_proc, storage_options, trust_remote_code, **config_kwargs)\u001b[0m\n\u001b[0;32m   2546\u001b[0m try_from_hf_gcs \u001b[38;5;241m=\u001b[39m path \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;129;01min\u001b[39;00m _PACKAGED_DATASETS_MODULES\n\u001b[0;32m   2548\u001b[0m \u001b[38;5;66;03m# Download and prepare data\u001b[39;00m\n\u001b[1;32m-> 2549\u001b[0m \u001b[43mbuilder_instance\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdownload_and_prepare\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m   2550\u001b[0m \u001b[43m    \u001b[49m\u001b[43mdownload_config\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdownload_config\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m   2551\u001b[0m \u001b[43m    \u001b[49m\u001b[43mdownload_mode\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdownload_mode\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m   2552\u001b[0m \u001b[43m    \u001b[49m\u001b[43mverification_mode\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mverification_mode\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m   2553\u001b[0m \u001b[43m    \u001b[49m\u001b[43mtry_from_hf_gcs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtry_from_hf_gcs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m   2554\u001b[0m \u001b[43m    \u001b[49m\u001b[43mnum_proc\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mnum_proc\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m   2555\u001b[0m \u001b[43m    \u001b[49m\u001b[43mstorage_options\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstorage_options\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m   2556\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m   2558\u001b[0m \u001b[38;5;66;03m# Build dataset for splits\u001b[39;00m\n\u001b[0;32m   2559\u001b[0m keep_in_memory \u001b[38;5;241m=\u001b[39m (\n\u001b[0;32m   2560\u001b[0m     keep_in_memory \u001b[38;5;28;01mif\u001b[39;00m keep_in_memory \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;28;01melse\u001b[39;00m is_small_dataset(builder_instance\u001b[38;5;241m.\u001b[39minfo\u001b[38;5;241m.\u001b[39mdataset_size)\n\u001b[0;32m   2561\u001b[0m )\n",
      "File \u001b[1;32mc:\\Users\\ericr\\miniconda3\\envs\\sta663C\\Lib\\site-packages\\datasets\\builder.py:1005\u001b[0m, in \u001b[0;36mDatasetBuilder.download_and_prepare\u001b[1;34m(self, output_dir, download_config, download_mode, verification_mode, ignore_verifications, try_from_hf_gcs, dl_manager, base_path, use_auth_token, file_format, max_shard_size, num_proc, storage_options, **download_and_prepare_kwargs)\u001b[0m\n\u001b[0;32m   1003\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m num_proc \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m   1004\u001b[0m         prepare_split_kwargs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mnum_proc\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m num_proc\n\u001b[1;32m-> 1005\u001b[0m     \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_download_and_prepare\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m   1006\u001b[0m \u001b[43m        \u001b[49m\u001b[43mdl_manager\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdl_manager\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m   1007\u001b[0m \u001b[43m        \u001b[49m\u001b[43mverification_mode\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mverification_mode\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m   1008\u001b[0m \u001b[43m        \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mprepare_split_kwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m   1009\u001b[0m \u001b[43m        \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mdownload_and_prepare_kwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m   1010\u001b[0m \u001b[43m    \u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m   1011\u001b[0m \u001b[38;5;66;03m# Sync info\u001b[39;00m\n\u001b[0;32m   1012\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39minfo\u001b[38;5;241m.\u001b[39mdataset_size \u001b[38;5;241m=\u001b[39m \u001b[38;5;28msum\u001b[39m(split\u001b[38;5;241m.\u001b[39mnum_bytes \u001b[38;5;28;01mfor\u001b[39;00m split \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39minfo\u001b[38;5;241m.\u001b[39msplits\u001b[38;5;241m.\u001b[39mvalues())\n",
      "File \u001b[1;32mc:\\Users\\ericr\\miniconda3\\envs\\sta663C\\Lib\\site-packages\\datasets\\builder.py:1100\u001b[0m, in \u001b[0;36mDatasetBuilder._download_and_prepare\u001b[1;34m(self, dl_manager, verification_mode, **prepare_split_kwargs)\u001b[0m\n\u001b[0;32m   1096\u001b[0m split_dict\u001b[38;5;241m.\u001b[39madd(split_generator\u001b[38;5;241m.\u001b[39msplit_info)\n\u001b[0;32m   1098\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m   1099\u001b[0m     \u001b[38;5;66;03m# Prepare split will record examples associated to the split\u001b[39;00m\n\u001b[1;32m-> 1100\u001b[0m     \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_prepare_split\u001b[49m\u001b[43m(\u001b[49m\u001b[43msplit_generator\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mprepare_split_kwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m   1101\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mOSError\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[0;32m   1102\u001b[0m     \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mOSError\u001b[39;00m(\n\u001b[0;32m   1103\u001b[0m         \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mCannot find data file. \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m   1104\u001b[0m         \u001b[38;5;241m+\u001b[39m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mmanual_download_instructions \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m   1105\u001b[0m         \u001b[38;5;241m+\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124mOriginal error:\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m   1106\u001b[0m         \u001b[38;5;241m+\u001b[39m \u001b[38;5;28mstr\u001b[39m(e)\n\u001b[0;32m   1107\u001b[0m     ) \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m\n",
      "File \u001b[1;32mc:\\Users\\ericr\\miniconda3\\envs\\sta663C\\Lib\\site-packages\\datasets\\builder.py:1860\u001b[0m, in \u001b[0;36mArrowBasedBuilder._prepare_split\u001b[1;34m(self, split_generator, file_format, num_proc, max_shard_size)\u001b[0m\n\u001b[0;32m   1858\u001b[0m job_id \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m0\u001b[39m\n\u001b[0;32m   1859\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m pbar:\n\u001b[1;32m-> 1860\u001b[0m \u001b[43m    \u001b[49m\u001b[38;5;28;43;01mfor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mjob_id\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdone\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcontent\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01min\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_prepare_split_single\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m   1861\u001b[0m \u001b[43m        \u001b[49m\u001b[43mgen_kwargs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mgen_kwargs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mjob_id\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mjob_id\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43m_prepare_split_args\u001b[49m\n\u001b[0;32m   1862\u001b[0m \u001b[43m    \u001b[49m\u001b[43m)\u001b[49m\u001b[43m:\u001b[49m\n\u001b[0;32m   1863\u001b[0m \u001b[43m        \u001b[49m\u001b[38;5;28;43;01mif\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mdone\u001b[49m\u001b[43m:\u001b[49m\n\u001b[0;32m   1864\u001b[0m \u001b[43m            \u001b[49m\u001b[43mresult\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m \u001b[49m\u001b[43mcontent\u001b[49m\n",
      "File \u001b[1;32mc:\\Users\\ericr\\miniconda3\\envs\\sta663C\\Lib\\site-packages\\datasets\\builder.py:1991\u001b[0m, in \u001b[0;36mArrowBasedBuilder._prepare_split_single\u001b[1;34m(self, gen_kwargs, fpath, file_format, max_shard_size, job_id)\u001b[0m\n\u001b[0;32m   1989\u001b[0m     writer\u001b[38;5;241m.\u001b[39mwrite_table(table)\n\u001b[0;32m   1990\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m CastError \u001b[38;5;28;01mas\u001b[39;00m cast_error:\n\u001b[1;32m-> 1991\u001b[0m     \u001b[38;5;28;01mraise\u001b[39;00m DatasetGenerationCastError\u001b[38;5;241m.\u001b[39mfrom_cast_error(\n\u001b[0;32m   1992\u001b[0m         cast_error\u001b[38;5;241m=\u001b[39mcast_error,\n\u001b[0;32m   1993\u001b[0m         builder_name\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39minfo\u001b[38;5;241m.\u001b[39mbuilder_name,\n\u001b[0;32m   1994\u001b[0m         gen_kwargs\u001b[38;5;241m=\u001b[39mgen_kwargs,\n\u001b[0;32m   1995\u001b[0m         token\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtoken,\n\u001b[0;32m   1996\u001b[0m     )\n\u001b[0;32m   1997\u001b[0m num_examples_progress_update \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m \u001b[38;5;28mlen\u001b[39m(table)\n\u001b[0;32m   1998\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m time\u001b[38;5;241m.\u001b[39mtime() \u001b[38;5;241m>\u001b[39m _time \u001b[38;5;241m+\u001b[39m config\u001b[38;5;241m.\u001b[39mPBAR_REFRESH_TIME_INTERVAL:\n",
      "\u001b[1;31mDatasetGenerationCastError\u001b[0m: An error occurred while generating the dataset\n\nAll the data files must have the same columns, but at some point there are 1 new columns (id) and 1 missing columns (post_id).\n\nThis happened while the csv dataset builder was generating data using\n\nhf://datasets/steamcyclone/Pill_Ideologies-Post_Titles/reddit_posts_fm.csv (at revision dac12a4af65322040e2e6c00c346ffc49cc8eccf)\n\nPlease either edit the data files to have matching columns, or separate them into different configurations (see docs at https://hf.co/docs/hub/datasets-manual-configuration#multiple-configurations)"
     ]
    }
   ],
   "source": [
    "\n",
    "\n",
    "test = datasets.load_dataset(\"steamcyclone/Pill_Ideologies-Post_Titles\")"
   ]
  },
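  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The load above fails because the repo's CSV files do not share one schema: `reddit_posts_fm.csv` has an `id` column where the builder expects `post_id`, so the Arrow writer cannot cast every file to a single set of features.\n",
    "\n",
    "Below is a workaround sketch, not a fix to the dataset itself: download the offending file directly with `huggingface_hub.hf_hub_download`, rename `id` to `post_id` in pandas, and wrap the result in a `datasets.Dataset`. Treating `post_id` (taken from the traceback's expected features) as the canonical column name is an assumption."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from huggingface_hub import hf_hub_download\n",
    "import pandas as pd\n",
    "\n",
    "# Fetch the CSV named in the traceback straight from the dataset repo,\n",
    "# bypassing the csv builder's cross-file schema check.\n",
    "csv_path = hf_hub_download(\n",
    "    repo_id=\"steamcyclone/Pill_Ideologies-Post_Titles\",\n",
    "    filename=\"reddit_posts_fm.csv\",\n",
    "    repo_type=\"dataset\",\n",
    ")\n",
    "\n",
    "df = pd.read_csv(csv_path)\n",
    "# Assumption: `post_id` is the canonical name, so align this file with\n",
    "# the schema the other CSVs use before any merging.\n",
    "df = df.rename(columns={\"id\": \"post_id\"})\n",
    "\n",
    "fm_posts = datasets.Dataset.from_pandas(df)\n",
    "fm_posts"
   ]
  },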
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "sta663C",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}