{ "cells": [ { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import json\n", "import math\n", "from pathlib import Path\n", "\n", "import numpy as np\n", "import pandas as pd\n", "from datasets import Dataset\n", "from sklearn.metrics import f1_score, accuracy_score, log_loss\n", "from tqdm import tqdm\n", "\n", "from models.models import language_to_models" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "en = \"en\"\n", "ru = \"ru\"\n", "datasets_dir = Path(\"datasets\")\n", "test_filename = \"arxiv_test\"\n", "test_dataset_filename = {\n", " en: datasets_dir / en / test_filename,\n", " ru: datasets_dir / ru / test_filename,\n", "}" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "test_datasets = {}\n", "for lang in (en, ru):\n", " csv_file = str(test_dataset_filename[lang]) + \".csv\"\n", " json_file = str(test_dataset_filename[lang]) + \".json\"\n", " if Path(csv_file).exists():\n", " test_datasets[lang] = pd.read_csv(csv_file)\n", " else:\n", " test_datasets[lang] = pd.read_json(json_file, lines=True)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "test_results_filename = Path(\"test_results.json\")\n", "if test_results_filename.exists():\n", " with open(test_results_filename, \"r\") as f:\n", " test_results = json.load(f)\n", "else:\n", " test_results = {}" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def pred_to_1d(pred):\n", " return pred.idxmax(axis=1)\n", "\n", "\n", "def true_to_nd(true, columns):\n", " columns = list(columns)\n", " true_arr = np.zeros((len(true), len(columns)))\n", " column_numbers = true.apply(lambda label: columns.index(label)).to_numpy()\n", " one_inds = np.column_stack((np.arange(len(true)), column_numbers))\n", " true_arr[one_inds] = 1\n", " true = pd.DataFrame(true_arr, columns=columns)\n", " return true\n", "\n", "\n", "def accuracy(pred, true):\n", " return accuracy_score(true, pred_to_1d(pred))\n", "\n", "\n", "def f1(pred, true):\n", " return f1_score(true, pred_to_1d(pred), average=\"macro\")\n", "\n", "\n", "def cross_entropy(pred, true):\n", " pred = pd.DataFrame(\n", " pred.to_numpy() / pred.sum(axis=1).to_numpy()[:, None], columns=pred.columns\n", " )\n", " return log_loss(true_to_nd(true, pred.columns), pred)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "metrics = {\"Macro F1\": f1, \"Accuracy\": accuracy, \"Cross-entropy loss\": cross_entropy}" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "predications_dir = Path(\"pred\")\n", "predications_dir.mkdir(exist_ok=True)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def canonicalize_label(label):\n", " if \".\" in label:\n", " return label[: label.index(\".\")]\n", " return label\n", "\n", "\n", "def predict(model_name, model, dataset: pd.DataFrame, batch_size=32, first: int = 3000):\n", " label = \"category\"\n", " all_labels = list(dataset[label].unique())\n", " if first is not None:\n", " dataset = dataset[:first]\n", " true = dataset[label]\n", " prediction_file_path = predications_dir / (model_name + \".csv\")\n", " dataset_size = len(dataset)\n", " if not prediction_file_path.exists():\n", " preds = []\n", " for i in tqdm(\n", " range(0, dataset_size + batch_size, batch_size),\n", " desc=f\"Predicting 
using {model_name}\",\n", " total=math.ceil(dataset_size / batch_size),\n", " unit=\"batch\",\n", " ):\n", " data = dataset.iloc[i : i + batch_size]\n", " if data.empty:\n", " break\n", " data = Dataset.from_pandas(data)\n", " batch_pred = model(data)\n", " batch_pred_canonicalised = []\n", " for paper_pred in batch_pred:\n", " labels_dict = {}\n", " for label_score in paper_pred:\n", " label = canonicalize_label(label_score[\"label\"])\n", " if label not in all_labels:\n", " return None, None\n", " labels_dict[label] = label_score[\"score\"]\n", " batch_pred_canonicalised.append(labels_dict)\n", " preds.extend(batch_pred_canonicalised)\n", " else:\n", " preds = pd.read_csv(prediction_file_path, index_col=0)\n", " preds = pd.DataFrame(preds).fillna(0)\n", " for label in all_labels:\n", " if label not in preds.columns:\n", " preds[label] = 0\n", " preds = preds.reindex(sorted(preds.columns), axis=1)\n", " if not prediction_file_path.exists():\n", " preds.to_csv(prediction_file_path)\n", " return preds, true\n", "\n", "\n", "for lang, name_get_model in language_to_models.items():\n", " lang_results = test_results.setdefault(lang, {})\n", " for metric_name, metic in metrics.items():\n", " metrics_results = lang_results.setdefault(metric_name, {})\n", " for model_name, get_model in name_get_model.items():\n", " model_name = model_name.replace(\"/\", \".\")\n", " if model_name not in metrics_results:\n", " test_size = 3000 if en == lang else 500\n", " pred, true = predict(model_name, get_model(), test_datasets[lang], first=test_size)\n", " if pred is None:\n", " print(f\"{model_name} does not produce labels that we can estimate\")\n", " continue\n", " metrics_results[model_name] = metic(pred, true)\n", " print(f\"{metric_name} for {model_name} = {metrics_results[model_name]}\")" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "with open(test_results_filename, \"w\") as f:\n", " json.dump(test_results, f)" ] } ], "metadata": { "kernelspec": { "display_name": ".venv", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.12" } }, "nbformat": 4, "nbformat_minor": 2 }