{ "cells": [ { "cell_type": "code", "execution_count": 2, "id": "61582349", "metadata": {}, "outputs": [], "source": [ "import pandas as pd\n", "import os\n", "import gc\n", "import json\n", "import cv2\n", "from tqdm.auto import tqdm\n", "from HCFA_OCR_XML_to_DataFrame import *" ] }, { "cell_type": "code", "execution_count": 3, "id": "2dcbb30c", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "37" ] }, "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import warnings\n", "warnings.filterwarnings('ignore')\n", "warnings.simplefilter('ignore')\n", "gc.collect()" ] }, { "cell_type": "code", "execution_count": 4, "id": "34a48aa8", "metadata": {}, "outputs": [], "source": [ "HCFA_final_keys = pd.read_excel(r\"D:\\Xelp_work\\FSL Project\\Sprint_2\\HCFA_Keys_list_verification.xlsx\", sheet_name = 'Field_Names from KEY file')" ] }, { "cell_type": "code", "execution_count": 5, "id": "1ead8164", "metadata": {}, "outputs": [ { "data": { "text/html": [ "
"<table border=\"1\" class=\"dataframe\">\n", "  <thead>\n", "    <tr style=\"text-align: right;\">\n", "      <th></th>\n", "      <th>Key_Name</th>\n", "    </tr>\n", "  </thead>\n", "  <tbody>\n", "    <tr>\n", "      <th>0</th>\n", "      <td>10A_PatConditionEmpN</td>\n", "    </tr>\n", "    <tr>\n", "      <th>1</th>\n", "      <td>10A_PatConditionEmpY</td>\n", "    </tr>\n", "    <tr>\n", "      <th>2</th>\n", "      <td>10B_PatAAState</td>\n", "    </tr>\n", "    <tr>\n", "      <th>3</th>\n", "      <td>10B_PatConditionAutoN</td>\n", "    </tr>\n", "    <tr>\n", "      <th>4</th>\n", "      <td>10B_PatConditionAutoY</td>\n", "    </tr>\n", "    <tr>\n", "      <th>...</th>\n", "      <td>...</td>\n", "    </tr>\n", "    <tr>\n", "      <th>152</th>\n", "      <td>9B_SecInsSexM</td>\n", "    </tr>\n", "    <tr>\n", "      <th>153</th>\n", "      <td>9D_SecInsPlanName</td>\n", "    </tr>\n", "    <tr>\n", "      <th>154</th>\n", "      <td>31_RenDateSigned</td>\n", "    </tr>\n", "    <tr>\n", "      <th>155</th>\n", "      <td>11B_PriInsEmpName</td>\n", "    </tr>\n", "    <tr>\n", "      <th>156</th>\n", "      <td>24J_RenProvFullName</td>\n", "    </tr>\n", "  </tbody>\n", "</table>\n", "<p>157 rows × 1 columns</p>\n", "</div>" ], "text/plain": [ "                  Key_Name\n", "0     10A_PatConditionEmpN\n", "1     10A_PatConditionEmpY\n", "2           10B_PatAAState\n", "3    10B_PatConditionAutoN\n", "4    10B_PatConditionAutoY\n", "..                     ...\n", "152          9B_SecInsSexM\n", "153      9D_SecInsPlanName\n", "154       31_RenDateSigned\n", "155      11B_PriInsEmpName\n", "156    24J_RenProvFullName\n", "\n", "[157 rows x 1 columns]" ] }, "execution_count": 5, "metadata": {}, "output_type": "execute_result" } ], "source": [ "HCFA_final_keys" ] },
{ "cell_type": "code", "execution_count": 6, "id": "e60b549b", "metadata": {}, "outputs": [], "source": [ "key_names = HCFA_final_keys['Key_Name'].tolist()" ] },
{ "cell_type": "code", "execution_count": 7, "id": "b3e49b64", "metadata": {}, "outputs": [], "source": [ "def get_charges(row):\n", "    # Charge amounts are taken straight from the keyed data.\n", "    ground_truth = row[\"Data\"]\n", "    return ground_truth" ] },
{ "cell_type": "code", "execution_count": 8, "id": "9f76b9e3", "metadata": {}, "outputs": [], "source": [ "def get_Pat_details(row):\n", "    # Prefer the OCR value unless it contains unresolved '?' characters.\n", "    if \"?\" not in str(row[\"OCR_Optimizer\"]):\n", "        ground_truth = row[\"OCR_Optimizer\"]\n", "    else:\n", "        # Fall back to the keyed data; if that is blank too, blank out the '?'s.\n", "        if row[\"Data\"] != '[BLANK]':\n", "            ground_truth = row[\"Data\"]\n", "        else:\n", "            ground_truth = row[\"OCR_Optimizer\"].replace(\"?\", \" \")\n", "    # Treat any whitespace-only result as blank.\n", "    if str(ground_truth).strip() == '':\n", "        ground_truth = '[BLANK]'\n", "    return ground_truth" ] },
{ "cell_type": "code", "execution_count": 9, "id": "e5a55f38", "metadata": {}, "outputs": [], "source": [ "def generate_ground_truth(row):\n", "    ground_truth = '[BLANK]'\n", "    try:\n", "        if row[\"Field_Name\"] in ['28_TotalCharges', '29_AmountPaid', '30_BalanceDue']:\n", "            ground_truth = get_charges(row)\n", "        elif row[\"Field_Name\"] in ['5_PatAddr1', '5_PatCity', '5_PatPostCode', '5_PatState']:\n", "            ground_truth = get_Pat_details(row)\n", "        else:\n", "            # Default: use the OCR value when it is clean and non-blank, otherwise the keyed data.\n", "            if \"?\" not in str(row[\"OCR_Optimizer\"]):\n", "                if row[\"OCR_Optimizer\"] != '[BLANK]':\n", "                    ground_truth = row[\"OCR_Optimizer\"]\n", "                else:\n", "                    ground_truth = row[\"Data\"]\n", "    except Exception:\n", "        ground_truth = row[\"Data\"]\n", "    # '@' marks an unusable value; cast to str so non-string values cannot raise here.\n", "    if \"@\" in str(ground_truth):\n", "        ground_truth = '[BLANK]'\n", "    return ground_truth" ] },
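{ "cell_type": "markdown", "id": "ad0c1e01", "metadata": {}, "source": [ "A quick sanity check of `generate_ground_truth` on a few hand-made rows. These rows are synthetic illustrations (not taken from any KEY file, and `2_PatName` is just a placeholder field name) that exercise the three branches above: charges, patient address fields, and the default OCR-vs-Data fallback. Expected output: `['125.00', 'SPRINGFIELD', 'DOE JOHN', '[BLANK]']`." ] },
{ "cell_type": "code", "execution_count": null, "id": "ad0c1e02", "metadata": {}, "outputs": [], "source": [ "# Synthetic rows covering each branch of generate_ground_truth (illustrative values only).\n", "sample_rows = pd.DataFrame([\n", "    {'Field_Name': '28_TotalCharges', 'OCR_Optimizer': '120.00', 'Data': '125.00'},      # charges -> keyed Data\n", "    {'Field_Name': '5_PatCity', 'OCR_Optimizer': 'SPRING?IELD', 'Data': 'SPRINGFIELD'},  # '?' in OCR -> keyed Data\n", "    {'Field_Name': '2_PatName', 'OCR_Optimizer': '[BLANK]', 'Data': 'DOE JOHN'},         # blank OCR -> keyed Data\n", "    {'Field_Name': '2_PatName', 'OCR_Optimizer': 'me@example', 'Data': 'DOE JANE'},      # '@' -> [BLANK]\n", "])\n", "sample_rows.apply(generate_ground_truth, axis=1).tolist()" ] },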
{ "cell_type": "code", "execution_count": 10, "id": "110a36f9", "metadata": {}, "outputs": [], "source": [ "def create_annotation_dict(df):\n", "    # Collapse the frame into {field name: ground truth}; repeated fields are joined with '; '.\n", "    result_dict = {}\n", "    for _, row in df.iterrows():\n", "        field_name = row['Field_Name']\n", "        if pd.isna(field_name):\n", "            continue\n", "        gt_value = row['Ground_truth']\n", "        if field_name in result_dict:\n", "            result_dict[field_name] = f\"{result_dict[field_name]}; {gt_value}\"\n", "        else:\n", "            result_dict[field_name] = gt_value\n", "\n", "    return result_dict" ] },
{ "cell_type": "code", "execution_count": 11, "id": "7a4eff9f", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "27913\n", "BSC134A9O001_001.tiff\n" ] } ], "source": [ "img_files = os.listdir(r\"D:\\Xelp_work\\FSL Project\\Sprint_2\\HCFA_data\\train\\img\")\n", "print(len(img_files))\n", "print(img_files[0])" ] },
{ "cell_type": "code", "execution_count": 11, "id": "647049d1", "metadata": {}, "outputs": [], "source": [ "# import xml.etree.ElementTree as ET\n", "# xml_file_path = r\"D:\\Xelp_work\\FSL Project\\Sprint_2\\HCFA_data\\train\\key\\BSC134A9O001_000_HCFA.KEY\"\n", "# # Parse the XML file\n", "# tree = ET.parse(xml_file_path)\n", "# root = tree.getroot()\n", "# # Iterate through all Fld elements\n", "# for fld in root.findall('.//Fld'):\n", "#     typ = fld.get('Typ')  # Get the 'Typ' attribute\n", "#     if typ == 'SINGLE':  # SINGLE fields carry their name in a child Nm element\n", "#         nm_element = fld.find('.//Nm')\n", "#         if nm_element is not None:\n", "#             print(nm_element.text)\n", "#     elif typ in ['GROUP', 'TABLE']:  # GROUP/TABLE fields carry their names on Clm children\n", "#         clms = fld.findall('.//Clm')\n", "#         for clm in clms:\n", "#             nm_value = clm.get('Nm')\n", "#             if nm_value:\n", "#                 print(nm_value)" ] },
{ "cell_type": "code", "execution_count": 12, "id": "48efee4f", "metadata": {}, "outputs": [], "source": [ "json_target_path = r\"D:\\Xelp_work\\FSL Project\\Sprint_2\\HCFA_data\\donut_data\\train\\key\"\n", "images_source_path = r\"D:\\Xelp_work\\FSL Project\\Sprint_2\\HCFA_data\\train\\img\"\n", "images_target_path = r\"D:\\Xelp_work\\FSL Project\\Sprint_2\\HCFA_data\\donut_data\\train\\img\"\n", "Key_dir = r\"D:\\Xelp_work\\FSL Project\\Sprint_2\\HCFA_data\\train\\key\"" ] },
{ "cell_type": "code", "execution_count": 14, "id": "e30a7c3c", "metadata": {}, "outputs": [], "source": [ "os.makedirs(json_target_path, exist_ok=True)\n", "os.makedirs(images_target_path, exist_ok=True)" ] },
{ "cell_type": "code", "execution_count": 15, "id": "986f8a36", "metadata": { "scrolled": false }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "100%|██████████████████████████████████████████████████████████████████████████| 27919/27919 [1:48:46<00:00, 4.28it/s]\n" ] } ], "source": [ "import shutil\n", "from tqdm import tqdm\n", "\n", "gc.collect()\n", "\n", "file_counter = 0\n", "for file in tqdm(os.listdir(Key_dir)):\n", "    try:\n", "        Annotation_dict = {}\n", "        xml_file_name = os.path.join(Key_dir, file)\n", "        grnd_df, single_fields, grp_field_names, table_field_names, image_name = OCR_XML_to_DataFrame(xml_file_name, key_names)\n", "\n", "        if image_name[0][0] in img_files:\n", "            # Generate ground truth and create the annotation dictionary\n", "            grnd_df.fillna(\"[BLANK]\", inplace=True)\n", "            grnd_df[\"Ground_truth\"] = grnd_df.apply(generate_ground_truth, axis=1)\n", "            Annotation_dict = create_annotation_dict(grnd_df)\n", "\n", "            # Generate the base filename (without extension)\n", "            base_filename = os.path.splitext(file)[0]\n", "\n", "            # Save the annotation dictionary as JSON with the base filename\n", "            json_file_path = os.path.join(json_target_path, base_filename + \".json\")\n", "            with open(json_file_path, 'w') as json_file:\n", "                json.dump(Annotation_dict, json_file)\n", "\n", "            # Copy the matching image next to its annotation, keeping its original extension\n", "            source_image_path = os.path.join(images_source_path, image_name[0][0])\n", "            target_image_path = os.path.join(images_target_path, base_filename + os.path.splitext(image_name[0][0])[1])\n", "            if os.path.exists(source_image_path):\n", "                shutil.copy(source_image_path, target_image_path)\n", "\n", "#             print(f\"{file} : {len(Annotation_dict)}\")\n", "#             print(Annotation_dict)\n", "\n", "    except Exception:\n", "        # Fail fast on any bad file so the error surfaces with its full traceback.\n", "        raise\n", "\n", "    file_counter += 1\n", "    if file_counter % 200 == 0:\n", "        gc.collect()" ] },
{ "cell_type": "code", "execution_count": null, "id": "0713f7d0", "metadata": {}, "outputs": [], "source": [] },
{ "cell_type": "code", "execution_count": 40, "id": "0570dbc1", "metadata": {}, "outputs": [], "source": [ "key_file_dir = r\"D:\\Xelp_work\\FSL Project\\Sprint_2\\HCFA_data\\donut_data\\valid\\key\"\n", "train_key_files = r\"D:\\Xelp_work\\FSL Project\\Sprint_2\\HCFA_data\\donut_data\\train\\key\"" ] },
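{ "cell_type": "markdown", "id": "ad0c1e05", "metadata": {}, "source": [ "A spot check of one converted pair: load a generated annotation JSON and confirm the copied image sits next to it under the same base name. This only assumes the naming scheme used in the conversion loop above (`<base>.json` in `key/`, `<base>.<ext>` in `img/`)." ] },
{ "cell_type": "code", "execution_count": null, "id": "ad0c1e06", "metadata": {}, "outputs": [], "source": [ "# Pick one generated annotation and verify its image counterpart exists.\n", "sample_json = os.listdir(train_key_files)[0]\n", "with open(os.path.join(train_key_files, sample_json), 'r') as ann_file:\n", "    annotation = json.load(ann_file)\n", "base = os.path.splitext(sample_json)[0]\n", "matches = [name for name in os.listdir(images_target_path) if os.path.splitext(name)[0] == base]\n", "print(sample_json, len(annotation), matches)" ] },
{ "cell_type": "code", "execution_count": 50, "id": "e785b6da", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [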
"100%|██████████████████████████████████████████████████████████████████████████| 27919/27919 [00:14<00:00, 1925.41it/s]\n" ] } ], "source": [ "import os\n", "import json\n", "\n", "files_with_151 = []\n", "files_with_157 = []\n", "others = []\n", "key_files = os.listdir(train_key_files)\n", "for each_key_file in tqdm(key_files):\n", " each_path = os.path.join(train_key_files, each_key_file) \n", " # Reading the JSON file\n", " with open(each_path, 'r') as file:\n", " data = json.load(file)\n", " if len(data) == 151:\n", " files_with_151.append(each_key_file)\n", " elif len(data) == 157:\n", " files_with_157.append(each_key_file)\n", " else:\n", " others.append(each_key_file)\n", "print(len(files_with_151), len(files_with_157), len(others))" ] }, { "cell_type": "code", "execution_count": 51, "id": "db89b159", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "0 27919 0\n" ] } ], "source": [] }, { "cell_type": "code", "execution_count": 33, "id": "e27a2c9f", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[]" ] }, "execution_count": 33, "metadata": {}, "output_type": "execute_result" } ], "source": [ "others" ] }, { "cell_type": "code", "execution_count": 47, "id": "bf2f2e9e", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "100%|███████████████████████████████████████████████████████████████████████████| 27919/27919 [01:19<00:00, 353.14it/s]" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Files with 151 keys: 27320\n", "Files with 157 keys: 599\n", "Other files: 0\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "\n" ] } ], "source": [ "import os\n", "import json\n", "from tqdm import tqdm\n", "\n", "extra_keys = {\n", " \"24_AnesStartTime\": \"[BLANK]\",\n", " \"24_AnesEndTime\": \"[BLANK]\",\n", " \"24_AnesTotalTime\": \"[BLANK]\",\n", " \"24_AnesTotalMin\": \"[BLANK]\",\n", " \"24_AnesMod\": \"[BLANK]\",\n", " \"24_AnesUnits\": \"[BLANK]\"\n", "}\n", "\n", "files_with_151 = []\n", "files_with_157 = []\n", "others = []\n", "train_key_files = r\"D:\\Xelp_work\\FSL Project\\Sprint_2\\HCFA_data\\donut_data\\train\\key\"\n", "\n", "key_files = os.listdir(train_key_files)\n", "for each_key_file in tqdm(key_files):\n", " each_path = os.path.join(train_key_files, each_key_file) \n", " # Reading the JSON file\n", " with open(each_path, 'r') as file:\n", " data = json.load(file)\n", " \n", " if len(data) == 151:\n", " files_with_151.append(each_key_file)\n", " # Add the extra keys and values\n", " data.update(extra_keys)\n", " \n", " # Save the updated JSON data back to the file\n", " with open(each_path, 'w') as file:\n", " json.dump(data, file, indent=4)\n", " \n", " elif len(data) == 157:\n", " files_with_157.append(each_key_file)\n", " else:\n", " others.append(each_key_file)\n", "\n", "print(f\"Files with 151 keys: {len(files_with_151)}\")\n", "print(f\"Files with 157 keys: {len(files_with_157)}\")\n", "print(f\"Other files: {len(others)}\")\n" ] }, { "cell_type": "code", "execution_count": null, "id": "39aa5696", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.11.5" } }, "nbformat": 4, "nbformat_minor": 5 }