# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ISCO-08 Hierarchical Accuracy Measure."""
from typing import List, Dict, Tuple
import evaluate
import datasets
# import ham
# import isco
# TODO: Add BibTeX citation
_CITATION = """
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
_DESCRIPTION = """
The ISCO-08 Hierarchical Accuracy Measure is an implementation of the measure described in [Functional Annotation of Genes Using Hierarchical Text Categorization](https://www.researchgate.net/publication/44046343_Functional_Annotation_of_Genes_Using_Hierarchical_Text_Categorization) (Kiritchenko, Svetlana and Famili, Fazel. 2005) and adapted for the ISCO-08 classification scheme by the International Labour Organization.
"""
_KWARGS_DESCRIPTION = """
Calculates hierarchical precision, hierarchical recall and hierarchical F1 given a list of reference codes and predicted codes from the ISCO-08 taxonomy by the International Labour Organization.
Args:
- references (List[str]): List of ISCO-08 reference codes. Each reference code should be a single token, 4-digit ISCO-08 code string.
- predictions (List[str]): List of machine predicted or human assigned ISCO-08 codes to score. Each prediction should be a single token, 4-digit ISCO-08 code string.
Returns:
    - accuracy (`float`): Exact match accuracy. Minimum possible value is 0. Maximum possible value is 1.0.
    - hierarchical_precision (`float`): Hierarchical precision score. Minimum possible value is 0. Maximum possible value is 1.0. A higher score means closer agreement with the reference codes.
    - hierarchical_recall (`float`): Hierarchical recall score. Minimum possible value is 0. Maximum possible value is 1.0. A higher score means closer agreement with the reference codes.
    - hierarchical_fmeasure (`float`): Hierarchical F1 score. Minimum possible value is 0. Maximum possible value is 1.0. A higher score means closer agreement with the reference codes.
Examples:
Example 1
>>> ham = evaluate.load("danieldux/isco_hierarchical_accuracy")
    >>> results = ham.compute(references=["1111", "1112", "1113", "1114"], predictions=["1111", "1113", "1120", "1211"])
>>> print(results)
{
'accuracy': 0.25,
'hierarchical_precision': 0.7142857142857143,
'hierarchical_recall': 0.5,
'hierarchical_fmeasure': 0.588235294117647
}
"""
# TODO: Define external resources urls if needed
ISCO_CSV_MIRROR_URL = (
"https://storage.googleapis.com/isco-public/tables/ISCO_structure.csv"
)
ILO_ISCO_CSV_URL = (
"https://www.ilo.org/ilostat-files/ISCO/newdocs-08-2021/ISCO-08/ISCO-08%20EN.csv"
)
@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ISCO_Hierarchical_Accuracy(evaluate.Metric):
"""The ISCO-08 Hierarchical Accuracy Measure"""
def _info(self):
# TODO: Specifies the evaluate.EvaluationModuleInfo object
return evaluate.MetricInfo(
# This is the description that will appear on the modules page.
module_type="metric",
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
# This defines the format of each prediction and reference
# features=datasets.Features(
# {
# "predictions": datasets.Value("string"),
# "references": datasets.Value("string"),
# }
# ),
features=datasets.Features(
{
"references": datasets.Sequence(datasets.Value("string")),
"predictions": datasets.Sequence(datasets.Value("string")),
}
if self.config_name == "multilabel"
else {
"references": datasets.Value("string"),
"predictions": datasets.Value("string"),
}
),
# TODO: Homepage of the module for documentation
homepage="http://module.homepage",
# TODO: Additional links to the codebase or references
codebase_urls=["http://github.com/path/to/codebase/of/new_module"],
reference_urls=["http://path.to.reference.url/new_module"],
)
def create_hierarchy_dict(self, file: str) -> dict:
"""
Creates a dictionary where keys are nodes and values are dictionaries of their parent nodes with distance as weights,
representing the group level hierarchy of the ISCO-08 structure.
Args:
- file: A string representing the path to the CSV file containing the 4-digit ISCO-08 codes. It can be a local path or a web URL.
Returns:
- A dictionary where keys are ISCO-08 unit codes and values are dictionaries of their parent codes with distances.
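        Example (illustrative, assuming a CSV row whose "unit" column is "1111"):
            The resulting entry is {"1111": {"111": 0.75, "11": 0.5, "1": 0.25}},
            i.e. the minor, sub-major and major group ancestors weighted by proximity.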
"""
        try:
            import requests
            import csv
        except ImportError as error:
            raise ImportError(
                "The `requests` package is required to download the ISCO-08 CSV file."
            ) from error
isco_hierarchy = {}
if file.startswith("http://") or file.startswith("https://"):
response = requests.get(file)
lines = response.text.splitlines()
else:
with open(file, newline="") as csvfile:
lines = csvfile.readlines()
reader = csv.DictReader(lines)
for row in reader:
unit_code = row["unit"].zfill(4)
minor_code = unit_code[0:3]
sub_major_code = unit_code[0:2]
major_code = unit_code[0]
# Assign weights, higher for closer ancestors
weights = {minor_code: 0.75, sub_major_code: 0.5, major_code: 0.25}
# Store ancestors with their weights
isco_hierarchy[unit_code] = weights
return isco_hierarchy
def find_ancestors(self, node: str, hierarchy: dict) -> set:
"""
Find the ancestors of a given node in a hierarchy.
Args:
node (str): The node for which to find ancestors.
            hierarchy (dict): A dictionary representing the hierarchy, where keys are nodes and values are mappings of their parent nodes to distance weights.
Returns:
set: A set of ancestors of the given node.
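        Example (illustrative):
            With hierarchy {"1111": {"111": 0.75, "11": 0.5, "1": 0.25}},
            find_ancestors("1111", hierarchy) returns {"111", "11", "1"}.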
"""
ancestors = set()
nodes_to_visit = [node]
while nodes_to_visit:
current_node = nodes_to_visit.pop()
if current_node in hierarchy:
parents = hierarchy[current_node]
ancestors.update(parents)
nodes_to_visit.extend(parents)
return ancestors
def extend_with_ancestors(self, classes: set, hierarchy: dict) -> set:
"""
Extend the given set of classes with their ancestors from the hierarchy.
Args:
classes (set): The set of classes to extend.
hierarchy (dict): The hierarchy of classes.
Returns:
set: The extended set of classes including their ancestors.
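        Example (illustrative):
            With hierarchy {"1111": {"111": 0.75, "11": 0.5, "1": 0.25}},
            extend_with_ancestors({"1111"}, hierarchy) returns {"1111", "111", "11", "1"}.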
"""
extended_classes = set(classes)
for cls in classes:
ancestors = self.find_ancestors(cls, hierarchy)
extended_classes.update(ancestors)
return extended_classes
def calculate_hierarchical_precision_recall(
self,
reference_codes: List[str],
predicted_codes: List[str],
hierarchy: Dict[str, Dict[str, float]],
) -> Tuple[float, float]:
"""
Calculates the hierarchical precision and recall given the reference codes, predicted codes, and hierarchy definition.
Args:
reference_codes (List[str]): The list of reference codes.
predicted_codes (List[str]): The list of predicted codes.
            hierarchy (Dict[str, Dict[str, float]]): The hierarchy definition where keys are codes and values are dictionaries mapping parent codes to distance weights.
Returns:
Tuple[float, float]: A tuple containing the hierarchical precision and recall floating point values.
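        Example (illustrative):
            With references=["1111"] and predictions=["1112"], the weighted ancestor sets are
            {"1111": 1.0, "111": 0.75, "11": 0.5, "1": 0.25} and {"1112": 1.0, "111": 0.75, "11": 0.5, "1": 0.25}.
            The overlapping weights sum to 1.5 and each set's weights total 2.5, so hP = hR = 0.6.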
"""
extended_real = {}
# Extend the sets of reference codes with their ancestors
for code in reference_codes:
weight = 1.0 # Full weight for exact match
extended_real[code] = weight
for ancestor, ancestor_weight in hierarchy.get(code, {}).items():
extended_real[ancestor] = max(
extended_real.get(ancestor, 0), ancestor_weight
)
extended_predicted = {}
# Extend the sets of predicted codes with their ancestors
for code in predicted_codes:
weight = 1.0
extended_predicted[code] = weight
for ancestor, ancestor_weight in hierarchy.get(code, {}).items():
extended_predicted[ancestor] = max(
extended_predicted.get(ancestor, 0), ancestor_weight
)
# Calculate weighted correct predictions
correct_weights = 0
for code, weight in extended_predicted.items():
if code in extended_real:
correct_weights += min(weight, extended_real[code])
total_predicted_weights = sum(extended_predicted.values())
total_real_weights = sum(extended_real.values())
# Calculate hierarchical precision and recall using weighted sums
hP = correct_weights / total_predicted_weights if total_predicted_weights else 0
hR = correct_weights / total_real_weights if total_real_weights else 0
return hP, hR
def hierarchical_f_measure(self, hP, hR, beta=1.0):
"""
Calculate the hierarchical F-measure.
Parameters:
hP (float): The hierarchical precision.
hR (float): The hierarchical recall.
beta (float, optional): The beta value for F-measure calculation. Default is 1.0.
Returns:
float: The hierarchical F-measure.
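        Formula:
            hF = (beta^2 + 1) * hP * hR / (beta^2 * hP + hR), which reduces to the
            harmonic mean 2 * hP * hR / (hP + hR) when beta is 1.0.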
"""
if hP + hR == 0:
return 0
return (beta**2 + 1) * hP * hR / (beta**2 * hP + hR)
def _download_and_prepare(self, dl_manager):
"""Download external ISCO-08 csv file from the ILO website for creating the hierarchy dictionary."""
isco_csv = dl_manager.download_and_extract(ISCO_CSV_MIRROR_URL)
        print("ISCO CSV file downloaded")
# self.isco_hierarchy = isco.create_hierarchy_dict(isco_csv)
self.isco_hierarchy = self.create_hierarchy_dict(isco_csv)
print("ISCO hierarchy dictionary created")
print(self.isco_hierarchy)
def _compute(self, predictions, references):
"""Returns the accuracy scores."""
# Convert the inputs to strings
predictions = [str(p) for p in predictions]
references = [str(r) for r in references]
# Calculate accuracy
accuracy = sum(i == j for i, j in zip(predictions, references)) / len(
predictions
)
print(f"Accuracy: {accuracy}")
# Calculate hierarchical precision, recall and f-measure
hierarchy = self.isco_hierarchy
hP, hR = self.calculate_hierarchical_precision_recall(
references, predictions, hierarchy
)
hF = self.hierarchical_f_measure(hP, hR)
print(
f"Hierarchical Precision: {hP}, Hierarchical Recall: {hR}, Hierarchical F-measure: {hF}"
)
return {
"accuracy": accuracy,
"hierarchical_precision": hP,
"hierarchical_recall": hR,
"hierarchical_fmeasure": hF,
}
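
# Usage sketch (assumes the module is published under the id used in the docstring
# example above, "danieldux/isco_hierarchical_accuracy"):
#
#   import evaluate
#
#   ham = evaluate.load("danieldux/isco_hierarchical_accuracy")
#   results = ham.compute(
#       references=["1111", "1112", "1113", "1114"],
#       predictions=["1111", "1113", "1120", "1211"],
#   )
#   # results contains: accuracy, hierarchical_precision, hierarchical_recall, hierarchical_fmeasure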