# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TODO: Add a description here.""" | |
import evaluate
import datasets
import numpy as np

# TODO: Add BibTeX citation
_CITATION = """\
@InProceedings{huggingface:module,
title = {A great new module},
authors={huggingface, Inc.},
year={2020}
}
"""

_DESCRIPTION = """\
Levenshtein distance measures the minimum number of token-level edits (insertions, deletions,
and substitutions) needed to turn a prediction into its reference. This module reports the
average and standard deviation of the (optionally length-normalized) distance over all
prediction/reference pairs, along with the list of per-pair distances.
"""

_KWARGS_DESCRIPTION = """
Computes the token-level Levenshtein distance between each prediction and its reference.
Args:
    predictions: list of predictions to score. Each prediction
        should be a string with tokens separated by spaces.
    references: list of references, one for each prediction. Each
        reference should be a string with tokens separated by spaces.
    tokenizer: callable used to split each string into tokens. Defaults to
        splitting on whitespace.
    normalize: if True, each distance is divided by the length of the longer
        token sequence. Defaults to False.
Returns:
    levenshtein_distance: average Levenshtein distance over all prediction/reference pairs,
    distance_std: standard deviation of the per-pair distances,
    distances: list of the per-pair distances.
Examples:
    >>> my_new_module = evaluate.load("my_new_module")
    >>> results = my_new_module.compute(predictions=["hello world"], references=["hello there"])
    >>> print(results)
    {'levenshtein_distance': 1.0, 'distance_std': 0.0, 'distances': [1.0]}
"""

# TODO: Define external resources urls if needed
BAD_WORDS_URL = "http://url/to/external/resource/bad_words.txt"


# This code was taken from https://gist.github.com/kylebgorman/1081951/bce3de986e4b05fc0b63d4d9e0cfa4bde6664365
def _dist(A, B, insertion, deletion, substitution):
    """Fill and return the dynamic-programming edit-distance matrix for sequences A and B."""
    D = np.zeros((len(A) + 1, len(B) + 1))
    # First column: cost of deleting every token of A.
    for i in range(len(A)):
        D[i + 1][0] = D[i][0] + deletion
    # First row: cost of inserting every token of B.
    for j in range(len(B)):
        D[0][j + 1] = D[0][j] + insertion
    for i in range(len(A)):  # fill out middle of matrix
        for j in range(len(B)):
            if A[i] == B[j]:
                D[i + 1][j + 1] = D[i][j]  # aka, it's free.
            else:
                D[i + 1][j + 1] = min(D[i + 1][j] + insertion,
                                      D[i][j + 1] + deletion,
                                      D[i][j] + substitution)
    return D
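# For illustration, _dist(["a", "b"], ["a"], 1, 1, 1) produces the matrix
# [[0., 1.],
#  [1., 0.],
#  [2., 1.]]
# whose bottom-right entry (1.0) is the edit distance: delete "b".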


def levenshtein_distance(l1, l2, normalize=False):
    """Levenshtein distance between token sequences l1 and l2, optionally normalized by the longer length."""
    dist = _dist(l1, l2, 1, 1, 1)[-1][-1]
    if normalize:
        # Guard against two empty sequences: their distance is 0.
        longer = max(len(l1), len(l2))
        return dist / longer if longer else 0.0
    else:
        return dist
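# For example, on token lists ["the", "cat", "sat"] and ["the", "dog", "sat"] the distance
# is 1.0 (one substitution), or 1/3 with normalize=True.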


# @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class LevenshteinDistance(evaluate.Comparison):
    """Token-level Levenshtein distance comparison between predictions and references."""

    def _info(self):
        # TODO: Specifies the evaluate.EvaluationModuleInfo object
        return evaluate.ComparisonInfo(
            # This is the description that will appear on the modules page.
            module_type="comparison",
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            # This defines the format of each prediction and reference
            features=datasets.Features({
                "predictions": datasets.Value("string", id="sequence"),
                "references": datasets.Value("string", id="sequence"),
            }),
            # Homepage of the module for documentation
            homepage="http://module.homepage",
            # Additional links to the codebase or references
            codebase_urls=["http://github.com/path/to/codebase/of/new_module"],
            reference_urls=["http://path.to.reference.url/new_module"],
        )

    def _download_and_prepare(self, dl_manager):
        """Optional: download external resources useful to compute the scores"""
        # TODO: Download external resources if needed
        pass

    def _compute(self, predictions, references, tokenizer=lambda x: x.split(), normalize=False):
        """Returns the scores"""
        dists = []
        for prediction, reference in zip(predictions, references):
            # Tokenize both strings (whitespace split by default) and compare the token lists.
            tokenized_prediction = tokenizer(prediction)
            tokenized_reference = tokenizer(reference)
            dists.append(levenshtein_distance(tokenized_prediction, tokenized_reference, normalize=normalize))
        avg_dist = np.mean(dists)
        std_dist = np.std(dists)
        return {
            "levenshtein_distance": avg_dist,
            "distance_std": std_dist,
            "distances": dists,
        }
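

# A minimal usage sketch. The local path "levenshtein_distance" below is an assumption about
# where this script is saved; adjust it to wherever the module actually lives.
#
#     comparison = evaluate.load("levenshtein_distance", module_type="comparison")
#     results = comparison.compute(
#         predictions=["the cat sat on the mat"],
#         references=["the cat sat on a mat"],
#     )
#     # -> {'levenshtein_distance': 1.0, 'distance_std': 0.0, 'distances': [1.0]}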