# matching_series/matching_series.py
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TODO: Add a description here."""
import statistics
import datasets
import evaluate
import numpy as np
# TODO: Add BibTeX citation
_CITATION = """\
@InProceedings{huggingface:module,
title = {A great new module},
authors={huggingface, Inc.},
year={2020}
}
"""
_DESCRIPTION = """\
Matching Series evaluates a set of generated time series against a set of reference time series.
It computes the MSE between every generation/reference pair, then reports the mean MSE of each
generation's best-matching reference (matching_mse), of each reference's best-matching generation
(covered_mse), and their harmonic mean, both over all features and per feature.
"""
# Description of the arguments of the module
_KWARGS_DESCRIPTION = """
Calculates matching-based MSE scores for generated time series given reference time series.
Args:
    predictions: list of generated time series.
        shape: (num_generation, num_timesteps, num_features)
    references: list of reference time series.
        shape: (num_reference, num_timesteps, num_features)
Returns:
    matching_mse: mean MSE between each generated series and its closest reference.
    covered_mse: mean MSE between each reference and its closest generated series.
    harmonic_mean: harmonic mean of matching_mse and covered_mse.
    index_mse: mean MSE between same-index generation/reference pairs.
    *_features: the same scores computed independently for each feature.
    macro_*: unweighted means of the per-feature scores.
Examples:
    >>> metric = evaluate.load("bowdbeg/matching_series")
    >>> results = metric.compute(references=[[[0.0, 1.0]]], predictions=[[[0.0, 2.0]]])
    >>> print(results["matching_mse"])
    0.5
"""
@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class matching_series(evaluate.Metric):
"""TODO: Short description of my evaluation module."""
def _info(self):
# TODO: Specifies the evaluate.EvaluationModuleInfo object
return evaluate.MetricInfo(
# This is the description that will appear on the modules page.
module_type="metric",
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
# This defines the format of each prediction and reference
features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Sequence(datasets.Value("float"))),
"references": datasets.Sequence(datasets.Sequence(datasets.Value("float"))),
}
),
            # Homepage of the module for documentation
            homepage="https://huggingface.co/spaces/bowdbeg/matching_series",
            # Additional links to the codebase or references
            codebase_urls=["https://huggingface.co/spaces/bowdbeg/matching_series"],
)
def _download_and_prepare(self, dl_manager):
"""Optional: download external resources useful to compute the scores"""
pass
    def _compute(self, predictions: list | np.ndarray, references: list | np.ndarray, batch_size: None | int = None):
        """Compute the scores of the module given the predictions and references.

        Args:
            predictions: list of generated time series.
                shape: (num_generation, num_timesteps, num_features)
            references: list of reference time series.
                shape: (num_reference, num_timesteps, num_features)
            batch_size: batch size to use for the computation. If None, the whole dataset is processed at once.
        Returns:
            dict of matching scores; see _KWARGS_DESCRIPTION for the keys.
        """
predictions = np.array(predictions)
references = np.array(references)
        if predictions.shape[1:] != references.shape[1:]:
            raise ValueError(
                "The time-series shape (num_timesteps, num_features) of the predictions and references "
                "should be the same. predictions: {}, references: {}".format(
                    predictions.shape[1:], references.shape[1:]
                )
            )
# MSE between predictions and references for all example combinations for each features
# shape: (num_generation, num_reference, num_features)
        if batch_size is not None:
            mse = np.zeros((len(predictions), len(references), predictions.shape[-1]))
            # iterate over the predictions and references in batches; the ranges stop at the
            # array lengths, and the final partial batch is handled by slicing
            for i in range(0, len(predictions), batch_size):
                for j in range(0, len(references), batch_size):
                    mse[i : i + batch_size, j : j + batch_size] = np.mean(
                        (predictions[i : i + batch_size, None] - references[None, j : j + batch_size]) ** 2, axis=-2
                    )
        else:
            # average the squared error over the time axis
            mse = np.mean((predictions[:, None] - references[None, :]) ** 2, axis=-2)
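        # MSE between same-index pairs; meaningful when generations and references are aligned one-to-one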
index_mse = mse.diagonal(axis1=0, axis2=1).mean()
        # matching scores
        # average the per-feature MSE over features; shape: (num_generation, num_reference)
        mse_mean = mse.mean(axis=-1)
# best match for each generated time series
# shape: (num_generation,)
best_match = np.argmin(mse_mean, axis=-1)
        # matching mse: mean over generations of each generation's best (lowest) MSE
        matching_mse = mse_mean[np.arange(len(best_match)), best_match].mean()
# best match for each reference time series
# shape: (num_reference,)
best_match_inv = np.argmin(mse_mean, axis=0)
        # covered mse: mean over references of each reference's best (lowest) MSE
        covered_mse = mse_mean[best_match_inv, np.arange(len(best_match_inv))].mean()
harmonic_mean = 2 / (1 / matching_mse + 1 / covered_mse)
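        # matching_mse rewards fidelity (every generation is close to some reference) and covered_mse
        # rewards coverage (every reference is close to some generation); their harmonic mean combines
        # the two the way F1 combines precision and recall (lower is better here)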
        # compute the same matching scores independently for each feature
matching_mse_features = []
covered_mse_features = []
harmonic_mean_features = []
index_mse_features = []
for f in range(predictions.shape[-1]):
mse_f = mse[:, :, f]
index_mse_f = mse_f.diagonal(axis1=0, axis2=1).mean()
best_match_f = np.argmin(mse_f, axis=-1)
matching_mse_f = mse_f[np.arange(len(best_match_f)), best_match_f].mean()
best_match_inv_f = np.argmin(mse_f, axis=0)
covered_mse_f = mse_f[best_match_inv_f, np.arange(len(best_match_inv_f))].mean()
harmonic_mean_f = 2 / (1 / matching_mse_f + 1 / covered_mse_f)
matching_mse_features.append(matching_mse_f)
covered_mse_features.append(covered_mse_f)
harmonic_mean_features.append(harmonic_mean_f)
index_mse_features.append(index_mse_f)
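        # macro scores: unweighted means of the per-feature scores, as in macro-averaging for classification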
macro_matching_mse = statistics.mean(matching_mse_features)
macro_covered_mse = statistics.mean(covered_mse_features)
macro_harmonic_mean = statistics.mean(harmonic_mean_features)
return {
"matching_mse": matching_mse,
"harmonic_mean": harmonic_mean,
"covered_mse": covered_mse,
"index_mse": index_mse,
"matching_mse_features": matching_mse_features,
"harmonic_mean_features": harmonic_mean_features,
"covered_mse_features": covered_mse_features,
"index_mse_features": index_mse_features,
"macro_matching_mse": macro_matching_mse,
"macro_covered_mse": macro_covered_mse,
"macro_harmonic_mean": macro_harmonic_mean,
}
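

if __name__ == "__main__":
    # Minimal local usage sketch (not part of the published module): it exercises _compute
    # directly with random arrays instead of loading the metric from the Hub, so the shapes,
    # seed, and batch size below are illustrative assumptions, not fixtures from the script.
    rng = np.random.default_rng(0)
    predictions = rng.normal(size=(4, 16, 2))  # (num_generation, num_timesteps, num_features)
    references = rng.normal(size=(3, 16, 2))  # (num_reference, num_timesteps, num_features)
    metric = matching_series()
    results = metric._compute(predictions=predictions, references=references, batch_size=2)
    for name, value in results.items():
        print(name, value)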