# NOTE(review): the lines that were here ("Spaces: Running", file size,
# commit hashes, a column of line numbers) were web-page scrape residue,
# not source code; preserved as a comment so the file stays valid Python.
from datasets.features import Features, Sequence, Value
from evaluate.module import EvaluationModuleInfo
import evaluate
class Suite(evaluate.EvaluationSuite):
    """Dummy evaluation suite used in tests.

    Bundles two text-classification sub-tasks (IMDB and SST-2), both scored
    with the "accuracy" metric and sharing the same label mapping.
    """

    def __init__(self, name="test-suite"):
        # BUG FIX: EvaluationSuite.__init__ requires a suite name; the
        # original called super().__init__() with no arguments, which raises
        # TypeError. A defaulted parameter keeps the no-argument Suite()
        # call working while allowing callers to pass an explicit name.
        super().__init__(name)
        # No preprocessing by default (could e.g. lowercase the input text).
        self.preprocessor = None
        # Both sub-tasks map pipeline label strings to numeric labels.
        label_mapping = {"LABEL_0": 0.0, "LABEL_1": 1.0}
        self.suite = [
            evaluate.SubTask(
                data="imdb",
                split="test",
                data_preprocessor=self.preprocessor,
                args_for_task={
                    "metric": "accuracy",
                    "input_column": "text",
                    "label_column": "label",
                    # independent copy so the two tasks never share state
                    "label_mapping": dict(label_mapping),
                },
            ),
            evaluate.SubTask(
                data="sst2",
                # only the first 10 rows to keep the test fast
                split="test[:10]",
                data_preprocessor=self.preprocessor,
                args_for_task={
                    "metric": "accuracy",
                    "input_column": "sentence",
                    "label_column": "label",
                    "label_mapping": dict(label_mapping),
                },
            ),
        ]

    def _info(self):
        """Return module metadata describing the expected feature schema."""
        return EvaluationModuleInfo(
            description="dummy metric for tests",
            citation="insert citation here",
            features=Features(
                {"predictions": Value("int64"), "references": Value("int64")}
            ),
        )