Angelina Wang
committed
Commit · 228c8dc
1 Parent(s): 70b1fce
conver to numpy
directional_bias_amplification.py
CHANGED
@@ -15,6 +15,8 @@
 
 import evaluate
 
+import numpy as np
+
 _DESCRIPTION = """
 Directional Bias Amplification is a metric that captures the amount of bias (i.e., a conditional probability) that is amplified.
 This metric was introduced in the ICML 2021 paper "Directional Bias Amplification" (https://arxiv.org/abs/2102.12594).
@@ -60,7 +62,7 @@ class DirectionalBiasAmplification(evaluate.EvaluationModule):
 
     def _compute(self, predictions, references, attributes):
 
-        task_preds, task_labels, attribute_labels = predictions, references, attributes
+        task_preds, task_labels, attribute_labels = np.array(predictions), np.array(references), np.array(attributes)
 
         assert len(task_labels.shape) == 2 and len(attribute_labels.shape) == 2, 'Please read the shape of the expected inputs, which should be "num samples" by "num classification items"'
         assert len(task_labels) == len(attribute_labels) == len(task_preds), 'Please make sure the number of samples in the three input arrays is the same.'