berkatil committed
Commit
6b816f7
1 Parent(s): 0ef7bae
Files changed (2)
  1. README.md +1 -1
  2. mrr.py +3 -4
README.md CHANGED
@@ -14,7 +14,7 @@ pinned: false
 # Metric Card for mrr
 
 ## Metric Description
-This is the mean average precision (map) metric for retrieval systems.
+This is the mean reciprocal rank (mrr) metric for retrieval systems.
 It is the average of the precision scores computer after each relevant document is got. You can refer to [here](https://amenra.github.io/ranx/metrics/#mean-reciprocal-rank)
 
 ## How to Use
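For context, the renamed module is a standard `evaluate` metric, so it would typically be loaded and called as sketched below. This is a minimal usage sketch, not taken from the repo's README: the module id (`berkatil/mrr`) and the way ranked results are encoded in a single string are assumptions; the only thing the commit confirms is that each prediction and reference is declared as one string per example and that the former `k` input is gone.

```python
# Hypothetical usage sketch; module id and input encoding are assumptions.
import evaluate

mrr = evaluate.load("berkatil/mrr")  # assumed repo id on the Hub

# One prediction string and one reference string per example, matching the
# datasets.Features({'predictions': string, 'references': string}) schema
# declared in this commit.
results = mrr.compute(
    predictions=["doc_3 doc_1 doc_2", "doc_5 doc_4"],  # hypothetical ranked-id strings
    references=["doc_1", "doc_4"],                     # hypothetical relevant-id strings
)
print(results)
```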
mrr.py CHANGED
@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-"""Mean average precision metric"""
+"""Mean reciprocal rank metric"""
 
 import evaluate
 import datasets
@@ -62,7 +62,7 @@ Examples:
 """
 
 @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
-class map(evaluate.Metric):
+class mrr(evaluate.Metric):
     def _info(self):
         return evaluate.MetricInfo(
             # This is the description that will appear on the modules page.
@@ -73,8 +73,7 @@ class map(evaluate.Metric):
             # This defines the format of each prediction and reference
             features=datasets.Features({
                 'predictions': datasets.Value("string"),
-                'references': datasets.Value("string"),
-                'k': datasets.Value("int32")
+                'references': datasets.Value("string")
             }),
             # Homepage of the module for documentation
             reference_urls=["https://amenra.github.io/ranx/"]
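The computation itself (`_compute`) is not part of this diff. For reference, mean reciprocal rank averages, over queries, the reciprocal of the rank at which the first relevant document appears. The sketch below is a standalone illustration of that definition, not this module's implementation (which, going by the reference URL, may delegate to ranx), and the function name and input layout are assumptions.

```python
# Standalone MRR sketch for illustration only; the repo's actual _compute is
# not shown in this commit, and its input encoding may differ.
def mean_reciprocal_rank(ranked_ids, relevant_ids):
    """ranked_ids: one ranked list of document ids per query.
    relevant_ids: the single relevant document id per query."""
    reciprocal_ranks = []
    for ranking, relevant in zip(ranked_ids, relevant_ids):
        # Rank (1-based) of the first relevant document, or None if absent.
        rank = next((i + 1 for i, doc in enumerate(ranking) if doc == relevant), None)
        reciprocal_ranks.append(1.0 / rank if rank is not None else 0.0)
    return sum(reciprocal_ranks) / len(reciprocal_ranks)

# Example: the relevant document is ranked 2nd for query 1 and 1st for query 2,
# so MRR = (1/2 + 1/1) / 2 = 0.75.
print(mean_reciprocal_rank([["d3", "d1"], ["d4", "d2"]], ["d1", "d4"]))
```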