import datasets
import evaluate

from harim_scorer import Harimplus_Scorer

logger = evaluate.logging.get_logger(__name__)

CODEBASE_URL=''
PAPER_URL='TBA'
_CITATION = """\
@inproceedings{harimplus,
  title={HaRiM+: Evaluating Summary Quality with Hallucination Risk},
  author={Son, Seonil and Park, Junsoo and Hwang, Jeong-in and Noh, Hyungjong and Lee, Yeonsoo},
  booktitle={AACL},
  year={2022},
  url={TBA}
}
"""
_DESCRIPTION = """\
HaRiM+ is a reference-free evaluation metric for summarization (scoring a summary requires only its source article) that harnesses the power of a pretrained summarization model.
It is well suited to ranking summary-article pairs by quality. Note that the score range is unbounded.
The summarization model inside HaRiM+ reads the paired source article and evaluates how good the quality of the summary is.
HaRiM+ has proven effective for benchmarking summarization systems (system-level performance) as well as for ranking article-summary pairs (segment-level performance) across comprehensive aspects such as factuality, consistency, coherence, fluency, and relevance. For details, refer to our paper published at AACL 2022.
"""
_KWARGS_DESCRIPTION = """
HaRiM+ score.
Args:
    For scorer = evaluate.load():
        `pretrained_name` (str or pathlib.Path): summarization model checkpoint or path, loaded by transformers.AutoModelForSeq2SeqLM.from_pretrained(). Defaults to facebook/bart-large-cnn.
        `tokenizer` (use when your tokenizer cannot be loaded by from_pretrained): tokenizer function compatible with transformers.PreTrainedTokenizer. It requires tokenizer.pad_token|eos_token|bos_token and the tokenizer.__call__() method for HaRiM+ score computation.
    For scorer.compute():
        `predictions` (list of str): generated summaries
        `references` (list of str): source articles to be summarized
        `use_aggregator` (bool): if True, the average of the scores is returned
Returns:
    'results' (dict): {
        'harim+' (List[float] or float): the HaRiM+ score to use,
        'harim' (List[float] or float): the HaRiM term used to compute the score above,
        'log_ppl' (List[float] or float): the log-perplexity term, same as in BARTScore (Yuan et al., NeurIPS 2021),
        'lambda' (float): balancing coefficient for computing harim+ from harim and log_ppl (modifying it is not recommended),
        }
Examples:
    >>> summaries = ["hello there", "hello there"]
    >>> articles = ["hello, this is the article to be summarized", "hello, this is the article to be summarized"]
    >>> scorer = evaluate.load("NCSOFT/harim_plus")  # optionally: pretrained_name='PRETRAINEDNAME', tokenizer=TOKENIZER
    >>> results = scorer.compute(predictions=summaries, references=articles)  # optionally: use_aggregator=True
    >>> print([round(v, 2) for v in results["harim+"]])
    [0.4, 0.4]
"""
class Harimplus(evaluate.Metric):
    def __init__(self,
                 pretrained_name='facebook/bart-large-cnn',
                 tokenizer=None,
                 device='cuda',
                 **kwargs
                 ):
        super().__init__(**kwargs)
        # Store the scorer configuration; the model itself is loaded lazily in
        # _download_and_prepare().
        self.myconfig = dict(
            pretrained_name=pretrained_name,
            tokenizer=tokenizer,
            device=device,
        )
    def _info(self):
        return evaluate.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage=CODEBASE_URL,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=[CODEBASE_URL],
            reference_urls=[CODEBASE_URL, PAPER_URL],
        )
    def _download_and_prepare(self, dl_manager):
        pretrained_name = self.myconfig['pretrained_name']
        is_custom_tokenizer = self.myconfig['tokenizer'] is not None
        logger.warning(
            "Loading HaRiM+ score"
            f"\n\tpretrained_name = {pretrained_name}"
        )
        if is_custom_tokenizer:
            logger.warning(
                "tokenizer is overridden by \n\tself.myconfig['tokenizer']"
            )
        logger.warning(
            "You can change checkpoints with the `pretrained_name` kwarg in evaluate.load. We strongly recommend *-large or larger checkpoints. "
            "Refrain from using checkpoints trained on noisy corpora such as BBC XSum.")
        # download the model checkpoint specified by self.myconfig and set up the scorer
        self.scorer = Harimplus_Scorer(**self.myconfig)
    def _compute(self, predictions=None,
                 references=None,
                 use_aggregator=False,
                 bsz=32,
                 tokenwise_score=False):
        summaries = predictions
        articles = references
        scores = self.scorer.compute(predictions=summaries,
                                     references=articles,
                                     use_aggregator=use_aggregator,
                                     bsz=bsz,
                                     tokenwise_score=tokenwise_score)
        return scores
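
# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the metric class): this mirrors the
# docstring example above. It assumes the metric is published under the
# "NCSOFT/harim_plus" path; the optional kwargs shown are the ones documented
# in _KWARGS_DESCRIPTION.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    summaries = ["hello there", "hello there"]
    articles = [
        "hello, this is the article to be summarized",
        "hello, this is the article to be summarized",
    ]

    # Optionally pass pretrained_name=... (a seq2seq summarization checkpoint)
    # and/or tokenizer=... to evaluate.load(), as described in the docstring.
    scorer = evaluate.load("NCSOFT/harim_plus")
    results = scorer.compute(predictions=summaries, references=articles)

    # `results` holds the HaRiM+ scores plus the terms they are computed from.
    print("harim+ :", results["harim+"])
    print("harim  :", results["harim"])
    print("log_ppl:", results["log_ppl"])
    print("lambda :", results["lambda"])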