import json

from tqdm import tqdm
import scipy.stats as stats


def evaluate_retrieval(retrieval_file, topk):
    """Score a retrieval run against its answer annotations.

    `retrieval_file` is a JSON file mapping each question id to a dict with
    a "contexts" list, where every context carries a boolean "has_answer"
    flag. Returns, for each cutoff k in `topk`, a per-question 0/1 list:
    1 if any of the top-k contexts contains the answer, else 0.
    """
    with open(retrieval_file) as f:
        retrieval = json.load(f)
    accuracy = {k: [] for k in topk}
    max_k = max(topk)

    for qid in tqdm(retrieval):
        contexts = retrieval[qid]['contexts']

        # Rank of the first answer-bearing context; defaulting to max_k
        # means "not found within the top max_k", so every cutoff scores 0.
        has_ans_idx = max_k
        for idx, ctx in enumerate(contexts):
            if idx >= max_k:
                break
            if ctx['has_answer']:
                has_ans_idx = idx
                break

        for k in topk:
            accuracy[k].append(1 if has_ans_idx < k else 0)

    return accuracy
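

# Illustrative helper (an addition, not part of the original script): collapse
# the per-question 0/1 lists returned by evaluate_retrieval into top-k
# accuracy percentages.
def summarize_accuracy(accuracy):
    return {k: 100.0 * sum(hits) / len(hits) for k, hits in accuracy.items()}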


if __name__ == "__main__":
    topk = [5, 10, 20, 50, 100]
    all_scores_a, all_scores_b = None, None

    # Pool per-question scores across the five QA test sets so the
    # significance test runs over the combined question population.
    for data in ["nq", "trivia", "wq", "curated", "squad"]:
        file_name_a = "/data/t-junhanyang/InfoCSE/QA_TEST/InfoCSE_ICT_K1.{}.test.json".format(data)
        file_name_b = "/data/t-junhanyang/InfoCSE/QA_TEST/CONPONO.{}.test.json".format(data)

        print(file_name_a)
        scores_a = evaluate_retrieval(file_name_a, topk)
        if all_scores_a is None:
            all_scores_a = scores_a
        else:
            for k in topk:
                all_scores_a[k] += scores_a[k]

        print(file_name_b)
        scores_b = evaluate_retrieval(file_name_b, topk)
        if all_scores_b is None:
            all_scores_b = scores_b
        else:
            for k in topk:
                all_scores_b[k] += scores_b[k]

    # Sanity check: every cutoff should see the same pooled question count.
    print(len(all_scores_a[5]))
    print(len(all_scores_a[100]))

    for k in topk:
        # Two-sample t-test on the pooled 0/1 scores. Halving the two-sided
        # p-value gives the one-tailed p only when the statistic's sign
        # matches the hypothesized direction (here, system A over system B).
        stat_val, p_val = stats.ttest_ind(all_scores_a[k], all_scores_b[k])
        print('{}: {}'.format(k, p_val / 2))
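
# Note (an aside, not in the original script): stats.ttest_ind treats the two
# score lists as independent samples, but both systems are evaluated on the
# same pooled questions. A paired test may therefore be the better fit; a
# minimal sketch, assuming both runs cover the same questions in the same
# order:
#
#     for k in topk:
#         stat_val, p_val = stats.ttest_rel(all_scores_a[k], all_scores_b[k])
#         print('{} (paired): {}'.format(k, p_val / 2))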