import json
from tqdm import tqdm
import scipy.stats as stats
def evaluate_retrieval(retrieval_file, topk):
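    """For each cutoff k in topk, return a list with one 0/1 entry per question:
    1 if an answer-bearing context appears among the top-k retrieved passages."""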
    with open(retrieval_file) as f:
        retrieval = json.load(f)
    accuracy = {k: [] for k in topk}
    max_k = max(topk)
    for qid in tqdm(list(retrieval.keys())):
        contexts = retrieval[qid]['contexts']
        has_ans_idx = max_k  # first index in contexts that has an answer
        for idx, ctx in enumerate(contexts):
            if idx >= max_k:
                break
            if ctx['has_answer']:
                has_ans_idx = idx
                break
        for k in topk:
            accuracy[k].append(0 if has_ans_idx >= k else 1)
    return accuracy


if __name__ == "__main__":
    topk = [5, 10, 20, 50, 100]
    all_scores_a, all_scores_b = None, None
    # Pool the per-question hit indicators across all five QA test sets so the
    # significance test below runs over every question.
    for DATA in ["nq", "trivia", "wq", "curated", "squad"]:
        FileNameA = "/data/t-junhanyang/InfoCSE/QA_TEST/InfoCSE_ICT_K1.{}.test.json".format(DATA)
        FileNameB = "/data/t-junhanyang/InfoCSE/QA_TEST/CONPONO.{}.test.json".format(DATA)
        scores_a = evaluate_retrieval(FileNameA, topk)
        if all_scores_a is None:
            all_scores_a = scores_a
        else:
            for k in topk:
                all_scores_a[k] += scores_a[k]
        print(FileNameB)
        scores_b = evaluate_retrieval(FileNameB, topk)
        if all_scores_b is None:
            all_scores_b = scores_b
        else:
            for k in topk:
                all_scores_b[k] += scores_b[k]
    print(len(all_scores_a[5]))
    print(len(all_scores_a[100]))
    # Independent two-sample t-test per cutoff; the p-value is halved to report
    # a one-sided test.
    for k in topk:
        stat_val, p_val = stats.ttest_ind(all_scores_a[k], all_scores_b[k])
        print(str(k) + ': ' + str(p_val / 2))