koichi12 committed (verified)
Commit: 5701701
Parent(s): b17f4b7

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.
Files changed (50)
  1. scripts/yans/lm-evaluation-harness/tests/testdata/arc_easy-v0-loglikelihood +1 -0
  2. scripts/yans/lm-evaluation-harness/tests/testdata/arc_easy-v0-res.json +1 -0
  3. scripts/yans/lm-evaluation-harness/tests/testdata/arithmetic_3da-v0-res.json +1 -0
  4. scripts/yans/lm-evaluation-harness/tests/testdata/arithmetic_4da-v0-loglikelihood +1 -0
  5. scripts/yans/lm-evaluation-harness/tests/testdata/blimp_animate_subject_trans-v0-loglikelihood +1 -0
  6. scripts/yans/lm-evaluation-harness/tests/testdata/blimp_complex_NP_island-v0-res.json +1 -0
  7. scripts/yans/lm-evaluation-harness/tests/testdata/blimp_distractor_agreement_relative_clause-v0-res.json +1 -0
  8. scripts/yans/lm-evaluation-harness/tests/testdata/blimp_existential_there_quantifiers_1-v0-res.json +1 -0
  9. scripts/yans/lm-evaluation-harness/tests/testdata/blimp_expletive_it_object_raising-v0-loglikelihood +1 -0
  10. scripts/yans/lm-evaluation-harness/tests/testdata/blimp_matrix_question_npi_licensor_present-v0-loglikelihood +1 -0
  11. scripts/yans/lm-evaluation-harness/tests/testdata/blimp_matrix_question_npi_licensor_present-v0-res.json +1 -0
  12. scripts/yans/lm-evaluation-harness/tests/testdata/blimp_npi_present_2-v0-res.json +1 -0
  13. scripts/yans/lm-evaluation-harness/tests/testdata/blimp_only_npi_licensor_present-v0-res.json +1 -0
  14. scripts/yans/lm-evaluation-harness/tests/testdata/blimp_passive_2-v0-loglikelihood +1 -0
  15. scripts/yans/lm-evaluation-harness/tests/testdata/blimp_principle_A_c_command-v0-loglikelihood +1 -0
  16. scripts/yans/lm-evaluation-harness/tests/testdata/blimp_principle_A_domain_2-v0-res.json +1 -0
  17. scripts/yans/lm-evaluation-harness/tests/testdata/blimp_regular_plural_subject_verb_agreement_1-v0-loglikelihood +1 -0
  18. scripts/yans/lm-evaluation-harness/tests/testdata/blimp_tough_vs_raising_1-v0-loglikelihood +1 -0
  19. scripts/yans/lm-evaluation-harness/tests/testdata/blimp_transitive-v0-res.json +1 -0
  20. scripts/yans/lm-evaluation-harness/tests/testdata/blimp_wh_questions_object_gap-v0-loglikelihood +1 -0
  21. scripts/yans/lm-evaluation-harness/tests/testdata/blimp_wh_vs_that_with_gap-v0-loglikelihood +1 -0
  22. scripts/yans/lm-evaluation-harness/tests/testdata/crows_pairs_english_autre-v0-loglikelihood +1 -0
  23. scripts/yans/lm-evaluation-harness/tests/testdata/crows_pairs_english_race_color-v0-res.json +1 -0
  24. scripts/yans/lm-evaluation-harness/tests/testdata/crows_pairs_english_religion-v0-loglikelihood +1 -0
  25. scripts/yans/lm-evaluation-harness/tests/testdata/crows_pairs_english_religion-v0-res.json +1 -0
  26. scripts/yans/lm-evaluation-harness/tests/testdata/crows_pairs_english_socioeconomic-v0-res.json +1 -0
  27. scripts/yans/lm-evaluation-harness/tests/testdata/crows_pairs_french_autre-v0-res.json +1 -0
  28. scripts/yans/lm-evaluation-harness/tests/testdata/crows_pairs_french_physical_appearance-v0-loglikelihood +1 -0
  29. scripts/yans/lm-evaluation-harness/tests/testdata/ethics_utilitarianism-v0-res.json +1 -0
  30. scripts/yans/lm-evaluation-harness/tests/testdata/gpt3_test_bb2cc49115e88788ed870ad0716eb00b280a885f91c7ed6e1e864435e5e2b6ac.pkl +3 -0
  31. scripts/yans/lm-evaluation-harness/tests/testdata/hellaswag-v0-res.json +1 -0
  32. scripts/yans/lm-evaluation-harness/tests/testdata/hendrycksTest-anatomy-v0-loglikelihood +1 -0
  33. scripts/yans/lm-evaluation-harness/tests/testdata/hendrycksTest-college_biology-v0-res.json +1 -0
  34. scripts/yans/lm-evaluation-harness/tests/testdata/hendrycksTest-elementary_mathematics-v0-loglikelihood +1 -0
  35. scripts/yans/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_physics-v0-loglikelihood +1 -0
  36. scripts/yans/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_statistics-v0-res.json +1 -0
  37. scripts/yans/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_world_history-v0-loglikelihood +1 -0
  38. scripts/yans/lm-evaluation-harness/tests/testdata/hendrycksTest-miscellaneous-v0-res.json +1 -0
  39. scripts/yans/lm-evaluation-harness/tests/testdata/hendrycksTest-moral_disputes-v0-res.json +1 -0
  40. scripts/yans/lm-evaluation-harness/tests/testdata/hendrycksTest-sociology-v0-res.json +1 -0
  41. scripts/yans/lm-evaluation-harness/tests/testdata/hendrycksTest-virology-v0-loglikelihood +1 -0
  42. scripts/yans/lm-evaluation-harness/tests/testdata/lambada_openai-v0-res.json +1 -0
  43. scripts/yans/lm-evaluation-harness/tests/testdata/lambada_openai_mt_de-v0-loglikelihood +1 -0
  44. scripts/yans/lm-evaluation-harness/tests/testdata/lambada_openai_mt_fr-v0-loglikelihood +1 -0
  45. scripts/yans/lm-evaluation-harness/tests/testdata/lambada_standard-v0-res.json +1 -0
  46. scripts/yans/lm-evaluation-harness/tests/testdata/math_counting_and_prob-v0-res.json +1 -0
  47. scripts/yans/lm-evaluation-harness/tests/testdata/math_geometry-v1-res.json +1 -0
  48. scripts/yans/lm-evaluation-harness/tests/testdata/math_intermediate_algebra-v1-res.json +1 -0
  49. scripts/yans/lm-evaluation-harness/tests/testdata/mc_taco-v0-loglikelihood +1 -0
  50. scripts/yans/lm-evaluation-harness/tests/testdata/mnli_mismatched-v0-loglikelihood +1 -0
scripts/yans/lm-evaluation-harness/tests/testdata/arc_easy-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ ffa6e39a35a16299dcb015f17f986aaa598ad8b4840c4cebe0339a7042232741
scripts/yans/lm-evaluation-harness/tests/testdata/arc_easy-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"arc_easy": {"acc": 0.2474747474747475, "acc_norm": 0.24074074074074073, "acc_norm_stderr": 0.008772796145221907, "acc_stderr": 0.008855114414834707}}, "versions": {"arc_easy": 0}}
scripts/yans/lm-evaluation-harness/tests/testdata/arithmetic_3da-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"arithmetic_3da": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"arithmetic_3da": 0}}
scripts/yans/lm-evaluation-harness/tests/testdata/arithmetic_4da-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ d3557beb8b9e5704122c2fc6362b11fbe2c3f2f3cb72aed4462b208767c40e01
scripts/yans/lm-evaluation-harness/tests/testdata/blimp_animate_subject_trans-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 2a84231e7b79f517427e57e2099c88fed3d60a7efab4ef9506e263b4091d5cfa
scripts/yans/lm-evaluation-harness/tests/testdata/blimp_complex_NP_island-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_complex_NP_island": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_complex_NP_island": 0}}
scripts/yans/lm-evaluation-harness/tests/testdata/blimp_distractor_agreement_relative_clause-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_distractor_agreement_relative_clause": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_distractor_agreement_relative_clause": 0}}
scripts/yans/lm-evaluation-harness/tests/testdata/blimp_existential_there_quantifiers_1-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_existential_there_quantifiers_1": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_existential_there_quantifiers_1": 0}}
scripts/yans/lm-evaluation-harness/tests/testdata/blimp_expletive_it_object_raising-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ ceede5b38248a62125a74a8332602b8eac5ef40864f071ad8d86e7971e07219d
scripts/yans/lm-evaluation-harness/tests/testdata/blimp_matrix_question_npi_licensor_present-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ a3a702a3335c79b02b36caf37c68069050c2a8a3a03c3610c09afc39d2b83fb1
scripts/yans/lm-evaluation-harness/tests/testdata/blimp_matrix_question_npi_licensor_present-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_matrix_question_npi_licensor_present": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_matrix_question_npi_licensor_present": 0}}
scripts/yans/lm-evaluation-harness/tests/testdata/blimp_npi_present_2-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_npi_present_2": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_npi_present_2": 0}}
scripts/yans/lm-evaluation-harness/tests/testdata/blimp_only_npi_licensor_present-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_only_npi_licensor_present": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_only_npi_licensor_present": 0}}
scripts/yans/lm-evaluation-harness/tests/testdata/blimp_passive_2-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 755bdfe2c89737c43001ff1dc83d68ad33e444aaf0669af66aaf82dcd09f2eca
scripts/yans/lm-evaluation-harness/tests/testdata/blimp_principle_A_c_command-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 7c2ed82612af9175052cd44d8e178b6dd084c04eb462a3d88fcacfad2df8be8e
scripts/yans/lm-evaluation-harness/tests/testdata/blimp_principle_A_domain_2-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_principle_A_domain_2": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_principle_A_domain_2": 0}}
scripts/yans/lm-evaluation-harness/tests/testdata/blimp_regular_plural_subject_verb_agreement_1-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 5bc0441f31e32443cf761bca6e961d504e1e84b15aa4e1d79e5c8ed5b4c2aa3a
scripts/yans/lm-evaluation-harness/tests/testdata/blimp_tough_vs_raising_1-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 973fe56534fdef1207f0fc08dd09a210304c55f33c6cbb17552754bf54f11c86
scripts/yans/lm-evaluation-harness/tests/testdata/blimp_transitive-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_transitive": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_transitive": 0}}
scripts/yans/lm-evaluation-harness/tests/testdata/blimp_wh_questions_object_gap-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 4d4aaa0274ccd485ff8430ed61b8f83806febe18c16616c7d050f637a0463eba
scripts/yans/lm-evaluation-harness/tests/testdata/blimp_wh_vs_that_with_gap-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ d41a9b85e4c31e445bf9b46b8642df02203ccc02b4a9b254bf76066d5c54b4b7
scripts/yans/lm-evaluation-harness/tests/testdata/crows_pairs_english_autre-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ a197ccc8538231404a8e43f5ed0fbbfb2c317b4da337f6e7aa9642131aeb426a
scripts/yans/lm-evaluation-harness/tests/testdata/crows_pairs_english_race_color-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"crows_pairs_english_race_color": {"likelihood_difference": 0.3322827903840805, "likelihood_difference_stderr": 0.01019838186372816, "pct_stereotype": 0.4822834645669291, "pct_stereotype_stderr": 0.022191835500120254}}, "versions": {"crows_pairs_english_race_color": 0}}
scripts/yans/lm-evaluation-harness/tests/testdata/crows_pairs_english_religion-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 2ed57377174adaf0fb30037eb055eafdd02cd46e57bc32066d5fecd90a14b6e1
scripts/yans/lm-evaluation-harness/tests/testdata/crows_pairs_english_religion-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"crows_pairs_english_religion": {"likelihood_difference": 0.32170622542430666, "likelihood_difference_stderr": 0.022101541392310232, "pct_stereotype": 0.43243243243243246, "pct_stereotype_stderr": 0.04723583229758394}}, "versions": {"crows_pairs_english_religion": 0}}
scripts/yans/lm-evaluation-harness/tests/testdata/crows_pairs_english_socioeconomic-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"crows_pairs_english_socioeconomic": {"likelihood_difference": 0.3424577735757881, "likelihood_difference_stderr": 0.017459994170011896, "pct_stereotype": 0.46842105263157896, "pct_stereotype_stderr": 0.036297038088316094}}, "versions": {"crows_pairs_english_socioeconomic": 0}}
scripts/yans/lm-evaluation-harness/tests/testdata/crows_pairs_french_autre-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"crows_pairs_french_autre": {"likelihood_difference": 0.3517045997290783, "likelihood_difference_stderr": 0.07647821858130377, "pct_stereotype": 0.23076923076923078, "pct_stereotype_stderr": 0.12162606385262997}}, "versions": {"crows_pairs_french_autre": 0}}
scripts/yans/lm-evaluation-harness/tests/testdata/crows_pairs_french_physical_appearance-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ ea61eaad64e9292790d4bbef955ffeebed7a595de098bc5ac726a6e51f27f9af
scripts/yans/lm-evaluation-harness/tests/testdata/ethics_utilitarianism-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"ethics_utilitarianism": {"acc": 0.49771214642262895, "acc_stderr": 0.007211546310787838}}, "versions": {"ethics_utilitarianism": 0}}
scripts/yans/lm-evaluation-harness/tests/testdata/gpt3_test_bb2cc49115e88788ed870ad0716eb00b280a885f91c7ed6e1e864435e5e2b6ac.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ad18c6203e8b3eda1b88f8dfd7d197c4053c07640b0542fcdd8170e9b3bd2d30
+ size 2479
scripts/yans/lm-evaluation-harness/tests/testdata/hellaswag-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hellaswag": {"acc": 0.24965146385182235, "acc_norm": 0.24756024696275641, "acc_norm_stderr": 0.004307128573285236, "acc_stderr": 0.004319267432460666}}, "versions": {"hellaswag": 0}}
scripts/yans/lm-evaluation-harness/tests/testdata/hendrycksTest-anatomy-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ bf05e04ed8cf61cf3aad294ed3f5a16137775ffdd20f1b129022ddffc1251768
scripts/yans/lm-evaluation-harness/tests/testdata/hendrycksTest-college_biology-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-college_biology": {"acc": 0.24305555555555555, "acc_norm": 0.2361111111111111, "acc_norm_stderr": 0.03551446610810826, "acc_stderr": 0.03586879280080341}}, "versions": {"hendrycksTest-college_biology": 0}}
scripts/yans/lm-evaluation-harness/tests/testdata/hendrycksTest-elementary_mathematics-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 6b21f5cd5606268421a667152ec989424b66905c02adbab8d4ff6bb9d21b77d1
scripts/yans/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_physics-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ dae59e82d3d4d8dec82239d9620b72cc47bb6efbe2f1c2f9b9d23e849c9c5e32
scripts/yans/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_statistics-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-high_school_statistics": {"acc": 0.2962962962962963, "acc_norm": 0.3055555555555556, "acc_norm_stderr": 0.03141554629402544, "acc_stderr": 0.03114144782353604}}, "versions": {"hendrycksTest-high_school_statistics": 0}}
scripts/yans/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_world_history-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 1c8b994bd9a63ec874fc8d0e3a27077118b7adc472306b2fd6c55635a78b9d52
scripts/yans/lm-evaluation-harness/tests/testdata/hendrycksTest-miscellaneous-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-miscellaneous": {"acc": 0.23499361430395913, "acc_norm": 0.2515964240102171, "acc_norm_stderr": 0.015517322365529622, "acc_stderr": 0.015162024152278445}}, "versions": {"hendrycksTest-miscellaneous": 0}}
scripts/yans/lm-evaluation-harness/tests/testdata/hendrycksTest-moral_disputes-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-moral_disputes": {"acc": 0.24855491329479767, "acc_norm": 0.27167630057803466, "acc_norm_stderr": 0.023948512905468365, "acc_stderr": 0.023267528432100174}}, "versions": {"hendrycksTest-moral_disputes": 0}}
scripts/yans/lm-evaluation-harness/tests/testdata/hendrycksTest-sociology-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-sociology": {"acc": 0.23383084577114427, "acc_norm": 0.24875621890547264, "acc_norm_stderr": 0.030567675938916707, "acc_stderr": 0.02992941540834838}}, "versions": {"hendrycksTest-sociology": 0}}
scripts/yans/lm-evaluation-harness/tests/testdata/hendrycksTest-virology-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 0ffa491f7bad2abbb64ecd752a295729167599b3815238cab0ecf4cb08bba9b6
scripts/yans/lm-evaluation-harness/tests/testdata/lambada_openai-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"lambada_openai": {"acc": 0.0, "acc_stderr": 0.0, "ppl": 1.6479047769869253, "ppl_stderr": 0.006497321146240192}}, "versions": {"lambada_openai": 0}}
scripts/yans/lm-evaluation-harness/tests/testdata/lambada_openai_mt_de-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 5ad125e1708499832b2cee8c3388f89f9c0277010fd96fbd3359039ce8105984
scripts/yans/lm-evaluation-harness/tests/testdata/lambada_openai_mt_fr-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 5d16f4a0c51dc6d7b6df2ebeba2bbfa51e700b843779b559b3d90183d7b02a11
scripts/yans/lm-evaluation-harness/tests/testdata/lambada_standard-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"lambada_standard": {"acc": 0.0, "acc_stderr": 0.0, "ppl": 1.6479047769869253, "ppl_stderr": 0.006497321146240192}}, "versions": {"lambada_standard": 0}}
scripts/yans/lm-evaluation-harness/tests/testdata/math_counting_and_prob-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"math_counting_and_prob": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"math_counting_and_prob": 0}}
scripts/yans/lm-evaluation-harness/tests/testdata/math_geometry-v1-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"math_geometry": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"math_geometry": 1}}
scripts/yans/lm-evaluation-harness/tests/testdata/math_intermediate_algebra-v1-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"math_intermediate_algebra": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"math_intermediate_algebra": 1}}
scripts/yans/lm-evaluation-harness/tests/testdata/mc_taco-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 1811808ef05afd5f30ffc3471622a3dd7a1b681b17a2f7616695ad6b2a45943c
scripts/yans/lm-evaluation-harness/tests/testdata/mnli_mismatched-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 3784acf322e79f31702a7a0612030e4ba5c4fc466ad976a34ee3f3d7278c01f0