{
  "_name_or_path": "microsoft/deberta-v3-base",
  "architectures": [
    "DebertaV2ForSequenceClassification"
  ],
  "attention_probs_dropout_prob": 0.1,
  "classifiers_size": [
    3, 2, 2, 2, 2, 2, 1, 2, 3, 2, 2, 2, 3, 3, 3, 3, 1, 3, 3, 2,
    2, 3, 2, 2, 2, 2, 6, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3,
    2, 2, 2, 2, 6, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 3, 3, 3, 3,
    3, 3, 3, 3, 2, 2, 2, 2, 2, 47, 23, 9, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 2, 2, 2, 2, 2, 2, 20, 50, 3, 3, 4, 2, 8, 4, 2, 2, 3, 3, 3,
    3, 3, 3, 20, 2, 174, 2, 2, 3, 2, 2, 2, 2, 41, 51, 2, 8, 2, 3, 2,
    17, 3, 2, 18, 16, 2, 3, 3, 42, 7, 12, 11, 7, 4, 100, 13, 100, 8, 1, 20,
    2, 2, 4, 5, 3, 4, 14, 2, 6, 4, 2, 1, 3, 10, 77, 3, 10, 4, 2, 7,
    6, 28, 3, 6, 4, 5, 6, 7, 3, 2, 2, 20, 2, 2, 2, 7, 2, 6, 4, 2,
    4, 3, 3, 2, 13, 2, 9, 2, 2, 2, 2, 4, 1, 2, 1, 13, 3, 5, 11, 37,
    2, 49, 12, 40, 10, 4, 1, 2, 2, 1, 5, 3, 2, 3, 2, 2, 2, 2, 2, 3,
    2, 2, 12, 3, 3, 2, 19, 3, 1, 1, 2, 2, 2, 2, 2, 1, 2, 2, 1, 1,
    2, 3, 2, 1, 4, 4, 1, 1, 1, 2, 3, 2, 3, 1, 1, 2, 1, 3, 2, 2,
    2, 2, 3, 2, 2, 2, 1, 3, 2, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 4,
    1, 1, 1, 1, 3, 1, 3, 1, 2, 2, 1, 2, 3, 3, 2, 1, 3, 1, 1, 3,
    1, 3, 2, 1, 1, 1, 2, 2, 50, 50, 50, 50, 50, 50, 2, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 2, 3, 3, 3, 1, 1
  ],
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "id2label": {
    "0": "entailment",
    "1": "neutral",
    "2": "contradiction"
  },
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "label2id": {
    "contradiction": 2,
    "entailment": 0,
    "neutral": 1
  },
  "layer_norm_eps": 1e-07,
  "max_position_embeddings": 512,
  "max_relative_positions": -1,
  "model_type": "deberta-v2",
  "norm_rel_ebd": "layer_norm",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 0,
  "pooler_dropout": 0,
  "pooler_hidden_act": "gelu",
  "pooler_hidden_size": 768,
  "pos_att_type": [
    "p2c",
    "c2p"
  ],
  "position_biased_input": false,
  "position_buckets": 256,
  "relative_attention": true,
  "share_att_key": true,
  "tasks": [
    "glue/mnli",
    "glue/qnli",
    "glue/rte",
    "glue/wnli",
    "glue/mrpc",
    "glue/qqp",
    "glue/stsb",
    "super_glue/boolq",
    "super_glue/cb",
    "super_glue/multirc",
    "super_glue/wic",
    "super_glue/axg",
    "anli/a1",
    "anli/a2",
    "anli/a3",
    "sick/label",
    "sick/relatedness",
    "sick/entailment_AB",
    "snli",
    "scitail/snli_format",
    "hans",
    "WANLI",
    "recast/recast_verbcorner",
    "recast/recast_megaveridicality",
    "recast/recast_sentiment",
    "recast/recast_ner",
    "recast/recast_kg_relations",
    "recast/recast_factuality",
    "recast/recast_puns",
    "recast/recast_verbnet",
    "probability_words_nli/reasoning_1hop",
    "probability_words_nli/usnli",
    "probability_words_nli/reasoning_2hop",
    "nan-nli/joey234--nan-nli",
    "nli_fever",
    "breaking_nli",
    "conj_nli",
    "fracas",
    "dialogue_nli",
    "mpe",
    "dnc",
    "recast_white/fnplus",
    "recast_white/sprl",
    "recast_white/dpr",
    "joci",
    "robust_nli/IS_CS",
    "robust_nli/LI_LI",
    "robust_nli/ST_WO",
    "robust_nli/PI_SP",
    "robust_nli/PI_CD",
    "robust_nli/ST_SE",
    "robust_nli/ST_NE",
    "robust_nli/ST_LM",
    "robust_nli_is_sd",
    "robust_nli_li_ts",
    "add_one_rte",
    "imppres/implicature_quantifiers/log",
    "imppres/implicature_numerals_2_3/log",
    "imppres/implicature_numerals_10_100/log",
    "imppres/implicature_gradable_adjective/log",
    "imppres/implicature_connectives/log",
    "imppres/implicature_modals/log",
    "imppres/implicature_gradable_verb/log",
    "glue_diagnostics/diagnostics",
    "hlgd",
    "paws/labeled_final",
    "paws/labeled_swap",
    "quora",
    "medical_questions_pairs",
    "conll2003/pos_tags",
    "conll2003/chunk_tags",
    "conll2003/ner_tags",
    "hh-rlhf",
    "model-written-evals",
    "truthful_qa/multiple_choice",
    "fig-qa",
    "bigbench/novel_concepts",
    "bigbench/winowhy",
    "bigbench/dark_humor_detection",
    "bigbench/crass_ai",
    "bigbench/undo_permutation",
    "bigbench/logical_fallacy_detection",
    "bigbench/analytic_entailment",
    "bigbench/similarities_abstraction",
    "bigbench/simple_ethical_questions",
    "bigbench/intent_recognition",
    "bigbench/english_proverbs",
    "bigbench/penguins_in_a_table",
    "bigbench/hyperbaton",
    "bigbench/phrase_relatedness",
    "bigbench/social_support",
    "bigbench/symbol_interpretation",
    "bigbench/play_dialog_same_or_different",
    "bigbench/discourse_marker_prediction",
    "bigbench/human_organs_senses",
    "bigbench/date_understanding",
    "bigbench/contextual_parametric_knowledge_conflicts",
    "bigbench/code_line_description",
    "bigbench/moral_permissibility",
    "bigbench/crash_blossom",
    "bigbench/nonsense_words_grammar",
    "bigbench/conceptual_combinations",
    "bigbench/identify_odd_metaphor",
    "bigbench/question_selection",
    "bigbench/mathematical_induction",
    "bigbench/logical_args",
    "bigbench/arithmetic",
    "bigbench/temporal_sequences",
    "bigbench/sports_understanding",
    "bigbench/timedial",
    "bigbench/hindu_knowledge",
    "bigbench/navigate",
    "bigbench/unit_interpretation",
    "bigbench/figure_of_speech_detection",
    "bigbench/authorship_verification",
    "bigbench/entailed_polarity",
    "bigbench/odd_one_out",
    "bigbench/physics",
    "bigbench/dyck_languages",
    "bigbench/riddle_sense",
    "bigbench/physical_intuition",
    "bigbench/checkmate_in_one",
    "bigbench/gre_reading_comprehension",
    "bigbench/causal_judgment",
    "bigbench/misconceptions",
    "bigbench/presuppositions_as_nli",
    "bigbench/anachronisms",
    "bigbench/vitaminc_fact_verification",
    "bigbench/movie_dialog_same_or_different",
    "bigbench/implicatures",
    "bigbench/bbq_lite_json",
    "bigbench/emoji_movie",
    "bigbench/geometric_shapes",
    "bigbench/metaphor_boolean",
    "bigbench/understanding_fables",
    "bigbench/cause_and_effect",
    "bigbench/elementary_math_qa",
    "bigbench/formal_fallacies_syllogisms_negation",
    "bigbench/fact_checker",
    "bigbench/evaluating_information_essentiality",
    "bigbench/salient_translation_error_detection",
    "bigbench/sentence_ambiguity",
    "bigbench/cs_algorithms",
    "bigbench/hhh_alignment",
    "bigbench/social_iqa",
    "bigbench/abstract_narrative_understanding",
    "bigbench/implicit_relations",
    "bigbench/known_unknowns",
    "bigbench/identify_math_theorems",
    "bigbench/tracking_shuffled_objects",
    "bigbench/disambiguation_qa",
    "bigbench/logic_grid_puzzle",
    "bigbench/suicide_risk",
    "bigbench/snarks",
    "bigbench/goal_step_wikihow",
    "bigbench/cifar10_classification",
    "bigbench/analogical_similarity",
    "bigbench/international_phonetic_alphabet_nli",
    "bigbench/epistemic_reasoning",
    "bigbench/logical_deduction",
    "bigbench/mnist_ascii",
    "bigbench/emojis_emotion_prediction",
    "bigbench/movie_recommendation",
    "bigbench/real_or_fake_text",
    "bigbench/fantasy_reasoning",
    "bigbench/ruin_names",
    "bigbench/general_knowledge",
    "bigbench/reasoning_about_colored_objects",
    "bigbench/key_value_maps",
    "bigbench/logical_sequence",
    "bigbench/color",
    "bigbench/strategyqa",
    "bigbench/irony_identification",
    "bigbench/empirical_judgments",
    "bigbench/strange_stories",
    "bigbench/metaphor_understanding",
    "cos_e/v1.0",
    "cosmos_qa",
    "dream",
    "openbookqa",
    "qasc",
    "quartz",
    "quail",
    "head_qa/en",
    "sciq",
    "social_i_qa",
    "wiki_hop/original",
    "wiqa",
    "piqa",
    "hellaswag",
    "super_glue/copa",
    "balanced-copa",
    "e-CARE",
    "art",
    "winogrande/winogrande_xl",
    "codah/codah",
    "ai2_arc/ARC-Challenge/challenge",
    "ai2_arc/ARC-Easy/challenge",
    "definite_pronoun_resolution",
    "swag/regular",
    "math_qa",
    "glue/cola",
    "glue/sst2",
    "utilitarianism",
    "amazon_counterfactual/en",
    "insincere-questions",
    "toxic_conversations",
    "TuringBench",
    "trec",
    "vitaminc/tals--vitaminc",
    "hope_edi/english",
    "rumoureval_2019/RumourEval2019",
    "ethos/binary",
    "ethos/multilabel",
    "tweet_eval/emotion",
    "tweet_eval/irony",
    "tweet_eval/offensive",
    "tweet_eval/sentiment",
    "tweet_eval/stance_abortion",
    "tweet_eval/stance_atheism",
    "tweet_eval/stance_climate",
    "tweet_eval/stance_feminist",
    "tweet_eval/stance_hillary",
    "tweet_eval/emoji",
    "tweet_eval/hate",
    "discovery/discovery",
    "pragmeval/squinky-informativeness",
    "pragmeval/squinky-implicature",
    "pragmeval/verifiability",
    "pragmeval/squinky-formality",
    "pragmeval/emobank-valence",
    "pragmeval/emobank-dominance",
    "pragmeval/emobank-arousal",
    "pragmeval/switchboard",
    "pragmeval/mrda",
    "pragmeval/sarcasm",
    "pragmeval/persuasiveness-premisetype",
    "pragmeval/persuasiveness-eloquence",
    "pragmeval/persuasiveness-claimtype",
    "pragmeval/persuasiveness-specificity",
    "pragmeval/gum",
    "pragmeval/emergent",
    "pragmeval/persuasiveness-strength",
    "pragmeval/stac",
    "pragmeval/pdtb",
    "pragmeval/persuasiveness-relevance",
    "silicone/meld_s",
    "silicone/sem",
    "silicone/oasis",
    "silicone/meld_e",
    "silicone/maptask",
    "silicone/iemocap",
    "silicone/dyda_e",
    "silicone/dyda_da",
    "lex_glue/eurlex",
    "lex_glue/scotus",
    "lex_glue/ledgar",
    "lex_glue/unfair_tos",
    "lex_glue/case_hold",
    "language-identification",
    "imdb",
    "rotten_tomatoes",
    "ag_news",
    "yelp_review_full/yelp_review_full",
    "financial_phrasebank/sentences_allagree",
    "poem_sentiment",
    "dbpedia_14/dbpedia_14",
    "amazon_polarity/amazon_polarity",
    "app_reviews",
    "hate_speech18",
    "sms_spam",
    "humicroedit/subtask-1",
    "humicroedit/subtask-2",
    "snips_built_in_intents",
    "banking77",
    "hate_speech_offensive",
    "yahoo_answers_topics",
    "stackoverflow-questions",
    "hyperpartisan_news",
    "sciie",
    "citation_intent",
    "go_emotions/simplified",
    "scicite",
    "liar",
    "lexical_relation_classification/K&H+N",
    "lexical_relation_classification/CogALexV",
    "lexical_relation_classification/BLESS",
    "lexical_relation_classification/EVALution",
    "lexical_relation_classification/ROOT09",
    "linguisticprobing/subj_number",
    "linguisticprobing/bigram_shift",
    "linguisticprobing/top_constituents",
    "linguisticprobing/odd_man_out",
    "linguisticprobing/past_present",
    "linguisticprobing/coordination_inversion",
    "linguisticprobing/tree_depth",
    "linguisticprobing/obj_number",
    "linguisticprobing/sentence_length",
    "crowdflower/sentiment_nuclear_power",
    "crowdflower/tweet_global_warming",
    "crowdflower/corporate-messaging",
    "crowdflower/economic-news",
    "crowdflower/airline-sentiment",
    "crowdflower/political-media-bias",
    "crowdflower/text_emotion",
    "crowdflower/political-media-audience",
    "crowdflower/political-media-message",
    "ethics/commonsense",
    "ethics/deontology",
    "ethics/justice",
    "ethics/virtue",
    "emo/emo2019",
    "google_wellformed_query",
    "tweets_hate_speech_detection",
    "has_part",
    "wnut_17/wnut_17",
    "ncbi_disease/ncbi_disease",
    "acronym_identification",
    "jnlpba/jnlpba",
    "ontonotes_english/SpeedOfMagic--ontonotes_english",
    "blog_authorship_corpus/gender",
    "blog_authorship_corpus/age",
    "blog_authorship_corpus/horoscope",
    "blog_authorship_corpus/job",
    "open_question_type",
    "health_fact",
    "commonsense_qa",
    "mc_taco",
    "ade_corpus_v2/Ade_corpus_v2_classification",
    "discosense",
    "circa",
    "EffectiveFeedbackStudentWriting",
    "promptSentiment",
    "promptNLI",
    "promptSpoke",
    "promptProficiency",
    "promptGrammar",
    "promptCoherence",
    "phrase_similarity",
    "scientific-exaggeration-detection",
    "quarel",
    "fever-evidence-related/mwong--fever-related",
    "numer_sense",
    "dynasent/dynabench.dynasent.r1.all/r1",
    "dynasent/dynabench.dynasent.r2.all/r2",
    "Sarcasm_News_Headline",
    "sem_eval_2010_task_8",
    "auditor_review/demo-org--auditor_review",
    "medmcqa",
    "aqua_rat/tokenized",
    "Dynasent_Disagreement",
    "Politeness_Disagreement",
    "SBIC_Disagreement",
    "SChem_Disagreement",
    "Dilemmas_Disagreement",
    "logiqa",
    "wiki_qa",
    "cycic_classification",
    "cycic_multiplechoice",
    "sts-companion",
    "commonsense_qa_2.0",
    "lingnli",
    "monotonicity-entailment",
    "arct",
    "scinli",
    "naturallogic",
    "onestop_qa",
    "moral_stories/full",
    "prost",
    "dynahate",
    "syntactic-augmentation-nli",
    "autotnli",
    "CONDAQA",
    "webgpt_comparisons",
    "synthetic-instruct-gptj-pairwise",
    "scruples",
    "wouldyourather",
    "attempto-nli",
    "defeasible-nli/atomic",
    "defeasible-nli/snli",
    "nli-veridicality-transitivity",
    "natural-language-satisfiability",
    "lonli",
    "dadc-limit-nli",
    "FLUTE",
    "strategy-qa",
    "summarize_from_feedback/comparisons",
    "folio",
    "tomi-nli",
    "avicenna",
    "SHP",
    "MedQA-USMLE-4-options-hf",
    "wikimedqa/medwiki",
    "cicero",
    "CREAK",
    "mutual",
    "NeQA",
    "quote-repetition",
    "redefine-math",
    "puzzte",
    "implicatures",
    "race/high",
    "race/middle",
    "race-c",
    "spartqa-yn",
    "spartqa-mchoice",
    "temporal-nli",
    "riddle_sense",
    "clcd-english",
    "twentyquestions",
    "reclor",
    "counterfactually-augmented-imdb",
    "counterfactually-augmented-snli",
    "cnli",
    "boolq-natural-perturbations",
    "acceptability-prediction",
    "equate",
    "ScienceQA_text_only",
    "ekar_english",
    "implicit-hate-stg1",
    "chaos-mnli-ambiguity",
    "headline_cause/en_simple",
    "logiqa-2.0-nli",
    "oasst1_dense_flat/quality",
    "oasst1_dense_flat/toxicity",
    "oasst1_dense_flat/helpfulness",
    "PARARULE-Plus",
    "mindgames",
    "universal_dependencies/en_partut/deprel",
    "universal_dependencies/en_lines/deprel",
    "universal_dependencies/en_gumreddit/deprel",
    "universal_dependencies/en_esl/deprel",
    "universal_dependencies/en_ewt/deprel",
    "universal_dependencies/en_gum/deprel",
    "ambient",
    "path-naturalness-prediction",
    "civil_comments/toxicity",
    "civil_comments/severe_toxicity",
    "civil_comments/obscene",
    "civil_comments/threat",
    "civil_comments/insult",
    "civil_comments/identity_attack",
    "civil_comments/sexual_explicit",
    "cloth",
    "dgen",
    "oasst1_pairwise_rlhf_reward",
    "babi_nli",
    "gen_debiased_nli",
    "imppres/presupposition",
    "/prag",
    "blimp-2",
    "mmlu-4"
  ],
  "torch_dtype": "float32",
  "transformers_version": "4.26.1",
  "type_vocab_size": 0,
  "vocab_size": 128100
}