asahi417 committed
Commit: fe122c3 · 1 parent: ad8a1cd

model update

Files changed (4):
  1. README.md +17 -17
  2. config.json +1 -1
  3. pytorch_model.bin +2 -2
  4. tokenizer_config.json +1 -1
README.md CHANGED
@@ -31,25 +31,25 @@ model-index:
   metrics:
   - name: BLEU4
     type: bleu4
-    value: 33.3157820569588
+    value: 33.32
   - name: ROUGE-L
     type: rouge-l
-    value: 62.23954899626488
+    value: 62.24
   - name: METEOR
     type: meteor
-    value: 35.426819977024685
+    value: 35.43
   - name: BERTScore
     type: bertscore
-    value: 94.58259980280204
+    value: 94.58
   - name: MoverScore
     type: moverscore
-    value: 80.06984647429996
+    value: 80.07
   - name: AnswerF1Score (Question Answering)
     type: answer_f1_score_question_answering
-    value: 69.3966081647612
+    value: 69.4
   - name: AnswerExactMatch (Question Answering)
     type: answer_exact_match_question_answering
-    value: 52.28595178719867
+    value: 52.29
 ---

 # Model Card of `lmqg/t5-base-tweetqa-question-answering`
@@ -95,16 +95,16 @@ output = pipe('question: What is a person called is practicing heresy?, context:

 |                  |   Score | Type    | Dataset                                                            |
 |:-----------------|--------:|:--------|:-------------------------------------------------------------------|
-| AnswerExactMatch |  52.286 | default | [lmqg/qg_tweetqa](https://huggingface.co/datasets/lmqg/qg_tweetqa) |
-| AnswerF1Score    | 69.3966 | default | [lmqg/qg_tweetqa](https://huggingface.co/datasets/lmqg/qg_tweetqa) |
-| BERTScore        | 94.5826 | default | [lmqg/qg_tweetqa](https://huggingface.co/datasets/lmqg/qg_tweetqa) |
-| Bleu_1           | 57.0705 | default | [lmqg/qg_tweetqa](https://huggingface.co/datasets/lmqg/qg_tweetqa) |
-| Bleu_2           | 48.1677 | default | [lmqg/qg_tweetqa](https://huggingface.co/datasets/lmqg/qg_tweetqa) |
-| Bleu_3           | 39.7816 | default | [lmqg/qg_tweetqa](https://huggingface.co/datasets/lmqg/qg_tweetqa) |
-| Bleu_4           | 33.3158 | default | [lmqg/qg_tweetqa](https://huggingface.co/datasets/lmqg/qg_tweetqa) |
-| METEOR           | 35.4268 | default | [lmqg/qg_tweetqa](https://huggingface.co/datasets/lmqg/qg_tweetqa) |
-| MoverScore       | 80.0698 | default | [lmqg/qg_tweetqa](https://huggingface.co/datasets/lmqg/qg_tweetqa) |
-| ROUGE_L          | 62.2395 | default | [lmqg/qg_tweetqa](https://huggingface.co/datasets/lmqg/qg_tweetqa) |
+| AnswerExactMatch |   52.29 | default | [lmqg/qg_tweetqa](https://huggingface.co/datasets/lmqg/qg_tweetqa) |
+| AnswerF1Score    |    69.4 | default | [lmqg/qg_tweetqa](https://huggingface.co/datasets/lmqg/qg_tweetqa) |
+| BERTScore        |   94.58 | default | [lmqg/qg_tweetqa](https://huggingface.co/datasets/lmqg/qg_tweetqa) |
+| Bleu_1           |   57.07 | default | [lmqg/qg_tweetqa](https://huggingface.co/datasets/lmqg/qg_tweetqa) |
+| Bleu_2           |   48.17 | default | [lmqg/qg_tweetqa](https://huggingface.co/datasets/lmqg/qg_tweetqa) |
+| Bleu_3           |   39.78 | default | [lmqg/qg_tweetqa](https://huggingface.co/datasets/lmqg/qg_tweetqa) |
+| Bleu_4           |   33.32 | default | [lmqg/qg_tweetqa](https://huggingface.co/datasets/lmqg/qg_tweetqa) |
+| METEOR           |   35.43 | default | [lmqg/qg_tweetqa](https://huggingface.co/datasets/lmqg/qg_tweetqa) |
+| MoverScore       |   80.07 | default | [lmqg/qg_tweetqa](https://huggingface.co/datasets/lmqg/qg_tweetqa) |
+| ROUGE_L          |   62.24 | default | [lmqg/qg_tweetqa](https://huggingface.co/datasets/lmqg/qg_tweetqa) |

 - [raw metric file](https://huggingface.co/lmqg/t5-base-tweetqa-question-answering/raw/main/eval/metric.first.answer.paragraph_question.answer.lmqg_qg_tweetqa.default.json)

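The second README hunk begins inside the model card's usage snippet (`output = pipe('question: ..., context: ...')`), which shows the prompt format but is truncated in this view. Below is a minimal usage sketch, assuming the `transformers` text2text-generation pipeline that snippet implies; the context string is a hypothetical placeholder, since the original is cut off in the diff.

```python
# Minimal sketch of the usage implied by the README's truncated snippet.
# Assumptions: text2text-generation pipeline; the context string is a
# hypothetical placeholder (the original is cut off in the diff view).
from transformers import pipeline

pipe = pipeline("text2text-generation",
                model="lmqg/t5-base-tweetqa-question-answering")

question = "What is a person called is practicing heresy?"
context = "A person who practices heresy is called a heretic."  # placeholder
output = pipe(f"question: {question}, context: {context}")
print(output)  # e.g. [{'generated_text': '...'}]
```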
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "lmqg_output/t5-base-tweetqa-question-answering/best_model",
+  "_name_or_path": "lmqg_output/t5-base-tweetqa-question-answering/model_eszyci/epoch_9",
   "add_prefix": false,
   "architectures": [
     "T5ForConditionalGeneration"
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:507ff75c34f5ca9b2a3ab171d05d7c573afdc8dbecfb71a3d48280b1940579cf
-size 891614207
+oid sha256:3d9af18f30d344085a0c0eb5fce63dbe9354c80444d241743054f9bc72089d2f
+size 891617855
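The `pytorch_model.bin` entry is a Git LFS pointer, so the diff records only the new object's SHA-256 and byte size rather than the weights themselves. A small sketch of verifying a downloaded copy against the updated pointer; the local filename is an assumption.

```python
# Sketch: check a locally downloaded pytorch_model.bin against the new
# LFS pointer above. The local path is an assumption.
import hashlib
import os

path = "pytorch_model.bin"  # assumed download location
expected_oid = "3d9af18f30d344085a0c0eb5fce63dbe9354c80444d241743054f9bc72089d2f"
expected_size = 891617855

assert os.path.getsize(path) == expected_size, "size mismatch"

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)
assert sha.hexdigest() == expected_oid, "sha256 mismatch"
print("file matches the LFS pointer")
```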
tokenizer_config.json CHANGED
@@ -104,7 +104,7 @@
   "eos_token": "</s>",
   "extra_ids": 100,
   "model_max_length": 512,
-  "name_or_path": "lmqg_output/t5-base-tweetqa-question-answering/best_model",
+  "name_or_path": "lmqg_output/t5-base-tweetqa-question-answering/model_eszyci/epoch_9",
   "pad_token": "<pad>",
   "special_tokens_map_file": null,
   "tokenizer_class": "T5Tokenizer",