theblackcat102 committed · Commit 5cb5a28 (verified) · Parent: e88b70c

Update README.md

Files changed (1): README.md (+46 -44)
Arxiv: VisTW: Benchmarking Vision-Language Models for Traditional Chinese in Taiwan

This update adds o3-2025-04-16 and o4-mini-2025-04-16 to the leaderboard; they take the top two VisTW-MCQ positions, and the remaining entries shift down accordingly.

| Model | VisTW-MCQ Accuracy | VisTW-MCQ Rank | VisTW-Dialogue Score | VisTW-Dialogue Rank | Avg Rank |
| --- | ---: | ---: | ---: | ---: | ---: |
| o3-2025-04-16 | 0.7769 | 1 | 6.9878 | 2 | 1.5 |
| o4-mini-2025-04-16 | 0.7364 | 2 | 6.7802 | 3 | 2.5 |
| quasar-alpha (early version of gpt-4.1) | 0.6673 | 3 | 6.2733 | 10 | 6.5 |
| ~~gemini-2.0-pro-exp-02-05~~ | 0.6619 | 4 | 6.7237 | 4 | 4.0 |
| gemini-2.0-flash-001 | 0.6596 | 5 | 6.6451 | 6 | 5.5 |
| llama-4-maverick | 0.6529 | 6 | 4.8840 | 17 | 11.5 |
| gpt-4.1-2025-04-14 | 0.6503 | 7 | 6.5954 | 7 | 7.0 |
| optimus-alpha (early version of gpt-4.1) | 0.6434 | 8 | 6.6916 | 5 | 6.5 |
| gemini-2.5-pro-preview-03-25 | 0.6072 | 9 | 7.9725 | 1 | 5.0 |
| claude-3-5-sonnet-20241022 | 0.6019 | 10 | 5.9603 | 14 | 12.0 |
| gpt-4.1-mini-2025-04-14 | 0.5809 | 11 | 6.1344 | 11 | 11.0 |
| gpt-4o-2024-11-20 | 0.5755 | 12 | 6.1176 | 12 | 12.0 |
| qwen2.5-vl-72b-instruct | 0.5504 | 13 | 4.8656 | 18 | 15.5 |
| llama-4-scout | 0.5292 | 14 | 4.0943 | 26 | 20.0 |
| gemini-2.0-flash-lite-preview-02-05 | 0.4992 | 15 | 6.4159 | 9 | 12.0 |
| qwen2.5-vl-32b-instruct | 0.4935 | 16 | 5.5027 | 15 | 15.5 |
| gemma-3-12b-it | 0.4863 | 17 | 3.9403 | 27 | 22.0 |
| mistral-small-3.1-24b-instruct-2503 | 0.4590 | 18 | 4.3298 | 21 | 19.5 |
| gemini-1.5-pro | 0.4417 | 19 | 5.0504 | 16 | 17.5 |
| meta-llama-Llama-3.2-90B-Vision-Instruct-Turbo | 0.4119 | 20 | 3.4443 | 34 | 27.0 |
| qvq-72b-preview | 0.4094 | 21 | 3.6122 | 31 | 26.0 |
| gpt-4o-mini-2024-07-18 | 0.4091 | 22 | 4.7405 | 19 | 20.5 |
| gpt-4o-2024-08-06 | 0.4000 | 23 | 5.9756 | 13 | 18.0 |
| gpt-4.1-nano-2025-04-14 | 0.3974 | 24 | 4.1634 | 25 | 24.5 |
| gemini-1.5-flash | 0.3943 | 25 | 4.2611 | 22 | 23.5 |
| gemini-2.0-flash-thinking-exp-1219 | 0.3764 | 26 | 6.5053 | 8 | 17.0 |
| Qwen-Qwen2.5-VL-7B-Instruct | 0.3592 | 27 | 4.5420 | 20 | 23.5 |
| OpenGVLab-InternVL2-8B-MPO | 0.3533 | 28 | 3.6778 | 30 | 29.0 |
| OpenGVLab-InternVL2_5-8B | 0.3447 | 29 | 3.9008 | 28 | 28.5 |
| OpenGVLab-InternVL2-8B | 0.3431 | 30 | 3.4504 | 33 | 31.5 |
| nova-lite-v1 | 0.3377 | 31 | 3.2626 | 35 | 33.0 |
| claude-3-haiku-20240307 | 0.3291 | 32 | 3.6992 | 29 | 30.5 |
| OpenGVLab-InternVL2_5-4B | 0.3291 | 33 | 3.6031 | 32 | 32.5 |
| gemini-1.5-flash-8b | 0.3280 | 34 | 4.1771 | 24 | 29.0 |
| meta-llama-Llama-3.2-11B-Vision-Instruct-Turbo | 0.3262 | 35 | 2.5786 | 40 | 37.5 |
| deepseek-ai-deepseek-vl2-small | 0.3181 | 36 | 0.5084 | 46 | 41.0 |
| llama3.2-ffm-11b-v-32k-chat | 0.3119 | 37 | 3.1150 | 37 | 37.0 |
| OpenGVLab-InternVL2-4B | 0.3081 | 38 | 2.3069 | 41 | 39.5 |
| Qwen-Qwen2-VL-7B-Instruct | 0.3004 | 39 | 4.2122 | 23 | 31.0 |
| MediaTek-Research-Llama-Breeze2-3B-Instruct | 0.2971 | 40 | 2.8992 | 39 | 39.5 |
| MediaTek-Research-Llama-Breeze2-8B-Instruct | 0.2915 | 41 | 3.1374 | 36 | 38.5 |
| OpenGVLab-InternVL2-2B | 0.2891 | 42 | 2.2198 | 42 | 42.0 |
| phi-4-multimodal-instruct | 0.2860 | 43 | 1.7863 | 45 | 44.0 |
| deepseek-ai-deepseek-vl2-tiny | 0.2781 | 44 | 2.0076 | 44 | 44.0 |
| THUDM-cogvlm2-llama3-chinese-chat-19B | 0.2777 | 45 | 2.9618 | 38 | 41.5 |
| OpenGVLab-InternVL2-1B | 0.2689 | 46 | 2.1298 | 43 | 44.5 |

*Models ordered by VisTW-MCQ Rank.*
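
The Avg Rank column is the arithmetic mean of a model's two per-task ranks (e.g., o3-2025-04-16: (1 + 2) / 2 = 1.5), and the table is sorted by VisTW-MCQ rank. Below is a minimal Python sketch of that bookkeeping on a three-model subset of the table; the `rank_by` helper and the data layout are illustrative assumptions, not the benchmark's actual scoring code.

```python
# Sketch: recompute leaderboard ranks from raw scores (illustrative only).
def rank_by(results, key, reverse=True):
    """Return {model: rank}, rank 1 for the best score under `key`."""
    ordered = sorted(results, key=key, reverse=reverse)
    return {row[0]: i + 1 for i, row in enumerate(ordered)}

# (model, VisTW-MCQ accuracy, VisTW-Dialogue score) -- subset of the table above.
results = [
    ("o3-2025-04-16", 0.7769, 6.9878),
    ("o4-mini-2025-04-16", 0.7364, 6.7802),
    ("gemini-2.5-pro-preview-03-25", 0.6072, 7.9725),
]

mcq_rank = rank_by(results, key=lambda r: r[1])       # higher accuracy is better
dialogue_rank = rank_by(results, key=lambda r: r[2])  # higher score is better

# Avg Rank = mean of the two per-task ranks; rows ordered by MCQ rank.
for model, *_ in sorted(results, key=lambda r: mcq_rank[r[0]]):
    avg = (mcq_rank[model] + dialogue_rank[model]) / 2
    print(f"{model}: MCQ rank {mcq_rank[model]}, "
          f"Dialogue rank {dialogue_rank[model]}, Avg {avg:.1f}")

# Expected output:
# o3-2025-04-16: MCQ rank 1, Dialogue rank 2, Avg 1.5
# o4-mini-2025-04-16: MCQ rank 2, Dialogue rank 3, Avg 2.5
# gemini-2.5-pro-preview-03-25: MCQ rank 3, Dialogue rank 1, Avg 2.0
```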