from dataclasses import dataclass
from enum import Enum


@dataclass
class Task:
    benchmark: str
    metric: str
    col_name: str
# Select your tasks here
# ---------------------------------------------------
class Tasks(Enum):
    # task_key in the json file, metric_key in the json file, name to display in the leaderboard
    CMMMU = Task("CMMMU", "acc", "CMMMU")
    CMMU = Task("CMMU", "acc", "CMMU")
    ChartQA = Task("ChartQA", "acc", "ChartQA")
    MMMU = Task("MMMU", "acc", "MMMU")
    OCRBench = Task("OCRBench", "acc", "OCRBench")
    MMMU_Pro_standard = Task("MMMU_Pro_standard", "acc", "MMMU_Pro_standard")
    MMMU_Pro_vision = Task("MMMU_Pro_vision", "acc", "MMMU_Pro_vision")
    MathVision = Task("MathVision", "acc", "MathVision")
    CII_Bench = Task("CII-Bench", "acc", "CII-Bench")
    Blink = Task("Blink", "acc", "Blink")
NUM_FEWSHOT = 0  # Change to match your few-shot setting
# ---------------------------------------------------
# Your leaderboard name
TITLE = """<h1 align="center" id="space-title">FlagEval-VLM Leaderboard</h1>"""
# What does your leaderboard evaluate?
INTRODUCTION_TEXT = """
Welcome to the FlagEval-VLM Leaderboard!

The FlagEval-VLM Leaderboard is designed to track, rank, and evaluate open visual large language models (VLMs). It is powered by the FlagEval platform, which provides the compute resources and runtime environment for all evaluations.
The leaderboard builds a dataset-based competency system: from the open-source datasets it integrates, we derive six competency dimensions, namely Mathematical, Visual, Chart, General, Textual, and Chinese, which together form the evaluation suite.
"""
# Which evaluations are you running? how can people reproduce what you have?
LLM_BENCHMARKS_TEXT = """
# The Goal of FlagEval-VLM Leaderboard
Thank you for taking part in the evaluation. Going forward, we will keep improving the FlagEval-VLM Leaderboard and keep the ecosystem open. Developers are welcome to join the discussion on evaluation methods, tools, and datasets, so that together we can build a more scientific and fair leaderboard.
# Context
The FlagEval-VLM Leaderboard ranks visual large language models. We hope to foster a more open ecosystem in which VLM developers can participate and contribute to the progress of visual large language models. To ensure fairness, all models are evaluated on the FlagEval platform with standardized GPUs and a unified runtime environment.
## How it works
We evaluate models on 9 key benchmarks using <a href="https://github.com/flageval-baai/FlagEvalMM" target="_blank">FlagEvalMM</a>, an open-source evaluation framework designed to comprehensively assess multimodal models. It provides a standardized way to evaluate models that work with multiple modalities (text, images, video) across various tasks and metrics.
- <a href="https://github.com/vis-nlp/ChartQA" target="_blank"> ChartQA </a> - a large-scale benchmark covering 9.6K manually written questions and 23.1K questions generated from manually written chart summaries.
- <a href="https://huggingface.co/datasets/BLINK-Benchmark/BLINK"> Blink </a> - a benchmark containing 14 visual perception tasks that can be solved by humans “within a blink”.
- <a href="https://github.com/flageval-baai/CMMU"> CMMU </a> - a benchmark for Chinese multi-modal multi-type question understanding and reasoning
- <a href="https://cmmmu-benchmark.github.io/"> CMMMU </a> - a new Chinese Massive Multi-discipline Multimodal Understanding benchmark designed to evaluate LMMs on tasks demanding college-level subject knowledge and deliberate reasoning in a Chinese context.
- <a href="https://mmmu-benchmark.github.io/"> MMMU </a> - a massive multi-discipline multimodal understanding and reasoning benchmark for expert AGI.
- <a href="https://huggingface.co/datasets/MMMU/MMMU_Pro"> MMMU_Pro(standard & vision) </a> - a more robust multi-discipline multimodal understanding benchmark.
- <a href="https://github.com/Yuliang-Liu/MultimodalOCR"> OCRBench </a> - a comprehensive evaluation benchmark designed to assess the OCR capabilities of Large Multimodal Models.
- <a href="https://mathvision-cuhk.github.io/"> MathVision </a> - a meticulously curated collection of 3,040 high-quality mathematical problems with visual contexts sourced from real math competitions.
- <a href="https://cii-bench.github.io/"> CII-Bench </a> -a new benchmark measuring the higher-order perceptual, reasoning and comprehension abilities of MLLMs when presented with complex Chinese implication images.
For all of these evaluations, a higher score is better.
Accuracy is used as the evaluation metric, computed primarily following the methodology described in each benchmark's original paper; a rough illustration of how per-benchmark accuracies can be aggregated is sketched below.
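
As a rough illustration only (not the leaderboard's exact scoring code, which follows each paper and may group or weight tasks differently), aggregating per-benchmark accuracies into a single average could look like this:

```python
# Illustrative sketch: unweighted mean of per-benchmark accuracies (0-100 scale).
# The benchmark names and values below are made-up placeholders.
def overall_score(per_benchmark_acc: dict) -> float:
    values = list(per_benchmark_acc.values())
    return sum(values) / len(values) if values else 0.0

print(overall_score(dict(MMMU=52.3, OCRBench=61.0, ChartQA=70.2)))
```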
## Details and logs
You can find:
- detailed numerical results in the results Hugging Face dataset: https://huggingface.co/datasets/open-cn-llm-leaderboard/vlm_results
- community queries and running status in the requests Hugging Face dataset: https://huggingface.co/datasets/open-cn-llm-leaderboard/vlm_requests
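
If you want to inspect the raw result files programmatically, here is a minimal sketch using huggingface_hub (assuming the datasets above remain publicly readable; the file layout inside them may change):

```python
# Minimal sketch: download a local copy of the results dataset and inspect it.
from huggingface_hub import snapshot_download

local_dir = snapshot_download(
    repo_id="open-cn-llm-leaderboard/vlm_results",
    repo_type="dataset",
)
print(local_dir)  # browse the downloaded result files from here
```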
## Reproducibility
An example of evaluating llava with vLLM as the backend:

```bash
flagevalmm --tasks tasks/mmmu/mmmu_val.py \
    --exec model_zoo/vlm/api_model/model_adapter.py \
    --model llava-hf/llava-onevision-qwen2-7b-ov-chat-hf \
    --num-workers 8 \
    --output-dir ./results/llava-onevision-qwen2-7b-ov-chat-hf \
    --backend vllm \
    --extra-args "--limit-mm-per-prompt image=10 --max-model-len 32768"
```
## Icons
- 🟢 : pretrained model: new, base models, trained on a given corpus
- 🔶 : fine-tuned model: pretrained models fine-tuned on more, often domain-specific, data
- 💬 : chat model (RLHF, DPO, IFT, ...): chat-like fine-tunes, either using IFT (datasets of task instructions), RLHF, or DPO (slightly changing the model loss with an added policy), etc.
- 🤝 : merged model: merges or MoErges, models which have been merged or fused without additional fine-tuning.

If there is no icon, we have not yet uploaded the model's information; feel free to open an issue with the model details!
"Flagged" indicates that this model has been flagged by the community, and should probably be ignored! Clicking the link will redirect you to the discussion about the model.
## Useful links
- [Community resources](https://huggingface.co/spaces/BAAI/open_flageval_vlm_leaderboard/discussions)
- [FlagEvalMM](https://github.com/flageval-baai/FlagEvalMM)
"""
EVALUATION_QUEUE_TEXT = """
## Evaluation Queue for the FlagEval VLM Leaderboard
Models added here will be automatically evaluated on the FlagEval cluster.
Currently, we provide two methods for model evaluation: API calls and private deployment. If you choose to evaluate via API, you need to provide the model's endpoint, its name, and the corresponding API key.
### 1) Make sure you can load your model and tokenizer using AutoClasses:
```python
from transformers import AutoConfig, AutoModel, AutoTokenizer

revision = "main"  # branch, tag, or commit hash of your model repo
config = AutoConfig.from_pretrained("your model name", revision=revision)
model = AutoModel.from_pretrained("your model name", revision=revision)
tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
```
If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.
Note: make sure your model is public!
Note: if your model needs `trust_remote_code=True`, we do not support this option yet, but we are working on adding it, so stay posted!
### 2) Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index)
It's a new format for storing weights which is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`!
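
A minimal sketch of the conversion (recent versions of transformers already save safetensors by default; "your model name" and the output directory are placeholders):

```python
# Minimal sketch: re-save an existing checkpoint in safetensors format.
from transformers import AutoModel

model = AutoModel.from_pretrained("your model name")
model.save_pretrained("./model-safetensors", safe_serialization=True)
```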
### 3) Make sure your model has an open license!
This is a leaderboard for Open LLMs, and we'd love for as many people as possible to know they can use your model 🤗
### 4) Fill up your model card
When we add extra information about models to the leaderboard, it is automatically taken from the model card.
## In case of model failure
If your model is displayed in the `FAILED` category, its execution stopped.
Make sure you have followed the above steps first.
If everything is done, check that you can launch FlagEvalMM on your model locally, using the example command from the reproducibility section without modifications.
"""
CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
CITATION_BUTTON_TEXT = r"""
"""