Ali-C137 commited on
Commit
c5df7b5
1 Parent(s): 08824fe

Update src/about.py

Browse files
Files changed (1) hide show
  1. src/about.py +11 -2
src/about.py CHANGED
@@ -42,8 +42,13 @@ TITLE = """<h1 align="center" id="space-title">Open Arabic LLM Leaderboard</h1>"
42
  INTRODUCTION_TEXT = """
43
  🚀 The Open Arabic LLM Leaderboard: Objectively evaluates and compares the performance of Arabic Large Language Models (LLMs).
44
 
45
- When you submit a model on the "Submit here!" page, it is automatically evaluated on a set of benchmarks. The GPU used for evaluation is operated with the support of __[Technology Innovation Institute (TII)](https://www.tii.ae/)__.
 
 
 
 
46
  The datasets used for evaluation consist of Arabic-native datasets like the `AlGhafa` benchmark from [TII](https://www.tii.ae/) and the `ACVA` benchmark from [FreedomIntelligence](https://huggingface.co/FreedomIntelligence) to assess reasoning, language understanding, commonsense, and more.
 
47
  More details about the benchmarks and the evaluation process are provided on the “About” page.
48
  """
49
 
@@ -55,10 +60,14 @@ While outstanding LLM models are being released competitively, most of them are
55
  ## Icons & Model types
56
 
57
  🟢 : `pretrained` or `continuously pretrained`
 
58
  🔶 : `fine-tuned on domain-specific datasets`
 
59
  💬 : `chat models (RLHF, DPO, ORPO, ...)`
 
60
  🤝 : `base merges and moerges`
61
 
 
62
  If the icon is "?", it indicates that there is insufficient information about the model.
63
  Please provide information about the model through an issue! 🤩
64
 
@@ -177,7 +186,7 @@ CITATION_BUTTON_TEXT = r"""
177
  archivePrefix={arXiv},
178
  primaryClass={cs.CL}
179
  }
180
- @misc{datatrove,
181
  author = {Clémentine, Fourrier, and Nathan, Habib and Wolf, Thomas},
182
  title = {LightEval: A lightweight framework for LLM evaluation},
183
  year = {2024},
 
42
  INTRODUCTION_TEXT = """
43
  🚀 The Open Arabic LLM Leaderboard: Objectively evaluates and compares the performance of Arabic Large Language Models (LLMs).
44
 
45
+
46
+ When you submit a model on the "Submit here!" page, it is automatically evaluated on a set of benchmarks.
47
+
48
+ The GPU used for evaluation is operated with the support of __[Technology Innovation Institute (TII)](https://www.tii.ae/)__.
49
+
50
  The datasets used for evaluation consist of Arabic-native datasets like the `AlGhafa` benchmark from [TII](https://www.tii.ae/) and the `ACVA` benchmark from [FreedomIntelligence](https://huggingface.co/FreedomIntelligence) to assess reasoning, language understanding, commonsense, and more.
51
+
52
  More details about the benchmarks and the evaluation process are provided on the “About” page.
53
  """
54
 
 
60
  ## Icons & Model types
61
 
62
  🟢 : `pretrained` or `continuously pretrained`
63
+
64
  🔶 : `fine-tuned on domain-specific datasets`
65
+
66
  💬 : `chat models (RLHF, DPO, ORPO, ...)`
67
+
68
  🤝 : `base merges and moerges`
69
 
70
+
71
  If the icon is "?", it indicates that there is insufficient information about the model.
72
  Please provide information about the model through an issue! 🤩
73
 
 
186
  archivePrefix={arXiv},
187
  primaryClass={cs.CL}
188
  }
189
+ @misc{lighteval,
190
  author = {Clémentine, Fourrier, and Nathan, Habib and Wolf, Thomas},
191
  title = {LightEval: A lightweight framework for LLM evaluation},
192
  year = {2024},