Commit 04dab39
Parent(s): 180ffb5

Add dependencies for Jupyter support and enhance leaderboard data processing

Files changed:
- pyproject.toml +2 -0
- src/about.py +24 -18
- src/populate.py +3 -1
pyproject.toml CHANGED
@@ -12,6 +12,8 @@ dependencies = [
     "gradio-leaderboard==0.0.13",
     "gradio[oauth]>=5.35.0",
     "huggingface-hub>=0.18.0",
+    "ipykernel>=6.29.5",
+    "ipywidgets>=8.1.7",
     "matplotlib>=3.10.3",
     "numpy>=2.3.1",
     "pandas>=2.3.0",
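
The two added entries bring in Jupyter kernel and widget support for notebook-based exploration. A minimal smoke test, assuming the environment has been re-synced (e.g. with `uv sync` or `pip install -e .`), might look like the sketch below; it is illustrative and not part of the commit:

```python
# Hypothetical check (not in the commit): confirm the new Jupyter
# dependencies import cleanly and report their installed versions.
import ipykernel
import ipywidgets

print("ipykernel:", ipykernel.__version__)    # pinned >= 6.29.5
print("ipywidgets:", ipywidgets.__version__)  # pinned >= 8.1.7
```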
src/about.py CHANGED
@@ -8,12 +8,13 @@ class Task:
     col_name: str
 
 
-# Select your tasks here
+# Tunisian Dialect Tasks
 # ---------------------------------------------------
 class Tasks(Enum):
-    # task_key in the json file, metric_key in the json file, name to display in the leaderboard
-    task0 = Task("anli_r1", "acc", "ANLI")
-    task1 = Task("logiqa", "acc_norm", "LogiQA")
+    # Example: Sentiment Analysis on TSAC
+    tsac_sentiment = Task("fbougares/tsac", "accuracy", "TSAC Sentiment")
+    # Example: Text Classification or Corpus Coverage on Tunisian Dialect Corpus
+    tunisian_corpus = Task("arbml/Tunisian_Dialect_Corpus", "coverage", "Tunisian Corpus Coverage")
 
 NUM_FEWSHOT = 0 # Change with your few shot
 # ---------------------------------------------------
@@ -21,26 +22,39 @@ NUM_FEWSHOT = 0 # Change with your few shot
 
 
 # Your leaderboard name
-TITLE = """<h1 align="center" id="space-title">Demo leaderboard</h1>"""
+TITLE = """<h1 align="center" id="space-title">Tunisian Dialect Leaderboard</h1>"""
 
 # What does your leaderboard evaluate?
 INTRODUCTION_TEXT = """
-Intro text
+This leaderboard evaluates models and datasets focused on the Tunisian dialect of Arabic.\
+It highlights performance on key resources such as TSAC (fbougares/tsac) and the Tunisian Dialect Corpus (arbml/Tunisian_Dialect_Corpus).
 """
 
 # Which evaluations are you running? how can people reproduce what you have?
 LLM_BENCHMARKS_TEXT = f"""
 ## How it works
 
+We evaluate models on:
+- **TSAC** ([fbougares/tsac](https://huggingface.co/datasets/fbougares/tsac)): Sentiment analysis in Tunisian dialect.
+- **Tunisian Dialect Corpus** ([arbml/Tunisian_Dialect_Corpus](https://huggingface.co/datasets/arbml/Tunisian_Dialect_Corpus)): Coverage and language understanding.
+
 ## Reproducibility
-To reproduce our results, here is the commands you can run:
+To reproduce our results, use the following commands (replace with your model):
 
+```python
+from transformers import AutoConfig, AutoModel, AutoTokenizer
+config = AutoConfig.from_pretrained("your model name", revision=revision)
+model = AutoModel.from_pretrained("your model name", revision=revision)
+tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
+```
 """
 
 EVALUATION_QUEUE_TEXT = """
 ## Some good practices before submitting a model
 
-### 1) Make sure you can load your model and tokenizer using AutoClasses:
+### 1) Make sure your model is trained or evaluated on Tunisian dialect data (e.g., TSAC, Tunisian Dialect Corpus).
+
+### 2) Make sure you can load your model and tokenizer using AutoClasses:
 ```python
 from transformers import AutoConfig, AutoModel, AutoTokenizer
 config = AutoConfig.from_pretrained("your model name", revision=revision)
@@ -52,19 +66,11 @@ If this step fails, follow the error messages to debug your model before submitting
 Note: make sure your model is public!
 Note: if your model needs `use_remote_code=True`, we do not support this option yet but we are working on adding it, stay posted!
 
-### 2) Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index)
+### 3) Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index)
 It's a new format for storing weights which is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`!
 
-### 3) Make sure your model has an open license!
+### 4) Make sure your model has an open license!
 This is a leaderboard for Open LLMs, and we'd love for as many people as possible to know they can use your model 🤗
-
-### 4) Fill up your model card
-When we add extra information about models to the leaderboard, it will be automatically taken from the model card
-
-## In case of model failure
-If your model is displayed in the `FAILED` category, its execution stopped.
-Make sure you have followed the above steps first.
-If everything is done, check you can launch the EleutherAIHarness on your model locally, using the above command without modifications (you can add `--limit` to limit the number of examples per task).
 """
 
 CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
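
For context, the new Tasks members follow the Task dataclass defined just above the first hunk. Below is a self-contained sketch of how such entries are typically consumed when building leaderboard columns; the field names `benchmark` and `metric` are assumptions based on the standard demo-leaderboard template (only `col_name` is visible in this diff):

```python
# Illustrative sketch, not part of the commit. Field names `benchmark`
# and `metric` are assumed from the standard leaderboard template.
from dataclasses import dataclass
from enum import Enum

@dataclass
class Task:
    benchmark: str  # dataset id, e.g. "fbougares/tsac"
    metric: str     # metric key in the results JSON, e.g. "accuracy"
    col_name: str   # column label shown in the leaderboard UI

class Tasks(Enum):
    tsac_sentiment = Task("fbougares/tsac", "accuracy", "TSAC Sentiment")
    tunisian_corpus = Task("arbml/Tunisian_Dialect_Corpus", "coverage", "Tunisian Corpus Coverage")

# Derive the benchmark columns the leaderboard expects for each model.
benchmark_cols = [task.value.col_name for task in Tasks]
print(benchmark_cols)  # ['TSAC Sentiment', 'Tunisian Corpus Coverage']
```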
src/populate.py CHANGED
@@ -14,7 +14,9 @@ def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list) -> pd.DataFrame:
     all_data_json = [v.to_dict() for v in raw_data]
 
     df = pd.DataFrame.from_records(all_data_json)
-    df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
+    print("Columns:", df.columns.tolist())
+
+    df = df.sort_values(by=[AutoEvalColumn().average.name], ascending=False)
     df = df[cols].round(decimals=2)
 
     # filter out if any of the benchmarks have not been produced