import pandas as pd
from pathlib import Path
from datasets import load_dataset
import numpy as np
import os
import re

UNVERIFIED_MODELS = [
    "nvidia/Nemotron-4-340B-Reward",
    "nvidia/Llama3-70B-SteerLM-RM",
    "Cohere May 2024",
    "google/gemini-1.5-pro-0514",
    "google/flame-24b-july-2024",
    "Cohere March 2024",
    "facebook/Self-taught-Llama-3-70B",
    "facebook/Self-taught-evaluator-llama3.1-70B",
    "google/flame-1.0-24B-july-2024",
    "Salesforce/SFR-LLaMa-3.1-70B-Judge-r",
    "Salesforce/SFR-nemo-12B-Judge-r",
    "Salesforce/SFR-LLaMa-3.1-8B-Judge-r",
    "SF-Foundation/TextEval-OffsetBias-12B",
    "SF-Foundation/TextEval-Llama3.1-70B",
    "nvidia/Llama-3.1-Nemotron-70B-Reward",
]

CONTAMINATED_MODELS = [
    "Skywork/Skywork-Reward-Gemma-2-27B",
    "Skywork/Skywork-Critic-Llama-3.1-70B",
    "LxzGordon/URM-LLaMa-3.1-8B",
    "Skywork/Skywork-Reward-Llama-3.1-8B",
    "Ray2333/GRM-Llama3-8B-rewardmodel-ft",
    "nicolinho/QRM-Llama3.1-8B",
    "nicolinho/QRM-Llama3-8B",
    "general-preference/GPM-Llama-3.1-8B",
    "SF-Foundation/TextEval-Llama3.1-70B",
    "ZiyiYe/Con-J-Qwen2-7B",
    "Ray2333/Gemma-2B-rewardmodel-ft",
    "Ray2333/GRM-Gemma-2B-rewardmodel-ft"
]

# From Open LLM Leaderboard
def model_hyperlink(link, model_name):
    # if model_name is longer than 50 characters, truncate it to the first 47 characters plus "..."
    if len(model_name) > 50:
        model_name = model_name[:47] + "..."
    if model_name == "random":
        output = "random"
    elif model_name == "Cohere March 2024":
        output = f'<a target="_blank" href="https://huggingface.co/Cohere" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'
    elif "openai" == model_name.split("/")[0]:
        output = f'<a target="_blank" href="https://huggingface.co/openai" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'
    elif "Anthropic" == model_name.split("/")[0]:
        output = f'<a target="_blank" href="https://huggingface.co/Anthropic" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'
    elif "google" == model_name.split("/")[0]:
        output = f'<a target="_blank" href="https://huggingface.co/google" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'
    elif "PoLL" == model_name.split("/")[0]:
        output = model_name
    else:
        output = f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'

    if model_name in UNVERIFIED_MODELS:
        output += " *"
    if model_name in CONTAMINATED_MODELS:
        output += " ⚠️"
    return output
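
# Illustrative example (comment only, not executed by the app): for a Hugging Face model,
#   model_hyperlink("https://huggingface.co/org/model", "org/model")
# returns a dotted-underline <a> tag pointing at the given link, with " *" appended for
# models in UNVERIFIED_MODELS and " ⚠️" for models in CONTAMINATED_MODELS.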

def undo_hyperlink(html_string):
    # Regex pattern to match content inside > and <
    pattern = r'>[^<]+<'
    match = re.search(pattern, html_string)
    if match:
        # Extract the matched text and remove leading '>' and trailing '<'
        return match.group(0)[1:-1]
    else:
        return "No text found"


# Define a function to fetch and process data
def load_all_data(data_repo, subdir:str, subsubsets=False):    # use HF api to pull the git repo
    repo_dir = Path(data_repo)
    data_dir = repo_dir / subdir
    orgs = [d for d in os.listdir(data_dir) if os.path.isdir(os.path.join(data_dir, d))]
    # get all files within the sub folders orgs
    models_results = []
    for org in orgs:
        org_dir = data_dir / org
        files = [f for f in os.listdir(org_dir) if os.path.isfile(os.path.join(org_dir, f))]
        for file in files:
            if file.endswith(".json"):
                models_results.append(org + "/" + file)

    # create empty dataframe to add all data to
    df = pd.DataFrame()

    # load each json file in models_results one at a time, so files whose columns differ can still be concatenated
    for model in models_results:
        model_data = load_dataset("json", data_files=data_repo + subdir + "/" + model, split="train")
        df2 = pd.DataFrame(model_data)
        # add to df
        df = pd.concat([df2, df])


    # remove chat_template column
    df = df.drop(columns=["chat_template"])

    # sort columns alphabetically
    df = df.reindex(sorted(df.columns), axis=1)
    
    # move column "model" to the front
    cols = list(df.columns)
    cols.insert(0, cols.pop(cols.index('model')))
    df = df.loc[:, cols]

    # select all columns except "model"
    cols = df.columns.tolist()
    cols.remove("model")
    # if model_type is a column (pref tests may not have it)
    if "model_type" in cols:
        cols.remove("model_type")
    # remove ref_model if in columns
    if "ref_model" in cols:
        cols.remove("ref_model")
    # remove model_beaker from dataframe
    if "model_beaker" in cols:
        cols.remove("model_beaker")
        df = df.drop(columns=["model_beaker"])
    
    # remove column xstest (outdated data)
    # if xstest is a column
    if "xstest" in cols:
        df = df.drop(columns=["xstest"])
        cols.remove("xstest")

    if "ref_model" in df.columns:
        df = df.drop(columns=["ref_model"])

    # remove column anthropic and summarize_prompted (outdated data)
    if "anthropic" in cols:
        df = df.drop(columns=["anthropic"])
        cols.remove("anthropic")
    if "summarize_prompted" in cols:
        df = df.drop(columns=["summarize_prompted"])
        cols.remove("summarize_prompted")
    # remove pku_better and pku_safer (removed from the leaderboard)
    if "pku_better" in cols:
        df = df.drop(columns=["pku_better"])
        cols.remove("pku_better")
    if "pku_safer" in cols:
        df = df.drop(columns=["pku_safer"])
        cols.remove("pku_safer")

    # convert to score 
    df[cols] = (df[cols]*100)
    avg = np.nanmean(df[cols].values,axis=1)
    # add average column
    df["average"] = avg
    
    # apply model_hyperlink function to column "model"
    df["model"] = df["model"].apply(lambda x: model_hyperlink(f"https://huggingface.co/{x}", x))

    # move average column to the second
    cols = list(df.columns)
    cols.insert(1, cols.pop(cols.index('average')))
    df = df.loc[:, cols]

    # move model_type column to first
    if "model_type" in cols:
        cols = list(df.columns)
        cols.insert(1, cols.pop(cols.index('model_type')))
        df = df.loc[:, cols]

    # remove models with DPO Ref. Free as type (future work); pref tests may not have a model_type column
    if "model_type" in df.columns:
        df = df[~df["model_type"].str.contains("DPO Ref. Free", na=False)]

    return df
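

if __name__ == "__main__":
    # Minimal usage sketch, not part of the leaderboard app itself: the path and
    # subdir below are hypothetical and assume the results repo has already been
    # downloaded locally, with one folder per org containing JSON result files.
    example_df = load_all_data("./results/", subdir="eval-set")
    print(example_df.head())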