Commit 6bc4b4f · Update app.py
Parent: c20a739

app.py CHANGED
@@ -151,7 +151,7 @@ rl_envs = [
 
 def restart():
     print("RESTART")
-    api.restart_space(repo_id="
+    api.restart_space(repo_id="homunculus/Deep-Reinforcement-Learning-Leaderboard")
 
 def get_metadata(model_id):
     try:
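The functional change in this commit is the restart target: restart() now points api.restart_space at the homunculus/Deep-Reinforcement-Learning-Leaderboard Space (the previous repo id is truncated in the rendered diff and is kept as-is above). For context, a minimal sketch of how this call is typically wired up with huggingface_hub; the token handling is an assumption, not shown in the diff:

import os
from huggingface_hub import HfApi

# Token sourcing is an assumption; restart_space needs write access to the Space.
api = HfApi(token=os.environ.get("HF_TOKEN"))

def restart():
    print("RESTART")
    # Relaunch the Space runtime (no rebuild) so it picks up fresh leaderboard data.
    api.restart_space(repo_id="homunculus/Deep-Reinforcement-Learning-Leaderboard")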
@@ -233,40 +233,6 @@ def update_leaderboard_dataset_parallel(rl_env, path):
 
     return ranked_dataframe
 
-
-def update_leaderboard_dataset(rl_env, path):
-    # Get model ids associated with rl_env
-    model_ids = get_model_ids(rl_env)
-    data = []
-    for model_id in model_ids:
-        """
-        readme_path = hf_hub_download(model_id, filename="README.md")
-        meta = metadata_load(readme_path)
-        """
-        meta = get_metadata(model_id)
-        #LOADED_MODEL_METADATA[model_id] = meta if meta is not None else ''
-        if meta is None:
-            continue
-        user_id = model_id.split('/')[0]
-        row = {}
-        row["User"] = user_id
-        row["Model"] = model_id
-        accuracy = parse_metrics_accuracy(meta)
-        mean_reward, std_reward = parse_rewards(accuracy)
-        mean_reward = mean_reward if not pd.isna(mean_reward) else 0
-        std_reward = std_reward if not pd.isna(std_reward) else 0
-        row["Results"] = mean_reward - std_reward
-        row["Mean Reward"] = mean_reward
-        row["Std Reward"] = std_reward
-        data.append(row)
-
-    ranked_dataframe = rank_dataframe(pd.DataFrame.from_records(data))
-    new_history = ranked_dataframe
-    file_path = path + "/" + rl_env + ".csv"
-    new_history.to_csv(file_path, index=False)
-
-    return ranked_dataframe
-
 def download_leaderboard_dataset():
     path = snapshot_download(repo_id=DATASET_REPO_ID, repo_type="dataset")
     return path
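The deleted update_leaderboard_dataset is the sequential predecessor of update_leaderboard_dataset_parallel, whose body lies outside these hunks. A plausible sketch of the parallel variant under that assumption: the same per-model work, fanned out over a thread pool (metadata fetches are network-bound, so threads help). It reuses the get_model_ids, get_metadata, parse_metrics_accuracy, parse_rewards, and rank_dataframe helpers referenced elsewhere in app.py; the worker count is a guess.

from concurrent.futures import ThreadPoolExecutor
import pandas as pd

def update_leaderboard_dataset_parallel(rl_env, path):
    # Same logic as the removed sequential version, but metadata is fetched concurrently.
    model_ids = get_model_ids(rl_env)

    def process_model(model_id):
        meta = get_metadata(model_id)
        if meta is None:
            return None
        accuracy = parse_metrics_accuracy(meta)
        mean_reward, std_reward = parse_rewards(accuracy)
        mean_reward = mean_reward if not pd.isna(mean_reward) else 0
        std_reward = std_reward if not pd.isna(std_reward) else 0
        return {
            "User": model_id.split("/")[0],
            "Model": model_id,
            "Results": mean_reward - std_reward,
            "Mean Reward": mean_reward,
            "Std Reward": std_reward,
        }

    with ThreadPoolExecutor(max_workers=8) as executor:  # worker count assumed
        rows = [r for r in executor.map(process_model, model_ids) if r is not None]

    ranked_dataframe = rank_dataframe(pd.DataFrame.from_records(rows))
    ranked_dataframe.to_csv(path + "/" + rl_env + ".csv", index=False)
    return ranked_dataframe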
@@ -305,19 +271,6 @@ def rank_dataframe(dataframe):
     dataframe['Ranking'] = [i for i in range(1,len(dataframe)+1)]
     return dataframe
 
-
-def run_update_dataset():
-    path_ = download_leaderboard_dataset()
-    for i in range(0, len(rl_envs)):
-        rl_env = rl_envs[i]
-        update_leaderboard_dataset_parallel(rl_env["rl_env"], path_)
-
-    api.upload_folder(
-        folder_path=path_,
-        repo_id="huggingface-projects/drlc-leaderboard-data",
-        repo_type="dataset",
-        commit_message="Update dataset")
-
 def filter_data(rl_env, path, user_id):
     data_df = get_data_no_html(rl_env, path)
     models = []
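With run_update_dataset removed, nothing in these hunks triggers the dataset refresh or the upload_folder push anymore; leaderboard Spaces commonly drive such jobs from a background scheduler instead. A hypothetical sketch of that pattern with APScheduler — the interval and the choice to schedule restart() are assumptions, not part of this commit:

# Hypothetical scheduling pattern, not shown in this diff.
from apscheduler.schedulers.background import BackgroundScheduler

scheduler = BackgroundScheduler()
# restart() is defined in app.py above; periodically relaunching the Space
# also makes it re-download the freshly updated leaderboard dataset.
scheduler.add_job(restart, "interval", seconds=10800)  # every 3 hours (assumed)
scheduler.start()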