import requests
import json
import pandas as pd
from tqdm.auto import tqdm
import gradio as gr
#import streamlit as st
from huggingface_hub import HfApi, hf_hub_download
from huggingface_hub.repocard import metadata_load
#import streamlit.components.v1 as components

# Based on Omar Sanseviero's work
# Make the model id a clickable link
def make_clickable_model(model_name):
    link = "https://huggingface.co/" + model_name
    return f'<a target="_blank" href="{link}">{model_name}</a>'

# Make the user id a clickable link
def make_clickable_user(user_id):
    link = "https://huggingface.co/" + user_id
    return f'<a target="_blank" href="{link}">{user_id}</a>'
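
# List the model ids on the Hub that are tagged with the given RL environment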
def get_model_ids(rl_env):
    api = HfApi()
    models = api.list_models(filter=rl_env)
    model_ids = [x.modelId for x in models]
    return model_ids
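
# Download a model repo's README.md and load the YAML metadata of its model card;
# returns None when the repo has no README.md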
def get_metadata(model_id):
    try:
        readme_path = hf_hub_download(model_id, filename="README.md")
        return metadata_load(readme_path)
    except requests.exceptions.HTTPError:
        # 404: README.md not found
        return None
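
# Extract the first reported metric value from the model-index section of the metadata.
# In practice this is a string such as "200.50 +/- 25.10" (mean_reward +/- std_reward),
# which parse_rewards() splits below; the numbers here are only illustrative.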
def parse_metrics_accuracy(meta):
    if "model-index" not in meta:
        return None
    result = meta["model-index"][0]["results"]
    metrics = result[0]["metrics"]
    accuracy = metrics[0]["value"]
    print("ACCURACY", accuracy)
    return accuracy

# We keep the worst case: models are ranked by the lower bound mean_reward - std_reward
def parse_rewards(accuracy):
    if accuracy is not None:
        parsed = accuracy.split(' +/- ')
        mean_reward = float(parsed[0])
        std_reward = float(parsed[1])
    else:
        mean_reward = -1000
        std_reward = -1000
    return mean_reward, std_reward
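
# Example with illustrative values: parse_rewards("200.50 +/- 25.10") -> (200.5, 25.1),
# so the lower-bound score used for ranking is 200.5 - 25.1 = 175.4.

# Build one leaderboard row per model of the given environment:
# user, model, mean reward, std reward and the lower-bound score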
def get_data(rl_env):
    data = []
    model_ids = get_model_ids(rl_env)
    for model_id in tqdm(model_ids):
        meta = get_metadata(model_id)
        if meta is None:
            continue
        user_id = model_id.split('/')[0]
        row = {}
        row["User"] = user_id
        row["Model"] = model_id
        accuracy = parse_metrics_accuracy(meta)
        print("RETURNED ACCURACY", accuracy)
        mean_reward, std_reward = parse_rewards(accuracy)
        print("MEAN REWARD", mean_reward)
        row["Results"] = mean_reward - std_reward
        row["Mean Reward"] = mean_reward
        row["Std Reward"] = std_reward
        data.append(row)
    return pd.DataFrame.from_records(data)
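
# Render the leaderboard of one environment: sort the rows by the lower-bound score
# and turn them into an HTML table (or a placeholder message while the table is empty)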
def get_data_per_env(rl_env):
    dataframe = get_data(rl_env)
    dataframe = dataframe.fillna("")
    #import pdb; pdb.set_trace()
    if not dataframe.empty:
        # turn the model ids into clickable links
        dataframe["User"] = dataframe["User"].apply(make_clickable_user)
        dataframe["Model"] = dataframe["Model"].apply(make_clickable_model)
        dataframe = dataframe.sort_values(by=['Results'], ascending=False)
        table_html = dataframe.to_html(escape=False, index=False)
        table_html = table_html.replace("<th>", '<th align="left">')  # left-align the headers
        return table_html, dataframe, dataframe.empty
    else:
        html = """<div style="color: green">
<p> Please wait. Results will be out soon... </p>
</div>
"""
        return html, dataframe, dataframe.empty

RL_ENVS = ['CarRacing-v0', 'MountainCar-v0', 'LunarLander-v2']
RL_DETAILS = {
    'CarRacing-v0': {'title': "The Car Racing Leaderboard", 'data': get_data_per_env('CarRacing-v0')},
    'MountainCar-v0': {'title': "The Mountain Car Leaderboard", 'data': get_data_per_env('MountainCar-v0')},
    'LunarLander-v2': {'title': "The Lunar Lander Leaderboard", 'data': get_data_per_env('LunarLander-v2')},
}
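
# Gradio UI: one tab per environment, each with a short description and the leaderboard table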
block = gr.Blocks()
with block:
    with gr.Tabs():
        for rl_env in RL_ENVS:
            with gr.TabItem(rl_env):
                data_html, data_dataframe, is_empty = RL_DETAILS[rl_env]['data']
                markdown = """
# {name_leaderboard}

This is a leaderboard of **{len_dataframe}** agents playing {env_name}.

We use the lower-bound result to sort the models: mean_reward - std_reward.

You can click on a model's name to be redirected to its model card, which includes documentation.

Want to try your own model? Read Unit 1 of the Deep Reinforcement Learning Class: https://github.com/huggingface/deep-rl-class/blob/Unit1/unit1/README.md
""".format(len_dataframe=len(data_dataframe), env_name=rl_env, name_leaderboard=RL_DETAILS[rl_env]['title'])
                gr.Markdown(markdown)
                gr.HTML(data_html)

block.launch()