loodvanniekerkginkgo committed
Commit 11793f2 · 1 Parent(s): de75bee

Added logo and fixed spearman_abs
Files changed (3)
  1. .gitattributes +1 -0
  2. app.py +7 -6
  3. assets/competition_logo.jpg +3 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.jpg filter=lfs diff=lfs merge=lfs -text
app.py CHANGED
@@ -69,12 +69,12 @@ def get_leaderboard_table(df_results: pd.DataFrame, assay: str | None = None):
     # to_show['user'] = to_show['user'].apply(lambda x: make_user_clickable(x)).astype(str)
 
     # Previously hosted on HF hub, local for now (Can also pull directly from github backend)
-    column_order = ["model", "property", "spearman", "spearman_abs"]
+    column_order = ["model", "property", "spearman", "spearman_cross_val"]
     df = df_results.query("assay.isin(@ASSAY_RENAME.keys())").copy()
     if assay is not None:
         df = df[df['assay'] == assay]
     df = df[column_order]
-    return df.sort_values(by="spearman_abs", ascending=False)
+    return df.sort_values(by="spearman", ascending=False)
 
 def get_leaderboard_object(df_results: pd.DataFrame, assay: str | None = None):
     df = get_leaderboard_table(df_results=df_results, assay=assay)
@@ -85,7 +85,7 @@ def get_leaderboard_object(df_results: pd.DataFrame, assay: str | None = None):
     Leaderboard(
         value=df,
         datatype=["str", "str", "str", "number"],
-        select_columns=["model", "property", "spearman"],
+        select_columns=["model", "property", "spearman", "spearman_cross_val"],
         search_columns=["model"],
         filter_columns=filter_columns,
         every=60,
@@ -99,13 +99,14 @@ def fetch_hf_results():
     ds = load_dataset(results_repo, split='no_low_spearman', download_mode="force_redownload")
     df = pd.DataFrame(ds).drop_duplicates(subset=["model", "assay"])
     df["property"] = df["assay"].map(ASSAY_RENAME)
+    print(df.head())
     return df
 
 with gr.Blocks() as demo:
     gr.Markdown("""
-    ## Welcome to the Ginkgo Antibody Developability Benchmark Leaderboard!
+    ## Welcome to the Ginkgo Antibody Developability Benchmark!
 
-    Participants can submit their model to the leaderboard by
+    Participants can submit their model to the leaderboard by uploading a CSV file (see the "✉️ Submit" tab).
     """)
     df = fetch_hf_results()
     with gr.Tabs(elem_classes="tab-buttons"):
@@ -117,11 +118,11 @@ with gr.Blocks() as demo:
 
         with gr.TabItem("🚀 Overall", elem_id="abdev-benchmark-tab-table"):
             gr.Markdown("# Antibody Developability Benchmark Leaderboard over all properties")
-
             get_leaderboard_object(df_results=df)
             # TODO: this is not going to update well, need to fix
 
         with gr.TabItem("❔About", elem_id="abdev-benchmark-tab-table"):
+            gr.Image(value="./assets/competition_logo.jpg")
             gr.Markdown(
                 """
                 ## About this challenge
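The behavioural part of this commit is in get_leaderboard_table: rows are now ranked by the signed spearman column (with spearman_cross_val shown alongside) instead of the removed spearman_abs column. A minimal sketch of that sorting step follows; the model names, property labels, and numbers are illustrative only, not actual leaderboard results.

# Minimal sketch of the new ranking logic; sample rows are made up for illustration.
import pandas as pd

column_order = ["model", "property", "spearman", "spearman_cross_val"]

df = pd.DataFrame(
    {
        "model": ["model_a", "model_b", "model_c"],
        "property": ["property_1", "property_1", "property_1"],
        "spearman": [0.42, -0.55, 0.10],
        "spearman_cross_val": [0.40, -0.50, 0.08],
    }
)

# After the fix: rank by the signed Spearman correlation, highest first,
# rather than by the old (now removed) spearman_abs column.
leaderboard = df[column_order].sort_values(by="spearman", ascending=False)
print(leaderboard)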
assets/competition_logo.jpg ADDED

Git LFS Details

  • SHA256: f3a5727b36da0a7541a072dc2bdaf45ea562fdc4574a51baba40ddb83a0fa8d9
  • Pointer size: 131 Bytes
  • Size of remote file: 151 kB
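Because *.jpg files are now routed through Git LFS (the .gitattributes change above), the repository stores only a small pointer for the logo, and the details above describe the underlying LFS object. A quick way to check that a locally downloaded copy matches that object is to compare SHA256 digests; the local path below is assumed, not taken from this page.

# Sketch only: verify a local copy of the logo against the SHA256 reported
# in the Git LFS details above. The file path is an assumption.
import hashlib

EXPECTED_SHA256 = "f3a5727b36da0a7541a072dc2bdaf45ea562fdc4574a51baba40ddb83a0fa8d9"

with open("assets/competition_logo.jpg", "rb") as fh:
    actual = hashlib.sha256(fh.read()).hexdigest()

print("match" if actual == EXPECTED_SHA256 else "mismatch")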