CultriX committed on
Commit
137ba23
·
verified ·
1 Parent(s): fc68f79

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -20
app.py CHANGED
@@ -122,23 +122,6 @@ def calculate_highest_combined_score(data, column):
122
 
123
  # Function to create and display charts (existing functions can be reused or modified as needed)
124
 
125
- # Main function to run the Streamlit app
126
- def main():
127
- st.set_page_config(page_title="YALL - Yet Another LLM Leaderboard", layout="wide")
128
- st.title("๐Ÿ† YALL - Yet Another LLM Leaderboard")
129
-
130
- # Example content placeholder - replace with actual markdown content or file upload
131
- content = """Your markdown table content here"""
132
-
133
- if content:
134
- full_df = convert_markdown_table_to_dataframe(content)
135
- full_df = get_model_info(full_df)
136
- # Assuming the scores are already in the right format, otherwise adjust as needed
137
- display_highest_combined_scores(full_df) # Call to display the calculated scores
138
-
139
- # Rest of your Streamlit app logic here (tabs, visualizations, etc.)
140
-
141
-
142
 
143
  @st.cache_data
144
  def get_model_info(df):
@@ -188,9 +171,6 @@ def create_bar_chart(df, category):
188
 
189
  # Main function to run the Streamlit app
190
  def main():
191
- # Set page configuration and title
192
- st.set_page_config(page_title="YALL - Yet Another LLM Leaderboard", layout="wide")
193
-
194
  st.title("๐Ÿ† YALL - Yet Another LLM Leaderboard")
195
  st.markdown("Leaderboard made with 🧐 [LLM AutoEval](https://github.com/mlabonne/llm-autoeval) using [Nous](https://huggingface.co/NousResearch) benchmark suite.")
196
 
 
122
 
123
  # Function to create and display charts (existing functions can be reused or modified as needed)
124
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
125
 
126
  @st.cache_data
127
  def get_model_info(df):
 
171
 
172
  # Main function to run the Streamlit app
173
  def main():
 
 
 
174
  st.title("๐Ÿ† YALL - Yet Another LLM Leaderboard")
175
  st.markdown("Leaderboard made with 🧐 [LLM AutoEval](https://github.com/mlabonne/llm-autoeval) using [Nous](https://huggingface.co/NousResearch) benchmark suite.")
176