import streamlit as st
import pandas as pd
import wandb
import time
from datetime import datetime, timedelta
import requests


def get_competitions(CONFIG_URL):
    """
    Get competition names and their evaluation times from the config URL.
    """
    competitions = []
    try:
        config = requests.get(CONFIG_URL).json()
        for competition in config:
            # Extract the competition name and its evaluation times
            competition_name = competition["competition_id"]
            evaluation_times = competition.get("evaluation_times", [])
            # Store the competition name and evaluation times as a tuple
            competitions.append((competition_name, evaluation_times))
    except Exception as e:
        print(f"Error loading competition data: {str(e)}")
    return competitions


def get_latest_evaluation_time(evaluation_times):
    """
    Get the latest UTC evaluation time as a datetime object (either today or yesterday).

    Args:
        evaluation_times (list): List of evaluation times as strings (in HH:MM format).

    Returns:
        datetime: Latest evaluation time as a datetime object.
    """
    # Get the current UTC date and time
    current_utc_datetime = datetime.utcnow()
    current_utc_time = current_utc_datetime.time()

    # Convert evaluation times to datetime.time objects.
    # The loop variable is named `t` so it does not shadow the `time` module.
    eval_times = [datetime.strptime(t, "%H:%M").time() for t in evaluation_times]

    # Sort the evaluation times so they are in chronological order
    eval_times.sort()

    # Walk the evaluation times in reverse to find the latest one that has already passed
    for eval_time in reversed(eval_times):
        if current_utc_time >= eval_time:
            # The time has passed today, so return today's date with that time
            return current_utc_datetime.replace(
                hour=eval_time.hour, minute=eval_time.minute, second=0, microsecond=0
            )

    # None of the evaluation times have passed yet today, so return the latest one from yesterday
    yesterday = current_utc_datetime - timedelta(days=1)
    latest_eval_time = eval_times[-1]
    return yesterday.replace(
        hour=latest_eval_time.hour, minute=latest_eval_time.minute, second=0, microsecond=0
    )


def fetch_competition_summary(api, entity, project):
    """
    Fetch validator/winner summaries from a project's W&B runs as a DataFrame.
    """
    data = []
    runs = api.runs(f"{entity}/{project}")
    for run in runs:
        try:
            summary = run.summary
            if summary.get("validator_hotkey") and summary.get("winning_hotkey"):
                data.append({
                    "Created At": run.created_at,
                    "Validator ID": summary.get("validator_hotkey"),
                    "Winning Hotkey": summary.get("winning_hotkey"),
                    "Run Time (s)": summary.get("run_time_s"),
                })
        except Exception as e:
            st.write(f"Error processing run {run.id}: {str(e)}")

    df = pd.DataFrame(data)
    if not df.empty:
        df['Created At'] = pd.to_datetime(df['Created At'], utc=True)
        df = df.sort_values(by="Created At", ascending=False)
    return df


def fetch_models_evaluation(api, entity, project):
    """
    Fetch per-model evaluation metrics from a project's W&B runs as a DataFrame.
    """
    data = []
    runs = api.runs(f"{entity}/{project}")
    for run in runs:
        try:
            summary = run.summary
            if summary.get("score") is not None:  # Runs with a score are model evaluations
                model_link = summary.get("hf_model_link", "")
                if model_link:
                    # Render the Hugging Face model link as a clickable HTML anchor
                    model_link_html = f'<a href="{model_link}" target="_blank">Model Link</a>'
                else:
                    model_link_html = "N/A"
                data.append({
                    "Created At": run.created_at,
                    "Miner hotkey": summary.get("miner_hotkey", "N/A"),
                    "F1-beta": summary.get("fbeta"),
                    "Accuracy": summary.get("accuracy"),
                    "Recall": summary.get("recall"),
                    "Precision": summary.get("precision"),
                    "Tested entries": summary.get("tested_entries"),
                    "ROC AUC": summary.get("roc_auc"),
                    "Confusion Matrix": summary.get("confusion_matrix"),
                    "Model link": model_link_html,
                    "Score": summary.get("score"),
                })
        except Exception as e:
            st.write(f"Error processing run {run.id}: {str(e)}")

    df = pd.DataFrame(data)
    if not df.empty:
        df['Created At'] = pd.to_datetime(df['Created At'], utc=True)
        df = df.sort_values(by="Created At", ascending=False)
    return df


def highlight_score_column(s):
    """
    Highlight the 'Score' column with a custom background color.
    """
    return ['background-color: yellow' if s.name == 'Score' else '' for _ in s]