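"""Gradio app for the security leaderboard Space.

Downloads snapshots of the evaluation queue and results datasets, builds the
leaderboard UI, accepts model submissions, and schedules background queue
processing. Run locally with `python app.py`.
"""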

import gradio as gr
import pandas as pd
import os
import logging

from apscheduler.schedulers.background import BackgroundScheduler
from huggingface_hub import snapshot_download

from src.core.evaluation import EvaluationManager, EvaluationRequest
from src.core.queue_manager import QueueManager
from src.logging_config import setup_logging
from src.about import (
    CITATION_BUTTON_LABEL,
    CITATION_BUTTON_TEXT,
    EVALUATION_QUEUE_TEXT,
    INTRODUCTION_TEXT,
    LLM_BENCHMARKS_TEXT,
    TITLE,
)
from src.display.css_html_js import custom_css
from src.display.utils import (
    BENCHMARK_COLS,
    COLS,
    EVAL_COLS,
    EVAL_TYPES,
    ModelType,
    WeightType,
    Precision,
)
from src.envs import (
    API,
    CACHE_PATH,
    EVAL_REQUESTS_PATH,
    EVAL_RESULTS_PATH,
    QUEUE_REPO,
    REPO_ID,
    RESULTS_REPO,
    TOKEN,
)
from src.populate import get_evaluation_queue_df, get_leaderboard_df

setup_logging(log_dir="logs")
logger = logging.getLogger("web")

evaluation_manager = EvaluationManager(
    results_dir=EVAL_RESULTS_PATH,
    backup_dir=os.path.join(CACHE_PATH, "eval-backups"),
)

queue_manager = QueueManager(
    queue_dir=os.path.join(CACHE_PATH, "eval-queue"),
)
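
# Both managers keep their state on disk under CACHE_PATH, so queue entries
# and result backups can survive a Space restart if the cache volume persists
# (an assumption about the Space's storage configuration, not verified here).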


def restart_space():
    """Restart the Hugging Face Space."""
    logger.info("Restarting space")
    API.restart_space(repo_id=REPO_ID)


def initialize_space():
    """Initialize the Space by downloading queue and results data."""
    logger.info("Initializing space")
    try:
        logger.info(f"Downloading queue data from {QUEUE_REPO}")
        snapshot_download(
            repo_id=QUEUE_REPO,
            local_dir=EVAL_REQUESTS_PATH,
            repo_type="dataset",
            tqdm_class=None,
            etag_timeout=30,
            token=TOKEN,
        )
    except Exception as e:
        logger.error(f"Failed to download queue data: {e}")
        restart_space()

    try:
        logger.info(f"Downloading results data from {RESULTS_REPO}")
        snapshot_download(
            repo_id=RESULTS_REPO,
            local_dir=EVAL_RESULTS_PATH,
            repo_type="dataset",
            tqdm_class=None,
            etag_timeout=30,
            token=TOKEN,
        )
    except Exception as e:
        logger.error(f"Failed to download results data: {e}")
        restart_space()


initialize_space()
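
# initialize_space() runs at import time so both dataset snapshots are on
# local disk before the DataFrames below are built; on a failed download the
# Space restarts rather than serving a partially initialized UI.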

LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)

(
    finished_eval_queue_df,
    running_eval_queue_df,
    pending_eval_queue_df,
) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
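
# These DataFrames back the "Finished", "Running", and "Pending" accordion
# tables on the Submit tab; they are loaded once at startup.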


def process_evaluation_queue():
    """Process pending evaluation requests until the queue is empty."""
    logger.info("Processing evaluation queue")
    while True:
        request = queue_manager.get_next_request()
        if not request:
            break

        try:
            # Precision and weight type are currently fixed for every queued
            # request; the values chosen in the submission form are not
            # forwarded to the queue.
            eval_request = EvaluationRequest(
                model_id=request.model_id,
                revision=request.revision,
                precision="float16",
                weight_type="Safetensors",
                submitted_time=request.timestamp,
            )

            results = evaluation_manager.run_evaluation(eval_request)
            logger.info(f"Evaluation complete for {request.model_id}: {results}")

            queue_manager.mark_complete(request.request_id)
        except Exception as e:
            logger.error(f"Evaluation failed for {request.model_id}: {e}")
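
# Note: the queue is drained both synchronously from handle_submission and by
# the scheduled job below, so two drains can overlap; QueueManager is assumed
# to hand out each request only once (an assumption, not verified here).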


def init_leaderboard(df):
    """Initialize the leaderboard table from the given DataFrame."""
    if df is None or df.empty:
        # An empty frame keeps the column headers visible before any
        # evaluation has completed.
        df = pd.DataFrame(columns=COLS)
        logger.info("Creating empty leaderboard - no evaluations completed yet")

    return gr.Dataframe(
        headers=COLS,
        datatype=["str"] * len(COLS),
        row_count=10,
        col_count=(len(COLS), "fixed"),
        value=df,
        wrap=True,
        column_widths=[50] + [None] * (len(COLS) - 1),
        type="pandas",
    )


demo = gr.Blocks(css=custom_css)
with demo:
    gr.HTML(TITLE)
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("🔒 Security Leaderboard", elem_id="security-leaderboard-tab", id=0):
            leaderboard = init_leaderboard(LEADERBOARD_DF)

        with gr.TabItem("📝 About", elem_id="about-tab", id=2):
            gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")

        with gr.TabItem("🚀 Submit Model", elem_id="submit-tab", id=3):
            with gr.Column():
                with gr.Row():
                    gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")

                with gr.Column():
                    with gr.Accordion(
                        f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
                        open=False,
                    ):
                        with gr.Row():
                            finished_eval_table = gr.components.Dataframe(
                                value=finished_eval_queue_df,
                                headers=EVAL_COLS,
                                datatype=EVAL_TYPES,
                                row_count=5,
                            )
                    with gr.Accordion(
                        f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
                        open=False,
                    ):
                        with gr.Row():
                            running_eval_table = gr.components.Dataframe(
                                value=running_eval_queue_df,
                                headers=EVAL_COLS,
                                datatype=EVAL_TYPES,
                                row_count=5,
                            )

                    with gr.Accordion(
                        f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
                        open=False,
                    ):
                        with gr.Row():
                            pending_eval_table = gr.components.Dataframe(
                                value=pending_eval_queue_df,
                                headers=EVAL_COLS,
                                datatype=EVAL_TYPES,
                                row_count=5,
                            )

            with gr.Row():
                gr.Markdown("# 🚀 Submit Your Model for Security Evaluation", elem_classes="markdown-text")

            with gr.Row():
                with gr.Column():
                    model_name_textbox = gr.Textbox(
                        label="Model name (organization/model-name)",
                        placeholder="huggingface/model-name",
                    )
                    revision_name_textbox = gr.Textbox(
                        label="Revision commit",
                        placeholder="main",
                    )
                    model_type = gr.Dropdown(
                        choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
                        label="Model type",
                        multiselect=False,
                        value=None,
                        interactive=True,
                    )

                with gr.Column():
                    precision = gr.Dropdown(
                        choices=[i.value.name for i in Precision if i != Precision.Unknown],
                        label="Precision",
                        multiselect=False,
                        value="float16",
                        interactive=True,
                    )
                    weight_type = gr.Dropdown(
                        choices=[i.value.name for i in WeightType],
                        label="Weight Format",
                        multiselect=False,
                        value="Safetensors",
                        interactive=True,
                    )
                    base_model_name_textbox = gr.Textbox(
                        label="Base model (for delta or adapter weights)",
                        placeholder="Optional: base model path",
                    )

            with gr.Row():
                gr.Markdown(
                    """
                    ### Security Requirements:
                    1. Model weights must be in safetensors format
                    2. Model card must include security considerations
                    3. Model will be evaluated on secure coding capabilities
                    """,
                    elem_classes="markdown-text",
                )

            submit_button = gr.Button("Submit for Security Evaluation")
            submission_result = gr.Markdown()

            def handle_submission(model, base_model, revision, precision, weight_type, model_type):
                """Handle a new model submission."""
                try:
                    logger.info(f"New submission received for {model}")

                    # Only the model id and revision are queued; the remaining
                    # form fields are accepted but not yet used downstream.
                    request_id = queue_manager.add_request(
                        model_id=model,
                        revision=revision if revision else "main",
                    )
                    logger.info(f"Queued request {request_id} for {model}")

                    # Drain the queue immediately rather than waiting for the
                    # scheduled job.
                    process_evaluation_queue()

                    return gr.Markdown("Submission successful! Your model has been added to the evaluation queue.")
                except Exception as e:
                    logger.error(f"Submission failed: {e}")
                    return gr.Markdown(f"Error: {e}")

            submit_button.click(
                handle_submission,
                [
                    model_name_textbox,
                    base_model_name_textbox,
                    revision_name_textbox,
                    precision,
                    weight_type,
                    model_type,
                ],
                submission_result,
            )
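
            # The component list above must stay in the same order as
            # handle_submission's parameters: (model, base_model, revision,
            # precision, weight_type, model_type).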

    with gr.Row():
        with gr.Accordion("📙 Citation", open=False):
            citation_button = gr.Textbox(
                value=CITATION_BUTTON_TEXT,
                label=CITATION_BUTTON_LABEL,
                lines=20,
                elem_id="citation-button",
                show_copy_button=True,
            )


scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=1800)
scheduler.add_job(process_evaluation_queue, "interval", seconds=300)
scheduler.start()
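
# The Space restarts itself every 30 minutes (1800 s) to pick up fresh dataset
# snapshots, and the evaluation queue is drained every 5 minutes (300 s).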

logger.info("Application startup complete")
demo.queue(default_concurrency_limit=40).launch()