diff --git a/.env.example b/.env.example deleted file mode 100644 index aa96e1820a29689a9163b6a202ac15bf5d81fd8e..0000000000000000000000000000000000000000 --- a/.env.example +++ /dev/null @@ -1,3 +0,0 @@ -ENVIRONMENT=development -HF_TOKEN=xxx -HF_HOME=.cache diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000000000000000000000000000000000000..3b6ab3cb9f296564e0dc782b3473700bbc89190d --- /dev/null +++ b/.gitattributes @@ -0,0 +1,36 @@ +*.7z filter=lfs diff=lfs merge=lfs -text +*.arrow filter=lfs diff=lfs merge=lfs -text +*.bin filter=lfs diff=lfs merge=lfs -text +*.bz2 filter=lfs diff=lfs merge=lfs -text +*.ckpt filter=lfs diff=lfs merge=lfs -text +*.ftz filter=lfs diff=lfs merge=lfs -text +*.gz filter=lfs diff=lfs merge=lfs -text +*.h5 filter=lfs diff=lfs merge=lfs -text +*.joblib filter=lfs diff=lfs merge=lfs -text +*.lfs.* filter=lfs diff=lfs merge=lfs -text +*.mlmodel filter=lfs diff=lfs merge=lfs -text +*.model filter=lfs diff=lfs merge=lfs -text +*.msgpack filter=lfs diff=lfs merge=lfs -text +*.npy filter=lfs diff=lfs merge=lfs -text +*.npz filter=lfs diff=lfs merge=lfs -text +*.onnx filter=lfs diff=lfs merge=lfs -text +*.ot filter=lfs diff=lfs merge=lfs -text +*.parquet filter=lfs diff=lfs merge=lfs -text +*.pb filter=lfs diff=lfs merge=lfs -text +*.pickle filter=lfs diff=lfs merge=lfs -text +*.pkl filter=lfs diff=lfs merge=lfs -text +*.pt filter=lfs diff=lfs merge=lfs -text +*.pth filter=lfs diff=lfs merge=lfs -text +*.rar filter=lfs diff=lfs merge=lfs -text +*.safetensors filter=lfs diff=lfs merge=lfs -text +saved_model/**/* filter=lfs diff=lfs merge=lfs -text +*.tar.* filter=lfs diff=lfs merge=lfs -text +*.tflite filter=lfs diff=lfs merge=lfs -text +*.tgz filter=lfs diff=lfs merge=lfs -text +*.wasm filter=lfs diff=lfs merge=lfs -text +*.xz filter=lfs diff=lfs merge=lfs -text +*.zip filter=lfs diff=lfs merge=lfs -text +*.zst filter=lfs diff=lfs merge=lfs -text +*tfevents* filter=lfs diff=lfs merge=lfs -text +scale-hf-logo.png filter=lfs diff=lfs merge=lfs -text +gif.gif filter=lfs diff=lfs merge=lfs -text diff --git a/.gitignore b/.gitignore index 08e57889adbc8cb31f2809bb3232c4f42e283a21..4249057d6718f3acdda39a5efbf7a32599f4a522 100644 --- a/.gitignore +++ b/.gitignore @@ -1,45 +1,22 @@ -# See https://help.github.com/articles/ignoring-files/ for more about ignoring files. - -__pycache__ -.cache/ - -# dependencies - -frontend/node_modules -/.pnp -.pnp.js - -# testing - -/coverage - -# production - -/build - -# misc - -.DS_Store -.env.local -.env.development.local -.env.test.local -.env.production.local - -npm-debug.log* -yarn-debug.log* -yarn-error.log\* - -src/dataframe.json - -yarn.lock -package-lock.json - -/public - -.claudesync/ - -# Environment variables +venv/ +.venv/ +__pycache__/ .env -.env.* -!.env.example - +.ipynb_checkpoints +*ipynb +.vscode/ +.DS_Store +.ruff_cache/ +.python-version +.profile_app.python +*pstats +*.lock + +eval-queue/ +eval-results/ +dynamic-info/ +downloads/ +model-votes/ +open-llm-leaderboard___contents/ + +src/assets/model_counts.html diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0710dad252bda2ac9fd5b7e4e2e4dc0afeff43cf --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,53 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +default_language_version: + python: python3 + +ci: + autofix_prs: true + autoupdate_commit_msg: '[pre-commit.ci] pre-commit suggestions' + autoupdate_schedule: quarterly + +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.3.0 + hooks: + - id: check-yaml + - id: check-case-conflict + - id: detect-private-key + - id: check-added-large-files + args: ['--maxkb=1000'] + - id: requirements-txt-fixer + - id: end-of-file-fixer + - id: trailing-whitespace + + - repo: https://github.com/PyCQA/isort + rev: 5.12.0 + hooks: + - id: isort + name: Format imports + + - repo: https://github.com/psf/black + rev: 22.12.0 + hooks: + - id: black + name: Format code + additional_dependencies: ['click==8.0.2'] + + - repo: https://github.com/charliermarsh/ruff-pre-commit + # Ruff version. + rev: 'v0.0.267' + hooks: + - id: ruff diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index f90da3272f5ea911250b43d7eaccf38b3a7b1412..0000000000000000000000000000000000000000 --- a/Dockerfile +++ /dev/null @@ -1,62 +0,0 @@ -# Build frontend -FROM node:18 as frontend-build -WORKDIR /app -COPY frontend/package*.json ./ -RUN npm install -COPY frontend/ ./ - -RUN npm run build - -# Build backend -FROM python:3.12-slim -WORKDIR /app - -# Create non-root user -RUN useradd -m -u 1000 user - -# Install poetry -RUN pip install poetry - -# Create and configure cache directory -RUN mkdir -p /app/.cache && \ - chown -R user:user /app - -# Copy and install backend dependencies -COPY backend/pyproject.toml backend/poetry.lock* ./ -RUN poetry config virtualenvs.create false \ - && poetry install --no-interaction --no-ansi --no-root --only main - -# Copy backend code -COPY backend/ . - -# Install Node.js and npm -RUN apt-get update && apt-get install -y \ - curl \ - netcat-openbsd \ - && curl -fsSL https://deb.nodesource.com/setup_18.x | bash - \ - && apt-get install -y nodejs \ - && rm -rf /var/lib/apt/lists/* - -# Copy frontend server and build -COPY --from=frontend-build /app/build ./frontend/build -COPY --from=frontend-build /app/package*.json ./frontend/ -COPY --from=frontend-build /app/server.js ./frontend/ - -# Install frontend production dependencies -WORKDIR /app/frontend -RUN npm install --production -WORKDIR /app - -# Environment variables -ENV HF_HOME=/app/.cache \ - HF_DATASETS_CACHE=/app/.cache \ - INTERNAL_API_PORT=7861 \ - PORT=7860 \ - NODE_ENV=production - -# Note: HF_TOKEN should be provided at runtime, not build time -USER user -EXPOSE 7860 - -# Start both servers with wait-for -CMD ["sh", "-c", "uvicorn app.asgi:app --host 0.0.0.0 --port 7861 & while ! 
nc -z localhost 7861; do sleep 1; done && cd frontend && npm run serve"] \ No newline at end of file diff --git a/Makefile b/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..a99bb53049b7022e7de973aeb72e3b9740c45436 --- /dev/null +++ b/Makefile @@ -0,0 +1,18 @@ +.PHONY: style format quality all + +# Applies code style fixes to the specified file or directory +style: + @echo "Applying style fixes to $(file)" + ruff format $(file) + ruff check --fix $(file) --line-length 119 + +# Checks code quality for the specified file or directory +quality: + @echo "Checking code quality for $(file)" + ruff check $(file) --line-length 119 + +# Applies PEP8 formatting and checks the entire codebase +all: + @echo "Formatting and checking the entire codebase" + ruff format . + ruff check --fix . --line-length 119 diff --git a/README.md b/README.md index ba336d4e80575669a076f8e4353db1f0ca0c6b62..38baa0ab70407aacc0c1a23d8fbb6abef254662f 100644 --- a/README.md +++ b/README.md @@ -1,85 +1,25 @@ --- -title: Open LLM Leaderboard +title: Open LLM Leaderboard 2 emoji: 🏆 -colorFrom: blue -colorTo: red -sdk: docker -hf_oauth: true +colorFrom: green +colorTo: indigo +sdk: gradio +sdk_version: 4.44.0 +app_file: app.py pinned: true license: apache-2.0 duplicated_from: open-llm-leaderboard/open_llm_leaderboard +fullWidth: true +startup_duration_timeout: 1h +hf_oauth: true +space_ci: + private: true + secrets: + - HF_TOKEN + - WEBHOOK_SECRET tags: - leaderboard short_description: Track, rank and evaluate open LLMs and chatbots --- -# Open LLM Leaderboard - -Modern React interface for comparing Large Language Models (LLMs) in an open and reproducible way. - -## Features - -- 📊 Interactive table with advanced sorting and filtering -- 🔍 Semantic model search -- 📌 Pin models for comparison -- 📱 Responsive and modern interface -- 🎨 Dark/Light mode -- ⚡️ Optimized performance with virtualization - -## Architecture - -The project is split into two main parts: - -### Frontend (React) - -``` -frontend/ -├── src/ -│ ├── components/ # Reusable UI components -│ ├── pages/ # Application pages -│ ├── hooks/ # Custom React hooks -│ ├── context/ # React contexts -│ └── constants/ # Constants and configurations -├── public/ # Static assets -└── server.js # Express server for production -``` - -### Backend (FastAPI) - -``` -backend/ -├── app/ -│ ├── api/ # API router and endpoints -│ │ └── endpoints/ # Specific API endpoints -│ ├── core/ # Core functionality -│ ├── config/ # Configuration -│ └── services/ # Business logic services -│ ├── leaderboard.py -│ ├── models.py -│ ├── votes.py -│ └── hf_service.py -└── utils/ # Utility functions -``` - -## Technologies - -### Frontend - -- React -- Material-UI -- TanStack Table & Virtual -- Express.js - -### Backend - -- FastAPI -- Hugging Face API -- Docker - -## Development - -The application is containerized using Docker and can be run using: - -```bash -docker-compose up -``` +Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/app.py b/app.py new file mode 100644 index 0000000000000000000000000000000000000000..c3c991d125b42bdde05ee9ce8dd1229eb8982209 --- /dev/null +++ b/app.py @@ -0,0 +1,484 @@ +import os +import logging +import time +import schedule +import datetime +import gradio as gr +from threading import Thread +import datasets +from huggingface_hub import snapshot_download, WebhooksServer, WebhookPayload, RepoCard +from gradio_leaderboard import Leaderboard, ColumnFilter, 
SelectColumns
+from apscheduler.schedulers.background import BackgroundScheduler
+
+# Start ephemeral Spaces on PRs (see config in README.md)
+from gradio_space_ci.webhook import IS_EPHEMERAL_SPACE, SPACE_ID, configure_space_ci
+
+from src.display.about import (
+    CITATION_BUTTON_LABEL,
+    CITATION_BUTTON_TEXT,
+    EVALUATION_QUEUE_TEXT,
+    INTRODUCTION_TEXT,
+    TITLE,
+)
+from src.display.css_html_js import custom_css
+from src.display.utils import (
+    BENCHMARK_COLS,
+    COLS,
+    EVAL_COLS,
+    EVAL_TYPES,
+    AutoEvalColumn,
+    ModelType,
+    Precision,
+    WeightType,
+    fields,
+    EvalQueueColumn
+)
+from src.envs import (
+    API,
+    EVAL_REQUESTS_PATH,
+    AGGREGATED_REPO,
+    HF_TOKEN,
+    QUEUE_REPO,
+    REPO_ID,
+    VOTES_REPO,
+    VOTES_PATH,
+    HF_HOME,
+)
+from src.populate import get_evaluation_queue_df, get_leaderboard_df
+from src.submission.submit import add_new_eval
+from src.voting.vote_system import VoteManager, run_scheduler
+
+# Configure logging
+logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
+
+# Whether to perform a full initialization (re-downloading the queue and vote datasets).
+# Currently forced to True; the commented-out expression would instead read the
+# "LEADERBOARD_FULL_INIT" environment variable, defaulting to True when unset.
+DO_FULL_INIT = True  # os.getenv("LEADERBOARD_FULL_INIT", "True") == "True"
+NEW_DATA_ON_LEADERBOARD = True
+LEADERBOARD_DF = None
+
+def restart_space():
+    API.restart_space(repo_id=REPO_ID, token=HF_TOKEN)
+
+
+def time_diff_wrapper(func):
+    def wrapper(*args, **kwargs):
+        start_time = time.time()
+        result = func(*args, **kwargs)
+        end_time = time.time()
+        diff = end_time - start_time
+        logging.info(f"Time taken for {func.__name__}: {diff:.2f} seconds")
+        return result
+
+    return wrapper
+
+
+@time_diff_wrapper
+def download_dataset(repo_id, local_dir, repo_type="dataset", max_attempts=3, backoff_factor=1.5):
+    """Download a dataset with exponential-backoff retries."""
+    attempt = 0
+    while attempt < max_attempts:
+        try:
+            logging.info(f"Downloading {repo_id} to {local_dir}")
+            snapshot_download(
+                repo_id=repo_id,
+                local_dir=local_dir,
+                repo_type=repo_type,
+                tqdm_class=None,
+                etag_timeout=30,
+                max_workers=8,
+            )
+            logging.info("Download successful")
+            return
+        except Exception as e:
+            wait_time = backoff_factor**attempt
+            logging.error(f"Error downloading {repo_id}: {e}, retrying in {wait_time}s")
+            time.sleep(wait_time)
+            attempt += 1
+    raise Exception(f"Failed to download {repo_id} after {max_attempts} attempts")
+
+def get_latest_data_leaderboard(leaderboard_initial_df=None):
+    global NEW_DATA_ON_LEADERBOARD
+    global LEADERBOARD_DF
+    if NEW_DATA_ON_LEADERBOARD:
+        print("Leaderboard updated at reload!")
+        leaderboard_dataset = datasets.load_dataset(
+            AGGREGATED_REPO,
+            "default",
+            split="train",
+            cache_dir=HF_HOME,
+            download_mode=datasets.DownloadMode.REUSE_DATASET_IF_EXISTS,  # Uses the cached dataset
+            verification_mode="no_checks"
+        )
+        LEADERBOARD_DF = get_leaderboard_df(
+            leaderboard_dataset=leaderboard_dataset,
+            cols=COLS,
+            benchmark_cols=BENCHMARK_COLS,
+        )
+        NEW_DATA_ON_LEADERBOARD = False
+
+    else:
+        LEADERBOARD_DF = leaderboard_initial_df
+
+    return LEADERBOARD_DF
+
+
+def get_latest_data_queue():
+    eval_queue_dfs = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
+    return eval_queue_dfs
+
+def init_space():
+    """Initializes the application space, loading only necessary data."""
+    if DO_FULL_INIT:
+        # These downloads only occur on full initialization
+        try:
+            download_dataset(QUEUE_REPO, EVAL_REQUESTS_PATH)
+            download_dataset(VOTES_REPO, VOTES_PATH)
+        except Exception:
+            restart_space()
+
+    # Always redownload the leaderboard DataFrame
+    global LEADERBOARD_DF
+    LEADERBOARD_DF = get_latest_data_leaderboard()
+
+    # Evaluation queue DataFrame retrieval is independent of initialization detail level
+    eval_queue_dfs = get_latest_data_queue()
+
+    return LEADERBOARD_DF, eval_queue_dfs
+
+# Initialize VoteManager
+vote_manager = VoteManager(VOTES_PATH, EVAL_REQUESTS_PATH, VOTES_REPO)
+
+
+# Schedule the upload_votes method to run every 15 minutes
+schedule.every(15).minutes.do(vote_manager.upload_votes)
+
+# Start the scheduler in a separate thread
+scheduler_thread = Thread(target=run_scheduler, args=(vote_manager,), daemon=True)
+scheduler_thread.start()
+
+# Initialize the DataFrames used throughout the application; DO_FULL_INIT controls
+# whether the queue and vote datasets are re-downloaded first.
+LEADERBOARD_DF, eval_queue_dfs = init_space()
+finished_eval_queue_df, running_eval_queue_df, pending_eval_queue_df = eval_queue_dfs
+
+
+# Function to check if a user is logged in
+def check_login(profile: gr.OAuthProfile | None) -> bool:
+    return profile is not None
+
+def init_leaderboard(dataframe):
+    if dataframe is None or dataframe.empty:
+        raise ValueError("Leaderboard DataFrame is empty or None.")
+    return Leaderboard(
+        value=dataframe,
+        datatype=[c.type for c in fields(AutoEvalColumn)],
+        select_columns=SelectColumns(
+            default_selection=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default],
+            cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden or c.dummy],
+            label="Select Columns to Display:",
+        ),
+        search_columns=[AutoEvalColumn.model.name, AutoEvalColumn.fullname.name, AutoEvalColumn.license.name],
+        hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
+        filter_columns=[
+            ColumnFilter(AutoEvalColumn.model_type.name, type="checkboxgroup", label="Model types"),
+            ColumnFilter(AutoEvalColumn.precision.name, type="checkboxgroup", label="Precision"),
+            ColumnFilter(
+                AutoEvalColumn.params.name,
+                type="slider",
+                min=0.01,
+                max=150,
+                label="Select the number of parameters (B)",
+            ),
+            ColumnFilter(
+                AutoEvalColumn.still_on_hub.name, type="boolean", label="Deleted/incomplete", default=True
+            ),
+            ColumnFilter(
+                AutoEvalColumn.merged.name, type="boolean", label="Merge/MoErge", default=True
+            ),
+            ColumnFilter(AutoEvalColumn.moe.name, type="boolean", label="MoE", default=False),
+            ColumnFilter(AutoEvalColumn.not_flagged.name, type="boolean", label="Flagged", default=True),
+            ColumnFilter(AutoEvalColumn.maintainers_highlight.name, type="boolean", label="Show only maintainer's highlight", default=False),
+        ],
+        bool_checkboxgroup_label="Hide models",
+        interactive=False,
+    )
+
+main_block = gr.Blocks(css=custom_css)
+with main_block:
+    with gr.Row(elem_id="header-row"):
+        gr.HTML(TITLE)
+
+    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
+
+    with gr.Tabs(elem_classes="tab-buttons") as tabs:
+        with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
+            leaderboard = init_leaderboard(LEADERBOARD_DF)
+
+        with gr.TabItem("🚀 Submit ", elem_id="llm-benchmark-tab-table", id=5):
+            with gr.Column():
+                with gr.Row():
+                    gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
+
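+                    # A note on the wiring in this tab: the "Submit Eval" button passes
+                    # the form fields positionally to add_new_eval, roughly:
+                    #
+                    #   add_new_eval(model_name, base_model, revision, precision,
+                    #                weight_type, model_type, use_chat_template)
+                    #
+                    # (argument names here are illustrative; see the
+                    # submit_button.click(...) call further down for the exact order)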
+ with gr.Row(): + gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text") + login_button = gr.LoginButton(elem_id="oauth-button") + + with gr.Row(): + with gr.Column(): + model_name_textbox = gr.Textbox(label="Model name") + revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="latest") + with gr.Row(): + model_type = gr.Dropdown( + choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown], + label="Model type", + multiselect=False, + value=ModelType.FT.to_str(" : "), + interactive=True, + ) + chat_template_toggle = gr.Checkbox( + label="Use chat template", + value=False, + info="Is your model a chat model?", + ) + + with gr.Column(): + precision = gr.Dropdown( + choices=[i.value.name for i in Precision if i != Precision.Unknown], + label="Precision", + multiselect=False, + value="float16", + interactive=True, + ) + weight_type = gr.Dropdown( + choices=[i.value.name for i in WeightType], + label="Weights type", + multiselect=False, + value=WeightType.Original.value.name, + interactive=True, + ) + base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)", interactive=False) + + with gr.Column(): + with gr.Accordion( + f"✅ Finished Evaluations ({len(finished_eval_queue_df)})", + open=False, + ): + with gr.Row(): + finished_eval_table = gr.components.Dataframe( + value=finished_eval_queue_df, + headers=EVAL_COLS, + datatype=EVAL_TYPES, + row_count=5, + interactive=False, + ) + with gr.Accordion( + f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})", + open=False, + ): + with gr.Row(): + running_eval_table = gr.components.Dataframe( + value=running_eval_queue_df, + headers=EVAL_COLS, + datatype=EVAL_TYPES, + row_count=5, + interactive=False, + ) + + with gr.Accordion( + f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})", + open=False, + ): + with gr.Row(): + pending_eval_table = gr.components.Dataframe( + value=pending_eval_queue_df, + headers=EVAL_COLS, + datatype=EVAL_TYPES, + row_count=5, + interactive=False, + ) + + submit_button = gr.Button("Submit Eval") + submission_result = gr.Markdown() + + # The chat template checkbox update function + def update_chat_checkbox(model_type_value): + return ModelType.from_str(model_type_value) == ModelType.chat + + model_type.change( + fn=update_chat_checkbox, + inputs=[model_type], # Pass the current checkbox value + outputs=chat_template_toggle, + ) + + # The base_model_name_textbox interactivity and value reset function + def update_base_model_name_textbox(weight_type_value): + # Convert the dropdown value back to the corresponding WeightType Enum + weight_type_enum = WeightType[weight_type_value] + + # Determine if the textbox should be interactive + interactive = weight_type_enum in [WeightType.Adapter, WeightType.Delta] + + # Reset the value if weight type is "Original" + reset_value = "" if not interactive else None + + return gr.update(interactive=interactive, value=reset_value) + + weight_type.change( + fn=update_base_model_name_textbox, + inputs=[weight_type], + outputs=[base_model_name_textbox], + ) + + submit_button.click( + add_new_eval, + [ + model_name_textbox, + base_model_name_textbox, + revision_name_textbox, + precision, + weight_type, + model_type, + chat_template_toggle, + ], + submission_result, + ) + + # Ensure the values in 'pending_eval_queue_df' are correct and ready for the DataFrame component + with gr.TabItem("🆙 Model Vote"): + with gr.Row(): + gr.Markdown( + "## Vote for the models which should be evaluated first! 
\nYou'll need to sign in with the button above first. All votes are recorded.",
+                        elem_classes="markdown-text"
+                    )
+                    login_button = gr.LoginButton(elem_id="oauth-button")
+
+            with gr.Row():
+                pending_models = pending_eval_queue_df[EvalQueueColumn.model_name.name].to_list()
+
+                with gr.Column():
+                    selected_model = gr.Dropdown(
+                        choices=pending_models,
+                        label="Models",
+                        multiselect=False,
+                        value=None,  # no model pre-selected
+                        interactive=True,
+                    )
+
+                    vote_button = gr.Button("Vote", variant="primary")
+
+            with gr.Row():
+                with gr.Accordion(
+                    f"Available models pending ({len(pending_eval_queue_df)})",
+                    open=True,
+                ):
+                    with gr.Row():
+                        pending_eval_table_votes = gr.components.Dataframe(
+                            value=vote_manager.create_request_vote_df(
+                                pending_eval_queue_df
+                            ),
+                            headers=EVAL_COLS,
+                            datatype=EVAL_TYPES,
+                            row_count=5,
+                            interactive=False
+                        )
+
+            # Set the click event for the vote button
+            vote_button.click(
+                vote_manager.add_vote,
+                inputs=[selected_model, pending_eval_table],
+                outputs=[pending_eval_table_votes]
+            )
+
+    with gr.Row():
+        with gr.Accordion("📙 Citation", open=False):
+            citation_button = gr.Textbox(
+                value=CITATION_BUTTON_TEXT,
+                label=CITATION_BUTTON_LABEL,
+                lines=20,
+                elem_id="citation-button",
+                show_copy_button=True,
+            )
+
+    main_block.load(fn=get_latest_data_leaderboard, inputs=[leaderboard], outputs=[leaderboard])
+    leaderboard.change(fn=get_latest_data_queue, inputs=None, outputs=[finished_eval_table, running_eval_table, pending_eval_table])
+    pending_eval_table.change(fn=vote_manager.create_request_vote_df, inputs=[pending_eval_table], outputs=[pending_eval_table_votes])
+
+main_block.queue(default_concurrency_limit=40)
+
+
+def enable_space_ci_and_return_server(ui: gr.Blocks) -> WebhooksServer:
+    # Taken from https://huggingface.co/spaces/Wauplin/gradio-space-ci/blob/075119aee75ab5e7150bf0814eec91c83482e790/src/gradio_space_ci/webhook.py#L61
+    # Compared to the original, this one does not monkeypatch Gradio, which allows us to define more webhooks.
+    # ht to Lucain!
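+    # For reference, the `space_ci` block this function reads comes from the
+    # Space's README front-matter (see the README.md change earlier in this diff):
+    #
+    #   space_ci:
+    #     private: true
+    #     secrets:
+    #       - HF_TOKEN
+    #       - WEBHOOK_SECRET
+    #
+    # Keys absent from that block (trusted_authors, variables, hardware, storage)
+    # fall back to the defaults passed to configure_space_ci below.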
+    if SPACE_ID is None:
+        print("Not in a Space: Space CI disabled.")
+        return WebhooksServer(ui=ui)
+
+    if IS_EPHEMERAL_SPACE:
+        print("In an ephemeral Space: Space CI disabled.")
+        return WebhooksServer(ui=ui)
+
+    card = RepoCard.load(repo_id_or_path=SPACE_ID, repo_type="space")
+    config = card.data.get("space_ci", {})
+    print(f"Enabling Space CI with config from README: {config}")
+
+    return configure_space_ci(
+        blocks=ui,
+        trusted_authors=config.get("trusted_authors"),
+        private=config.get("private", "auto"),
+        variables=config.get("variables", "auto"),
+        secrets=config.get("secrets"),
+        hardware=config.get("hardware"),
+        storage=config.get("storage"),
+    )
+
+# Create the webhooks server (with a CI URL if in a Space and not ephemeral)
+webhooks_server = enable_space_ci_and_return_server(ui=main_block)
+
+# Add webhooks
+@webhooks_server.add_webhook
+def update_leaderboard(payload: WebhookPayload) -> None:
+    """Redownloads the leaderboard dataset each time it updates"""
+    if payload.repo.type == "dataset" and payload.event.action == "update":
+        global NEW_DATA_ON_LEADERBOARD
+        if NEW_DATA_ON_LEADERBOARD:
+            return
+        NEW_DATA_ON_LEADERBOARD = True
+
+        datasets.load_dataset(
+            AGGREGATED_REPO,
+            "default",
+            split="train",
+            cache_dir=HF_HOME,
+            download_mode=datasets.DownloadMode.FORCE_REDOWNLOAD,
+            verification_mode="no_checks"
+        )
+
+# The code below is not used at the moment, as we can manage the queue file locally
+LAST_UPDATE_QUEUE = datetime.datetime.now()
+@webhooks_server.add_webhook
+def update_queue(payload: WebhookPayload) -> None:
+    """Redownloads the queue dataset when it updates, at most once every 10 minutes"""
+    if payload.repo.type == "dataset" and payload.event.action == "update":
+        current_time = datetime.datetime.now()
+        global LAST_UPDATE_QUEUE
+        if current_time - LAST_UPDATE_QUEUE > datetime.timedelta(minutes=10):
+            print("Would have updated the queue")
+            # We only redownload if the last update was more than 10 minutes ago, as the queue is
+            # updated regularly and heavy to download
+            download_dataset(QUEUE_REPO, EVAL_REQUESTS_PATH)
+        LAST_UPDATE_QUEUE = datetime.datetime.now()
+
+webhooks_server.launch()
+
+scheduler = BackgroundScheduler()
+scheduler.add_job(restart_space, "interval", hours=3)  # restarted every 3h as a backup in case automatic updates are not working
+scheduler.start()
\ No newline at end of file
diff --git a/backend/Dockerfile.dev b/backend/Dockerfile.dev
deleted file mode 100644
index f802c87f0d5d730c559b1f21ed715b48cc9ca42a..0000000000000000000000000000000000000000
--- a/backend/Dockerfile.dev
+++ /dev/null
@@ -1,25 +0,0 @@
-FROM python:3.12-slim
-
-WORKDIR /app
-
-# Install required system dependencies
-RUN apt-get update && apt-get install -y \
-    build-essential \
-    && rm -rf /var/lib/apt/lists/*
-
-# Install poetry
-RUN pip install poetry
-
-# Copy Poetry configuration files
-COPY pyproject.toml poetry.lock* ./
-
-# Install dependencies
-RUN poetry config virtualenvs.create false && \
-    poetry install --no-interaction --no-ansi --no-root
-
-# Environment variables configuration for logs
-ENV PYTHONUNBUFFERED=1
-ENV LOG_LEVEL=INFO
-
-# In dev, mount volume directly
-CMD ["uvicorn", "app.asgi:app", "--host", "0.0.0.0", "--port", "7860", "--reload", "--log-level", "warning", "--no-access-log"]
\ No newline at end of file
diff --git a/backend/README.md b/backend/README.md
deleted file mode 100644
index 4a9c1e60d0c77b9add02f0c9ba25acaabe6ab2a5..0000000000000000000000000000000000000000
--- a/backend/README.md
+++ /dev/null
@@ -1,352 +0,0 @@
-# Backend - Open LLM
Leaderboard 🏆 - -FastAPI backend for the Open LLM Leaderboard. This service is part of a larger architecture that includes a React frontend. For complete project installation, see the [main README](../README.md). - -## ✨ Features - -- 📊 REST API for LLM models leaderboard management -- 🗳️ Voting and ranking system -- 🔄 HuggingFace Hub integration -- 🚀 Caching and performance optimizations - -## 🏗 Architecture - -```mermaid -flowchart TD - Client(["**Frontend**
<br><br>React Application"]) --> API["**API Server**<br><br>FastAPI REST Endpoints"] - - subgraph Backend - API --> Core["**Core Layer**<br><br>• Middleware<br>• Cache<br>• Rate Limiting"] - Core --> Services["**Services Layer**<br><br>• Business Logic<br>• Data Processing"] - - subgraph Services Layer - Services --> Models["**Model Service**<br><br>• Model Submission<br>• Evaluation Pipeline"] - Services --> Votes["**Vote Service**<br><br>• Vote Management<br>• Data Synchronization"] - Services --> Board["**Leaderboard Service**<br><br>• Rankings<br>• Performance Metrics"] - end - - Models --> Cache["**Cache Layer**<br><br>• In-Memory Store<br>• Auto Invalidation"] - Votes --> Cache - Board --> Cache - - Models --> HF["**HuggingFace Hub**<br><br>• Models Repository<br>
• Datasets Access"] - Votes --> HF - Board --> HF - end - - style Client fill:#f9f,stroke:#333,stroke-width:2px - style Models fill:#bbf,stroke:#333,stroke-width:2px - style Votes fill:#bbf,stroke:#333,stroke-width:2px - style Board fill:#bbf,stroke:#333,stroke-width:2px - style HF fill:#bfb,stroke:#333,stroke-width:2px -``` - -## 🛠️ HuggingFace Datasets - -The application uses several datasets on the HuggingFace Hub: - -### 1. Requests Dataset (`{HF_ORGANIZATION}/requests`) - -- **Operations**: - - 📤 `POST /api/models/submit`: Adds a JSON file for each new model submission - - 📥 `GET /api/models/status`: Reads files to get models status -- **Format**: One JSON file per model with submission details -- **Updates**: On each new model submission - -### 2. Votes Dataset (`{HF_ORGANIZATION}/votes`) - -- **Operations**: - - 📤 `POST /api/votes/{model_id}`: Adds a new vote - - 📥 `GET /api/votes/model/{provider}/{model}`: Reads model votes - - 📥 `GET /api/votes/user/{user_id}`: Reads user votes -- **Format**: JSONL with one vote per line -- **Sync**: Bidirectional between local cache and Hub - -### 3. Contents Dataset (`{HF_ORGANIZATION}/contents`) - -- **Operations**: - - 📥 `GET /api/leaderboard`: Reads raw data - - 📥 `GET /api/leaderboard/formatted`: Reads and formats data -- **Format**: Main dataset containing all scores and metrics -- **Updates**: Automatic after model evaluations - -### 4. Official Providers Dataset (`{HF_ORGANIZATION}/official-providers`) - -- **Operations**: - - 📥 Read-only access for highlighted models -- **Format**: List of models selected by maintainers -- **Updates**: Manual by maintainers - -## 🛠 Local Development - -### Prerequisites - -- Python 3.9+ -- [Poetry](https://python-poetry.org/docs/#installation) - -### Standalone Installation (without Docker) - -```bash -# Install dependencies -poetry install - -# Setup configuration -cp .env.example .env - -# Start development server -poetry run uvicorn app.asgi:app --host 0.0.0.0 --port 7860 --reload -``` - -Server will be available at http://localhost:7860 - -## ⚙️ Configuration - -| Variable | Description | Default | -| ------------ | ------------------------------------ | ----------- | -| ENVIRONMENT | Environment (development/production) | development | -| HF_TOKEN | HuggingFace authentication token | - | -| PORT | Server port | 7860 | -| LOG_LEVEL | Logging level (INFO/DEBUG/WARNING) | INFO | -| CORS_ORIGINS | Allowed CORS origins | ["*"] | -| CACHE_TTL | Cache Time To Live in seconds | 300 | - -## 🔧 Middleware - -The backend uses several middleware layers for optimal performance and security: - -- **CORS Middleware**: Handles Cross-Origin Resource Sharing -- **GZIP Middleware**: Compresses responses > 500 bytes -- **Rate Limiting**: Prevents API abuse -- **Caching**: In-memory caching with automatic invalidation - -## 📝 Logging - -The application uses a structured logging system with: - -- Formatted console output -- Different log levels per component -- Request/Response logging -- Performance metrics -- Error tracking - -## 📁 File Structure - -``` -backend/ -├── app/ # Source code -│ ├── api/ # Routes and endpoints -│ │ └── endpoints/ # Endpoint handlers -│ ├── core/ # Configurations -│ ├── services/ # Business logic -│ └── utils/ # Utilities -└── tests/ # Tests -``` - -## 📚 API - -Swagger documentation available at http://localhost:7860/docs - -### Main Endpoints & Data Structures - -#### Leaderboard - -- `GET /api/leaderboard/formatted` - Formatted data with computed fields and metadata - - ```typescript - 
Response { - models: [{ - id: string, // eval_name - model: { - name: string, // fullname - sha: string, // Model sha - precision: string, // e.g. "fp16", "int8" - type: string, // e.g. "fined-tuned-on-domain-specific-dataset" - weight_type: string, - architecture: string, - average_score: number, - has_chat_template: boolean - }, - evaluations: { - ifeval: { - name: "IFEval", - value: number, // Raw score - normalized_score: number - }, - bbh: { - name: "BBH", - value: number, - normalized_score: number - }, - math: { - name: "MATH Level 5", - value: number, - normalized_score: number - }, - gpqa: { - name: "GPQA", - value: number, - normalized_score: number - }, - musr: { - name: "MUSR", - value: number, - normalized_score: number - }, - mmlu_pro: { - name: "MMLU-PRO", - value: number, - normalized_score: number - } - }, - features: { - is_not_available_on_hub: boolean, - is_merged: boolean, - is_moe: boolean, - is_flagged: boolean, - is_official_provider: boolean - }, - metadata: { - upload_date: string, - submission_date: string, - generation: string, - base_model: string, - hub_license: string, - hub_hearts: number, - params_billions: number, - co2_cost: number // CO₂ cost in kg - } - }] - } - ``` - -- `GET /api/leaderboard` - Raw data from the HuggingFace dataset - ```typescript - Response { - models: [{ - eval_name: string, - Precision: string, - Type: string, - "Weight type": string, - Architecture: string, - Model: string, - fullname: string, - "Model sha": string, - "Average ⬆️": number, - "Hub License": string, - "Hub ❤️": number, - "#Params (B)": number, - "Available on the hub": boolean, - Merged: boolean, - MoE: boolean, - Flagged: boolean, - "Chat Template": boolean, - "CO₂ cost (kg)": number, - "IFEval Raw": number, - IFEval: number, - "BBH Raw": number, - BBH: number, - "MATH Lvl 5 Raw": number, - "MATH Lvl 5": number, - "GPQA Raw": number, - GPQA: number, - "MUSR Raw": number, - MUSR: number, - "MMLU-PRO Raw": number, - "MMLU-PRO": number, - "Maintainer's Highlight": boolean, - "Upload To Hub Date": string, - "Submission Date": string, - Generation: string, - "Base Model": string - }] - } - ``` - -#### Models - -- `GET /api/models/status` - Get all models grouped by status - ```typescript - Response { - pending: [{ - name: string, - submitter: string, - revision: string, - wait_time: string, - submission_time: string, - status: "PENDING" | "EVALUATING" | "FINISHED", - precision: string - }], - evaluating: Array, - finished: Array - } - ``` -- `GET /api/models/pending` - Get pending models only -- `POST /api/models/submit` - Submit model - - ```typescript - Request { - user_id: string, - model_id: string, - base_model?: string, - precision?: string, - model_type: string - } - - Response { - status: string, - message: string - } - ``` - -- `GET /api/models/{model_id}/status` - Get model status - -#### Votes - -- `POST /api/votes/{model_id}` - Vote - - ```typescript - Request { - vote_type: "up" | "down", - user_id: string // HuggingFace username - } - - Response { - success: boolean, - message: string - } - ``` - -- `GET /api/votes/model/{provider}/{model}` - Get model votes - ```typescript - Response { - total_votes: number, - up_votes: number, - down_votes: number - } - ``` -- `GET /api/votes/user/{user_id}` - Get user votes - ```typescript - Response Array<{ - model_id: string, - vote_type: string, - timestamp: string - }> - ``` - -## 🔒 Authentication - -The backend uses HuggingFace token-based authentication for secure API access. Make sure to: - -1. 
Set your HF_TOKEN in the .env file -2. Include the token in API requests via Bearer authentication -3. Keep your token secure and never commit it to version control - -## 🚀 Performance - -The backend implements several optimizations: - -- In-memory caching with configurable TTL (Time To Live) -- Batch processing for model evaluations -- Rate limiting for API endpoints -- Efficient database queries with proper indexing -- Automatic cache invalidation for votes diff --git a/backend/__init__.py b/backend/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/backend/app/api/__init__.py b/backend/app/api/__init__.py deleted file mode 100644 index 41bd81293794127ec484666c9a9bf3b2cd0bbe3c..0000000000000000000000000000000000000000 --- a/backend/app/api/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -""" -API package initialization -""" - -__all__ = ["endpoints"] diff --git a/backend/app/api/dependencies.py b/backend/app/api/dependencies.py deleted file mode 100644 index d9feaf42a38b8fa19b327989542659edfc635519..0000000000000000000000000000000000000000 --- a/backend/app/api/dependencies.py +++ /dev/null @@ -1,34 +0,0 @@ -from fastapi import Depends, HTTPException -import logging -from app.services.models import ModelService -from app.services.votes import VoteService -from app.core.formatting import LogFormatter - -logger = logging.getLogger(__name__) - -model_service = ModelService() -vote_service = VoteService() - -async def get_model_service() -> ModelService: - """Dependency to get ModelService instance""" - try: - logger.info(LogFormatter.info("Initializing model service dependency")) - await model_service.initialize() - logger.info(LogFormatter.success("Model service initialized")) - return model_service - except Exception as e: - error_msg = "Failed to initialize model service" - logger.error(LogFormatter.error(error_msg, e)) - raise HTTPException(status_code=500, detail=str(e)) - -async def get_vote_service() -> VoteService: - """Dependency to get VoteService instance""" - try: - logger.info(LogFormatter.info("Initializing vote service dependency")) - await vote_service.initialize() - logger.info(LogFormatter.success("Vote service initialized")) - return vote_service - except Exception as e: - error_msg = "Failed to initialize vote service" - logger.error(LogFormatter.error(error_msg, e)) - raise HTTPException(status_code=500, detail=str(e)) \ No newline at end of file diff --git a/backend/app/api/endpoints/leaderboard.py b/backend/app/api/endpoints/leaderboard.py deleted file mode 100644 index 261e7f7f4e7309eefb979e238025fbca4c7e44f8..0000000000000000000000000000000000000000 --- a/backend/app/api/endpoints/leaderboard.py +++ /dev/null @@ -1,49 +0,0 @@ -from fastapi import APIRouter -from typing import List, Dict, Any -from app.services.leaderboard import LeaderboardService -from app.core.fastapi_cache import cached, build_cache_key -import logging -from app.core.formatting import LogFormatter - -logger = logging.getLogger(__name__) -router = APIRouter() -leaderboard_service = LeaderboardService() - -def leaderboard_key_builder(func, namespace: str = "leaderboard", **kwargs): - """Build cache key for leaderboard data""" - key_type = "raw" if func.__name__ == "get_leaderboard" else "formatted" - key = build_cache_key(namespace, key_type) - logger.debug(LogFormatter.info(f"Built leaderboard cache key: {key}")) - return key - -@router.get("") -@cached(expire=300, key_builder=leaderboard_key_builder) -async def 
get_leaderboard() -> List[Dict[str, Any]]: - """ - Get raw leaderboard data - Response will be automatically GZIP compressed if size > 500 bytes - """ - try: - logger.info(LogFormatter.info("Fetching raw leaderboard data")) - data = await leaderboard_service.fetch_raw_data() - logger.info(LogFormatter.success(f"Retrieved {len(data)} leaderboard entries")) - return data - except Exception as e: - logger.error(LogFormatter.error("Failed to fetch raw leaderboard data", e)) - raise - -@router.get("/formatted") -@cached(expire=300, key_builder=leaderboard_key_builder) -async def get_formatted_leaderboard() -> List[Dict[str, Any]]: - """ - Get formatted leaderboard data with restructured objects - Response will be automatically GZIP compressed if size > 500 bytes - """ - try: - logger.info(LogFormatter.info("Fetching formatted leaderboard data")) - data = await leaderboard_service.get_formatted_data() - logger.info(LogFormatter.success(f"Retrieved {len(data)} formatted entries")) - return data - except Exception as e: - logger.error(LogFormatter.error("Failed to fetch formatted leaderboard data", e)) - raise \ No newline at end of file diff --git a/backend/app/api/endpoints/models.py b/backend/app/api/endpoints/models.py deleted file mode 100644 index 5455e9d697d641014d9e48220dcd72085fa14db5..0000000000000000000000000000000000000000 --- a/backend/app/api/endpoints/models.py +++ /dev/null @@ -1,103 +0,0 @@ -from fastapi import APIRouter, HTTPException, Depends -from typing import Dict, Any, List -import logging -from app.services.models import ModelService -from app.api.dependencies import get_model_service -from app.core.fastapi_cache import cached -from app.core.formatting import LogFormatter - -logger = logging.getLogger(__name__) -router = APIRouter(tags=["models"]) - -@router.get("/status") -@cached(expire=300) -async def get_models_status( - model_service: ModelService = Depends(get_model_service) -) -> Dict[str, List[Dict[str, Any]]]: - """Get all models grouped by status""" - try: - logger.info(LogFormatter.info("Fetching status for all models")) - result = await model_service.get_models() - stats = { - status: len(models) for status, models in result.items() - } - for line in LogFormatter.stats(stats, "Models by Status"): - logger.info(line) - return result - except Exception as e: - logger.error(LogFormatter.error("Failed to get models status", e)) - raise HTTPException(status_code=500, detail=str(e)) - -@router.get("/pending") -@cached(expire=60) -async def get_pending_models( - model_service: ModelService = Depends(get_model_service) -) -> List[Dict[str, Any]]: - """Get all models waiting for evaluation""" - try: - logger.info(LogFormatter.info("Fetching pending models")) - models = await model_service.get_models() - pending = models.get("pending", []) - logger.info(LogFormatter.success(f"Found {len(pending)} pending models")) - return pending - except Exception as e: - logger.error(LogFormatter.error("Failed to get pending models", e)) - raise HTTPException(status_code=500, detail=str(e)) - -@router.post("/submit") -async def submit_model( - model_data: Dict[str, Any], - model_service: ModelService = Depends(get_model_service) -) -> Dict[str, Any]: - try: - logger.info(LogFormatter.section("MODEL SUBMISSION")) - - user_id = model_data.pop('user_id', None) - if not user_id: - error_msg = "user_id is required" - logger.error(LogFormatter.error("Validation failed", error_msg)) - raise ValueError(error_msg) - - # Log submission details - submission_info = { - "Model_ID": 
model_data.get("model_id"), - "User": user_id, - "Base_Model": model_data.get("base_model"), - "Precision": model_data.get("precision"), - "Model_Type": model_data.get("model_type") - } - for line in LogFormatter.tree(submission_info, "Submission Details"): - logger.info(line) - - result = await model_service.submit_model(model_data, user_id) - logger.info(LogFormatter.success("Model submitted successfully")) - return result - - except ValueError as e: - logger.error(LogFormatter.error("Invalid submission data", e)) - raise HTTPException(status_code=400, detail=str(e)) - except Exception as e: - logger.error(LogFormatter.error("Submission failed", e)) - raise HTTPException(status_code=500, detail=str(e)) - -@router.get("/{model_id}/status") -async def get_model_status( - model_id: str, - model_service: ModelService = Depends(get_model_service) -) -> Dict[str, Any]: - try: - logger.info(LogFormatter.info(f"Checking status for model: {model_id}")) - status = await model_service.get_model_status(model_id) - - if status["status"] != "not_found": - logger.info(LogFormatter.success("Status found")) - for line in LogFormatter.tree(status, "Model Status"): - logger.info(line) - else: - logger.warning(LogFormatter.warning(f"No status found for model: {model_id}")) - - return status - - except Exception as e: - logger.error(LogFormatter.error("Failed to get model status", e)) - raise HTTPException(status_code=500, detail=str(e)) \ No newline at end of file diff --git a/backend/app/api/endpoints/votes.py b/backend/app/api/endpoints/votes.py deleted file mode 100644 index e3cd11a5eb2e8f0f4baa73a4d399c22f124ec82f..0000000000000000000000000000000000000000 --- a/backend/app/api/endpoints/votes.py +++ /dev/null @@ -1,105 +0,0 @@ -from fastapi import APIRouter, HTTPException, Query, Depends -from typing import Dict, Any, List -from app.services.votes import VoteService -from app.core.fastapi_cache import cached, build_cache_key, invalidate_cache_key -import logging -from app.core.formatting import LogFormatter - -logger = logging.getLogger(__name__) -router = APIRouter() -vote_service = VoteService() - -def model_votes_key_builder(func, namespace: str = "model_votes", **kwargs): - """Build cache key for model votes""" - provider = kwargs.get('provider') - model = kwargs.get('model') - key = build_cache_key(namespace, provider, model) - logger.debug(LogFormatter.info(f"Built model votes cache key: {key}")) - return key - -def user_votes_key_builder(func, namespace: str = "user_votes", **kwargs): - """Build cache key for user votes""" - user_id = kwargs.get('user_id') - key = build_cache_key(namespace, user_id) - logger.debug(LogFormatter.info(f"Built user votes cache key: {key}")) - return key - -@router.post("/{model_id:path}") -async def add_vote( - model_id: str, - vote_type: str = Query(..., description="Type of vote (up/down)"), - user_id: str = Query(..., description="HuggingFace username") -) -> Dict[str, Any]: - try: - logger.info(LogFormatter.section("ADDING VOTE")) - stats = { - "Model": model_id, - "User": user_id, - "Type": vote_type - } - for line in LogFormatter.tree(stats, "Vote Details"): - logger.info(line) - - await vote_service.initialize() - result = await vote_service.add_vote(model_id, user_id, vote_type) - - # Invalidate affected caches - try: - logger.info(LogFormatter.subsection("CACHE INVALIDATION")) - provider, model = model_id.split('/', 1) - - # Build and invalidate cache keys - model_cache_key = build_cache_key("model_votes", provider, model) - user_cache_key = 
build_cache_key("user_votes", user_id) - - invalidate_cache_key(model_cache_key) - invalidate_cache_key(user_cache_key) - - cache_stats = { - "Model_Cache": model_cache_key, - "User_Cache": user_cache_key - } - for line in LogFormatter.tree(cache_stats, "Invalidated Caches"): - logger.info(line) - - except Exception as e: - logger.error(LogFormatter.error("Failed to invalidate cache", e)) - - return result - except Exception as e: - logger.error(LogFormatter.error("Failed to add vote", e)) - raise HTTPException(status_code=400, detail=str(e)) - -@router.get("/model/{provider}/{model}") -@cached(expire=60, key_builder=model_votes_key_builder) -async def get_model_votes( - provider: str, - model: str -) -> Dict[str, Any]: - """Get all votes for a specific model""" - try: - logger.info(LogFormatter.info(f"Fetching votes for model: {provider}/{model}")) - await vote_service.initialize() - model_id = f"{provider}/{model}" - result = await vote_service.get_model_votes(model_id) - logger.info(LogFormatter.success(f"Found {result.get('total_votes', 0)} votes")) - return result - except Exception as e: - logger.error(LogFormatter.error("Failed to get model votes", e)) - raise HTTPException(status_code=400, detail=str(e)) - -@router.get("/user/{user_id}") -@cached(expire=60, key_builder=user_votes_key_builder) -async def get_user_votes( - user_id: str -) -> List[Dict[str, Any]]: - """Get all votes from a specific user""" - try: - logger.info(LogFormatter.info(f"Fetching votes for user: {user_id}")) - await vote_service.initialize() - votes = await vote_service.get_user_votes(user_id) - logger.info(LogFormatter.success(f"Found {len(votes)} votes")) - return votes - except Exception as e: - logger.error(LogFormatter.error("Failed to get user votes", e)) - raise HTTPException(status_code=400, detail=str(e)) \ No newline at end of file diff --git a/backend/app/api/router.py b/backend/app/api/router.py deleted file mode 100644 index a2c952105c729b92abc72d59ae5882ee4394c017..0000000000000000000000000000000000000000 --- a/backend/app/api/router.py +++ /dev/null @@ -1,9 +0,0 @@ -from fastapi import APIRouter - -from app.api.endpoints import leaderboard, votes, models - -router = APIRouter() - -router.include_router(leaderboard.router, prefix="/leaderboard", tags=["leaderboard"]) -router.include_router(votes.router, prefix="/votes", tags=["votes"]) -router.include_router(models.router, prefix="/models", tags=["models"]) \ No newline at end of file diff --git a/backend/app/asgi.py b/backend/app/asgi.py deleted file mode 100644 index 4972047f0588791a59cf20ef2fa280e9ca98d38a..0000000000000000000000000000000000000000 --- a/backend/app/asgi.py +++ /dev/null @@ -1,106 +0,0 @@ -""" -ASGI entry point for the Open LLM Leaderboard API. 
-""" -import os -import uvicorn -import logging -import logging.config -from fastapi import FastAPI -from fastapi.middleware.cors import CORSMiddleware -from fastapi.middleware.gzip import GZipMiddleware -import sys - -from app.api.router import router -from app.core.fastapi_cache import setup_cache -from app.core.formatting import LogFormatter -from app.config import hf_config - -# Configure logging before anything else -LOGGING_CONFIG = { - "version": 1, - "disable_existing_loggers": True, - "formatters": { - "default": { - "format": "%(name)s - %(levelname)s - %(message)s", - } - }, - "handlers": { - "default": { - "formatter": "default", - "class": "logging.StreamHandler", - "stream": "ext://sys.stdout", - } - }, - "loggers": { - "uvicorn": { - "handlers": ["default"], - "level": "WARNING", - "propagate": False, - }, - "uvicorn.error": { - "level": "WARNING", - "handlers": ["default"], - "propagate": False, - }, - "uvicorn.access": { - "handlers": ["default"], - "level": "WARNING", - "propagate": False, - }, - "app": { - "handlers": ["default"], - "level": "WARNING", - "propagate": False, - } - }, - "root": { - "handlers": ["default"], - "level": "WARNING", - } -} - -# Apply logging configuration -logging.config.dictConfig(LOGGING_CONFIG) -logger = logging.getLogger("app") - -# Create FastAPI application -app = FastAPI( - title="Open LLM Leaderboard", - version="1.0.0", - docs_url="/docs", -) - -# Add CORS middleware -app.add_middleware( - CORSMiddleware, - allow_origins=["*"], - allow_credentials=True, - allow_methods=["*"], - allow_headers=["*"], -) - -# Add GZIP compression -app.add_middleware(GZipMiddleware, minimum_size=500) - -# Include API router -app.include_router(router, prefix="/api") - -@app.on_event("startup") -async def startup_event(): - """Initialize services on startup""" - logger.info("\n") - logger.info(LogFormatter.section("APPLICATION STARTUP")) - - # Log HF configuration - logger.info(LogFormatter.section("HUGGING FACE CONFIGURATION")) - logger.info(LogFormatter.info(f"Organization: {hf_config.HF_ORGANIZATION}")) - logger.info(LogFormatter.info(f"Token Status: {'Present' if hf_config.HF_TOKEN else 'Missing'}")) - logger.info(LogFormatter.info(f"Using repositories:")) - logger.info(LogFormatter.info(f" - Queue: {hf_config.QUEUE_REPO}")) - logger.info(LogFormatter.info(f" - Aggregated: {hf_config.AGGREGATED_REPO}")) - logger.info(LogFormatter.info(f" - Votes: {hf_config.VOTES_REPO}")) - logger.info(LogFormatter.info(f" - Official Providers: {hf_config.OFFICIAL_PROVIDERS_REPO}")) - - # Setup cache - setup_cache() - logger.info(LogFormatter.success("FastAPI Cache initialized with in-memory backend")) \ No newline at end of file diff --git a/backend/app/config/__init__.py b/backend/app/config/__init__.py deleted file mode 100644 index 9a8cea98b9ddb1daaf3c9e8e5d2c9be1fc94657e..0000000000000000000000000000000000000000 --- a/backend/app/config/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -""" -Configuration module for the Open LLM Leaderboard backend. -All configuration values are imported from base.py to avoid circular dependencies. 
-""" - -from .base import * diff --git a/backend/app/config/base.py b/backend/app/config/base.py deleted file mode 100644 index 89a7e65b155fe2d781bc6178fdf2ecea163554b5..0000000000000000000000000000000000000000 --- a/backend/app/config/base.py +++ /dev/null @@ -1,38 +0,0 @@ -import os -from pathlib import Path - -# Server configuration -HOST = "0.0.0.0" -PORT = 7860 -WORKERS = 4 -RELOAD = True if os.environ.get("ENVIRONMENT") == "development" else False - -# CORS configuration -ORIGINS = ["http://localhost:3000"] if os.getenv("ENVIRONMENT") == "development" else ["*"] - -# Cache configuration -CACHE_TTL = int(os.environ.get("CACHE_TTL", 300)) # 5 minutes default - -# Rate limiting -RATE_LIMIT_PERIOD = 7 # days -RATE_LIMIT_QUOTA = 5 -HAS_HIGHER_RATE_LIMIT = [] - -# HuggingFace configuration -HF_TOKEN = os.environ.get("HF_TOKEN") -HF_ORGANIZATION = "open-llm-leaderboard" -API = { - "INFERENCE": "https://api-inference.huggingface.co/models", - "HUB": "https://huggingface.co" -} - -# Cache paths -CACHE_ROOT = Path(os.environ.get("HF_HOME", ".cache")) -DATASETS_CACHE = CACHE_ROOT / "datasets" -MODELS_CACHE = CACHE_ROOT / "models" -VOTES_CACHE = CACHE_ROOT / "votes" -EVAL_CACHE = CACHE_ROOT / "eval-queue" - -# Repository configuration -QUEUE_REPO = f"{HF_ORGANIZATION}/requests" -EVAL_REQUESTS_PATH = EVAL_CACHE / "eval_requests.jsonl" \ No newline at end of file diff --git a/backend/app/config/hf_config.py b/backend/app/config/hf_config.py deleted file mode 100644 index f3c1c6ee93de45159127c4f861dc537bff63917b..0000000000000000000000000000000000000000 --- a/backend/app/config/hf_config.py +++ /dev/null @@ -1,30 +0,0 @@ -import os -import logging -from typing import Optional -from huggingface_hub import HfApi -from pathlib import Path -from app.core.cache import cache_config - -logger = logging.getLogger(__name__) - -# Organization or user who owns the datasets -HF_ORGANIZATION = "open-llm-leaderboard" - -# Get HF token directly from environment -HF_TOKEN = os.environ.get("HF_TOKEN") -if not HF_TOKEN: - logger.warning("HF_TOKEN not found in environment variables. Some features may be limited.") - -# Initialize HF API -API = HfApi(token=HF_TOKEN) - -# Repository configuration -QUEUE_REPO = f"{HF_ORGANIZATION}/requests" -AGGREGATED_REPO = f"{HF_ORGANIZATION}/contents" -VOTES_REPO = f"{HF_ORGANIZATION}/votes" -OFFICIAL_PROVIDERS_REPO = f"{HF_ORGANIZATION}/official-providers" - -# File paths from cache config -VOTES_PATH = cache_config.votes_file -EVAL_REQUESTS_PATH = cache_config.eval_requests_file -MODEL_CACHE_DIR = cache_config.models_cache \ No newline at end of file diff --git a/backend/app/config/logging_config.py b/backend/app/config/logging_config.py deleted file mode 100644 index 96be6f6749cdd79defb975141d857ff216aac420..0000000000000000000000000000000000000000 --- a/backend/app/config/logging_config.py +++ /dev/null @@ -1,38 +0,0 @@ -import logging -import sys -from tqdm import tqdm - -def get_tqdm_handler(): - """ - Creates a special handler for tqdm that doesn't interfere with other logs. - """ - class TqdmLoggingHandler(logging.Handler): - def emit(self, record): - try: - msg = self.format(record) - tqdm.write(msg) - self.flush() - except Exception: - self.handleError(record) - - return TqdmLoggingHandler() - -def setup_service_logger(service_name: str) -> logging.Logger: - """ - Configure a specific logger for a given service. 
- """ - logger = logging.getLogger(f"app.services.{service_name}") - - # If the logger already has handlers, don't reconfigure it - if logger.handlers: - return logger - - # Add tqdm handler for this service - tqdm_handler = get_tqdm_handler() - tqdm_handler.setFormatter(logging.Formatter('%(name)s - %(levelname)s - %(message)s')) - logger.addHandler(tqdm_handler) - - # Don't propagate logs to parent loggers - logger.propagate = False - - return logger \ No newline at end of file diff --git a/backend/app/core/cache.py b/backend/app/core/cache.py deleted file mode 100644 index 070f81bc1bb2932109f69b9dc67332b18a1a5fbb..0000000000000000000000000000000000000000 --- a/backend/app/core/cache.py +++ /dev/null @@ -1,109 +0,0 @@ -import os -import shutil -from pathlib import Path -from datetime import timedelta -import logging -from app.core.formatting import LogFormatter -from app.config.base import ( - CACHE_ROOT, - DATASETS_CACHE, - MODELS_CACHE, - VOTES_CACHE, - EVAL_CACHE, - CACHE_TTL -) - -logger = logging.getLogger(__name__) - -class CacheConfig: - def __init__(self): - # Get cache paths from config - self.cache_root = CACHE_ROOT - self.datasets_cache = DATASETS_CACHE - self.models_cache = MODELS_CACHE - self.votes_cache = VOTES_CACHE - self.eval_cache = EVAL_CACHE - - # Specific files - self.votes_file = self.votes_cache / "votes_data.jsonl" - self.eval_requests_file = self.eval_cache / "eval_requests.jsonl" - - # Cache TTL - self.cache_ttl = timedelta(seconds=CACHE_TTL) - - self._initialize_cache_dirs() - self._setup_environment() - - def _initialize_cache_dirs(self): - """Initialize all necessary cache directories""" - try: - logger.info(LogFormatter.section("CACHE INITIALIZATION")) - - cache_dirs = { - "Root": self.cache_root, - "Datasets": self.datasets_cache, - "Models": self.models_cache, - "Votes": self.votes_cache, - "Eval": self.eval_cache - } - - for name, cache_dir in cache_dirs.items(): - cache_dir.mkdir(parents=True, exist_ok=True) - logger.info(LogFormatter.success(f"{name} cache directory: {cache_dir}")) - - except Exception as e: - logger.error(LogFormatter.error("Failed to create cache directories", e)) - raise - - def _setup_environment(self): - """Configure HuggingFace environment variables""" - logger.info(LogFormatter.subsection("ENVIRONMENT SETUP")) - - env_vars = { - "HF_HOME": str(self.cache_root), - "HF_DATASETS_CACHE": str(self.datasets_cache) - } - - for var, value in env_vars.items(): - os.environ[var] = value - logger.info(LogFormatter.info(f"Set {var}={value}")) - - - def get_cache_path(self, cache_type: str) -> Path: - """Returns the path for a specific cache type""" - cache_paths = { - "datasets": self.datasets_cache, - "models": self.models_cache, - "votes": self.votes_cache, - "eval": self.eval_cache - } - return cache_paths.get(cache_type, self.cache_root) - - def flush_cache(self, cache_type: str = None): - """Flush specified cache or all caches if no type is specified""" - try: - if cache_type: - logger.info(LogFormatter.section(f"FLUSHING {cache_type.upper()} CACHE")) - cache_dir = self.get_cache_path(cache_type) - if cache_dir.exists(): - stats = { - "Cache_Type": cache_type, - "Directory": str(cache_dir) - } - for line in LogFormatter.tree(stats, "Cache Details"): - logger.info(line) - shutil.rmtree(cache_dir) - cache_dir.mkdir(parents=True, exist_ok=True) - logger.info(LogFormatter.success("Cache cleared successfully")) - else: - logger.info(LogFormatter.section("FLUSHING ALL CACHES")) - for cache_type in ["datasets", "models", "votes", "eval"]: - 
self.flush_cache(cache_type) - logger.info(LogFormatter.success("All caches cleared successfully")) - - except Exception as e: - logger.error(LogFormatter.error("Failed to flush cache", e)) - raise - -# Singleton instance of cache configuration -cache_config = CacheConfig() \ No newline at end of file diff --git a/backend/app/core/fastapi_cache.py b/backend/app/core/fastapi_cache.py deleted file mode 100644 index dd6a71296760cd28791106aa12a62ca1869455e8..0000000000000000000000000000000000000000 --- a/backend/app/core/fastapi_cache.py +++ /dev/null @@ -1,48 +0,0 @@ -from fastapi_cache import FastAPICache -from fastapi_cache.backends.inmemory import InMemoryBackend -from fastapi_cache.decorator import cache -from datetime import timedelta -from app.config import CACHE_TTL -import logging -from app.core.formatting import LogFormatter - -logger = logging.getLogger(__name__) - -def setup_cache(): - """Initialize FastAPI Cache with in-memory backend""" - FastAPICache.init( - backend=InMemoryBackend(), - prefix="fastapi-cache", - expire=CACHE_TTL - ) - logger.info(LogFormatter.success("FastAPI Cache initialized with in-memory backend")) - -def invalidate_cache_key(key: str): - """Invalidate a specific cache key""" - try: - backend = FastAPICache.get_backend() - if hasattr(backend, 'delete'): - backend.delete(key) - logger.info(LogFormatter.success(f"Cache invalidated for key: {key}")) - else: - logger.warning(LogFormatter.warning("Cache backend does not support deletion")) - except Exception as e: - logger.error(LogFormatter.error(f"Failed to invalidate cache key: {key}", e)) - -def build_cache_key(namespace: str, *args) -> str: - """Build a consistent cache key""" - key = f"fastapi-cache:{namespace}:{':'.join(str(arg) for arg in args)}" - logger.debug(LogFormatter.info(f"Built cache key: {key}")) - return key - -def cached(expire: int = CACHE_TTL, key_builder=None): - """Decorator for caching endpoint responses - - Args: - expire (int): Cache TTL in seconds - key_builder (callable, optional): Custom key builder function - """ - return cache( - expire=expire, - key_builder=key_builder - ) \ No newline at end of file diff --git a/backend/app/core/formatting.py b/backend/app/core/formatting.py deleted file mode 100644 index 0d5b0643019dcc0eaca92cd695c94aeda64cfc94..0000000000000000000000000000000000000000 --- a/backend/app/core/formatting.py +++ /dev/null @@ -1,104 +0,0 @@ -import logging -from typing import Dict, Any, List, Optional - -logger = logging.getLogger(__name__) - -class LogFormatter: - """Utility class for consistent log formatting across the application""" - - @staticmethod - def section(title: str) -> str: - """Create a section header""" - return f"\n{'='*20} {title.upper()} {'='*20}" - - @staticmethod - def subsection(title: str) -> str: - """Create a subsection header""" - return f"\n{'─'*20} {title} {'─'*20}" - - @staticmethod - def tree(items: Dict[str, Any], title: str = None) -> List[str]: - """Create a tree view of dictionary data""" - lines = [] - if title: - lines.append(f"📊 {title}:") - - # Get the maximum length for alignment - max_key_length = max(len(str(k)) for k in items.keys()) - - # Format each item - for i, (key, value) in enumerate(items.items()): - prefix = "└──" if i == len(items) - 1 else "├──" - if isinstance(value, (int, float)): - value = f"{value:,}" # Add thousand separators - lines.append(f"{prefix} {str(key):<{max_key_length}}: {value}") - - return lines - - @staticmethod - def stats(stats: Dict[str, int], title: str = None) -> List[str]: - """Format 
statistics with icons""" - lines = [] - if title: - lines.append(f"📊 {title}:") - - # Get the maximum length for alignment - max_key_length = max(len(str(k)) for k in stats.keys()) - - # Format each stat with an appropriate icon - icons = { - "total": "📌", - "success": "✅", - "error": "❌", - "pending": "⏳", - "processing": "⚙️", - "finished": "✨", - "evaluating": "🔄", - "downloads": "⬇️", - "files": "📁", - "cached": "💾", - "size": "📏", - "time": "⏱️", - "rate": "🚀" - } - - # Format each item - for i, (key, value) in enumerate(stats.items()): - prefix = "└──" if i == len(stats) - 1 else "├──" - icon = icons.get(key.lower().split('_')[0], "•") - if isinstance(value, (int, float)): - value = f"{value:,}" # Add thousand separators - lines.append(f"{prefix} {icon} {str(key):<{max_key_length}}: {value}") - - return lines - - @staticmethod - def progress_bar(current: int, total: int, width: int = 20) -> str: - """Create a progress bar""" - percentage = (current * 100) // total - filled = "█" * (percentage * width // 100) - empty = "░" * (width - len(filled)) - return f"{filled}{empty} {percentage:3d}%" - - @staticmethod - def error(message: str, error: Optional[Exception] = None) -> str: - """Format error message""" - error_msg = f"\n❌ Error: {message}" - if error: - error_msg += f"\n └── Details: {str(error)}" - return error_msg - - @staticmethod - def success(message: str) -> str: - """Format success message""" - return f"✅ {message}" - - @staticmethod - def warning(message: str) -> str: - """Format warning message""" - return f"⚠️ {message}" - - @staticmethod - def info(message: str) -> str: - """Format info message""" - return f"ℹ️ {message}" \ No newline at end of file diff --git a/backend/app/main.py b/backend/app/main.py deleted file mode 100644 index 86a00401700d1a97f9c7e3cd67509f51d7808c84..0000000000000000000000000000000000000000 --- a/backend/app/main.py +++ /dev/null @@ -1,18 +0,0 @@ -from fastapi import FastAPI -from app.config.logging_config import setup_logging -import logging - -# Initialize logging configuration -setup_logging() -logger = logging.getLogger(__name__) - -app = FastAPI(title="Open LLM Leaderboard API") - -@app.on_event("startup") -async def startup_event(): - logger.info("Starting up the application...") - -# Import and include routers after app initialization -from app.api import models, votes -app.include_router(models.router, prefix="/api", tags=["models"]) -app.include_router(votes.router, prefix="/api", tags=["votes"]) \ No newline at end of file diff --git a/backend/app/services/__init__.py b/backend/app/services/__init__.py deleted file mode 100644 index 399192f82143e7bf446fa183fa9e7779adab2bd7..0000000000000000000000000000000000000000 --- a/backend/app/services/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from . 
import hf_service, leaderboard, votes, models - -__all__ = ["hf_service", "leaderboard", "votes", "models"] diff --git a/backend/app/services/hf_service.py b/backend/app/services/hf_service.py deleted file mode 100644 index 5f8ff28aa9ad352e0994848a00c5cae2a6b1f6d7..0000000000000000000000000000000000000000 --- a/backend/app/services/hf_service.py +++ /dev/null @@ -1,50 +0,0 @@ -from typing import Optional -from huggingface_hub import HfApi -from app.config import HF_TOKEN, API -from app.core.cache import cache_config -from app.core.formatting import LogFormatter -import logging - -logger = logging.getLogger(__name__) - -class HuggingFaceService: - def __init__(self): - self.api = API - self.token = HF_TOKEN - self.cache_dir = cache_config.models_cache - - async def check_authentication(self) -> bool: - """Check if the HF token is valid""" - if not self.token: - return False - try: - logger.info(LogFormatter.info("Checking HF token validity...")) - self.api.get_token_permission() - logger.info(LogFormatter.success("HF token is valid")) - return True - except Exception as e: - logger.error(LogFormatter.error("HF token validation failed", e)) - return False - - async def get_user_info(self) -> Optional[dict]: - """Get information about the authenticated user""" - try: - logger.info(LogFormatter.info("Fetching user information...")) - info = self.api.get_token_permission() - logger.info(LogFormatter.success(f"User info retrieved for: {info.get('user', 'Unknown')}")) - return info - except Exception as e: - logger.error(LogFormatter.error("Failed to get user info", e)) - return None - - def _log_repo_operation(self, operation: str, repo: str, details: str = None): - """Helper to log repository operations""" - logger.info(LogFormatter.section(f"HF REPOSITORY OPERATION - {operation.upper()}")) - stats = { - "Operation": operation, - "Repository": repo, - } - if details: - stats["Details"] = details - for line in LogFormatter.tree(stats): - logger.info(line) \ No newline at end of file diff --git a/backend/app/services/leaderboard.py b/backend/app/services/leaderboard.py deleted file mode 100644 index a83172295b004945c90e0e679af974b61917ab39..0000000000000000000000000000000000000000 --- a/backend/app/services/leaderboard.py +++ /dev/null @@ -1,208 +0,0 @@ -from app.core.cache import cache_config -from datetime import datetime -from typing import List, Dict, Any -import datasets -from fastapi import HTTPException -import logging -from app.config.base import HF_ORGANIZATION -from app.core.formatting import LogFormatter - -logger = logging.getLogger(__name__) - -class LeaderboardService: - def __init__(self): - pass - - async def fetch_raw_data(self) -> List[Dict[str, Any]]: - """Fetch raw leaderboard data from HuggingFace dataset""" - try: - logger.info(LogFormatter.section("FETCHING LEADERBOARD DATA")) - logger.info(LogFormatter.info(f"Loading dataset from {HF_ORGANIZATION}/contents")) - - dataset = datasets.load_dataset( - f"{HF_ORGANIZATION}/contents", - cache_dir=cache_config.get_cache_path("datasets") - )["train"] - - df = dataset.to_pandas() - data = df.to_dict('records') - - stats = { - "Total_Entries": len(data), - "Dataset_Size": f"{df.memory_usage(deep=True).sum() / 1024 / 1024:.1f}MB" - } - for line in LogFormatter.stats(stats, "Dataset Statistics"): - logger.info(line) - - return data - - except Exception as e: - logger.error(LogFormatter.error("Failed to fetch leaderboard data", e)) - raise HTTPException(status_code=500, detail=str(e)) - - async def get_formatted_data(self) -> 
List[Dict[str, Any]]: - """Get formatted leaderboard data""" - try: - logger.info(LogFormatter.section("FORMATTING LEADERBOARD DATA")) - - raw_data = await self.fetch_raw_data() - formatted_data = [] - type_counts = {} - error_count = 0 - - # Initialize progress tracking - total_items = len(raw_data) - logger.info(LogFormatter.info(f"Processing {total_items:,} entries...")) - - for i, item in enumerate(raw_data, 1): - try: - formatted_item = await self.transform_data(item) - formatted_data.append(formatted_item) - - # Count model types - model_type = formatted_item["model"]["type"] - type_counts[model_type] = type_counts.get(model_type, 0) + 1 - - except Exception as e: - error_count += 1 - logger.error(LogFormatter.error(f"Failed to format entry {i}/{total_items}", e)) - continue - - # Log progress every 10% - if i % max(1, total_items // 10) == 0: - progress = (i / total_items) * 100 - logger.info(LogFormatter.info(f"Progress: {LogFormatter.progress_bar(i, total_items)}")) - - # Log final statistics - stats = { - "Total_Processed": total_items, - "Successful": len(formatted_data), - "Failed": error_count - } - logger.info(LogFormatter.section("PROCESSING SUMMARY")) - for line in LogFormatter.stats(stats, "Processing Statistics"): - logger.info(line) - - # Log model type distribution - type_stats = {f"Type_{k}": v for k, v in type_counts.items()} - logger.info(LogFormatter.subsection("MODEL TYPE DISTRIBUTION")) - for line in LogFormatter.stats(type_stats): - logger.info(line) - - return formatted_data - - except Exception as e: - logger.error(LogFormatter.error("Failed to format leaderboard data", e)) - raise HTTPException(status_code=500, detail=str(e)) - - async def transform_data(self, data: Dict[str, Any]) -> Dict[str, Any]: - """Transform raw data into the format expected by the frontend""" - try: - # Extract model name for logging - model_name = data.get("fullname", "Unknown") - logger.debug(LogFormatter.info(f"Transforming data for model: {model_name}")) - - # Create unique ID combining model name, precision, sha and chat template status - unique_id = f"{data.get('fullname', 'Unknown')}_{data.get('Precision', 'Unknown')}_{data.get('Model sha', 'Unknown')}_{str(data.get('Chat Template', False))}" - - evaluations = { - "ifeval": { - "name": "IFEval", - "value": data.get("IFEval Raw", 0), - "normalized_score": data.get("IFEval", 0) - }, - "bbh": { - "name": "BBH", - "value": data.get("BBH Raw", 0), - "normalized_score": data.get("BBH", 0) - }, - "math": { - "name": "MATH Level 5", - "value": data.get("MATH Lvl 5 Raw", 0), - "normalized_score": data.get("MATH Lvl 5", 0) - }, - "gpqa": { - "name": "GPQA", - "value": data.get("GPQA Raw", 0), - "normalized_score": data.get("GPQA", 0) - }, - "musr": { - "name": "MUSR", - "value": data.get("MUSR Raw", 0), - "normalized_score": data.get("MUSR", 0) - }, - "mmlu_pro": { - "name": "MMLU-PRO", - "value": data.get("MMLU-PRO Raw", 0), - "normalized_score": data.get("MMLU-PRO", 0) - } - } - - features = { - "is_not_available_on_hub": data.get("Available on the hub", False), - "is_merged": data.get("Merged", False), - "is_moe": data.get("MoE", False), - "is_flagged": data.get("Flagged", False), - "is_official_provider": data.get("Official Providers", False) - } - - metadata = { - "upload_date": data.get("Upload To Hub Date"), - "submission_date": data.get("Submission Date"), - "generation": data.get("Generation"), - "base_model": data.get("Base Model"), - "hub_license": data.get("Hub License"), - "hub_hearts": data.get("Hub ❤️"), - 
"params_billions": data.get("#Params (B)"), - "co2_cost": data.get("CO₂ cost (kg)", 0) - } - - # Clean model type by removing emojis if present - original_type = data.get("Type", "") - model_type = original_type.lower().strip() - - # Remove emojis and parentheses - if "(" in model_type: - model_type = model_type.split("(")[0].strip() - model_type = ''.join(c for c in model_type if not c in '🔶🟢🟩💬🤝🌸 ') - - # Map old model types to new ones - model_type_mapping = { - "fine-tuned": "fined-tuned-on-domain-specific-dataset", - "fine tuned": "fined-tuned-on-domain-specific-dataset", - "finetuned": "fined-tuned-on-domain-specific-dataset", - "fine_tuned": "fined-tuned-on-domain-specific-dataset", - "ft": "fined-tuned-on-domain-specific-dataset", - "finetuning": "fined-tuned-on-domain-specific-dataset", - "fine tuning": "fined-tuned-on-domain-specific-dataset", - "fine-tuning": "fined-tuned-on-domain-specific-dataset" - } - - mapped_type = model_type_mapping.get(model_type.lower().strip(), model_type) - - if mapped_type != model_type: - logger.debug(LogFormatter.info(f"Model type mapped: {original_type} -> {mapped_type}")) - - transformed_data = { - "id": unique_id, - "model": { - "name": data.get("fullname"), - "sha": data.get("Model sha"), - "precision": data.get("Precision"), - "type": mapped_type, - "weight_type": data.get("Weight type"), - "architecture": data.get("Architecture"), - "average_score": data.get("Average ⬆️"), - "has_chat_template": data.get("Chat Template", False) - }, - "evaluations": evaluations, - "features": features, - "metadata": metadata - } - - logger.debug(LogFormatter.success(f"Successfully transformed data for {model_name}")) - return transformed_data - - except Exception as e: - logger.error(LogFormatter.error(f"Failed to transform data for {data.get('fullname', 'Unknown')}", e)) - raise \ No newline at end of file diff --git a/backend/app/services/models.py b/backend/app/services/models.py deleted file mode 100644 index 8d596cbaf02f2b87fb3e02aaa1aff0f719076d01..0000000000000000000000000000000000000000 --- a/backend/app/services/models.py +++ /dev/null @@ -1,587 +0,0 @@ -from datetime import datetime, timezone -from typing import Dict, Any, Optional, List -import json -import os -from pathlib import Path -import logging -import aiohttp -import asyncio -import time -from huggingface_hub import HfApi, CommitOperationAdd -from huggingface_hub.utils import build_hf_headers -from datasets import disable_progress_bar -import sys -import contextlib -from concurrent.futures import ThreadPoolExecutor -import tempfile - -from app.config import ( - QUEUE_REPO, - HF_TOKEN, - EVAL_REQUESTS_PATH -) -from app.config.hf_config import HF_ORGANIZATION -from app.services.hf_service import HuggingFaceService -from app.utils.model_validation import ModelValidator -from app.services.votes import VoteService -from app.core.cache import cache_config -from app.core.formatting import LogFormatter - -# Disable datasets progress bars globally -disable_progress_bar() - -logger = logging.getLogger(__name__) - -# Context manager to temporarily disable stdout and stderr -@contextlib.contextmanager -def suppress_output(): - stdout = sys.stdout - stderr = sys.stderr - devnull = open(os.devnull, 'w') - try: - sys.stdout = devnull - sys.stderr = devnull - yield - finally: - sys.stdout = stdout - sys.stderr = stderr - devnull.close() - -class ProgressTracker: - def __init__(self, total: int, desc: str = "Progress", update_frequency: int = 10): - self.total = total - self.current = 0 - self.desc = desc - 
self.start_time = time.time() - self.update_frequency = update_frequency # Percentage steps - self.last_update = -1 - - # Initial log with fancy formatting - logger.info(LogFormatter.section(desc)) - logger.info(LogFormatter.info(f"Starting processing of {total:,} items...")) - sys.stdout.flush() - - def update(self, n: int = 1): - self.current += n - current_percentage = (self.current * 100) // self.total - - # Only update on frequency steps (e.g., 0%, 10%, 20%, etc.) - if current_percentage >= self.last_update + self.update_frequency or current_percentage == 100: - elapsed = time.time() - self.start_time - rate = self.current / elapsed if elapsed > 0 else 0 - remaining = (self.total - self.current) / rate if rate > 0 else 0 - - # Create progress stats - stats = { - "Progress": LogFormatter.progress_bar(self.current, self.total), - "Items": f"{self.current:,}/{self.total:,}", - "Time": f"⏱️ {elapsed:.1f}s elapsed, {remaining:.1f}s remaining", - "Rate": f"🚀 {rate:.1f} items/s" - } - - # Log progress using tree format - for line in LogFormatter.tree(stats): - logger.info(line) - sys.stdout.flush() - - self.last_update = (current_percentage // self.update_frequency) * self.update_frequency - - def close(self): - elapsed = time.time() - self.start_time - rate = self.total / elapsed if elapsed > 0 else 0 - - # Final summary with fancy formatting - logger.info(LogFormatter.section("COMPLETED")) - stats = { - "Total": f"{self.total:,} items", - "Time": f"{elapsed:.1f}s", - "Rate": f"{rate:.1f} items/s" - } - for line in LogFormatter.stats(stats): - logger.info(line) - logger.info("="*50) - sys.stdout.flush() - -class ModelService(HuggingFaceService): - _instance: Optional['ModelService'] = None - _initialized = False - - def __new__(cls): - if cls._instance is None: - logger.info(LogFormatter.info("Creating new ModelService instance")) - cls._instance = super(ModelService, cls).__new__(cls) - return cls._instance - - def __init__(self): - if not hasattr(self, '_init_done'): - logger.info(LogFormatter.section("MODEL SERVICE INITIALIZATION")) - super().__init__() - self.validator = ModelValidator() - self.vote_service = VoteService() - self.eval_requests_path = cache_config.eval_requests_file - logger.info(LogFormatter.info(f"Using eval requests path: {self.eval_requests_path}")) - - self.eval_requests_path.parent.mkdir(parents=True, exist_ok=True) - self.hf_api = HfApi(token=HF_TOKEN) - self.cached_models = None - self.last_cache_update = 0 - self.cache_ttl = cache_config.cache_ttl.total_seconds() - self._init_done = True - logger.info(LogFormatter.success("Initialization complete")) - - async def _download_and_process_file(self, file: str, session: aiohttp.ClientSession, progress: ProgressTracker) -> Optional[Dict]: - """Download and process a file asynchronously""" - try: - # Build file URL - url = f"https://huggingface.co/datasets/{QUEUE_REPO}/resolve/main/{file}" - headers = build_hf_headers(token=self.token) - - # Download file - async with session.get(url, headers=headers) as response: - if response.status != 200: - logger.error(LogFormatter.error(f"Failed to download {file}", f"HTTP {response.status}")) - progress.update() - return None - - try: - # First read content as text - text_content = await response.text() - # Then parse JSON - content = json.loads(text_content) - except json.JSONDecodeError as e: - logger.error(LogFormatter.error(f"Failed to decode JSON from {file}", e)) - progress.update() - return None - - # Get status and determine target status - status = content.get("status", 
"PENDING").upper() - target_status = None - status_map = { - "PENDING": ["PENDING"], - "EVALUATING": ["RUNNING"], - "FINISHED": ["FINISHED"] - } - - for target, source_statuses in status_map.items(): - if status in source_statuses: - target_status = target - break - - if not target_status: - progress.update() - return None - - # Calculate wait time - try: - submit_time = datetime.fromisoformat(content["submitted_time"].replace("Z", "+00:00")) - if submit_time.tzinfo is None: - submit_time = submit_time.replace(tzinfo=timezone.utc) - current_time = datetime.now(timezone.utc) - wait_time = current_time - submit_time - - model_info = { - "name": content["model"], - "submitter": content.get("sender", "Unknown"), - "revision": content["revision"], - "wait_time": f"{wait_time.total_seconds():.1f}s", - "submission_time": content["submitted_time"], - "status": target_status, - "precision": content.get("precision", "Unknown") - } - - progress.update() - return model_info - - except (ValueError, TypeError) as e: - logger.error(LogFormatter.error(f"Failed to process {file}", e)) - progress.update() - return None - - except Exception as e: - logger.error(LogFormatter.error(f"Failed to load {file}", e)) - progress.update() - return None - - async def _refresh_models_cache(self): - """Refresh the models cache""" - try: - logger.info(LogFormatter.section("CACHE REFRESH")) - self._log_repo_operation("read", f"{HF_ORGANIZATION}/requests", "Refreshing models cache") - - # Initialize models dictionary - models = { - "finished": [], - "evaluating": [], - "pending": [] - } - - try: - logger.info(LogFormatter.subsection("DATASET LOADING")) - logger.info(LogFormatter.info("Loading dataset files...")) - - # List files in repository - with suppress_output(): - files = self.hf_api.list_repo_files( - repo_id=QUEUE_REPO, - repo_type="dataset", - token=self.token - ) - - # Filter JSON files - json_files = [f for f in files if f.endswith('.json')] - total_files = len(json_files) - - # Log repository stats - stats = { - "Total_Files": len(files), - "JSON_Files": total_files, - } - for line in LogFormatter.stats(stats, "Repository Statistics"): - logger.info(line) - - if not json_files: - raise Exception("No JSON files found in repository") - - # Initialize progress tracker - progress = ProgressTracker(total_files, "PROCESSING FILES") - - try: - # Create aiohttp session to reuse connections - async with aiohttp.ClientSession() as session: - # Process files in chunks - chunk_size = 50 - - for i in range(0, len(json_files), chunk_size): - chunk = json_files[i:i + chunk_size] - chunk_tasks = [ - self._download_and_process_file(file, session, progress) - for file in chunk - ] - results = await asyncio.gather(*chunk_tasks) - - # Process results - for result in results: - if result: - status = result.pop("status") - models[status.lower()].append(result) - - finally: - progress.close() - - # Final summary with fancy formatting - logger.info(LogFormatter.section("CACHE SUMMARY")) - stats = { - "Finished": len(models["finished"]), - "Evaluating": len(models["evaluating"]), - "Pending": len(models["pending"]) - } - for line in LogFormatter.stats(stats, "Models by Status"): - logger.info(line) - logger.info("="*50) - - except Exception as e: - logger.error(LogFormatter.error("Error processing files", e)) - raise - - # Update cache - self.cached_models = models - self.last_cache_update = time.time() - logger.info(LogFormatter.success("Cache updated successfully")) - - return models - - except Exception as e: - 
logger.error(LogFormatter.error("Cache refresh failed", e)) - raise - - async def initialize(self): - """Initialize the model service""" - if self._initialized: - logger.info(LogFormatter.info("Service already initialized, using cached data")) - return - - try: - logger.info(LogFormatter.section("MODEL SERVICE INITIALIZATION")) - - # Check if cache already exists - cache_path = cache_config.get_cache_path("datasets") - if not cache_path.exists() or not any(cache_path.iterdir()): - logger.info(LogFormatter.info("No existing cache found, initializing datasets cache...")) - cache_config.flush_cache("datasets") - else: - logger.info(LogFormatter.info("Using existing datasets cache")) - - # Ensure eval requests directory exists - self.eval_requests_path.parent.mkdir(parents=True, exist_ok=True) - logger.info(LogFormatter.info(f"Eval requests directory: {self.eval_requests_path}")) - - # List existing files - if self.eval_requests_path.exists(): - files = list(self.eval_requests_path.glob("**/*.json")) - stats = { - "Total_Files": len(files), - "Directory": str(self.eval_requests_path) - } - for line in LogFormatter.stats(stats, "Eval Requests"): - logger.info(line) - - # Load initial cache - await self._refresh_models_cache() - - self._initialized = True - logger.info(LogFormatter.success("Model service initialization complete")) - - except Exception as e: - logger.error(LogFormatter.error("Initialization failed", e)) - raise - - async def get_models(self) -> Dict[str, List[Dict[str, Any]]]: - """Get all models with their status""" - if not self._initialized: - logger.info(LogFormatter.info("Service not initialized, initializing now...")) - await self.initialize() - - current_time = time.time() - cache_age = current_time - self.last_cache_update - - # Check if cache needs refresh - if not self.cached_models: - logger.info(LogFormatter.info("No cached data available, refreshing cache...")) - return await self._refresh_models_cache() - elif cache_age > self.cache_ttl: - logger.info(LogFormatter.info(f"Cache expired ({cache_age:.1f}s old, TTL: {self.cache_ttl}s)")) - return await self._refresh_models_cache() - else: - logger.info(LogFormatter.info(f"Using cached data ({cache_age:.1f}s old)")) - return self.cached_models - - async def submit_model( - self, - model_data: Dict[str, Any], - user_id: str - ) -> Dict[str, Any]: - logger.info(LogFormatter.section("MODEL SUBMISSION")) - self._log_repo_operation("write", f"{HF_ORGANIZATION}/requests", f"Submitting model {model_data['model_id']} by {user_id}") - stats = { - "Model": model_data["model_id"], - "User": user_id, - "Revision": model_data["revision"], - "Precision": model_data["precision"], - "Type": model_data["model_type"] - } - for line in LogFormatter.tree(stats, "Submission Details"): - logger.info(line) - - # Validate required fields - required_fields = [ - "model_id", "base_model", "revision", "precision", - "weight_type", "model_type", "use_chat_template" - ] - for field in required_fields: - if field not in model_data: - raise ValueError(f"Missing required field: {field}") - - # Get model info and validate it exists on HuggingFace - try: - logger.info(LogFormatter.subsection("MODEL VALIDATION")) - - # Get the model info to check if it exists - model_info = self.hf_api.model_info( - model_data["model_id"], - revision=model_data["revision"], - token=self.token - ) - - if not model_info: - raise Exception(f"Model {model_data['model_id']} not found on HuggingFace Hub") - - logger.info(LogFormatter.success("Model exists on HuggingFace Hub")) 
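
Stepping back from the submission flow for a moment: the `get_models()` path above boils down to a small time-to-live policy, refresh when the cache is empty or older than `cache_ttl`, otherwise serve the cached dictionary. A self-contained sketch of that policy (the names here are illustrative, not part of the original module):

```python
import time
from typing import Any, Awaitable, Callable, Optional

class TTLCache:
    """Serve a cached value, rebuilding it when missing or stale."""

    def __init__(self, ttl_seconds: float, refresh: Callable[[], Awaitable[Any]]):
        self.ttl = ttl_seconds
        self.refresh = refresh              # coroutine that rebuilds the value
        self.value: Optional[Any] = None
        self.updated_at = 0.0

    async def get(self) -> Any:
        age = time.time() - self.updated_at
        # Same two conditions as get_models(): no data yet, or data past its TTL
        if self.value is None or age > self.ttl:
            self.value = await self.refresh()
            self.updated_at = time.time()
        return self.value
```
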
- - except Exception as e: - logger.error(LogFormatter.error("Model validation failed", e)) - raise - - # Update model revision with commit sha - model_data["revision"] = model_info.sha - - # Check if model already exists in the system - try: - logger.info(LogFormatter.subsection("CHECKING EXISTING SUBMISSIONS")) - existing_models = await self.get_models() - - # Call the official provider status check - is_valid, error_message = await self.validator.check_official_provider_status( - model_data["model_id"], - existing_models - ) - if not is_valid: - raise ValueError(error_message) - - # Check in all statuses (pending, evaluating, finished) - for status, models in existing_models.items(): - for model in models: - if model["name"] == model_data["model_id"] and model["revision"] == model_data["revision"]: - error_msg = f"Model {model_data['model_id']} revision {model_data['revision']} is already in the system with status: {status}" - logger.error(LogFormatter.error("Submission rejected", error_msg)) - raise ValueError(error_msg) - - logger.info(LogFormatter.success("No existing submission found")) - except ValueError: - raise - except Exception as e: - logger.error(LogFormatter.error("Failed to check existing submissions", e)) - raise - - # Check that model on hub and valid - valid, error, model_config = await self.validator.is_model_on_hub( - model_data["model_id"], - model_data["revision"], - test_tokenizer=True - ) - if not valid: - logger.error(LogFormatter.error("Model on hub validation failed", error)) - raise Exception(error) - logger.info(LogFormatter.success("Model on hub validation passed")) - - # Validate model card - valid, error, model_card = await self.validator.check_model_card( - model_data["model_id"] - ) - if not valid: - logger.error(LogFormatter.error("Model card validation failed", error)) - raise Exception(error) - logger.info(LogFormatter.success("Model card validation passed")) - - # Check size limits - model_size, error = await self.validator.get_model_size( - model_info, - model_data["precision"], - model_data["base_model"], - revision=model_data["revision"] - ) - if model_size is None: - logger.error(LogFormatter.error("Model size validation failed", error)) - raise Exception(error) - logger.info(LogFormatter.success(f"Model size validation passed: {model_size:.1f}B")) - - # Size limits based on precision - if model_data["precision"] in ["float16", "bfloat16"] and model_size > 100: - error_msg = f"Model too large for {model_data['precision']} (limit: 100B)" - logger.error(LogFormatter.error("Size limit exceeded", error_msg)) - raise Exception(error_msg) - - # Chat template validation if requested - if model_data["use_chat_template"]: - valid, error = await self.validator.check_chat_template( - model_data["model_id"], - model_data["revision"] - ) - if not valid: - logger.error(LogFormatter.error("Chat template validation failed", error)) - raise Exception(error) - logger.info(LogFormatter.success("Chat template validation passed")) - - - architectures = model_info.config.get("architectures", "") - if architectures: - architectures = ";".join(architectures) - - # Create eval entry - eval_entry = { - "model": model_data["model_id"], - "base_model": model_data["base_model"], - "revision": model_info.sha, - "precision": model_data["precision"], - "params": model_size, - "architectures": architectures, - "weight_type": model_data["weight_type"], - "status": "PENDING", - "submitted_time": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ"), - "model_type": 
model_data["model_type"], - "job_id": -1, - "job_start_time": None, - "use_chat_template": model_data["use_chat_template"], - "sender": user_id - } - - logger.info(LogFormatter.subsection("EVALUATION ENTRY")) - for line in LogFormatter.tree(eval_entry): - logger.info(line) - - # Upload to HF dataset - try: - logger.info(LogFormatter.subsection("UPLOADING TO HUGGINGFACE")) - logger.info(LogFormatter.info(f"Uploading to {HF_ORGANIZATION}/requests...")) - - # Construct the path in the dataset - org_or_user = model_data["model_id"].split("/")[0] if "/" in model_data["model_id"] else "" - model_path = model_data["model_id"].split("/")[-1] - relative_path = f"{org_or_user}/{model_path}_eval_request_False_{model_data['precision']}_{model_data['weight_type']}.json" - - # Create a temporary file with the request - with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as temp_file: - json.dump(eval_entry, temp_file, indent=2) - temp_file.flush() - temp_path = temp_file.name - - # Upload file directly - self.hf_api.upload_file( - path_or_fileobj=temp_path, - path_in_repo=relative_path, - repo_id=f"{HF_ORGANIZATION}/requests", - repo_type="dataset", - commit_message=f"Add {model_data['model_id']} to eval queue", - token=self.token - ) - - # Clean up temp file - os.unlink(temp_path) - - logger.info(LogFormatter.success("Upload successful")) - - except Exception as e: - logger.error(LogFormatter.error("Upload failed", e)) - raise - - # Add automatic vote - try: - logger.info(LogFormatter.subsection("AUTOMATIC VOTE")) - logger.info(LogFormatter.info(f"Adding upvote for {model_data['model_id']} by {user_id}")) - await self.vote_service.add_vote( - model_data["model_id"], - user_id, - "up" - ) - logger.info(LogFormatter.success("Vote recorded successfully")) - except Exception as e: - logger.error(LogFormatter.error("Failed to record vote", e)) - # Don't raise here as the main submission was successful - - return { - "status": "success", - "message": "The model was submitted successfully, and the vote has been recorded" - } - - async def get_model_status(self, model_id: str) -> Dict[str, Any]: - """Get evaluation status of a model""" - logger.info(LogFormatter.info(f"Checking status for model: {model_id}")) - eval_path = self.eval_requests_path - - for user_folder in eval_path.iterdir(): - if user_folder.is_dir(): - for file in user_folder.glob("*.json"): - with open(file, "r") as f: - data = json.load(f) - if data["model"] == model_id: - status = { - "status": data["status"], - "submitted_time": data["submitted_time"], - "job_id": data.get("job_id", -1) - } - logger.info(LogFormatter.success("Status found")) - for line in LogFormatter.tree(status, "Model Status"): - logger.info(line) - return status - - logger.warning(LogFormatter.warning(f"No status found for model: {model_id}")) - return {"status": "not_found"} \ No newline at end of file diff --git a/backend/app/services/rate_limiter.py b/backend/app/services/rate_limiter.py deleted file mode 100644 index 988c68e2f7d7f3847d6691c70f55975648aa3c8f..0000000000000000000000000000000000000000 --- a/backend/app/services/rate_limiter.py +++ /dev/null @@ -1,72 +0,0 @@ -""" -import logging -from datetime import datetime, timedelta, timezone -from typing import Tuple, Dict, List - -logger = logging.getLogger(__name__) - -class RateLimiter: - def __init__(self, period_days: int = 7, quota: int = 5): - self.period_days = period_days - self.quota = quota - self.submission_history: Dict[str, List[datetime]] = {} - self.higher_quota_users = set() # 
Users with higher quotas - self.unlimited_users = set() # Users with no quota limits - - def add_unlimited_user(self, user_id: str): - """Add a user to the unlimited users list""" - self.unlimited_users.add(user_id) - - def add_higher_quota_user(self, user_id: str): - """Add a user to the higher quota users list""" - self.higher_quota_users.add(user_id) - - def record_submission(self, user_id: str): - """Record a new submission for a user""" - current_time = datetime.now(timezone.utc) - if user_id not in self.submission_history: - self.submission_history[user_id] = [] - self.submission_history[user_id].append(current_time) - - def clean_old_submissions(self, user_id: str): - """Remove submissions older than the period""" - if user_id not in self.submission_history: - return - - current_time = datetime.now(timezone.utc) - cutoff_time = current_time - timedelta(days=self.period_days) - - self.submission_history[user_id] = [ - time for time in self.submission_history[user_id] - if time > cutoff_time - ] - - async def check_rate_limit(self, user_id: str) -> Tuple[bool, str]: - """Check if a user has exceeded their rate limit - - Returns: - Tuple[bool, str]: (is_allowed, error_message) - """ - # Unlimited users bypass all checks - if user_id in self.unlimited_users: - return True, "" - - # Clean old submissions - self.clean_old_submissions(user_id) - - # Get current submission count - submission_count = len(self.submission_history.get(user_id, [])) - - # Calculate user's quota - user_quota = self.quota * 2 if user_id in self.higher_quota_users else self.quota - - # Check if user has exceeded their quota - if submission_count >= user_quota: - error_msg = ( - f"User '{user_id}' has reached the limit of {user_quota} submissions " - f"in the last {self.period_days} days. Please wait before submitting again." 
- ) - return False, error_msg - - return True, "" -""" \ No newline at end of file diff --git a/backend/app/services/votes.py b/backend/app/services/votes.py deleted file mode 100644 index 841499f2a3cdcd93b5f541833c4c65c1af8520c0..0000000000000000000000000000000000000000 --- a/backend/app/services/votes.py +++ /dev/null @@ -1,390 +0,0 @@ -from datetime import datetime, timezone -from typing import Dict, Any, List, Set, Tuple, Optional -import json -import logging -import asyncio -from pathlib import Path -import aiohttp -from huggingface_hub import HfApi -import datasets - -from app.services.hf_service import HuggingFaceService -from app.config import HF_TOKEN -from app.config.hf_config import HF_ORGANIZATION -from app.core.cache import cache_config -from app.core.formatting import LogFormatter - -logger = logging.getLogger(__name__) - -class VoteService(HuggingFaceService): - _instance: Optional['VoteService'] = None - _initialized = False - - def __new__(cls): - if cls._instance is None: - cls._instance = super(VoteService, cls).__new__(cls) - return cls._instance - - def __init__(self): - if not hasattr(self, '_init_done'): - super().__init__() - self.votes_file = cache_config.votes_file - self.votes_to_upload: List[Dict[str, Any]] = [] - self.vote_check_set: Set[Tuple[str, str, str]] = set() - self._votes_by_model: Dict[str, List[Dict[str, Any]]] = {} - self._votes_by_user: Dict[str, List[Dict[str, Any]]] = {} - self._upload_lock = asyncio.Lock() - self._last_sync = None - self._sync_interval = 300 # 5 minutes - self._total_votes = 0 - self._last_vote_timestamp = None - self._max_retries = 3 - self._retry_delay = 1 # seconds - self._upload_batch_size = 10 - self.hf_api = HfApi(token=HF_TOKEN) - self._init_done = True - - async def initialize(self): - """Initialize the vote service""" - if self._initialized: - await self._check_for_new_votes() - return - - try: - logger.info(LogFormatter.section("VOTE SERVICE INITIALIZATION")) - - # Ensure votes directory exists - self.votes_file.parent.mkdir(parents=True, exist_ok=True) - - # Load existing votes if file exists - local_vote_count = 0 - if self.votes_file.exists(): - logger.info(LogFormatter.info(f"Loading votes from {self.votes_file}")) - local_vote_count = await self._count_local_votes() - logger.info(LogFormatter.info(f"Found {local_vote_count:,} local votes")) - - # Check remote votes count - remote_vote_count = await self._count_remote_votes() - logger.info(LogFormatter.info(f"Found {remote_vote_count:,} remote votes")) - - if remote_vote_count > local_vote_count: - logger.info(LogFormatter.info(f"Fetching {remote_vote_count - local_vote_count:,} new votes")) - await self._sync_with_hub() - elif remote_vote_count < local_vote_count: - logger.warning(LogFormatter.warning(f"Local votes ({local_vote_count:,}) > Remote votes ({remote_vote_count:,})")) - await self._load_existing_votes() - else: - logger.info(LogFormatter.success("Local and remote votes are in sync")) - if local_vote_count > 0: - await self._load_existing_votes() - else: - logger.info(LogFormatter.info("No votes found")) - - self._initialized = True - self._last_sync = datetime.now(timezone.utc) - - # Final summary - stats = { - "Total_Votes": self._total_votes, - "Last_Sync": self._last_sync.strftime("%Y-%m-%d %H:%M:%S UTC") - } - logger.info(LogFormatter.section("INITIALIZATION COMPLETE")) - for line in LogFormatter.stats(stats): - logger.info(line) - - except Exception as e: - logger.error(LogFormatter.error("Initialization failed", e)) - raise - - async def 
_count_local_votes(self) -> int: - """Count votes in local file""" - if not self.votes_file.exists(): - return 0 - - count = 0 - try: - with open(self.votes_file, 'r') as f: - for _ in f: - count += 1 - return count - except Exception as e: - logger.error(f"Error counting local votes: {str(e)}") - return 0 - - async def _count_remote_votes(self) -> int: - """Count votes in remote file""" - url = f"https://huggingface.co/datasets/{HF_ORGANIZATION}/votes/raw/main/votes_data.jsonl" - headers = {"Authorization": f"Bearer {HF_TOKEN}"} if HF_TOKEN else {} - - try: - async with aiohttp.ClientSession() as session: - async with session.get(url, headers=headers) as response: - if response.status == 200: - count = 0 - async for line in response.content: - if line.strip(): # Skip empty lines - count += 1 - return count - else: - logger.error(f"Failed to get remote votes: HTTP {response.status}") - return 0 - except Exception as e: - logger.error(f"Error counting remote votes: {str(e)}") - return 0 - - async def _sync_with_hub(self): - """Sync votes with HuggingFace hub using datasets""" - try: - logger.info(LogFormatter.section("VOTE SYNC")) - self._log_repo_operation("sync", f"{HF_ORGANIZATION}/votes", "Syncing local votes with HF hub") - logger.info(LogFormatter.info("Syncing with HuggingFace hub...")) - - # Load votes from HF dataset - dataset = datasets.load_dataset( - f"{HF_ORGANIZATION}/votes", - split="train", - cache_dir=cache_config.get_cache_path("datasets") - ) - - remote_votes = len(dataset) - logger.info(LogFormatter.info(f"Dataset loaded with {remote_votes:,} votes")) - - # Convert to list of dictionaries - df = dataset.to_pandas() - if 'timestamp' in df.columns: - df['timestamp'] = df['timestamp'].dt.strftime('%Y-%m-%dT%H:%M:%SZ') - remote_votes = df.to_dict('records') - - # If we have more remote votes than local - if len(remote_votes) > self._total_votes: - new_votes = len(remote_votes) - self._total_votes - logger.info(LogFormatter.info(f"Processing {new_votes:,} new votes...")) - - # Save votes to local file - with open(self.votes_file, 'w') as f: - for vote in remote_votes: - f.write(json.dumps(vote) + '\n') - - # Reload votes in memory - await self._load_existing_votes() - logger.info(LogFormatter.success("Sync completed successfully")) - else: - logger.info(LogFormatter.success("Local votes are up to date")) - - self._last_sync = datetime.now(timezone.utc) - - except Exception as e: - logger.error(LogFormatter.error("Sync failed", e)) - raise - - async def _check_for_new_votes(self): - """Check for new votes on the hub""" - try: - self._log_repo_operation("check", f"{HF_ORGANIZATION}/votes", "Checking for new votes") - # Load only dataset metadata - dataset_info = datasets.load_dataset(f"{HF_ORGANIZATION}/votes", split="train") - remote_vote_count = len(dataset_info) - - if remote_vote_count > self._total_votes: - logger.info(f"Found {remote_vote_count - self._total_votes} new votes on hub") - await self._sync_with_hub() - else: - logger.info("No new votes found on hub") - - except Exception as e: - logger.error(f"Error checking for new votes: {str(e)}") - - async def _load_existing_votes(self): - """Load existing votes from file""" - if not self.votes_file.exists(): - logger.warning(LogFormatter.warning("No votes file found")) - return - - try: - logger.info(LogFormatter.section("LOADING VOTES")) - - # Clear existing data structures - self.vote_check_set.clear() - self._votes_by_model.clear() - self._votes_by_user.clear() - - vote_count = 0 - latest_timestamp = None - - with 
open(self.votes_file, "r") as f: - for line in f: - try: - vote = json.loads(line.strip()) - vote_count += 1 - - # Track latest timestamp - try: - vote_timestamp = datetime.fromisoformat(vote["timestamp"].replace("Z", "+00:00")) - if not latest_timestamp or vote_timestamp > latest_timestamp: - latest_timestamp = vote_timestamp - vote["timestamp"] = vote_timestamp.strftime("%Y-%m-%dT%H:%M:%SZ") - except (KeyError, ValueError) as e: - logger.warning(LogFormatter.warning(f"Invalid timestamp in vote: {str(e)}")) - continue - - if vote_count % 1000 == 0: - logger.info(LogFormatter.info(f"Processed {vote_count:,} votes...")) - - self._add_vote_to_memory(vote) - - except json.JSONDecodeError as e: - logger.error(LogFormatter.error("Vote parsing failed", e)) - continue - except Exception as e: - logger.error(LogFormatter.error("Vote processing failed", e)) - continue - - self._total_votes = vote_count - self._last_vote_timestamp = latest_timestamp - - # Final summary - stats = { - "Total_Votes": vote_count, - "Latest_Vote": latest_timestamp.strftime("%Y-%m-%d %H:%M:%S UTC") if latest_timestamp else "None", - "Unique_Models": len(self._votes_by_model), - "Unique_Users": len(self._votes_by_user) - } - - logger.info(LogFormatter.section("VOTE SUMMARY")) - for line in LogFormatter.stats(stats): - logger.info(line) - - except Exception as e: - logger.error(LogFormatter.error("Failed to load votes", e)) - raise - - def _add_vote_to_memory(self, vote: Dict[str, Any]): - """Add vote to memory structures""" - try: - check_tuple = (vote["model"], vote["revision"], vote["username"]) - - # Skip if we already have this vote - if check_tuple in self.vote_check_set: - return - - self.vote_check_set.add(check_tuple) - - # Update model votes - if vote["model"] not in self._votes_by_model: - self._votes_by_model[vote["model"]] = [] - self._votes_by_model[vote["model"]].append(vote) - - # Update user votes - if vote["username"] not in self._votes_by_user: - self._votes_by_user[vote["username"]] = [] - self._votes_by_user[vote["username"]].append(vote) - - except KeyError as e: - logger.error(f"Malformed vote data, missing key: {str(e)}") - except Exception as e: - logger.error(f"Error adding vote to memory: {str(e)}") - - async def get_user_votes(self, user_id: str) -> List[Dict[str, Any]]: - """Get all votes from a specific user""" - logger.info(LogFormatter.info(f"Fetching votes for user: {user_id}")) - votes = self._votes_by_user.get(user_id, []) - logger.info(LogFormatter.success(f"Found {len(votes):,} votes")) - return votes - - async def get_model_votes(self, model_id: str) -> Dict[str, Any]: - """Get all votes for a specific model""" - logger.info(LogFormatter.info(f"Fetching votes for model: {model_id}")) - votes = self._votes_by_model.get(model_id, []) - - # Group votes by revision - votes_by_revision = {} - for vote in votes: - revision = vote["revision"] - if revision not in votes_by_revision: - votes_by_revision[revision] = 0 - votes_by_revision[revision] += 1 - - stats = { - "Total_Votes": len(votes), - **{f"Revision_{k}": v for k, v in votes_by_revision.items()} - } - - logger.info(LogFormatter.section("VOTE STATISTICS")) - for line in LogFormatter.stats(stats): - logger.info(line) - - return { - "total_votes": len(votes), - "votes_by_revision": votes_by_revision, - "votes": votes - } - - async def _get_model_revision(self, model_id: str) -> str: - """Get current revision of a model with retries""" - logger.info(f"Getting revision for model: {model_id}") - for attempt in range(self._max_retries): - 
try: - model_info = await asyncio.to_thread(self.hf_api.model_info, model_id) - logger.info(f"Successfully got revision {model_info.sha} for model {model_id}") - return model_info.sha - except Exception as e: - logger.error(f"Error getting model revision for {model_id} (attempt {attempt + 1}): {str(e)}") - if attempt < self._max_retries - 1: - retry_delay = self._retry_delay * (attempt + 1) - logger.info(f"Retrying in {retry_delay} seconds...") - await asyncio.sleep(retry_delay) - else: - logger.warning(f"Using 'main' as fallback revision for {model_id} after {self._max_retries} failed attempts") - return "main" - - async def add_vote(self, model_id: str, user_id: str, vote_type: str) -> Dict[str, Any]: - """Add a vote for a model""" - try: - self._log_repo_operation("add", f"{HF_ORGANIZATION}/votes", f"Adding {vote_type} vote for {model_id} by {user_id}") - logger.info(LogFormatter.section("NEW VOTE")) - stats = { - "Model": model_id, - "User": user_id, - "Type": vote_type - } - for line in LogFormatter.tree(stats, "Vote Details"): - logger.info(line) - - revision = await self._get_model_revision(model_id) - check_tuple = (model_id, revision, user_id) - - if check_tuple in self.vote_check_set: - raise ValueError("Vote already recorded for this model") - - vote = { - "model": model_id, - "revision": revision, - "username": user_id, - "timestamp": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ"), - "vote_type": vote_type - } - - # Update local storage - with open(self.votes_file, "a") as f: - f.write(json.dumps(vote) + "\n") - - self._add_vote_to_memory(vote) - self.votes_to_upload.append(vote) - - stats = { - "Status": "Success", - "Queue_Size": len(self.votes_to_upload) - } - for line in LogFormatter.stats(stats): - logger.info(line) - - # Try to upload if batch size reached - if len(self.votes_to_upload) >= self._upload_batch_size: - logger.info(LogFormatter.info(f"Upload batch size reached ({self._upload_batch_size}), triggering sync")) - await self._sync_with_hub() - - return {"status": "success", "message": "Vote added successfully"} - - except Exception as e: - logger.error(LogFormatter.error("Failed to add vote", e)) - raise \ No newline at end of file diff --git a/backend/app/utils/__init__.py b/backend/app/utils/__init__.py deleted file mode 100644 index 69a93acb760828c13400cfcd19da2822dfd83e5e..0000000000000000000000000000000000000000 --- a/backend/app/utils/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from . 
import model_validation - -__all__ = ["model_validation"] diff --git a/backend/app/utils/logging.py b/backend/app/utils/logging.py deleted file mode 100644 index 3a720f0c226faa0d0390a0c561be75db0194ca7f..0000000000000000000000000000000000000000 --- a/backend/app/utils/logging.py +++ /dev/null @@ -1,3 +0,0 @@ -from app.core.formatting import LogFormatter - -__all__ = ['LogFormatter'] \ No newline at end of file diff --git a/backend/app/utils/model_validation.py b/backend/app/utils/model_validation.py deleted file mode 100644 index 7cec5e092d07a0759deecbe5a4afdda4471bbf19..0000000000000000000000000000000000000000 --- a/backend/app/utils/model_validation.py +++ /dev/null @@ -1,266 +0,0 @@ -import json -import logging -import asyncio -from typing import Tuple, Optional, Dict, Any -from datasets import load_dataset -from huggingface_hub import HfApi, ModelCard, hf_hub_download -from huggingface_hub import hf_api -from transformers import AutoConfig, AutoTokenizer -from app.config.base import HF_TOKEN -from app.config.hf_config import OFFICIAL_PROVIDERS_REPO -from app.core.formatting import LogFormatter - -logger = logging.getLogger(__name__) - -class ModelValidator: - def __init__(self): - self.token = HF_TOKEN - self.api = HfApi(token=self.token) - self.headers = {"Authorization": f"Bearer {self.token}"} if self.token else {} - - async def check_model_card(self, model_id: str) -> Tuple[bool, str, Optional[Dict[str, Any]]]: - """Check if model has a valid model card""" - try: - logger.info(LogFormatter.info(f"Checking model card for {model_id}")) - - # Get model card content using ModelCard.load - try: - model_card = await asyncio.to_thread( - ModelCard.load, - model_id - ) - logger.info(LogFormatter.success("Model card found")) - except Exception as e: - error_msg = "Please add a model card to your model to explain how you trained/fine-tuned it." - logger.error(LogFormatter.error(error_msg, e)) - return False, error_msg, None - - # Check license in model card data - if model_card.data.license is None and not ("license_name" in model_card.data and "license_link" in model_card.data): - error_msg = "License not found. Please add a license to your model card using the `license` metadata or a `license_name`/`license_link` pair." - logger.warning(LogFormatter.warning(error_msg)) - return False, error_msg, None - - # Enforce card content length - if len(model_card.text) < 200: - error_msg = "Please add a description to your model card, it is too short." 
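
The `check_model_card` logic above applies three gates in order: the card must load, it must declare a license (either the `license` field or a `license_name`/`license_link` pair), and its body must clear a 200-character floor. A condensed, stand-alone sketch of the same gates, mirroring the original's use of `ModelCard.load` (the function name `validate_card` is illustrative):

```python
from huggingface_hub import ModelCard

def validate_card(model_id: str) -> tuple[bool, str]:
    """Apply the same three model-card gates as the service above."""
    try:
        card = ModelCard.load(model_id)
    except Exception:
        return False, "No model card found; please add one."
    has_license = card.data.license is not None or (
        "license_name" in card.data and "license_link" in card.data
    )
    if not has_license:
        return False, "License metadata missing from the model card."
    if len(card.text) < 200:  # same floor as the original check
        return False, "Model card description is too short."
    return True, ""
```
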
- logger.warning(LogFormatter.warning(error_msg)) - return False, error_msg, None - - logger.info(LogFormatter.success("Model card validation passed")) - return True, "", model_card - - except Exception as e: - error_msg = "Failed to validate model card" - logger.error(LogFormatter.error(error_msg, e)) - return False, str(e), None - - async def get_safetensors_metadata(self, model_id: str, is_adapter: bool = False, revision: str = "main") -> Optional[Dict]: - """Get metadata from a safetensors file""" - try: - if is_adapter: - metadata = await asyncio.to_thread( - hf_api.parse_safetensors_file_metadata, - model_id, - "adapter_model.safetensors", - token=self.token, - revision=revision, - ) - else: - metadata = await asyncio.to_thread( - hf_api.get_safetensors_metadata, - repo_id=model_id, - token=self.token, - revision=revision, - ) - return metadata - - except Exception as e: - logger.error(f"Failed to get safetensors metadata: {str(e)}") - return None - - async def get_model_size( - self, - model_info: Any, - precision: str, - base_model: str, - revision: str - ) -> Tuple[Optional[float], Optional[str]]: - """Get model size in billions of parameters""" - try: - logger.info(LogFormatter.info(f"Checking model size for {model_info.modelId}")) - - # Check if model is adapter - is_adapter = any(s.rfilename == "adapter_config.json" for s in model_info.siblings if hasattr(s, 'rfilename')) - - # Try to get size from safetensors first - model_size = None - - if is_adapter and base_model: - # For adapters, we need both adapter and base model sizes - adapter_meta = await self.get_safetensors_metadata(model_info.id, is_adapter=True, revision=revision) - base_meta = await self.get_safetensors_metadata(base_model, revision="main") - - if adapter_meta and base_meta: - adapter_size = sum(adapter_meta.parameter_count.values()) - base_size = sum(base_meta.parameter_count.values()) - model_size = adapter_size + base_size - else: - # For regular models, just get the model size - meta = await self.get_safetensors_metadata(model_info.id, revision=revision) - if meta: - model_size = sum(meta.parameter_count.values()) # total params - - if model_size is None: - # If model size could not be determined, return an error - return None, "Model size could not be determined" - - # Adjust size for GPTQ models - size_factor = 8 if (precision == "GPTQ" or "gptq" in model_info.id.lower()) else 1 - model_size = model_size / 1e9 # Convert to billions, assuming float16 - model_size = round(size_factor * model_size, 3) - - logger.info(LogFormatter.success(f"Model size: {model_size}B parameters")) - return model_size, None - - except Exception as e: - logger.error(LogFormatter.error(f"Error while determining model size: {e}")) - return None, str(e) - - - async def check_chat_template( - self, - model_id: str, - revision: str - ) -> Tuple[bool, Optional[str]]: - """Check if model has a valid chat template""" - try: - logger.info(LogFormatter.info(f"Checking chat template for {model_id}")) - - try: - config_file = await asyncio.to_thread( - hf_hub_download, - repo_id=model_id, - filename="tokenizer_config.json", - revision=revision, - repo_type="model" - ) - - with open(config_file, 'r') as f: - tokenizer_config = json.load(f) - - if 'chat_template' not in tokenizer_config: - error_msg = f"The model {model_id} doesn't have a chat_template in its tokenizer_config.json. Please add a chat_template before submitting or submit without it." 
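
The chat-template check above reduces to downloading `tokenizer_config.json` at the pinned revision and testing for a `chat_template` key. A stand-alone version of that probe, assuming network access (and an `HF_TOKEN` for gated repositories):

```python
import json

from huggingface_hub import hf_hub_download

def has_chat_template(model_id: str, revision: str = "main") -> bool:
    """Return True if the model's tokenizer_config.json declares a chat_template."""
    path = hf_hub_download(
        repo_id=model_id,
        filename="tokenizer_config.json",
        revision=revision,
        repo_type="model",
    )
    with open(path, "r") as f:
        return "chat_template" in json.load(f)
```
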
- logger.error(LogFormatter.error(error_msg)) - return False, error_msg - - logger.info(LogFormatter.success("Valid chat template found")) - return True, None - - except Exception as e: - error_msg = f"Error checking chat_template: {str(e)}" - logger.error(LogFormatter.error(error_msg)) - return False, error_msg - - except Exception as e: - error_msg = "Failed to check chat template" - logger.error(LogFormatter.error(error_msg, e)) - return False, str(e) - - async def is_model_on_hub( - self, - model_name: str, - revision: str, - test_tokenizer: bool = False, - trust_remote_code: bool = False - ) -> Tuple[bool, Optional[str], Optional[Any]]: - """Check if model exists and is properly configured on the Hub""" - try: - config = await asyncio.to_thread( - AutoConfig.from_pretrained, - model_name, - revision=revision, - trust_remote_code=trust_remote_code, - token=self.token, - force_download=True - ) - - if test_tokenizer: - try: - await asyncio.to_thread( - AutoTokenizer.from_pretrained, - model_name, - revision=revision, - trust_remote_code=trust_remote_code, - token=self.token - ) - except ValueError as e: - return False, f"The tokenizer is not available in an official Transformers release: {e}", None - except Exception: - return False, "The tokenizer cannot be loaded. Ensure the tokenizer class is part of a stable Transformers release and correctly configured.", None - - return True, None, config - - except ValueError: - return False, "The model requires `trust_remote_code=True` to launch, and for safety reasons, we don't accept such models automatically.", None - except Exception as e: - if "You are trying to access a gated repo." in str(e): - return True, "The model is gated and requires special access permissions.", None - return False, f"The model was not found or is misconfigured on the Hub. Error: {e.args[0]}", None - - async def check_official_provider_status( - self, - model_id: str, - existing_models: Dict[str, list] - ) -> Tuple[bool, Optional[str]]: - """ - Check if model is from official provider and has finished submission. - - Args: - model_id: The model identifier (org/model-name) - existing_models: Dictionary of models by status from get_models() - - Returns: - Tuple[bool, Optional[str]]: (is_valid, error_message) - """ - try: - logger.info(LogFormatter.info(f"Checking official provider status for {model_id}")) - - # Get model organization - model_org = model_id.split('/')[0] if '/' in model_id else None - - if not model_org: - return True, None - - # Load official providers dataset - dataset = load_dataset(OFFICIAL_PROVIDERS_REPO) - official_providers = dataset["train"][0]["CURATED_SET"] - - # Check if model org is in official providers - is_official = model_org in official_providers - - if is_official: - logger.info(LogFormatter.info(f"Model organization '{model_org}' is an official provider")) - - # Check for finished submissions - if "finished" in existing_models: - for model in existing_models["finished"]: - if model["name"] == model_id: - error_msg = ( - f"Model {model_id} is an official provider model " - f"with a completed evaluation. " - f"To re-evaluate, please open a discussion." 
-                            )
-                            logger.error(LogFormatter.error("Validation failed", error_msg))
-                            return False, error_msg
-
-                logger.info(LogFormatter.success("No finished submission found for this official provider model"))
-            else:
-                logger.info(LogFormatter.info(f"Model organization '{model_org}' is not an official provider"))
-
-            return True, None
-
-        except Exception as e:
-            error_msg = f"Failed to check official provider status: {str(e)}"
-            logger.error(LogFormatter.error(error_msg))
-            return False, error_msg
diff --git a/backend/pyproject.toml b/backend/pyproject.toml
deleted file mode 100644
index 48d06c05092b2f89e6fab480d9bbddbadd041504..0000000000000000000000000000000000000000
--- a/backend/pyproject.toml
+++ /dev/null
@@ -1,31 +0,0 @@
-[tool.poetry]
-name = "llm-leaderboard-backend"
-version = "0.1.0"
-description = "Backend for the Open LLM Leaderboard"
-authors = ["Your Name "]
-
-[tool.poetry.dependencies]
-python = "^3.12"
-fastapi = "^0.115.6"
-uvicorn = {extras = ["standard"], version = "^0.34.0"}
-numpy = "^2.2.0"
-pandas = "^2.2.3"
-datasets = "^3.2.0"
-pyarrow = "^18.1.0"
-python-multipart = "^0.0.20"
-huggingface-hub = "^0.27.1"
-transformers = "4.48.0"
-safetensors = "^0.4.5"
-aiofiles = "^24.1.0"
-fastapi-cache2 = "^0.2.1"
-python-dotenv = "^1.0.1"
-
-[tool.poetry.group.dev.dependencies]
-pytest = "^8.3.4"
-black = "^24.10.0"
-isort = "^5.13.2"
-flake8 = "^6.1.0"
-
-[build-system]
-requires = ["poetry-core>=1.0.0"]
-build-backend = "poetry.core.masonry.api"
\ No newline at end of file
diff --git a/backend/utils/analyze_prod_datasets.py b/backend/utils/analyze_prod_datasets.py
deleted file mode 100644
index 346d4f7dc543c8ea3e08ab7124d3008e2a5530b5..0000000000000000000000000000000000000000
--- a/backend/utils/analyze_prod_datasets.py
+++ /dev/null
@@ -1,170 +0,0 @@
-import os
-import json
-import logging
-from datetime import datetime
-from pathlib import Path
-from typing import Dict, Any, List
-from huggingface_hub import HfApi
-from dotenv import load_dotenv
-from app.config.hf_config import HF_ORGANIZATION
-
-# Get the backend directory path
-BACKEND_DIR = Path(__file__).parent.parent
-ROOT_DIR = BACKEND_DIR.parent
-
-# Load environment variables from .env file in root directory
-load_dotenv(ROOT_DIR / ".env")
-
-# Configure logging
-logging.basicConfig(
-    level=logging.INFO,
-    format='%(message)s'
-)
-logger = logging.getLogger(__name__)
-
-# Initialize Hugging Face API
-HF_TOKEN = os.getenv("HF_TOKEN")
-if not HF_TOKEN:
-    raise ValueError("HF_TOKEN not found in environment variables")
-api = HfApi(token=HF_TOKEN)
-
-def analyze_dataset(repo_id: str) -> Dict[str, Any]:
-    """Analyze a dataset and return statistics"""
-    try:
-        # Get dataset info
-        dataset_info = api.dataset_info(repo_id=repo_id)
-
-        # Get file list
-        files = api.list_repo_files(repo_id, repo_type="dataset")
-
-        # Get last commit info (wrap in iter() since list_repo_commits returns a list)
-        commits = api.list_repo_commits(repo_id, repo_type="dataset")
-        last_commit = next(iter(commits), None)
-
-        # Count lines in jsonl files
-        total_entries = 0
-        for file in files:
-            if file.endswith('.jsonl'):
-                try:
-                    # Download file content
-                    content = api.hf_hub_download(
-                        repo_id=repo_id,
-                        filename=file,
-                        repo_type="dataset"
-                    )
-
-                    # Count lines
-                    with open(content, 'r') as f:
-                        for _ in f:
-                            total_entries += 1
-
-                except Exception as e:
-                    logger.error(f"Error processing file {file}: {str(e)}")
-                    continue
-
-        # Special handling for requests dataset
-        if repo_id == f"{HF_ORGANIZATION}/requests":
-            pending_count = 0
-            completed_count = 0
-
-            try:
-                content = api.hf_hub_download(
-                    repo_id=repo_id,
-                    filename="eval_requests.jsonl",
-                    repo_type="dataset"
-                )
-
-                with open(content, 'r') as f:
-                    for line in f:
-                        try:
-                            entry = json.loads(line)
-                            if entry.get("status") == "pending":
-                                pending_count += 1
-                            elif entry.get("status") == "completed":
-                                completed_count += 1
-                        except json.JSONDecodeError:
-                            continue
-
-            except Exception as e:
-                logger.error(f"Error analyzing requests: {str(e)}")
-
-        # Build response (store last_modified as an ISO string so main() can parse it)
-        response = {
-            "id": repo_id,
-            "last_modified": last_commit.created_at.isoformat() if last_commit else None,
-            "total_entries": total_entries,
-            "file_count": len(files),
-            "size_bytes": dataset_info.size_in_bytes,
-            "downloads": dataset_info.downloads,
-        }
-
-        # Add request-specific info if applicable
-        if repo_id == f"{HF_ORGANIZATION}/requests":
-            response.update({
-                "pending_requests": pending_count,
-                "completed_requests": completed_count
-            })
-
-        return response
-
-    except Exception as e:
-        logger.error(f"Error analyzing dataset {repo_id}: {str(e)}")
-        return {
-            "id": repo_id,
-            "error": str(e)
-        }
-
-def main():
-    """Main function to analyze all datasets"""
-    try:
-        # List of datasets to analyze
-        datasets = [
-            {
-                "id": f"{HF_ORGANIZATION}/contents",
-                "description": "Aggregated results"
-            },
-            {
-                "id": f"{HF_ORGANIZATION}/requests",
-                "description": "Evaluation requests"
-            },
-            {
-                "id": f"{HF_ORGANIZATION}/votes",
-                "description": "User votes"
-            },
-            {
-                "id": f"{HF_ORGANIZATION}/official-providers",
-                "description": "Highlighted models"
-            }
-        ]
-
-        # Analyze each dataset
-        results = []
-        for dataset in datasets:
-            logger.info(f"\nAnalyzing {dataset['description']} ({dataset['id']})...")
-            result = analyze_dataset(dataset['id'])
-            results.append(result)
-
-            if 'error' in result:
-                logger.error(f"❌ Error: {result['error']}")
-            else:
-                logger.info(f"✓ {result['total_entries']} entries")
-                logger.info(f"✓ {result['file_count']} files")
-                logger.info(f"✓ {result['size_bytes'] / 1024:.1f} KB")
-                logger.info(f"✓ {result['downloads']} downloads")
-
-                if 'pending_requests' in result:
-                    logger.info(f"✓ {result['pending_requests']} pending requests")
-                    logger.info(f"✓ {result['completed_requests']} completed requests")
-
-                if result['last_modified']:
-                    last_modified = datetime.fromisoformat(result['last_modified'].replace('Z', '+00:00'))
-                    logger.info(f"✓ Last modified: {last_modified.strftime('%Y-%m-%d %H:%M:%S')}")
-
-        return results
-
-    except Exception as e:
-        logger.error(f"Global error: {str(e)}")
-        return []
-
-if __name__ == "__main__":
-    main()
\ No newline at end of file
diff --git a/backend/utils/analyze_prod_models.py b/backend/utils/analyze_prod_models.py
deleted file mode 100644
index 90a066dbb76e98ae1e13f1e969f527b695146cce..0000000000000000000000000000000000000000
--- a/backend/utils/analyze_prod_models.py
+++ /dev/null
@@ -1,106 +0,0 @@
-import os
-import json
-import logging
-from datetime import datetime
-from pathlib import Path
-from huggingface_hub import HfApi
-from dotenv import load_dotenv
-from app.config.hf_config import HF_ORGANIZATION
-
-# Get the backend directory path
-BACKEND_DIR = Path(__file__).parent.parent
-ROOT_DIR = BACKEND_DIR.parent
-
-# Load environment variables from .env file in root directory
-load_dotenv(ROOT_DIR / ".env")
-
-# Configure logging
-logging.basicConfig(
-    level=logging.INFO,
-    format='%(message)s'
-)
-logger = logging.getLogger(__name__)
-
-# Initialize Hugging Face API
-HF_TOKEN = os.getenv("HF_TOKEN")
-if not HF_TOKEN:
-    raise ValueError("HF_TOKEN not found in environment variables")
-api = HfApi(token=HF_TOKEN)
-
-def count_evaluated_models():
-    """Count the number of evaluated models"""
-    try:
-        # Get dataset info
-        dataset_info = api.dataset_info(repo_id=f"{HF_ORGANIZATION}/contents", repo_type="dataset")
-
-        # Get file list
-        files = api.list_repo_files(f"{HF_ORGANIZATION}/contents", repo_type="dataset")
-
-        # Get last commit info (wrap in iter() since list_repo_commits returns a list)
-        commits = api.list_repo_commits(f"{HF_ORGANIZATION}/contents", repo_type="dataset")
-        last_commit = next(iter(commits), None)
-
-        # Count lines in jsonl files
-        total_entries = 0
-        for file in files:
-            if file.endswith('.jsonl'):
-                try:
-                    # Download file content
-                    content = api.hf_hub_download(
-                        repo_id=f"{HF_ORGANIZATION}/contents",
-                        filename=file,
-                        repo_type="dataset"
-                    )
-
-                    # Count lines
-                    with open(content, 'r') as f:
-                        for _ in f:
-                            total_entries += 1
-
-                except Exception as e:
-                    logger.error(f"Error processing file {file}: {str(e)}")
-                    continue
-
-        # Build response (store last_modified as an ISO string so main() can parse it)
-        response = {
-            "total_models": total_entries,
-            "last_modified": last_commit.created_at.isoformat() if last_commit else None,
-            "file_count": len(files),
-            "size_bytes": dataset_info.size_in_bytes,
-            "downloads": dataset_info.downloads
-        }
-
-        return response
-
-    except Exception as e:
-        logger.error(f"Error counting evaluated models: {str(e)}")
-        return {
-            "error": str(e)
-        }
-
-def main():
-    """Main function to count evaluated models"""
-    try:
-        logger.info("\nAnalyzing evaluated models...")
-        result = count_evaluated_models()
-
-        if 'error' in result:
-            logger.error(f"❌ Error: {result['error']}")
-        else:
-            logger.info(f"✓ {result['total_models']} models evaluated")
-            logger.info(f"✓ {result['file_count']} files")
-            logger.info(f"✓ {result['size_bytes'] / 1024:.1f} KB")
-            logger.info(f"✓ {result['downloads']} downloads")
-
-            if result['last_modified']:
-                last_modified = datetime.fromisoformat(result['last_modified'].replace('Z', '+00:00'))
-                logger.info(f"✓ Last modified: {last_modified.strftime('%Y-%m-%d %H:%M:%S')}")
-
-        return result
-
-    except Exception as e:
-        logger.error(f"Global error: {str(e)}")
-        return {"error": str(e)}
-
-if __name__ == "__main__":
-    main()
\ No newline at end of file
diff --git a/backend/utils/fix_wrong_model_size.py b/backend/utils/fix_wrong_model_size.py
deleted file mode 100644
index 3b464f873c6465d077e4da735935b14884ace254..0000000000000000000000000000000000000000
--- a/backend/utils/fix_wrong_model_size.py
+++ /dev/null
@@ -1,110 +0,0 @@
-import os
-import json
-import pytz
-import logging
-import asyncio
-from datetime import datetime
-from pathlib import Path
-import huggingface_hub
-from huggingface_hub.errors import RepositoryNotFoundError, RevisionNotFoundError
-from dotenv import load_dotenv
-from git import Repo
-from datetime import datetime
-from tqdm.auto import tqdm
-from tqdm.contrib.logging import logging_redirect_tqdm
-
-from app.config.hf_config import HF_TOKEN, API
-
-from app.utils.model_validation import ModelValidator
-
-huggingface_hub.logging.set_verbosity_error()
-huggingface_hub.utils.disable_progress_bars()
-
-logging.basicConfig(
-    level=logging.ERROR,
-    format='%(message)s'
-)
-logger = logging.getLogger(__name__)
-load_dotenv()
-
-validator = ModelValidator()
-
-def get_changed_files(repo_path, start_date, end_date):
-    repo = Repo(repo_path)
-    start = datetime.strptime(start_date, '%Y-%m-%d')
-    end = datetime.strptime(end_date, '%Y-%m-%d')
-
-    changed_files = set()
-    pbar = tqdm(repo.iter_commits(), desc=f"Reading commits from {end_date} to {start_date}")
-    for commit in pbar:
-        commit_date = datetime.fromtimestamp(commit.committed_date)
-        pbar.set_postfix_str(f"Commit date: {commit_date}")
-        if start <= commit_date <= end:
-            changed_files.update(item.a_path for item in commit.diff(commit.parents[0]))
-
-        if commit_date < start:
-            break
-
-    return changed_files
-
-
-def read_json(repo_path, file):
-    with open(f"{repo_path}/{file}") as file:
-        return json.load(file)
-
-
-def write_json(repo_path, file, content):
-    with open(f"{repo_path}/{file}", "w") as file:
-        json.dump(content, file, indent=2)
-
-
-def main():
-    requests_path = "/requests"
-    start_date = "2024-12-09"
-    end_date = "2025-01-07"
-
-    changed_files = get_changed_files(requests_path, start_date, end_date)
-
-    for file in tqdm(changed_files):
-        try:
-            request_data = read_json(requests_path, file)
-        except FileNotFoundError:
-            tqdm.write(f"File {file} not found")
-            continue
-
-        try:
-            model_info = API.model_info(
-                repo_id=request_data["model"],
-                revision=request_data["revision"],
-                token=HF_TOKEN
-            )
-        except (RepositoryNotFoundError, RevisionNotFoundError):
-            tqdm.write(f"Model info for {request_data['model']} not found")
-            continue
-
-        with logging_redirect_tqdm():
-            new_model_size, error = asyncio.run(validator.get_model_size(
-                model_info=model_info,
-                precision=request_data["precision"],
-                base_model=request_data["base_model"],
-                revision=request_data["revision"]
-            ))
-
-        if error:
-            tqdm.write(f"Error getting model size info for {request_data['model']}, {error}")
-            continue
-
-        old_model_size = request_data["params"]
-        if old_model_size != new_model_size:
-            if new_model_size > 100:
-                tqdm.write(f"Model: {request_data['model']}, size is more than 100B: {new_model_size}")
-
-            tqdm.write(f"Model: {request_data['model']}, old size: {request_data['params']} new size: {new_model_size}")
-            tqdm.write(f"Updating request file {file}")
-
-            request_data["params"] = new_model_size
-            write_json(requests_path, file, content=request_data)
-
-
-if __name__ == "__main__":
-    main()
diff --git a/backend/utils/last_activity.py b/backend/utils/last_activity.py
deleted file mode 100644
index 9f403ef0d223f79c9f7d2633ecbee5c3044ed5ae..0000000000000000000000000000000000000000
--- a/backend/utils/last_activity.py
+++ /dev/null
@@ -1,164 +0,0 @@
-import os
-import json
-import logging
-from datetime import datetime
-from pathlib import Path
-from typing import Dict, Any, List, Tuple
-from huggingface_hub import HfApi
-from dotenv import load_dotenv
-
-# Get the backend directory path
-BACKEND_DIR = Path(__file__).parent.parent
-ROOT_DIR = BACKEND_DIR.parent
-
-# Load environment variables from .env file in root directory
-load_dotenv(ROOT_DIR / ".env")
-
-# Configure logging
-logging.basicConfig(
-    level=logging.INFO,
-    format='%(message)s'
-)
-logger = logging.getLogger(__name__)
-
-# Initialize Hugging Face API
-HF_TOKEN = os.getenv("HF_TOKEN")
-if not HF_TOKEN:
-    raise ValueError("HF_TOKEN not found in environment variables")
-api = HfApi(token=HF_TOKEN)
-
-# Default organization
-HF_ORGANIZATION = os.getenv('HF_ORGANIZATION', 'open-llm-leaderboard')
-
-def get_last_votes(limit: int = 5) -> List[Dict]:
-    """Get the last votes from the votes dataset"""
-    try:
-        logger.info("\nFetching last votes...")
-
-        # Download and read votes file
-        logger.info("Downloading votes file...")
-        votes_file = api.hf_hub_download(
-            repo_id=f"{HF_ORGANIZATION}/votes",
-            filename="votes_data.jsonl",
-            repo_type="dataset"
-        )
-
-        logger.info("Reading votes file...")
-        votes = []
-        with open(votes_file, 'r') as f:
-            for line in f:
-                try:
-                    vote = json.loads(line)
-                    votes.append(vote)
-                except 
json.JSONDecodeError: - continue - - # Sort by timestamp and get last n votes - logger.info("Sorting votes...") - votes.sort(key=lambda x: x.get('timestamp', ''), reverse=True) - last_votes = votes[:limit] - - logger.info(f"✓ Found {len(last_votes)} recent votes") - return last_votes - - except Exception as e: - logger.error(f"Error reading votes: {str(e)}") - return [] - -def get_last_models(limit: int = 5) -> List[Dict]: - """Get the last models from the requests dataset using commit history""" - try: - logger.info("\nFetching last model submissions...") - - # Get commit history - logger.info("Getting commit history...") - commits = list(api.list_repo_commits( - repo_id=f"{HF_ORGANIZATION}/requests", - repo_type="dataset" - )) - logger.info(f"Found {len(commits)} commits") - - # Track processed files to avoid duplicates - processed_files = set() - models = [] - - # Process commits until we have enough models - for i, commit in enumerate(commits): - logger.info(f"Processing commit {i+1}/{len(commits)} ({commit.created_at})") - - # Look at added/modified files in this commit - files_to_process = [f for f in (commit.added + commit.modified) if f.endswith('.json')] - if files_to_process: - logger.info(f"Found {len(files_to_process)} JSON files in commit") - - for file in files_to_process: - if file in processed_files: - continue - - processed_files.add(file) - logger.info(f"Downloading {file}...") - - try: - # Download and read the file - content = api.hf_hub_download( - repo_id=f"{HF_ORGANIZATION}/requests", - filename=file, - repo_type="dataset" - ) - - with open(content, 'r') as f: - model_data = json.load(f) - models.append(model_data) - logger.info(f"✓ Added model {model_data.get('model', 'Unknown')}") - - if len(models) >= limit: - logger.info("Reached desired number of models") - break - - except Exception as e: - logger.error(f"Error reading file {file}: {str(e)}") - continue - - if len(models) >= limit: - break - - logger.info(f"✓ Found {len(models)} recent model submissions") - return models - - except Exception as e: - logger.error(f"Error reading models: {str(e)}") - return [] - -def main(): - """Display last activities from the leaderboard""" - try: - # Get last votes - logger.info("\n=== Last Votes ===") - last_votes = get_last_votes() - if last_votes: - for vote in last_votes: - logger.info(f"\nModel: {vote.get('model')}") - logger.info(f"User: {vote.get('username')}") - logger.info(f"Timestamp: {vote.get('timestamp')}") - else: - logger.info("No votes found") - - # Get last model submissions - logger.info("\n=== Last Model Submissions ===") - last_models = get_last_models() - if last_models: - for model in last_models: - logger.info(f"\nModel: {model.get('model')}") - logger.info(f"Submitter: {model.get('sender', 'Unknown')}") - logger.info(f"Status: {model.get('status', 'Unknown')}") - logger.info(f"Submission Time: {model.get('submitted_time', 'Unknown')}") - logger.info(f"Precision: {model.get('precision', 'Unknown')}") - logger.info(f"Weight Type: {model.get('weight_type', 'Unknown')}") - else: - logger.info("No models found") - - except Exception as e: - logger.error(f"Global error: {str(e)}") - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/backend/utils/sync_datasets_locally.py b/backend/utils/sync_datasets_locally.py deleted file mode 100644 index c06326899e79b974d9d621e6531ea8f6b9563f9c..0000000000000000000000000000000000000000 --- a/backend/utils/sync_datasets_locally.py +++ /dev/null @@ -1,130 +0,0 @@ -import os -import shutil -import 
tempfile -import logging -from pathlib import Path -from huggingface_hub import HfApi, snapshot_download, upload_folder, create_repo -from dotenv import load_dotenv - -# Configure source and destination usernames -SOURCE_USERNAME = "open-llm-leaderboard" -DESTINATION_USERNAME = "tfrere" - -# Get the backend directory path -BACKEND_DIR = Path(__file__).parent.parent -ROOT_DIR = BACKEND_DIR.parent - -# Load environment variables from .env file in root directory -load_dotenv(ROOT_DIR / ".env") - -# Configure logging -logging.basicConfig( - level=logging.INFO, - format='%(message)s' -) -logger = logging.getLogger(__name__) - -# List of dataset names to sync -DATASET_NAMES = [ - "votes", - "results", - "requests", - "contents", - "official-providers", -] - -# Build list of datasets with their source and destination paths -DATASETS = [ - (name, f"{SOURCE_USERNAME}/{name}", f"{DESTINATION_USERNAME}/{name}") - for name in DATASET_NAMES -] - -# Initialize Hugging Face API -api = HfApi() - -def ensure_repo_exists(repo_id, token): - """Ensure the repository exists, create it if it doesn't""" - try: - api.repo_info(repo_id=repo_id, repo_type="dataset") - logger.info(f"✓ Repository {repo_id} already exists") - except Exception: - logger.info(f"Creating repository {repo_id}...") - create_repo( - repo_id=repo_id, - repo_type="dataset", - token=token, - private=True - ) - logger.info(f"✓ Repository {repo_id} created") - -def process_dataset(dataset_info, token): - """Process a single dataset""" - name, source_dataset, destination_dataset = dataset_info - try: - logger.info(f"\n📥 Processing dataset: {name}") - - # Ensure destination repository exists - ensure_repo_exists(destination_dataset, token) - - # Create a temporary directory for this dataset - with tempfile.TemporaryDirectory() as temp_dir: - try: - # List files in source dataset - logger.info(f"Listing files in {source_dataset}...") - files = api.list_repo_files(source_dataset, repo_type="dataset") - logger.info(f"Detected structure: {len(files)} files") - - # Download dataset - logger.info(f"Downloading from {source_dataset}...") - local_dir = snapshot_download( - repo_id=source_dataset, - repo_type="dataset", - local_dir=temp_dir, - token=token - ) - logger.info(f"✓ Download complete") - - # Upload to destination while preserving structure - logger.info(f"📤 Uploading to {destination_dataset}...") - api.upload_folder( - folder_path=local_dir, - repo_id=destination_dataset, - repo_type="dataset", - token=token - ) - logger.info(f"✅ {name} copied successfully!") - return True - - except Exception as e: - logger.error(f"❌ Error processing {name}: {str(e)}") - return False - - except Exception as e: - logger.error(f"❌ Error for {name}: {str(e)}") - return False - -def copy_datasets(): - try: - logger.info("🔑 Checking authentication...") - # Get token from .env file - token = os.getenv("HF_TOKEN") - if not token: - raise ValueError("HF_TOKEN not found in .env file") - - # Process datasets sequentially - results = [] - for dataset_info in DATASETS: - success = process_dataset(dataset_info, token) - results.append((dataset_info[0], success)) - - # Print final summary - logger.info("\n📊 Final summary:") - for dataset, success in results: - status = "✅ Success" if success else "❌ Failure" - logger.info(f"{dataset}: {status}") - - except Exception as e: - logger.error(f"❌ Global error: {str(e)}") - -if __name__ == "__main__": - copy_datasets() \ No newline at end of file diff --git a/backend/uv.lock b/backend/uv.lock deleted file mode 100644 index 
40e5dae9d4cd0420befa92ec740c2c031349f0a0..0000000000000000000000000000000000000000 --- a/backend/uv.lock +++ /dev/null @@ -1,971 +0,0 @@ -version = 1 -requires-python = "==3.12.1" - -[[package]] -name = "aiofiles" -version = "24.1.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/0b/03/a88171e277e8caa88a4c77808c20ebb04ba74cc4681bf1e9416c862de237/aiofiles-24.1.0.tar.gz", hash = "sha256:22a075c9e5a3810f0c2e48f3008c94d68c65d763b9b03857924c99e57355166c", size = 30247 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/a5/45/30bb92d442636f570cb5651bc661f52b610e2eec3f891a5dc3a4c3667db0/aiofiles-24.1.0-py3-none-any.whl", hash = "sha256:b4ec55f4195e3eb5d7abd1bf7e061763e864dd4954231fb8539a0ef8bb8260e5", size = 15896 }, -] - -[[package]] -name = "aiohappyeyeballs" -version = "2.4.4" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/7f/55/e4373e888fdacb15563ef6fa9fa8c8252476ea071e96fb46defac9f18bf2/aiohappyeyeballs-2.4.4.tar.gz", hash = "sha256:5fdd7d87889c63183afc18ce9271f9b0a7d32c2303e394468dd45d514a757745", size = 21977 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/b9/74/fbb6559de3607b3300b9be3cc64e97548d55678e44623db17820dbd20002/aiohappyeyeballs-2.4.4-py3-none-any.whl", hash = "sha256:a980909d50efcd44795c4afeca523296716d50cd756ddca6af8c65b996e27de8", size = 14756 }, -] - -[[package]] -name = "aiohttp" -version = "3.11.10" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "aiohappyeyeballs" }, - { name = "aiosignal" }, - { name = "attrs" }, - { name = "frozenlist" }, - { name = "multidict" }, - { name = "propcache" }, - { name = "yarl" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/94/c4/3b5a937b16f6c2a0ada842a9066aad0b7a5708427d4a202a07bf09c67cbb/aiohttp-3.11.10.tar.gz", hash = "sha256:b1fc6b45010a8d0ff9e88f9f2418c6fd408c99c211257334aff41597ebece42e", size = 7668832 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/25/17/1dbe2f619f77795409c1a13ab395b98ed1b215d3e938cacde9b8ffdac53d/aiohttp-3.11.10-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:b78f053a7ecfc35f0451d961dacdc671f4bcbc2f58241a7c820e9d82559844cf", size = 704448 }, - { url = "https://files.pythonhosted.org/packages/e3/9b/112247ad47e9d7f6640889c6e42cc0ded8c8345dd0033c66bcede799b051/aiohttp-3.11.10-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ab7485222db0959a87fbe8125e233b5a6f01f4400785b36e8a7878170d8c3138", size = 463829 }, - { url = "https://files.pythonhosted.org/packages/8a/36/a64b583771fc673062a7a1374728a6241d49e2eda5a9041fbf248e18c804/aiohttp-3.11.10-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cf14627232dfa8730453752e9cdc210966490992234d77ff90bc8dc0dce361d5", size = 455774 }, - { url = "https://files.pythonhosted.org/packages/e5/75/ee1b8f510978b3de5f185c62535b135e4fc3f5a247ca0c2245137a02d800/aiohttp-3.11.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:076bc454a7e6fd646bc82ea7f98296be0b1219b5e3ef8a488afbdd8e81fbac50", size = 1682134 }, - { url = "https://files.pythonhosted.org/packages/87/46/65e8259432d5f73ca9ebf5edb645ef90e5303724e4e52477516cb4042240/aiohttp-3.11.10-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:482cafb7dc886bebeb6c9ba7925e03591a62ab34298ee70d3dd47ba966370d2c", size = 1736757 }, - { url = 
"https://files.pythonhosted.org/packages/03/f6/a6d1e791b7153fb2d101278f7146c0771b0e1569c547f8a8bc3035651984/aiohttp-3.11.10-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bf3d1a519a324af764a46da4115bdbd566b3c73fb793ffb97f9111dbc684fc4d", size = 1793033 }, - { url = "https://files.pythonhosted.org/packages/a8/e9/1ac90733e36e7848693aece522936a13bf17eeb617da662f94adfafc1c25/aiohttp-3.11.10-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24213ba85a419103e641e55c27dc7ff03536c4873470c2478cce3311ba1eee7b", size = 1691609 }, - { url = "https://files.pythonhosted.org/packages/6d/a6/77b33da5a0bc04566c7ddcca94500f2c2a2334eecab4885387fffd1fc600/aiohttp-3.11.10-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b99acd4730ad1b196bfb03ee0803e4adac371ae8efa7e1cbc820200fc5ded109", size = 1619082 }, - { url = "https://files.pythonhosted.org/packages/48/94/5bf5f927d9a2fedd2c978adfb70a3680e16f46d178361685b56244eb52ed/aiohttp-3.11.10-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:14cdb5a9570be5a04eec2ace174a48ae85833c2aadc86de68f55541f66ce42ab", size = 1641186 }, - { url = "https://files.pythonhosted.org/packages/99/2d/e85103aa01d1064e51bc50cb51e7b40150a8ff5d34e5a3173a46b241860b/aiohttp-3.11.10-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:7e97d622cb083e86f18317282084bc9fbf261801b0192c34fe4b1febd9f7ae69", size = 1646280 }, - { url = "https://files.pythonhosted.org/packages/7b/e0/44651fda8c1d865a51b3a81f1956ea55ce16fc568fe7a3e05db7fc22f139/aiohttp-3.11.10-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:012f176945af138abc10c4a48743327a92b4ca9adc7a0e078077cdb5dbab7be0", size = 1701862 }, - { url = "https://files.pythonhosted.org/packages/4e/1e/0804459ae325a5b95f6f349778fb465f29d2b863e522b6a349db0aaad54c/aiohttp-3.11.10-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:44224d815853962f48fe124748227773acd9686eba6dc102578defd6fc99e8d9", size = 1734373 }, - { url = "https://files.pythonhosted.org/packages/07/87/b8f6721668cad74bcc9c7cfe6d0230b304d1250196b221e54294a0d78dbe/aiohttp-3.11.10-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c87bf31b7fdab94ae3adbe4a48e711bfc5f89d21cf4c197e75561def39e223bc", size = 1694343 }, - { url = "https://files.pythonhosted.org/packages/4b/20/42813fc60d9178ba9b1b86c58a5441ddb6cf8ffdfe66387345bff173bcff/aiohttp-3.11.10-cp312-cp312-win32.whl", hash = "sha256:06a8e2ee1cbac16fe61e51e0b0c269400e781b13bcfc33f5425912391a542985", size = 411118 }, - { url = "https://files.pythonhosted.org/packages/3a/51/df9c263c861ce93998b5ad2ba3212caab2112d5b66dbe91ddbe90c41ded4/aiohttp-3.11.10-cp312-cp312-win_amd64.whl", hash = "sha256:be2b516f56ea883a3e14dda17059716593526e10fb6303189aaf5503937db408", size = 437424 }, -] - -[[package]] -name = "aiosignal" -version = "1.3.2" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "frozenlist" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/ba/b5/6d55e80f6d8a08ce22b982eafa278d823b541c925f11ee774b0b9c43473d/aiosignal-1.3.2.tar.gz", hash = "sha256:a8c255c66fafb1e499c9351d0bf32ff2d8a0321595ebac3b93713656d2436f54", size = 19424 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/ec/6a/bc7e17a3e87a2985d3e8f4da4cd0f481060eb78fb08596c42be62c90a4d9/aiosignal-1.3.2-py2.py3-none-any.whl", hash = "sha256:45cde58e409a301715980c2b01d0c28bdde3770d8290b5eb2173759d9acb31a5", size = 7597 }, -] - -[[package]] -name = "annotated-types" -version = "0.7.0" -source = { registry = 
"https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643 }, -] - -[[package]] -name = "anyio" -version = "4.7.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "idna" }, - { name = "sniffio" }, - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/f6/40/318e58f669b1a9e00f5c4453910682e2d9dd594334539c7b7817dabb765f/anyio-4.7.0.tar.gz", hash = "sha256:2f834749c602966b7d456a7567cafcb309f96482b5081d14ac93ccd457f9dd48", size = 177076 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/a0/7a/4daaf3b6c08ad7ceffea4634ec206faeff697526421c20f07628c7372156/anyio-4.7.0-py3-none-any.whl", hash = "sha256:ea60c3723ab42ba6fff7e8ccb0488c898ec538ff4df1f1d5e642c3601d07e352", size = 93052 }, -] - -[[package]] -name = "attrs" -version = "24.3.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/48/c8/6260f8ccc11f0917360fc0da435c5c9c7504e3db174d5a12a1494887b045/attrs-24.3.0.tar.gz", hash = "sha256:8f5c07333d543103541ba7be0e2ce16eeee8130cb0b3f9238ab904ce1e85baff", size = 805984 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/89/aa/ab0f7891a01eeb2d2e338ae8fecbe57fcebea1a24dbb64d45801bfab481d/attrs-24.3.0-py3-none-any.whl", hash = "sha256:ac96cd038792094f438ad1f6ff80837353805ac950cd2aa0e0625ef19850c308", size = 63397 }, -] - -[[package]] -name = "black" -version = "24.10.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "click" }, - { name = "mypy-extensions" }, - { name = "packaging" }, - { name = "pathspec" }, - { name = "platformdirs" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/d8/0d/cc2fb42b8c50d80143221515dd7e4766995bd07c56c9a3ed30baf080b6dc/black-24.10.0.tar.gz", hash = "sha256:846ea64c97afe3bc677b761787993be4991810ecc7a4a937816dd6bddedc4875", size = 645813 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/90/04/bf74c71f592bcd761610bbf67e23e6a3cff824780761f536512437f1e655/black-24.10.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b5e39e0fae001df40f95bd8cc36b9165c5e2ea88900167bddf258bacef9bbdc3", size = 1644256 }, - { url = "https://files.pythonhosted.org/packages/4c/ea/a77bab4cf1887f4b2e0bce5516ea0b3ff7d04ba96af21d65024629afedb6/black-24.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d37d422772111794b26757c5b55a3eade028aa3fde43121ab7b673d050949d65", size = 1448534 }, - { url = "https://files.pythonhosted.org/packages/4e/3e/443ef8bc1fbda78e61f79157f303893f3fddf19ca3c8989b163eb3469a12/black-24.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:14b3502784f09ce2443830e3133dacf2c0110d45191ed470ecb04d0f5f6fcb0f", size = 1761892 }, - { url = "https://files.pythonhosted.org/packages/52/93/eac95ff229049a6901bc84fec6908a5124b8a0b7c26ea766b3b8a5debd22/black-24.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:30d2c30dc5139211dda799758559d1b049f7f14c580c409d6ad925b74a4208a8", size = 1434796 }, - { url = 
"https://files.pythonhosted.org/packages/8d/a7/4b27c50537ebca8bec139b872861f9d2bf501c5ec51fcf897cb924d9e264/black-24.10.0-py3-none-any.whl", hash = "sha256:3bb2b7a1f7b685f85b11fed1ef10f8a9148bceb49853e47a294a3dd963c1dd7d", size = 206898 }, -] - -[[package]] -name = "certifi" -version = "2024.12.14" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/0f/bd/1d41ee578ce09523c81a15426705dd20969f5abf006d1afe8aeff0dd776a/certifi-2024.12.14.tar.gz", hash = "sha256:b650d30f370c2b724812bee08008be0c4163b163ddaec3f2546c1caf65f191db", size = 166010 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/a5/32/8f6669fc4798494966bf446c8c4a162e0b5d893dff088afddf76414f70e1/certifi-2024.12.14-py3-none-any.whl", hash = "sha256:1275f7a45be9464efc1173084eaa30f866fe2e47d389406136d332ed4967ec56", size = 164927 }, -] - -[[package]] -name = "charset-normalizer" -version = "3.4.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/f2/4f/e1808dc01273379acc506d18f1504eb2d299bd4131743b9fc54d7be4df1e/charset_normalizer-3.4.0.tar.gz", hash = "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e", size = 106620 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/d3/0b/4b7a70987abf9b8196845806198975b6aab4ce016632f817ad758a5aa056/charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6", size = 194445 }, - { url = "https://files.pythonhosted.org/packages/50/89/354cc56cf4dd2449715bc9a0f54f3aef3dc700d2d62d1fa5bbea53b13426/charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf", size = 125275 }, - { url = "https://files.pythonhosted.org/packages/fa/44/b730e2a2580110ced837ac083d8ad222343c96bb6b66e9e4e706e4d0b6df/charset_normalizer-3.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db", size = 119020 }, - { url = "https://files.pythonhosted.org/packages/9d/e4/9263b8240ed9472a2ae7ddc3e516e71ef46617fe40eaa51221ccd4ad9a27/charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1", size = 139128 }, - { url = "https://files.pythonhosted.org/packages/6b/e3/9f73e779315a54334240353eaea75854a9a690f3f580e4bd85d977cb2204/charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03", size = 149277 }, - { url = "https://files.pythonhosted.org/packages/1a/cf/f1f50c2f295312edb8a548d3fa56a5c923b146cd3f24114d5adb7e7be558/charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284", size = 142174 }, - { url = "https://files.pythonhosted.org/packages/16/92/92a76dc2ff3a12e69ba94e7e05168d37d0345fa08c87e1fe24d0c2a42223/charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15", size = 143838 }, - { url = "https://files.pythonhosted.org/packages/a4/01/2117ff2b1dfc61695daf2babe4a874bca328489afa85952440b59819e9d7/charset_normalizer-3.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8", size = 146149 }, - { url = "https://files.pythonhosted.org/packages/f6/9b/93a332b8d25b347f6839ca0a61b7f0287b0930216994e8bf67a75d050255/charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2", size = 140043 }, - { url = "https://files.pythonhosted.org/packages/ab/f6/7ac4a01adcdecbc7a7587767c776d53d369b8b971382b91211489535acf0/charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719", size = 148229 }, - { url = "https://files.pythonhosted.org/packages/9d/be/5708ad18161dee7dc6a0f7e6cf3a88ea6279c3e8484844c0590e50e803ef/charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631", size = 151556 }, - { url = "https://files.pythonhosted.org/packages/5a/bb/3d8bc22bacb9eb89785e83e6723f9888265f3a0de3b9ce724d66bd49884e/charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b", size = 149772 }, - { url = "https://files.pythonhosted.org/packages/f7/fa/d3fc622de05a86f30beea5fc4e9ac46aead4731e73fd9055496732bcc0a4/charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565", size = 144800 }, - { url = "https://files.pythonhosted.org/packages/9a/65/bdb9bc496d7d190d725e96816e20e2ae3a6fa42a5cac99c3c3d6ff884118/charset_normalizer-3.4.0-cp312-cp312-win32.whl", hash = "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7", size = 94836 }, - { url = "https://files.pythonhosted.org/packages/3e/67/7b72b69d25b89c0b3cea583ee372c43aa24df15f0e0f8d3982c57804984b/charset_normalizer-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9", size = 102187 }, - { url = "https://files.pythonhosted.org/packages/bf/9b/08c0432272d77b04803958a4598a51e2a4b51c06640af8b8f0f908c18bf2/charset_normalizer-3.4.0-py3-none-any.whl", hash = "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079", size = 49446 }, -] - -[[package]] -name = "click" -version = "8.1.7" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "colorama", marker = "platform_system == 'Windows'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/96/d3/f04c7bfcf5c1862a2a5b845c6b2b360488cf47af55dfa79c98f6a6bf98b5/click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de", size = 336121 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/00/2e/d53fa4befbf2cfa713304affc7ca780ce4fc1fd8710527771b58311a3229/click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28", size = 97941 }, -] - -[[package]] -name = "colorama" -version = "0.4.6" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = 
"sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335 }, -] - -[[package]] -name = "datasets" -version = "3.2.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "aiohttp" }, - { name = "dill" }, - { name = "filelock" }, - { name = "fsspec", extra = ["http"] }, - { name = "huggingface-hub" }, - { name = "multiprocess" }, - { name = "numpy" }, - { name = "packaging" }, - { name = "pandas" }, - { name = "pyarrow" }, - { name = "pyyaml" }, - { name = "requests" }, - { name = "tqdm" }, - { name = "xxhash" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/fc/48/744286c044e2b942d4fa67f92816126522ad1f0675def0ea3264e6242005/datasets-3.2.0.tar.gz", hash = "sha256:9a6e1a356052866b5dbdd9c9eedb000bf3fc43d986e3584d9b028f4976937229", size = 558366 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/d7/84/0df6c5981f5fc722381662ff8cfbdf8aad64bec875f75d80b55bfef394ce/datasets-3.2.0-py3-none-any.whl", hash = "sha256:f3d2ba2698b7284a4518019658596a6a8bc79f31e51516524249d6c59cf0fe2a", size = 480647 }, -] - -[[package]] -name = "dill" -version = "0.3.8" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/17/4d/ac7ffa80c69ea1df30a8aa11b3578692a5118e7cd1aa157e3ef73b092d15/dill-0.3.8.tar.gz", hash = "sha256:3ebe3c479ad625c4553aca177444d89b486b1d84982eeacded644afc0cf797ca", size = 184847 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/c9/7a/cef76fd8438a42f96db64ddaa85280485a9c395e7df3db8158cfec1eee34/dill-0.3.8-py3-none-any.whl", hash = "sha256:c36ca9ffb54365bdd2f8eb3eff7d2a21237f8452b57ace88b1ac615b7e815bd7", size = 116252 }, -] - -[[package]] -name = "fastapi" -version = "0.115.6" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "pydantic" }, - { name = "starlette" }, - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/93/72/d83b98cd106541e8f5e5bfab8ef2974ab45a62e8a6c5b5e6940f26d2ed4b/fastapi-0.115.6.tar.gz", hash = "sha256:9ec46f7addc14ea472958a96aae5b5de65f39721a46aaf5705c480d9a8b76654", size = 301336 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/52/b3/7e4df40e585df024fac2f80d1a2d579c854ac37109675db2b0cc22c0bb9e/fastapi-0.115.6-py3-none-any.whl", hash = "sha256:e9240b29e36fa8f4bb7290316988e90c381e5092e0cbe84e7818cc3713bcf305", size = 94843 }, -] - -[[package]] -name = "fastapi-cache2" -version = "0.2.2" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "fastapi" }, - { name = "pendulum" }, - { name = "typing-extensions" }, - { name = "uvicorn" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/37/6f/7c2078bf097634276a266fe225d9d6a1f882fe505a662bd1835fb2cf6891/fastapi_cache2-0.2.2.tar.gz", hash = "sha256:71bf4450117dc24224ec120be489dbe09e331143c9f74e75eb6f576b78926026", size = 17950 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/6d/b3/ce7c5d9f5e75257a3039ee1e38feb77bee29da3a1792c57d6ea1acb55d17/fastapi_cache2-0.2.2-py3-none-any.whl", hash = "sha256:e1fae86d8eaaa6c8501dfe08407f71d69e87cc6748042d59d51994000532846c", size = 25411 }, -] - -[[package]] -name = "filelock" -version = "3.16.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/9d/db/3ef5bb276dae18d6ec2124224403d1d67bccdbefc17af4cc8f553e341ab1/filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435", size = 18037 } -wheels = [ - { url 
= "https://files.pythonhosted.org/packages/b9/f8/feced7779d755758a52d1f6635d990b8d98dc0a29fa568bbe0625f18fdf3/filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0", size = 16163 }, -] - -[[package]] -name = "flake8" -version = "7.1.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "mccabe" }, - { name = "pycodestyle" }, - { name = "pyflakes" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/37/72/e8d66150c4fcace3c0a450466aa3480506ba2cae7b61e100a2613afc3907/flake8-7.1.1.tar.gz", hash = "sha256:049d058491e228e03e67b390f311bbf88fce2dbaa8fa673e7aea87b7198b8d38", size = 48054 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/d9/42/65004373ac4617464f35ed15931b30d764f53cdd30cc78d5aea349c8c050/flake8-7.1.1-py2.py3-none-any.whl", hash = "sha256:597477df7860daa5aa0fdd84bf5208a043ab96b8e96ab708770ae0364dd03213", size = 57731 }, -] - -[[package]] -name = "frozenlist" -version = "1.5.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/8f/ed/0f4cec13a93c02c47ec32d81d11c0c1efbadf4a471e3f3ce7cad366cbbd3/frozenlist-1.5.0.tar.gz", hash = "sha256:81d5af29e61b9c8348e876d442253723928dce6433e0e76cd925cd83f1b4b817", size = 39930 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/79/73/fa6d1a96ab7fd6e6d1c3500700963eab46813847f01ef0ccbaa726181dd5/frozenlist-1.5.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:31115ba75889723431aa9a4e77d5f398f5cf976eea3bdf61749731f62d4a4a21", size = 94026 }, - { url = "https://files.pythonhosted.org/packages/ab/04/ea8bf62c8868b8eada363f20ff1b647cf2e93377a7b284d36062d21d81d1/frozenlist-1.5.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7437601c4d89d070eac8323f121fcf25f88674627505334654fd027b091db09d", size = 54150 }, - { url = "https://files.pythonhosted.org/packages/d0/9a/8e479b482a6f2070b26bda572c5e6889bb3ba48977e81beea35b5ae13ece/frozenlist-1.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7948140d9f8ece1745be806f2bfdf390127cf1a763b925c4a805c603df5e697e", size = 51927 }, - { url = "https://files.pythonhosted.org/packages/e3/12/2aad87deb08a4e7ccfb33600871bbe8f0e08cb6d8224371387f3303654d7/frozenlist-1.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:feeb64bc9bcc6b45c6311c9e9b99406660a9c05ca8a5b30d14a78555088b0b3a", size = 282647 }, - { url = "https://files.pythonhosted.org/packages/77/f2/07f06b05d8a427ea0060a9cef6e63405ea9e0d761846b95ef3fb3be57111/frozenlist-1.5.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:683173d371daad49cffb8309779e886e59c2f369430ad28fe715f66d08d4ab1a", size = 289052 }, - { url = "https://files.pythonhosted.org/packages/bd/9f/8bf45a2f1cd4aa401acd271b077989c9267ae8463e7c8b1eb0d3f561b65e/frozenlist-1.5.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7d57d8f702221405a9d9b40f9da8ac2e4a1a8b5285aac6100f3393675f0a85ee", size = 291719 }, - { url = "https://files.pythonhosted.org/packages/41/d1/1f20fd05a6c42d3868709b7604c9f15538a29e4f734c694c6bcfc3d3b935/frozenlist-1.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:30c72000fbcc35b129cb09956836c7d7abf78ab5416595e4857d1cae8d6251a6", size = 267433 }, - { url = 
"https://files.pythonhosted.org/packages/af/f2/64b73a9bb86f5a89fb55450e97cd5c1f84a862d4ff90d9fd1a73ab0f64a5/frozenlist-1.5.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:000a77d6034fbad9b6bb880f7ec073027908f1b40254b5d6f26210d2dab1240e", size = 283591 }, - { url = "https://files.pythonhosted.org/packages/29/e2/ffbb1fae55a791fd6c2938dd9ea779509c977435ba3940b9f2e8dc9d5316/frozenlist-1.5.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5d7f5a50342475962eb18b740f3beecc685a15b52c91f7d975257e13e029eca9", size = 273249 }, - { url = "https://files.pythonhosted.org/packages/2e/6e/008136a30798bb63618a114b9321b5971172a5abddff44a100c7edc5ad4f/frozenlist-1.5.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:87f724d055eb4785d9be84e9ebf0f24e392ddfad00b3fe036e43f489fafc9039", size = 271075 }, - { url = "https://files.pythonhosted.org/packages/ae/f0/4e71e54a026b06724cec9b6c54f0b13a4e9e298cc8db0f82ec70e151f5ce/frozenlist-1.5.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:6e9080bb2fb195a046e5177f10d9d82b8a204c0736a97a153c2466127de87784", size = 285398 }, - { url = "https://files.pythonhosted.org/packages/4d/36/70ec246851478b1c0b59f11ef8ade9c482ff447c1363c2bd5fad45098b12/frozenlist-1.5.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9b93d7aaa36c966fa42efcaf716e6b3900438632a626fb09c049f6a2f09fc631", size = 294445 }, - { url = "https://files.pythonhosted.org/packages/37/e0/47f87544055b3349b633a03c4d94b405956cf2437f4ab46d0928b74b7526/frozenlist-1.5.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:52ef692a4bc60a6dd57f507429636c2af8b6046db8b31b18dac02cbc8f507f7f", size = 280569 }, - { url = "https://files.pythonhosted.org/packages/f9/7c/490133c160fb6b84ed374c266f42800e33b50c3bbab1652764e6e1fc498a/frozenlist-1.5.0-cp312-cp312-win32.whl", hash = "sha256:29d94c256679247b33a3dc96cce0f93cbc69c23bf75ff715919332fdbb6a32b8", size = 44721 }, - { url = "https://files.pythonhosted.org/packages/b1/56/4e45136ffc6bdbfa68c29ca56ef53783ef4c2fd395f7cbf99a2624aa9aaa/frozenlist-1.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:8969190d709e7c48ea386db202d708eb94bdb29207a1f269bab1196ce0dcca1f", size = 51329 }, - { url = "https://files.pythonhosted.org/packages/c6/c8/a5be5b7550c10858fcf9b0ea054baccab474da77d37f1e828ce043a3a5d4/frozenlist-1.5.0-py3-none-any.whl", hash = "sha256:d994863bba198a4a518b467bb971c56e1db3f180a25c6cf7bb1949c267f748c3", size = 11901 }, -] - -[[package]] -name = "fsspec" -version = "2024.9.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/62/7c/12b0943011daaaa9c35c2a2e22e5eb929ac90002f08f1259d69aedad84de/fsspec-2024.9.0.tar.gz", hash = "sha256:4b0afb90c2f21832df142f292649035d80b421f60a9e1c027802e5a0da2b04e8", size = 286206 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/1d/a0/6aaea0c2fbea2f89bfd5db25fb1e3481896a423002ebe4e55288907a97a3/fsspec-2024.9.0-py3-none-any.whl", hash = "sha256:a0947d552d8a6efa72cc2c730b12c41d043509156966cca4fb157b0f2a0c574b", size = 179253 }, -] - -[package.optional-dependencies] -http = [ - { name = "aiohttp" }, -] - -[[package]] -name = "h11" -version = "0.14.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/f5/38/3af3d3633a34a3316095b39c8e8fb4853a28a536e55d347bd8d8e9a14b03/h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d", size = 100418 } -wheels = [ - { url = 
"https://files.pythonhosted.org/packages/95/04/ff642e65ad6b90db43e668d70ffb6736436c7ce41fcc549f4e9472234127/h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761", size = 58259 }, -] - -[[package]] -name = "huggingface-hub" -version = "0.27.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "filelock" }, - { name = "fsspec" }, - { name = "packaging" }, - { name = "pyyaml" }, - { name = "requests" }, - { name = "tqdm" }, - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/36/c6/e3709b61de8e7832dbe19f0d9637e81356cede733d99359fbce125423774/huggingface_hub-0.27.0.tar.gz", hash = "sha256:902cce1a1be5739f5589e560198a65a8edcfd3b830b1666f36e4b961f0454fac", size = 379286 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/61/8c/fbdc0a88a622d9fa54e132d7bf3ee03ec602758658a2db5b339a65be2cfe/huggingface_hub-0.27.0-py3-none-any.whl", hash = "sha256:8f2e834517f1f1ddf1ecc716f91b120d7333011b7485f665a9a412eacb1a2a81", size = 450537 }, -] - -[[package]] -name = "idna" -version = "3.10" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442 }, -] - -[[package]] -name = "iniconfig" -version = "2.0.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/d7/4b/cbd8e699e64a6f16ca3a8220661b5f83792b3017d0f79807cb8708d33913/iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3", size = 4646 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374", size = 5892 }, -] - -[[package]] -name = "isort" -version = "5.13.2" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/87/f9/c1eb8635a24e87ade2efce21e3ce8cd6b8630bb685ddc9cdaca1349b2eb5/isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109", size = 175303 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/d1/b3/8def84f539e7d2289a02f0524b944b15d7c75dab7628bedf1c4f0992029c/isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6", size = 92310 }, -] - -[[package]] -name = "llm-leaderboard-backend" -version = "0.1.0" -source = { editable = "." 
} -dependencies = [ - { name = "aiofiles" }, - { name = "datasets" }, - { name = "fastapi" }, - { name = "fastapi-cache2" }, - { name = "huggingface-hub" }, - { name = "numpy" }, - { name = "pandas" }, - { name = "pyarrow" }, - { name = "python-multipart" }, - { name = "safetensors" }, - { name = "transformers" }, - { name = "uvicorn" }, -] - -[package.optional-dependencies] -dev = [ - { name = "black" }, - { name = "flake8" }, - { name = "isort" }, - { name = "pytest" }, -] - -[package.metadata] -requires-dist = [ - { name = "aiofiles", specifier = ">=24.1.0" }, - { name = "black", marker = "extra == 'dev'", specifier = ">=24.10.0" }, - { name = "datasets", specifier = ">=3.2.0" }, - { name = "fastapi", specifier = ">=0.115.6" }, - { name = "fastapi-cache2", specifier = ">=0.2.1" }, - { name = "flake8", marker = "extra == 'dev'", specifier = ">=7.1.1" }, - { name = "huggingface-hub", specifier = ">=0.27.0" }, - { name = "isort", marker = "extra == 'dev'", specifier = ">=5.13.2" }, - { name = "numpy", specifier = ">=2.2.0" }, - { name = "pandas", specifier = ">=2.2.3" }, - { name = "pyarrow", specifier = ">=18.1.0" }, - { name = "pytest", marker = "extra == 'dev'", specifier = ">=8.3.4" }, - { name = "python-multipart", specifier = ">=0.0.20" }, - { name = "safetensors", specifier = ">=0.4.5" }, - { name = "transformers", specifier = ">=4.47.0" }, - { name = "uvicorn", specifier = ">=0.34.0" }, -] - -[[package]] -name = "mccabe" -version = "0.7.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/e7/ff/0ffefdcac38932a54d2b5eed4e0ba8a408f215002cd178ad1df0f2806ff8/mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325", size = 9658 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/27/1a/1f68f9ba0c207934b35b86a8ca3aad8395a3d6dd7921c0686e23853ff5a9/mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e", size = 7350 }, -] - -[[package]] -name = "multidict" -version = "6.1.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/d6/be/504b89a5e9ca731cd47487e91c469064f8ae5af93b7259758dcfc2b9c848/multidict-6.1.0.tar.gz", hash = "sha256:22ae2ebf9b0c69d206c003e2f6a914ea33f0a932d4aa16f236afc049d9958f4a", size = 64002 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/fd/16/92057c74ba3b96d5e211b553895cd6dc7cc4d1e43d9ab8fafc727681ef71/multidict-6.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b04772ed465fa3cc947db808fa306d79b43e896beb677a56fb2347ca1a49c1fa", size = 48713 }, - { url = "https://files.pythonhosted.org/packages/94/3d/37d1b8893ae79716179540b89fc6a0ee56b4a65fcc0d63535c6f5d96f217/multidict-6.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6180c0ae073bddeb5a97a38c03f30c233e0a4d39cd86166251617d1bbd0af436", size = 29516 }, - { url = "https://files.pythonhosted.org/packages/a2/12/adb6b3200c363062f805275b4c1e656be2b3681aada66c80129932ff0bae/multidict-6.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:071120490b47aa997cca00666923a83f02c7fbb44f71cf7f136df753f7fa8761", size = 29557 }, - { url = "https://files.pythonhosted.org/packages/47/e9/604bb05e6e5bce1e6a5cf80a474e0f072e80d8ac105f1b994a53e0b28c42/multidict-6.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50b3a2710631848991d0bf7de077502e8994c804bb805aeb2925a981de58ec2e", size = 130170 }, - { url = 
"https://files.pythonhosted.org/packages/7e/13/9efa50801785eccbf7086b3c83b71a4fb501a4d43549c2f2f80b8787d69f/multidict-6.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b58c621844d55e71c1b7f7c498ce5aa6985d743a1a59034c57a905b3f153c1ef", size = 134836 }, - { url = "https://files.pythonhosted.org/packages/bf/0f/93808b765192780d117814a6dfcc2e75de6dcc610009ad408b8814dca3ba/multidict-6.1.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55b6d90641869892caa9ca42ff913f7ff1c5ece06474fbd32fb2cf6834726c95", size = 133475 }, - { url = "https://files.pythonhosted.org/packages/d3/c8/529101d7176fe7dfe1d99604e48d69c5dfdcadb4f06561f465c8ef12b4df/multidict-6.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b820514bfc0b98a30e3d85462084779900347e4d49267f747ff54060cc33925", size = 131049 }, - { url = "https://files.pythonhosted.org/packages/ca/0c/fc85b439014d5a58063e19c3a158a889deec399d47b5269a0f3b6a2e28bc/multidict-6.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10a9b09aba0c5b48c53761b7c720aaaf7cf236d5fe394cd399c7ba662d5f9966", size = 120370 }, - { url = "https://files.pythonhosted.org/packages/db/46/d4416eb20176492d2258fbd47b4abe729ff3b6e9c829ea4236f93c865089/multidict-6.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1e16bf3e5fc9f44632affb159d30a437bfe286ce9e02754759be5536b169b305", size = 125178 }, - { url = "https://files.pythonhosted.org/packages/5b/46/73697ad7ec521df7de5531a32780bbfd908ded0643cbe457f981a701457c/multidict-6.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:76f364861c3bfc98cbbcbd402d83454ed9e01a5224bb3a28bf70002a230f73e2", size = 119567 }, - { url = "https://files.pythonhosted.org/packages/cd/ed/51f060e2cb0e7635329fa6ff930aa5cffa17f4c7f5c6c3ddc3500708e2f2/multidict-6.1.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:820c661588bd01a0aa62a1283f20d2be4281b086f80dad9e955e690c75fb54a2", size = 129822 }, - { url = "https://files.pythonhosted.org/packages/df/9e/ee7d1954b1331da3eddea0c4e08d9142da5f14b1321c7301f5014f49d492/multidict-6.1.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:0e5f362e895bc5b9e67fe6e4ded2492d8124bdf817827f33c5b46c2fe3ffaca6", size = 128656 }, - { url = "https://files.pythonhosted.org/packages/77/00/8538f11e3356b5d95fa4b024aa566cde7a38aa7a5f08f4912b32a037c5dc/multidict-6.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3ec660d19bbc671e3a6443325f07263be452c453ac9e512f5eb935e7d4ac28b3", size = 125360 }, - { url = "https://files.pythonhosted.org/packages/be/05/5d334c1f2462d43fec2363cd00b1c44c93a78c3925d952e9a71caf662e96/multidict-6.1.0-cp312-cp312-win32.whl", hash = "sha256:58130ecf8f7b8112cdb841486404f1282b9c86ccb30d3519faf301b2e5659133", size = 26382 }, - { url = "https://files.pythonhosted.org/packages/a3/bf/f332a13486b1ed0496d624bcc7e8357bb8053823e8cd4b9a18edc1d97e73/multidict-6.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:188215fc0aafb8e03341995e7c4797860181562380f81ed0a87ff455b70bf1f1", size = 28529 }, - { url = "https://files.pythonhosted.org/packages/99/b7/b9e70fde2c0f0c9af4cc5277782a89b66d35948ea3369ec9f598358c3ac5/multidict-6.1.0-py3-none-any.whl", hash = "sha256:48e171e52d1c4d33888e529b999e5900356b9ae588c2f09a52dcefb158b27506", size = 10051 }, -] - -[[package]] -name = "multiprocess" -version = "0.70.16" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "dill" }, -] -sdist = { url = 
"https://files.pythonhosted.org/packages/b5/ae/04f39c5d0d0def03247c2893d6f2b83c136bf3320a2154d7b8858f2ba72d/multiprocess-0.70.16.tar.gz", hash = "sha256:161af703d4652a0e1410be6abccecde4a7ddffd19341be0a7011b94aeb171ac1", size = 1772603 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/bc/f7/7ec7fddc92e50714ea3745631f79bd9c96424cb2702632521028e57d3a36/multiprocess-0.70.16-py310-none-any.whl", hash = "sha256:c4a9944c67bd49f823687463660a2d6daae94c289adff97e0f9d696ba6371d02", size = 134824 }, - { url = "https://files.pythonhosted.org/packages/50/15/b56e50e8debaf439f44befec5b2af11db85f6e0f344c3113ae0be0593a91/multiprocess-0.70.16-py311-none-any.whl", hash = "sha256:af4cabb0dac72abfb1e794fa7855c325fd2b55a10a44628a3c1ad3311c04127a", size = 143519 }, - { url = "https://files.pythonhosted.org/packages/0a/7d/a988f258104dcd2ccf1ed40fdc97e26c4ac351eeaf81d76e266c52d84e2f/multiprocess-0.70.16-py312-none-any.whl", hash = "sha256:fc0544c531920dde3b00c29863377f87e1632601092ea2daca74e4beb40faa2e", size = 146741 }, - { url = "https://files.pythonhosted.org/packages/ea/89/38df130f2c799090c978b366cfdf5b96d08de5b29a4a293df7f7429fa50b/multiprocess-0.70.16-py38-none-any.whl", hash = "sha256:a71d82033454891091a226dfc319d0cfa8019a4e888ef9ca910372a446de4435", size = 132628 }, - { url = "https://files.pythonhosted.org/packages/da/d9/f7f9379981e39b8c2511c9e0326d212accacb82f12fbfdc1aa2ce2a7b2b6/multiprocess-0.70.16-py39-none-any.whl", hash = "sha256:a0bafd3ae1b732eac64be2e72038231c1ba97724b60b09400d68f229fcc2fbf3", size = 133351 }, -] - -[[package]] -name = "mypy-extensions" -version = "1.0.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/98/a4/1ab47638b92648243faf97a5aeb6ea83059cc3624972ab6b8d2316078d3f/mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782", size = 4433 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/2a/e2/5d3f6ada4297caebe1a2add3b126fe800c96f56dbe5d1988a2cbe0b267aa/mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d", size = 4695 }, -] - -[[package]] -name = "numpy" -version = "2.2.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/47/1b/1d565e0f6e156e1522ab564176b8b29d71e13d8caf003a08768df3d5cec5/numpy-2.2.0.tar.gz", hash = "sha256:140dd80ff8981a583a60980be1a655068f8adebf7a45a06a6858c873fcdcd4a0", size = 20225497 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/7f/bc/a20dc4e1d051149052762e7647455311865d11c603170c476d1e910a353e/numpy-2.2.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:cff210198bb4cae3f3c100444c5eaa573a823f05c253e7188e1362a5555235b3", size = 20909153 }, - { url = "https://files.pythonhosted.org/packages/60/3d/ac4fb63f36db94f4c7db05b45e3ecb3f88f778ca71850664460c78cfde41/numpy-2.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:58b92a5828bd4d9aa0952492b7de803135038de47343b2aa3cc23f3b71a3dc4e", size = 14095021 }, - { url = "https://files.pythonhosted.org/packages/41/6d/a654d519d24e4fcc7a83d4a51209cda086f26cf30722b3d8ffc1aa9b775e/numpy-2.2.0-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:ebe5e59545401fbb1b24da76f006ab19734ae71e703cdb4a8b347e84a0cece67", size = 5125491 }, - { url = "https://files.pythonhosted.org/packages/e6/22/fab7e1510a62e5092f4e6507a279020052b89f11d9cfe52af7f52c243b04/numpy-2.2.0-cp312-cp312-macosx_14_0_x86_64.whl", hash = 
"sha256:e2b8cd48a9942ed3f85b95ca4105c45758438c7ed28fff1e4ce3e57c3b589d8e", size = 6658534 }, - { url = "https://files.pythonhosted.org/packages/fc/29/a3d938ddc5a534cd53df7ab79d20a68db8c67578de1df0ae0118230f5f54/numpy-2.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:57fcc997ffc0bef234b8875a54d4058afa92b0b0c4223fc1f62f24b3b5e86038", size = 14046306 }, - { url = "https://files.pythonhosted.org/packages/90/24/d0bbb56abdd8934f30384632e3c2ca1ebfeb5d17e150c6e366ba291de36b/numpy-2.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85ad7d11b309bd132d74397fcf2920933c9d1dc865487128f5c03d580f2c3d03", size = 16095819 }, - { url = "https://files.pythonhosted.org/packages/99/9c/58a673faa9e8a0e77248e782f7a17410cf7259b326265646fd50ed49c4e1/numpy-2.2.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:cb24cca1968b21355cc6f3da1a20cd1cebd8a023e3c5b09b432444617949085a", size = 15243215 }, - { url = "https://files.pythonhosted.org/packages/9c/61/f311693f78cbf635cfb69ce9e1e857ff83937a27d93c96ac5932fd33e330/numpy-2.2.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0798b138c291d792f8ea40fe3768610f3c7dd2574389e37c3f26573757c8f7ef", size = 17860175 }, - { url = "https://files.pythonhosted.org/packages/11/3e/491c34262cb1fc9dd13a00beb80d755ee0517b17db20e54cac7aa524533e/numpy-2.2.0-cp312-cp312-win32.whl", hash = "sha256:afe8fb968743d40435c3827632fd36c5fbde633b0423da7692e426529b1759b1", size = 6273281 }, - { url = "https://files.pythonhosted.org/packages/89/ea/00537f599eb230771157bc509f6ea5b2dddf05d4b09f9d2f1d7096a18781/numpy-2.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:3a4199f519e57d517ebd48cb76b36c82da0360781c6a0353e64c0cac30ecaad3", size = 12613227 }, -] - -[[package]] -name = "packaging" -version = "24.2" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/d0/63/68dbb6eb2de9cb10ee4c9c14a0148804425e13c4fb20d61cce69f53106da/packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f", size = 163950 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/88/ef/eb23f262cca3c0c4eb7ab1933c3b1f03d021f2c48f54763065b6f0e321be/packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759", size = 65451 }, -] - -[[package]] -name = "pandas" -version = "2.2.3" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "numpy" }, - { name = "python-dateutil" }, - { name = "pytz" }, - { name = "tzdata" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/9c/d6/9f8431bacc2e19dca897724cd097b1bb224a6ad5433784a44b587c7c13af/pandas-2.2.3.tar.gz", hash = "sha256:4f18ba62b61d7e192368b84517265a99b4d7ee8912f8708660fb4a366cc82667", size = 4399213 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/17/a3/fb2734118db0af37ea7433f57f722c0a56687e14b14690edff0cdb4b7e58/pandas-2.2.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b1d432e8d08679a40e2a6d8b2f9770a5c21793a6f9f47fdd52c5ce1948a5a8a9", size = 12529893 }, - { url = "https://files.pythonhosted.org/packages/e1/0c/ad295fd74bfac85358fd579e271cded3ac969de81f62dd0142c426b9da91/pandas-2.2.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a5a1595fe639f5988ba6a8e5bc9649af3baf26df3998a0abe56c02609392e0a4", size = 11363475 }, - { url = 
"https://files.pythonhosted.org/packages/c6/2a/4bba3f03f7d07207481fed47f5b35f556c7441acddc368ec43d6643c5777/pandas-2.2.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5de54125a92bb4d1c051c0659e6fcb75256bf799a732a87184e5ea503965bce3", size = 15188645 }, - { url = "https://files.pythonhosted.org/packages/38/f8/d8fddee9ed0d0c0f4a2132c1dfcf0e3e53265055da8df952a53e7eaf178c/pandas-2.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fffb8ae78d8af97f849404f21411c95062db1496aeb3e56f146f0355c9989319", size = 12739445 }, - { url = "https://files.pythonhosted.org/packages/20/e8/45a05d9c39d2cea61ab175dbe6a2de1d05b679e8de2011da4ee190d7e748/pandas-2.2.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6dfcb5ee8d4d50c06a51c2fffa6cff6272098ad6540aed1a76d15fb9318194d8", size = 16359235 }, - { url = "https://files.pythonhosted.org/packages/1d/99/617d07a6a5e429ff90c90da64d428516605a1ec7d7bea494235e1c3882de/pandas-2.2.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:062309c1b9ea12a50e8ce661145c6aab431b1e99530d3cd60640e255778bd43a", size = 14056756 }, - { url = "https://files.pythonhosted.org/packages/29/d4/1244ab8edf173a10fd601f7e13b9566c1b525c4f365d6bee918e68381889/pandas-2.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:59ef3764d0fe818125a5097d2ae867ca3fa64df032331b7e0917cf5d7bf66b13", size = 11504248 }, -] - -[[package]] -name = "pathspec" -version = "0.12.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ca/bc/f35b8446f4531a7cb215605d100cd88b7ac6f44ab3fc94870c120ab3adbf/pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712", size = 51043 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", size = 31191 }, -] - -[[package]] -name = "pendulum" -version = "3.0.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "python-dateutil" }, - { name = "time-machine", marker = "implementation_name != 'pypy'" }, - { name = "tzdata" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/b8/fe/27c7438c6ac8b8f8bef3c6e571855602ee784b85d072efddfff0ceb1cd77/pendulum-3.0.0.tar.gz", hash = "sha256:5d034998dea404ec31fae27af6b22cff1708f830a1ed7353be4d1019bb9f584e", size = 84524 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/1e/37/17c8f0e7481a32f21b9002dd68912a8813f2c1d77b984e00af56eb9ae31b/pendulum-3.0.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:409e64e41418c49f973d43a28afe5df1df4f1dd87c41c7c90f1a63f61ae0f1f7", size = 362284 }, - { url = "https://files.pythonhosted.org/packages/12/e6/08f462f6ea87e2159f19b43ff88231d26e02bda31c10bcb29290a617ace4/pendulum-3.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a38ad2121c5ec7c4c190c7334e789c3b4624798859156b138fcc4d92295835dc", size = 352964 }, - { url = "https://files.pythonhosted.org/packages/47/29/b6877f6b53b91356c2c56d19ddab17b165ca994ad1e57b32c089e79f3fb5/pendulum-3.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fde4d0b2024b9785f66b7f30ed59281bd60d63d9213cda0eb0910ead777f6d37", size = 335848 }, - { url = "https://files.pythonhosted.org/packages/2b/77/62ca666f30b2558342deadda26290a575459a7b59248ea1e978b84175227/pendulum-3.0.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:4b2c5675769fb6d4c11238132962939b960fcb365436b6d623c5864287faa319", size = 362215 }, - { url = "https://files.pythonhosted.org/packages/e0/29/ce37593f5ea51862c60dadf4e863d604f954478b3abbcc60a14dc05e242c/pendulum-3.0.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8af95e03e066826f0f4c65811cbee1b3123d4a45a1c3a2b4fc23c4b0dff893b5", size = 448673 }, - { url = "https://files.pythonhosted.org/packages/72/6a/68a8c7b8f1977d89aabfd0e2becb0921e5515dfb365097e98a522334a151/pendulum-3.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2165a8f33cb15e06c67070b8afc87a62b85c5a273e3aaa6bc9d15c93a4920d6f", size = 384891 }, - { url = "https://files.pythonhosted.org/packages/30/e6/edd699300f47a3c53c0d8ed26e905b9a31057c3646211e58cc540716a440/pendulum-3.0.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ad5e65b874b5e56bd942546ea7ba9dd1d6a25121db1c517700f1c9de91b28518", size = 559558 }, - { url = "https://files.pythonhosted.org/packages/d4/97/95a44aa5e1763d3a966551ed0e12f56508d8dfcc60e1f0395909b6a08626/pendulum-3.0.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:17fe4b2c844bbf5f0ece69cfd959fa02957c61317b2161763950d88fed8e13b9", size = 558240 }, - { url = "https://files.pythonhosted.org/packages/9a/91/fcd992eb36b77ab43f2cf44307b72c01a6fbb27f55c1bb2d4af30e9a6cb7/pendulum-3.0.0-cp312-none-win_amd64.whl", hash = "sha256:78f8f4e7efe5066aca24a7a57511b9c2119f5c2b5eb81c46ff9222ce11e0a7a5", size = 293456 }, - { url = "https://files.pythonhosted.org/packages/3b/60/ba8aa296ca6d76603d58146b4a222cd99e7da33831158b8c00240a896a56/pendulum-3.0.0-cp312-none-win_arm64.whl", hash = "sha256:28f49d8d1e32aae9c284a90b6bb3873eee15ec6e1d9042edd611b22a94ac462f", size = 288054 }, -] - -[[package]] -name = "platformdirs" -version = "4.3.6" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/13/fc/128cc9cb8f03208bdbf93d3aa862e16d376844a14f9a0ce5cf4507372de4/platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907", size = 21302 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/3c/a6/bc1012356d8ece4d66dd75c4b9fc6c1f6650ddd5991e421177d9f8f671be/platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb", size = 18439 }, -] - -[[package]] -name = "pluggy" -version = "1.5.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/96/2d/02d4312c973c6050a18b314a5ad0b3210edb65a906f868e31c111dede4a6/pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1", size = 67955 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/88/5f/e351af9a41f866ac3f1fac4ca0613908d9a41741cfcf2228f4ad853b697d/pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669", size = 20556 }, -] - -[[package]] -name = "propcache" -version = "0.2.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/20/c8/2a13f78d82211490855b2fb303b6721348d0787fdd9a12ac46d99d3acde1/propcache-0.2.1.tar.gz", hash = "sha256:3f77ce728b19cb537714499928fe800c3dda29e8d9428778fc7c186da4c09a64", size = 41735 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/4c/28/1d205fe49be8b1b4df4c50024e62480a442b1a7b818e734308bb0d17e7fb/propcache-0.2.1-cp312-cp312-macosx_10_13_universal2.whl", hash = 
"sha256:081a430aa8d5e8876c6909b67bd2d937bfd531b0382d3fdedb82612c618bc41a", size = 79588 }, - { url = "https://files.pythonhosted.org/packages/21/ee/fc4d893f8d81cd4971affef2a6cb542b36617cd1d8ce56b406112cb80bf7/propcache-0.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d2ccec9ac47cf4e04897619c0e0c1a48c54a71bdf045117d3a26f80d38ab1fb0", size = 45825 }, - { url = "https://files.pythonhosted.org/packages/4a/de/bbe712f94d088da1d237c35d735f675e494a816fd6f54e9db2f61ef4d03f/propcache-0.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:14d86fe14b7e04fa306e0c43cdbeebe6b2c2156a0c9ce56b815faacc193e320d", size = 45357 }, - { url = "https://files.pythonhosted.org/packages/7f/14/7ae06a6cf2a2f1cb382586d5a99efe66b0b3d0c6f9ac2f759e6f7af9d7cf/propcache-0.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:049324ee97bb67285b49632132db351b41e77833678432be52bdd0289c0e05e4", size = 241869 }, - { url = "https://files.pythonhosted.org/packages/cc/59/227a78be960b54a41124e639e2c39e8807ac0c751c735a900e21315f8c2b/propcache-0.2.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1cd9a1d071158de1cc1c71a26014dcdfa7dd3d5f4f88c298c7f90ad6f27bb46d", size = 247884 }, - { url = "https://files.pythonhosted.org/packages/84/58/f62b4ffaedf88dc1b17f04d57d8536601e4e030feb26617228ef930c3279/propcache-0.2.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98110aa363f1bb4c073e8dcfaefd3a5cea0f0834c2aab23dda657e4dab2f53b5", size = 248486 }, - { url = "https://files.pythonhosted.org/packages/1c/07/ebe102777a830bca91bbb93e3479cd34c2ca5d0361b83be9dbd93104865e/propcache-0.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:647894f5ae99c4cf6bb82a1bb3a796f6e06af3caa3d32e26d2350d0e3e3faf24", size = 243649 }, - { url = "https://files.pythonhosted.org/packages/ed/bc/4f7aba7f08f520376c4bb6a20b9a981a581b7f2e385fa0ec9f789bb2d362/propcache-0.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bfd3223c15bebe26518d58ccf9a39b93948d3dcb3e57a20480dfdd315356baff", size = 229103 }, - { url = "https://files.pythonhosted.org/packages/fe/d5/04ac9cd4e51a57a96f78795e03c5a0ddb8f23ec098b86f92de028d7f2a6b/propcache-0.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d71264a80f3fcf512eb4f18f59423fe82d6e346ee97b90625f283df56aee103f", size = 226607 }, - { url = "https://files.pythonhosted.org/packages/e3/f0/24060d959ea41d7a7cc7fdbf68b31852331aabda914a0c63bdb0e22e96d6/propcache-0.2.1-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:e73091191e4280403bde6c9a52a6999d69cdfde498f1fdf629105247599b57ec", size = 221153 }, - { url = "https://files.pythonhosted.org/packages/77/a7/3ac76045a077b3e4de4859a0753010765e45749bdf53bd02bc4d372da1a0/propcache-0.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3935bfa5fede35fb202c4b569bb9c042f337ca4ff7bd540a0aa5e37131659348", size = 222151 }, - { url = "https://files.pythonhosted.org/packages/e7/af/5e29da6f80cebab3f5a4dcd2a3240e7f56f2c4abf51cbfcc99be34e17f0b/propcache-0.2.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:f508b0491767bb1f2b87fdfacaba5f7eddc2f867740ec69ece6d1946d29029a6", size = 233812 }, - { url = "https://files.pythonhosted.org/packages/8c/89/ebe3ad52642cc5509eaa453e9f4b94b374d81bae3265c59d5c2d98efa1b4/propcache-0.2.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:1672137af7c46662a1c2be1e8dc78cb6d224319aaa40271c9257d886be4363a6", size = 238829 }, - { url = 
"https://files.pythonhosted.org/packages/e9/2f/6b32f273fa02e978b7577159eae7471b3cfb88b48563b1c2578b2d7ca0bb/propcache-0.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b74c261802d3d2b85c9df2dfb2fa81b6f90deeef63c2db9f0e029a3cac50b518", size = 230704 }, - { url = "https://files.pythonhosted.org/packages/5c/2e/f40ae6ff5624a5f77edd7b8359b208b5455ea113f68309e2b00a2e1426b6/propcache-0.2.1-cp312-cp312-win32.whl", hash = "sha256:d09c333d36c1409d56a9d29b3a1b800a42c76a57a5a8907eacdbce3f18768246", size = 40050 }, - { url = "https://files.pythonhosted.org/packages/3b/77/a92c3ef994e47180862b9d7d11e37624fb1c00a16d61faf55115d970628b/propcache-0.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:c214999039d4f2a5b2073ac506bba279945233da8c786e490d411dfc30f855c1", size = 44117 }, - { url = "https://files.pythonhosted.org/packages/41/b6/c5319caea262f4821995dca2107483b94a3345d4607ad797c76cb9c36bcc/propcache-0.2.1-py3-none-any.whl", hash = "sha256:52277518d6aae65536e9cea52d4e7fd2f7a66f4aa2d30ed3f2fcea620ace3c54", size = 11818 }, -] - -[[package]] -name = "pyarrow" -version = "18.1.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/7f/7b/640785a9062bb00314caa8a387abce547d2a420cf09bd6c715fe659ccffb/pyarrow-18.1.0.tar.gz", hash = "sha256:9386d3ca9c145b5539a1cfc75df07757dff870168c959b473a0bccbc3abc8c73", size = 1118671 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/6a/50/12829e7111b932581e51dda51d5cb39207a056c30fe31ef43f14c63c4d7e/pyarrow-18.1.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:9f3a76670b263dc41d0ae877f09124ab96ce10e4e48f3e3e4257273cee61ad0d", size = 29514620 }, - { url = "https://files.pythonhosted.org/packages/d1/41/468c944eab157702e96abab3d07b48b8424927d4933541ab43788bb6964d/pyarrow-18.1.0-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:da31fbca07c435be88a0c321402c4e31a2ba61593ec7473630769de8346b54ee", size = 30856494 }, - { url = "https://files.pythonhosted.org/packages/68/f9/29fb659b390312a7345aeb858a9d9c157552a8852522f2c8bad437c29c0a/pyarrow-18.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:543ad8459bc438efc46d29a759e1079436290bd583141384c6f7a1068ed6f992", size = 39203624 }, - { url = "https://files.pythonhosted.org/packages/6e/f6/19360dae44200e35753c5c2889dc478154cd78e61b1f738514c9f131734d/pyarrow-18.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0743e503c55be0fdb5c08e7d44853da27f19dc854531c0570f9f394ec9671d54", size = 40139341 }, - { url = "https://files.pythonhosted.org/packages/bb/e6/9b3afbbcf10cc724312e824af94a2e993d8ace22994d823f5c35324cebf5/pyarrow-18.1.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:d4b3d2a34780645bed6414e22dda55a92e0fcd1b8a637fba86800ad737057e33", size = 38618629 }, - { url = "https://files.pythonhosted.org/packages/3a/2e/3b99f8a3d9e0ccae0e961978a0d0089b25fb46ebbcfb5ebae3cca179a5b3/pyarrow-18.1.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:c52f81aa6f6575058d8e2c782bf79d4f9fdc89887f16825ec3a66607a5dd8e30", size = 40078661 }, - { url = "https://files.pythonhosted.org/packages/76/52/f8da04195000099d394012b8d42c503d7041b79f778d854f410e5f05049a/pyarrow-18.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:0ad4892617e1a6c7a551cfc827e072a633eaff758fa09f21c4ee548c30bcaf99", size = 25092330 }, -] - -[[package]] -name = "pycodestyle" -version = "2.12.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = 
"https://files.pythonhosted.org/packages/43/aa/210b2c9aedd8c1cbeea31a50e42050ad56187754b34eb214c46709445801/pycodestyle-2.12.1.tar.gz", hash = "sha256:6838eae08bbce4f6accd5d5572075c63626a15ee3e6f842df996bf62f6d73521", size = 39232 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/3a/d8/a211b3f85e99a0daa2ddec96c949cac6824bd305b040571b82a03dd62636/pycodestyle-2.12.1-py2.py3-none-any.whl", hash = "sha256:46f0fb92069a7c28ab7bb558f05bfc0110dac69a0cd23c61ea0040283a9d78b3", size = 31284 }, -] - -[[package]] -name = "pydantic" -version = "2.10.3" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "annotated-types" }, - { name = "pydantic-core" }, - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/45/0f/27908242621b14e649a84e62b133de45f84c255eecb350ab02979844a788/pydantic-2.10.3.tar.gz", hash = "sha256:cb5ac360ce894ceacd69c403187900a02c4b20b693a9dd1d643e1effab9eadf9", size = 786486 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/62/51/72c18c55cf2f46ff4f91ebcc8f75aa30f7305f3d726be3f4ebffb4ae972b/pydantic-2.10.3-py3-none-any.whl", hash = "sha256:be04d85bbc7b65651c5f8e6b9976ed9c6f41782a55524cef079a34a0bb82144d", size = 456997 }, -] - -[[package]] -name = "pydantic-core" -version = "2.27.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/a6/9f/7de1f19b6aea45aeb441838782d68352e71bfa98ee6fa048d5041991b33e/pydantic_core-2.27.1.tar.gz", hash = "sha256:62a763352879b84aa31058fc931884055fd75089cccbd9d58bb6afd01141b235", size = 412785 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/be/51/2e9b3788feb2aebff2aa9dfbf060ec739b38c05c46847601134cc1fed2ea/pydantic_core-2.27.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9cbd94fc661d2bab2bc702cddd2d3370bbdcc4cd0f8f57488a81bcce90c7a54f", size = 1895239 }, - { url = "https://files.pythonhosted.org/packages/7b/9e/f8063952e4a7d0127f5d1181addef9377505dcce3be224263b25c4f0bfd9/pydantic_core-2.27.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5f8c4718cd44ec1580e180cb739713ecda2bdee1341084c1467802a417fe0f02", size = 1805070 }, - { url = "https://files.pythonhosted.org/packages/2c/9d/e1d6c4561d262b52e41b17a7ef8301e2ba80b61e32e94520271029feb5d8/pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15aae984e46de8d376df515f00450d1522077254ef6b7ce189b38ecee7c9677c", size = 1828096 }, - { url = "https://files.pythonhosted.org/packages/be/65/80ff46de4266560baa4332ae3181fffc4488ea7d37282da1a62d10ab89a4/pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1ba5e3963344ff25fc8c40da90f44b0afca8cfd89d12964feb79ac1411a260ac", size = 1857708 }, - { url = "https://files.pythonhosted.org/packages/d5/ca/3370074ad758b04d9562b12ecdb088597f4d9d13893a48a583fb47682cdf/pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:992cea5f4f3b29d6b4f7f1726ed8ee46c8331c6b4eed6db5b40134c6fe1768bb", size = 2037751 }, - { url = "https://files.pythonhosted.org/packages/b1/e2/4ab72d93367194317b99d051947c071aef6e3eb95f7553eaa4208ecf9ba4/pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0325336f348dbee6550d129b1627cb8f5351a9dc91aad141ffb96d4937bd9529", size = 2733863 }, - { url = 
"https://files.pythonhosted.org/packages/8a/c6/8ae0831bf77f356bb73127ce5a95fe115b10f820ea480abbd72d3cc7ccf3/pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7597c07fbd11515f654d6ece3d0e4e5093edc30a436c63142d9a4b8e22f19c35", size = 2161161 }, - { url = "https://files.pythonhosted.org/packages/f1/f4/b2fe73241da2429400fc27ddeaa43e35562f96cf5b67499b2de52b528cad/pydantic_core-2.27.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3bbd5d8cc692616d5ef6fbbbd50dbec142c7e6ad9beb66b78a96e9c16729b089", size = 1993294 }, - { url = "https://files.pythonhosted.org/packages/77/29/4bb008823a7f4cc05828198153f9753b3bd4c104d93b8e0b1bfe4e187540/pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:dc61505e73298a84a2f317255fcc72b710b72980f3a1f670447a21efc88f8381", size = 2001468 }, - { url = "https://files.pythonhosted.org/packages/f2/a9/0eaceeba41b9fad851a4107e0cf999a34ae8f0d0d1f829e2574f3d8897b0/pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:e1f735dc43da318cad19b4173dd1ffce1d84aafd6c9b782b3abc04a0d5a6f5bb", size = 2091413 }, - { url = "https://files.pythonhosted.org/packages/d8/36/eb8697729725bc610fd73940f0d860d791dc2ad557faaefcbb3edbd2b349/pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f4e5658dbffe8843a0f12366a4c2d1c316dbe09bb4dfbdc9d2d9cd6031de8aae", size = 2154735 }, - { url = "https://files.pythonhosted.org/packages/52/e5/4f0fbd5c5995cc70d3afed1b5c754055bb67908f55b5cb8000f7112749bf/pydantic_core-2.27.1-cp312-none-win32.whl", hash = "sha256:672ebbe820bb37988c4d136eca2652ee114992d5d41c7e4858cdd90ea94ffe5c", size = 1833633 }, - { url = "https://files.pythonhosted.org/packages/ee/f2/c61486eee27cae5ac781305658779b4a6b45f9cc9d02c90cb21b940e82cc/pydantic_core-2.27.1-cp312-none-win_amd64.whl", hash = "sha256:66ff044fd0bb1768688aecbe28b6190f6e799349221fb0de0e6f4048eca14c16", size = 1986973 }, - { url = "https://files.pythonhosted.org/packages/df/a6/e3f12ff25f250b02f7c51be89a294689d175ac76e1096c32bf278f29ca1e/pydantic_core-2.27.1-cp312-none-win_arm64.whl", hash = "sha256:9a3b0793b1bbfd4146304e23d90045f2a9b5fd5823aa682665fbdaf2a6c28f3e", size = 1883215 }, -] - -[[package]] -name = "pyflakes" -version = "3.2.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/57/f9/669d8c9c86613c9d568757c7f5824bd3197d7b1c6c27553bc5618a27cce2/pyflakes-3.2.0.tar.gz", hash = "sha256:1c61603ff154621fb2a9172037d84dca3500def8c8b630657d1701f026f8af3f", size = 63788 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/d4/d7/f1b7db88d8e4417c5d47adad627a93547f44bdc9028372dbd2313f34a855/pyflakes-3.2.0-py2.py3-none-any.whl", hash = "sha256:84b5be138a2dfbb40689ca07e2152deb896a65c3a3e24c251c5c62489568074a", size = 62725 }, -] - -[[package]] -name = "pytest" -version = "8.3.4" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "colorama", marker = "sys_platform == 'win32'" }, - { name = "iniconfig" }, - { name = "packaging" }, - { name = "pluggy" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/05/35/30e0d83068951d90a01852cb1cef56e5d8a09d20c7f511634cc2f7e0372a/pytest-8.3.4.tar.gz", hash = "sha256:965370d062bce11e73868e0335abac31b4d3de0e82f4007408d242b4f8610761", size = 1445919 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/11/92/76a1c94d3afee238333bc0a42b82935dd8f9cf8ce9e336ff87ee14d9e1cf/pytest-8.3.4-py3-none-any.whl", hash = 
"sha256:50e16d954148559c9a74109af1eaf0c945ba2d8f30f0a3d3335edde19788b6f6", size = 343083 }, -] - -[[package]] -name = "python-dateutil" -version = "2.9.0.post0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "six" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892 }, -] - -[[package]] -name = "python-multipart" -version = "0.0.20" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/f3/87/f44d7c9f274c7ee665a29b885ec97089ec5dc034c7f3fafa03da9e39a09e/python_multipart-0.0.20.tar.gz", hash = "sha256:8dd0cab45b8e23064ae09147625994d090fa46f5b0d1e13af944c331a7fa9d13", size = 37158 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/45/58/38b5afbc1a800eeea951b9285d3912613f2603bdf897a4ab0f4bd7f405fc/python_multipart-0.0.20-py3-none-any.whl", hash = "sha256:8a62d3a8335e06589fe01f2a3e178cdcc632f3fbe0d492ad9ee0ec35aab1f104", size = 24546 }, -] - -[[package]] -name = "pytz" -version = "2024.2" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/3a/31/3c70bf7603cc2dca0f19bdc53b4537a797747a58875b552c8c413d963a3f/pytz-2024.2.tar.gz", hash = "sha256:2aa355083c50a0f93fa581709deac0c9ad65cca8a9e9beac660adcbd493c798a", size = 319692 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/11/c3/005fcca25ce078d2cc29fd559379817424e94885510568bc1bc53d7d5846/pytz-2024.2-py2.py3-none-any.whl", hash = "sha256:31c7c1817eb7fae7ca4b8c7ee50c72f93aa2dd863de768e1ef4245d426aa0725", size = 508002 }, -] - -[[package]] -name = "pyyaml" -version = "6.0.2" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/54/ed/79a089b6be93607fa5cdaedf301d7dfb23af5f25c398d5ead2525b063e17/pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", size = 130631 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/86/0c/c581167fc46d6d6d7ddcfb8c843a4de25bdd27e4466938109ca68492292c/PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab", size = 183873 }, - { url = "https://files.pythonhosted.org/packages/a8/0c/38374f5bb272c051e2a69281d71cba6fdb983413e6758b84482905e29a5d/PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725", size = 173302 }, - { url = "https://files.pythonhosted.org/packages/c3/93/9916574aa8c00aa06bbac729972eb1071d002b8e158bd0e83a3b9a20a1f7/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5", size = 739154 }, - { url = "https://files.pythonhosted.org/packages/95/0f/b8938f1cbd09739c6da569d172531567dbcc9789e0029aa070856f123984/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425", size = 766223 }, - { url = 
"https://files.pythonhosted.org/packages/b9/2b/614b4752f2e127db5cc206abc23a8c19678e92b23c3db30fc86ab731d3bd/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476", size = 767542 }, - { url = "https://files.pythonhosted.org/packages/d4/00/dd137d5bcc7efea1836d6264f049359861cf548469d18da90cd8216cf05f/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48", size = 731164 }, - { url = "https://files.pythonhosted.org/packages/c9/1f/4f998c900485e5c0ef43838363ba4a9723ac0ad73a9dc42068b12aaba4e4/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", size = 756611 }, - { url = "https://files.pythonhosted.org/packages/df/d1/f5a275fdb252768b7a11ec63585bc38d0e87c9e05668a139fea92b80634c/PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", size = 140591 }, - { url = "https://files.pythonhosted.org/packages/0c/e8/4f648c598b17c3d06e8753d7d13d57542b30d56e6c2dedf9c331ae56312e/PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", size = 156338 }, -] - -[[package]] -name = "regex" -version = "2024.11.6" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/8e/5f/bd69653fbfb76cf8604468d3b4ec4c403197144c7bfe0e6a5fc9e02a07cb/regex-2024.11.6.tar.gz", hash = "sha256:7ab159b063c52a0333c884e4679f8d7a85112ee3078fe3d9004b2dd875585519", size = 399494 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/ba/30/9a87ce8336b172cc232a0db89a3af97929d06c11ceaa19d97d84fa90a8f8/regex-2024.11.6-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:52fb28f528778f184f870b7cf8f225f5eef0a8f6e3778529bdd40c7b3920796a", size = 483781 }, - { url = "https://files.pythonhosted.org/packages/01/e8/00008ad4ff4be8b1844786ba6636035f7ef926db5686e4c0f98093612add/regex-2024.11.6-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fdd6028445d2460f33136c55eeb1f601ab06d74cb3347132e1c24250187500d9", size = 288455 }, - { url = "https://files.pythonhosted.org/packages/60/85/cebcc0aff603ea0a201667b203f13ba75d9fc8668fab917ac5b2de3967bc/regex-2024.11.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:805e6b60c54bf766b251e94526ebad60b7de0c70f70a4e6210ee2891acb70bf2", size = 284759 }, - { url = "https://files.pythonhosted.org/packages/94/2b/701a4b0585cb05472a4da28ee28fdfe155f3638f5e1ec92306d924e5faf0/regex-2024.11.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b85c2530be953a890eaffde05485238f07029600e8f098cdf1848d414a8b45e4", size = 794976 }, - { url = "https://files.pythonhosted.org/packages/4b/bf/fa87e563bf5fee75db8915f7352e1887b1249126a1be4813837f5dbec965/regex-2024.11.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb26437975da7dc36b7efad18aa9dd4ea569d2357ae6b783bf1118dabd9ea577", size = 833077 }, - { url = "https://files.pythonhosted.org/packages/a1/56/7295e6bad94b047f4d0834e4779491b81216583c00c288252ef625c01d23/regex-2024.11.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:abfa5080c374a76a251ba60683242bc17eeb2c9818d0d30117b4486be10c59d3", size = 823160 }, - { url = 
"https://files.pythonhosted.org/packages/fb/13/e3b075031a738c9598c51cfbc4c7879e26729c53aa9cca59211c44235314/regex-2024.11.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b7fa6606c2881c1db9479b0eaa11ed5dfa11c8d60a474ff0e095099f39d98e", size = 796896 }, - { url = "https://files.pythonhosted.org/packages/24/56/0b3f1b66d592be6efec23a795b37732682520b47c53da5a32c33ed7d84e3/regex-2024.11.6-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0c32f75920cf99fe6b6c539c399a4a128452eaf1af27f39bce8909c9a3fd8cbe", size = 783997 }, - { url = "https://files.pythonhosted.org/packages/f9/a1/eb378dada8b91c0e4c5f08ffb56f25fcae47bf52ad18f9b2f33b83e6d498/regex-2024.11.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:982e6d21414e78e1f51cf595d7f321dcd14de1f2881c5dc6a6e23bbbbd68435e", size = 781725 }, - { url = "https://files.pythonhosted.org/packages/83/f2/033e7dec0cfd6dda93390089864732a3409246ffe8b042e9554afa9bff4e/regex-2024.11.6-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a7c2155f790e2fb448faed6dd241386719802296ec588a8b9051c1f5c481bc29", size = 789481 }, - { url = "https://files.pythonhosted.org/packages/83/23/15d4552ea28990a74e7696780c438aadd73a20318c47e527b47a4a5a596d/regex-2024.11.6-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:149f5008d286636e48cd0b1dd65018548944e495b0265b45e1bffecce1ef7f39", size = 852896 }, - { url = "https://files.pythonhosted.org/packages/e3/39/ed4416bc90deedbfdada2568b2cb0bc1fdb98efe11f5378d9892b2a88f8f/regex-2024.11.6-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:e5364a4502efca094731680e80009632ad6624084aff9a23ce8c8c6820de3e51", size = 860138 }, - { url = "https://files.pythonhosted.org/packages/93/2d/dd56bb76bd8e95bbce684326302f287455b56242a4f9c61f1bc76e28360e/regex-2024.11.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0a86e7eeca091c09e021db8eb72d54751e527fa47b8d5787caf96d9831bd02ad", size = 787692 }, - { url = "https://files.pythonhosted.org/packages/0b/55/31877a249ab7a5156758246b9c59539abbeba22461b7d8adc9e8475ff73e/regex-2024.11.6-cp312-cp312-win32.whl", hash = "sha256:32f9a4c643baad4efa81d549c2aadefaeba12249b2adc5af541759237eee1c54", size = 262135 }, - { url = "https://files.pythonhosted.org/packages/38/ec/ad2d7de49a600cdb8dd78434a1aeffe28b9d6fc42eb36afab4a27ad23384/regex-2024.11.6-cp312-cp312-win_amd64.whl", hash = "sha256:a93c194e2df18f7d264092dc8539b8ffb86b45b899ab976aa15d48214138e81b", size = 273567 }, -] - -[[package]] -name = "requests" -version = "2.32.3" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "certifi" }, - { name = "charset-normalizer" }, - { name = "idna" }, - { name = "urllib3" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/63/70/2bf7780ad2d390a8d301ad0b550f1581eadbd9a20f896afe06353c2a2913/requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760", size = 131218 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/f9/9b/335f9764261e915ed497fcdeb11df5dfd6f7bf257d4a6a2a686d80da4d54/requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6", size = 64928 }, -] - -[[package]] -name = "safetensors" -version = "0.4.5" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/cb/46/a1c56ed856c6ac3b1a8b37abe5be0cac53219367af1331e721b04d122577/safetensors-0.4.5.tar.gz", hash = 
"sha256:d73de19682deabb02524b3d5d1f8b3aaba94c72f1bbfc7911b9b9d5d391c0310", size = 65702 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/bf/ac/5a63082f931e99200db95fd46fb6734f050bb6e96bf02521904c6518b7aa/safetensors-0.4.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:473300314e026bd1043cef391bb16a8689453363381561b8a3e443870937cc1e", size = 392015 }, - { url = "https://files.pythonhosted.org/packages/73/95/ab32aa6e9bdc832ff87784cdf9da26192b93de3ef82b8d1ada8f345c5044/safetensors-0.4.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:801183a0f76dc647f51a2d9141ad341f9665602a7899a693207a82fb102cc53e", size = 381774 }, - { url = "https://files.pythonhosted.org/packages/d6/6c/7e04b7626809fc63f3698f4c50e43aff2864b40089aa4506c918a75b8eed/safetensors-0.4.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1524b54246e422ad6fb6aea1ac71edeeb77666efa67230e1faf6999df9b2e27f", size = 441134 }, - { url = "https://files.pythonhosted.org/packages/58/2b/ffe7c86a277e6c1595fbdf415cfe2903f253f574a5405e93fda8baaa582c/safetensors-0.4.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b3139098e3e8b2ad7afbca96d30ad29157b50c90861084e69fcb80dec7430461", size = 438467 }, - { url = "https://files.pythonhosted.org/packages/67/9c/f271bd804e08c7fda954d17b70ff281228a88077337a9e70feace4f4cc93/safetensors-0.4.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65573dc35be9059770808e276b017256fa30058802c29e1038eb1c00028502ea", size = 476566 }, - { url = "https://files.pythonhosted.org/packages/4c/ad/4cf76a3e430a8a26108407fa6cb93e6f80d996a5cb75d9540c8fe3862990/safetensors-0.4.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fd33da8e9407559f8779c82a0448e2133737f922d71f884da27184549416bfed", size = 492253 }, - { url = "https://files.pythonhosted.org/packages/d9/40/a6f75ea449a9647423ec8b6f72c16998d35aa4b43cb38536ac060c5c7bf5/safetensors-0.4.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3685ce7ed036f916316b567152482b7e959dc754fcc4a8342333d222e05f407c", size = 434769 }, - { url = "https://files.pythonhosted.org/packages/52/47/d4b49b1231abf3131f7bb0bc60ebb94b27ee33e0a1f9569da05f8ac65dee/safetensors-0.4.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dde2bf390d25f67908278d6f5d59e46211ef98e44108727084d4637ee70ab4f1", size = 457166 }, - { url = "https://files.pythonhosted.org/packages/c3/cd/006468b03b0fa42ff82d795d47c4193e99001e96c3f08bd62ef1b5cab586/safetensors-0.4.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7469d70d3de970b1698d47c11ebbf296a308702cbaae7fcb993944751cf985f4", size = 619280 }, - { url = "https://files.pythonhosted.org/packages/22/4d/b6208d918e83daa84b424c0ac3191ae61b44b3191613a3a5a7b38f94b8ad/safetensors-0.4.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3a6ba28118636a130ccbb968bc33d4684c48678695dba2590169d5ab03a45646", size = 605390 }, - { url = "https://files.pythonhosted.org/packages/e8/20/bf0e01825dc01ed75538021a98b9a046e60ead63c6c6700764c821a8c873/safetensors-0.4.5-cp312-none-win32.whl", hash = "sha256:c859c7ed90b0047f58ee27751c8e56951452ed36a67afee1b0a87847d065eec6", size = 273250 }, - { url = "https://files.pythonhosted.org/packages/f1/5f/ab6b6cec85b40789801f35b7d2fb579ae242d8193929974a106d5ff5c835/safetensors-0.4.5-cp312-none-win_amd64.whl", hash = "sha256:b5a8810ad6a6f933fff6c276eae92c1da217b39b4d8b1bc1c0b8af2d270dc532", size = 286307 }, -] - -[[package]] -name = "six" -version = "1.17.0" -source = { 
registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050 }, -] - -[[package]] -name = "sniffio" -version = "1.3.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235 }, -] - -[[package]] -name = "starlette" -version = "0.41.3" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "anyio" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/1a/4c/9b5764bd22eec91c4039ef4c55334e9187085da2d8a2df7bd570869aae18/starlette-0.41.3.tar.gz", hash = "sha256:0e4ab3d16522a255be6b28260b938eae2482f98ce5cc934cb08dce8dc3ba5835", size = 2574159 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/96/00/2b325970b3060c7cecebab6d295afe763365822b1306a12eeab198f74323/starlette-0.41.3-py3-none-any.whl", hash = "sha256:44cedb2b7c77a9de33a8b74b2b90e9f50d11fcf25d8270ea525ad71a25374ff7", size = 73225 }, -] - -[[package]] -name = "time-machine" -version = "2.16.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "python-dateutil" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/fb/dd/5022939b9cadefe3af04f4012186c29b8afbe858b1ec2cfa38baeec94dab/time_machine-2.16.0.tar.gz", hash = "sha256:4a99acc273d2f98add23a89b94d4dd9e14969c01214c8514bfa78e4e9364c7e2", size = 24626 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/4a/f4/603a84e7ae6427a53953db9f61b689dc6adf233e03c5f5ca907a901452fd/time_machine-2.16.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:84788f4d62a8b1bf5e499bb9b0e23ceceea21c415ad6030be6267ce3d639842f", size = 20155 }, - { url = "https://files.pythonhosted.org/packages/d8/94/dbe69aecb4b84be52d34814e63176c5ca61f38ee9e6ecda11104653405b5/time_machine-2.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:15ec236b6571730236a193d9d6c11d472432fc6ab54e85eac1c16d98ddcd71bf", size = 16640 }, - { url = "https://files.pythonhosted.org/packages/da/13/27f11be25d7bd298e033b9da93217e5b68309bf724b6e494cdadb471d00d/time_machine-2.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cedc989717c8b44a3881ac3d68ab5a95820448796c550de6a2149ed1525157f0", size = 33721 }, - { url = "https://files.pythonhosted.org/packages/e6/9d/70e4640fed1fd8122204ae825c688d0ef8c04f515ec6bf3c5f3086d6510e/time_machine-2.16.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9d26d79de1c63a8c6586c75967e09b0ff306aa7e944a1eaddb74595c9b1839ca", size = 31646 }, - { url = 
"https://files.pythonhosted.org/packages/a1/cb/93bc0e51bea4e171a85151dbba3c3b3f612b50b953cd3076f5b4f0db9e14/time_machine-2.16.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:317b68b56a9c3731e0cf8886e0f94230727159e375988b36c60edce0ddbcb44a", size = 33403 }, - { url = "https://files.pythonhosted.org/packages/89/71/2c6a63ad4fbce3d62d46bbd9ac4433f30bade7f25978ce00815b905bcfcf/time_machine-2.16.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:43e1e18279759897be3293a255d53e6b1cb0364b69d9591d0b80c51e461c94b0", size = 33327 }, - { url = "https://files.pythonhosted.org/packages/68/4e/205c2b26763b8817cd6b8868242843800a1fbf275f2af35f5ba35ff2b01a/time_machine-2.16.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e43adb22def972a29d2b147999b56897116085777a0fea182fd93ee45730611e", size = 31454 }, - { url = "https://files.pythonhosted.org/packages/d7/95/44c1aa3994919f93534244c40cfd2fb9416d7686dc0c8b9b262c751b5118/time_machine-2.16.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0c766bea27a0600e36806d628ebc4b47178b12fcdfb6c24dc0a566a9c06bfe7f", size = 32972 }, - { url = "https://files.pythonhosted.org/packages/d4/ee/75243df9c7cf30f108758e887141a58e6544baaa46e2e647b9ccc56db819/time_machine-2.16.0-cp312-cp312-win32.whl", hash = "sha256:6dae82ab647d107817e013db82223e20a9853fa88543fec853ae326382d03c2e", size = 19078 }, - { url = "https://files.pythonhosted.org/packages/d4/7c/d4e67cc031f9653c92167ccf87d241e3208653d191c96ac79281c273ab92/time_machine-2.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:265462c77dc9576267c3c7f20707780a171a9fdbac93ac22e608c309efd68c33", size = 19923 }, - { url = "https://files.pythonhosted.org/packages/aa/b6/7047226fcb9afefe47fc80f605530535bf71ad99b6797f057abbfa4cd9a5/time_machine-2.16.0-cp312-cp312-win_arm64.whl", hash = "sha256:ef768e14768eebe3bb1196c0dece8e14c1c6991605721214a0c3c68cf77eb216", size = 18003 }, -] - -[[package]] -name = "tokenizers" -version = "0.21.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "huggingface-hub" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/20/41/c2be10975ca37f6ec40d7abd7e98a5213bb04f284b869c1a24e6504fd94d/tokenizers-0.21.0.tar.gz", hash = "sha256:ee0894bf311b75b0c03079f33859ae4b2334d675d4e93f5a4132e1eae2834fe4", size = 343021 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/b0/5c/8b09607b37e996dc47e70d6a7b6f4bdd4e4d5ab22fe49d7374565c7fefaf/tokenizers-0.21.0-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:3c4c93eae637e7d2aaae3d376f06085164e1660f89304c0ab2b1d08a406636b2", size = 2647461 }, - { url = "https://files.pythonhosted.org/packages/22/7a/88e58bb297c22633ed1c9d16029316e5b5ac5ee44012164c2edede599a5e/tokenizers-0.21.0-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:f53ea537c925422a2e0e92a24cce96f6bc5046bbef24a1652a5edc8ba975f62e", size = 2563639 }, - { url = "https://files.pythonhosted.org/packages/f7/14/83429177c19364df27d22bc096d4c2e431e0ba43e56c525434f1f9b0fd00/tokenizers-0.21.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b177fb54c4702ef611de0c069d9169f0004233890e0c4c5bd5508ae05abf193", size = 2903304 }, - { url = "https://files.pythonhosted.org/packages/7e/db/3433eab42347e0dc5452d8fcc8da03f638c9accffefe5a7c78146666964a/tokenizers-0.21.0-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6b43779a269f4629bebb114e19c3fca0223296ae9fea8bb9a7a6c6fb0657ff8e", size = 2804378 }, - { url = 
"https://files.pythonhosted.org/packages/57/8b/7da5e6f89736c2ade02816b4733983fca1c226b0c42980b1ae9dc8fcf5cc/tokenizers-0.21.0-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9aeb255802be90acfd363626753fda0064a8df06031012fe7d52fd9a905eb00e", size = 3095488 }, - { url = "https://files.pythonhosted.org/packages/4d/f6/5ed6711093dc2c04a4e03f6461798b12669bc5a17c8be7cce1240e0b5ce8/tokenizers-0.21.0-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d8b09dbeb7a8d73ee204a70f94fc06ea0f17dcf0844f16102b9f414f0b7463ba", size = 3121410 }, - { url = "https://files.pythonhosted.org/packages/81/42/07600892d48950c5e80505b81411044a2d969368cdc0d929b1c847bf6697/tokenizers-0.21.0-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:400832c0904f77ce87c40f1a8a27493071282f785724ae62144324f171377273", size = 3388821 }, - { url = "https://files.pythonhosted.org/packages/22/06/69d7ce374747edaf1695a4f61b83570d91cc8bbfc51ccfecf76f56ab4aac/tokenizers-0.21.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e84ca973b3a96894d1707e189c14a774b701596d579ffc7e69debfc036a61a04", size = 3008868 }, - { url = "https://files.pythonhosted.org/packages/c8/69/54a0aee4d576045b49a0eb8bffdc495634309c823bf886042e6f46b80058/tokenizers-0.21.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:eb7202d231b273c34ec67767378cd04c767e967fda12d4a9e36208a34e2f137e", size = 8975831 }, - { url = "https://files.pythonhosted.org/packages/f7/f3/b776061e4f3ebf2905ba1a25d90380aafd10c02d406437a8ba22d1724d76/tokenizers-0.21.0-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:089d56db6782a73a27fd8abf3ba21779f5b85d4a9f35e3b493c7bbcbbf0d539b", size = 8920746 }, - { url = "https://files.pythonhosted.org/packages/d8/ee/ce83d5ec8b6844ad4c3ecfe3333d58ecc1adc61f0878b323a15355bcab24/tokenizers-0.21.0-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:c87ca3dc48b9b1222d984b6b7490355a6fdb411a2d810f6f05977258400ddb74", size = 9161814 }, - { url = "https://files.pythonhosted.org/packages/18/07/3e88e65c0ed28fa93aa0c4d264988428eef3df2764c3126dc83e243cb36f/tokenizers-0.21.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:4145505a973116f91bc3ac45988a92e618a6f83eb458f49ea0790df94ee243ff", size = 9357138 }, - { url = "https://files.pythonhosted.org/packages/15/b0/dc4572ca61555fc482ebc933f26cb407c6aceb3dc19c301c68184f8cad03/tokenizers-0.21.0-cp39-abi3-win32.whl", hash = "sha256:eb1702c2f27d25d9dd5b389cc1f2f51813e99f8ca30d9e25348db6585a97e24a", size = 2202266 }, - { url = "https://files.pythonhosted.org/packages/44/69/d21eb253fa91622da25585d362a874fa4710be600f0ea9446d8d0217cec1/tokenizers-0.21.0-cp39-abi3-win_amd64.whl", hash = "sha256:87841da5a25a3a5f70c102de371db120f41873b854ba65e52bccd57df5a3780c", size = 2389192 }, -] - -[[package]] -name = "tqdm" -version = "4.67.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "colorama", marker = "platform_system == 'Windows'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/a8/4b/29b4ef32e036bb34e4ab51796dd745cdba7ed47ad142a9f4a1eb8e0c744d/tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2", size = 169737 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2", size = 78540 }, -] - -[[package]] -name = "transformers" -version = "4.47.0" -source = { registry = 
"https://pypi.org/simple" } -dependencies = [ - { name = "filelock" }, - { name = "huggingface-hub" }, - { name = "numpy" }, - { name = "packaging" }, - { name = "pyyaml" }, - { name = "regex" }, - { name = "requests" }, - { name = "safetensors" }, - { name = "tokenizers" }, - { name = "tqdm" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/b1/5a/0ecfde3264bed0579c37f249e04e15f3c1451ba864d78bbe390177664cac/transformers-4.47.0.tar.gz", hash = "sha256:f8ead7a5a4f6937bb507e66508e5e002dc5930f7b6122a9259c37b099d0f3b19", size = 8693668 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/d0/a7/7eedcf6a359e1e1eff3bc204ad022485aa5d88c08e1e3e0e0aee8a2e2235/transformers-4.47.0-py3-none-any.whl", hash = "sha256:a8e1bafdaae69abdda3cad638fe392e37c86d2ce0ecfcae11d60abb8f949ff4d", size = 10133426 }, -] - -[[package]] -name = "typing-extensions" -version = "4.12.2" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/df/db/f35a00659bc03fec321ba8bce9420de607a1d37f8342eee1863174c69557/typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8", size = 85321 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/26/9f/ad63fc0248c5379346306f8668cda6e2e2e9c95e01216d2b8ffd9ff037d0/typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d", size = 37438 }, -] - -[[package]] -name = "tzdata" -version = "2024.2" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/e1/34/943888654477a574a86a98e9896bae89c7aa15078ec29f490fef2f1e5384/tzdata-2024.2.tar.gz", hash = "sha256:7d85cc416e9382e69095b7bdf4afd9e3880418a2413feec7069d533d6b4e31cc", size = 193282 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/a6/ab/7e5f53c3b9d14972843a647d8d7a853969a58aecc7559cb3267302c94774/tzdata-2024.2-py2.py3-none-any.whl", hash = "sha256:a48093786cdcde33cad18c2555e8532f34422074448fbc874186f0abd79565cd", size = 346586 }, -] - -[[package]] -name = "urllib3" -version = "2.2.3" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ed/63/22ba4ebfe7430b76388e7cd448d5478814d3032121827c12a2cc287e2260/urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9", size = 300677 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/ce/d9/5f4c13cecde62396b0d3fe530a50ccea91e7dfc1ccf0e09c228841bb5ba8/urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac", size = 126338 }, -] - -[[package]] -name = "uvicorn" -version = "0.34.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "click" }, - { name = "h11" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/4b/4d/938bd85e5bf2edeec766267a5015ad969730bb91e31b44021dfe8b22df6c/uvicorn-0.34.0.tar.gz", hash = "sha256:404051050cd7e905de2c9a7e61790943440b3416f49cb409f965d9dcd0fa73e9", size = 76568 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/61/14/33a3a1352cfa71812a3a21e8c9bfb83f60b0011f5e36f2b1399d51928209/uvicorn-0.34.0-py3-none-any.whl", hash = "sha256:023dc038422502fa28a09c7a30bf2b6991512da7dcdb8fd35fe57cfc154126f4", size = 62315 }, -] - -[[package]] -name = "xxhash" -version = "3.5.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = 
"https://files.pythonhosted.org/packages/00/5e/d6e5258d69df8b4ed8c83b6664f2b47d30d2dec551a29ad72a6c69eafd31/xxhash-3.5.0.tar.gz", hash = "sha256:84f2caddf951c9cbf8dc2e22a89d4ccf5d86391ac6418fe81e3c67d0cf60b45f", size = 84241 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/07/0e/1bfce2502c57d7e2e787600b31c83535af83746885aa1a5f153d8c8059d6/xxhash-3.5.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:14470ace8bd3b5d51318782cd94e6f94431974f16cb3b8dc15d52f3b69df8e00", size = 31969 }, - { url = "https://files.pythonhosted.org/packages/3f/d6/8ca450d6fe5b71ce521b4e5db69622383d039e2b253e9b2f24f93265b52c/xxhash-3.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:59aa1203de1cb96dbeab595ded0ad0c0056bb2245ae11fac11c0ceea861382b9", size = 30787 }, - { url = "https://files.pythonhosted.org/packages/5b/84/de7c89bc6ef63d750159086a6ada6416cc4349eab23f76ab870407178b93/xxhash-3.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:08424f6648526076e28fae6ea2806c0a7d504b9ef05ae61d196d571e5c879c84", size = 220959 }, - { url = "https://files.pythonhosted.org/packages/fe/86/51258d3e8a8545ff26468c977101964c14d56a8a37f5835bc0082426c672/xxhash-3.5.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:61a1ff00674879725b194695e17f23d3248998b843eb5e933007ca743310f793", size = 200006 }, - { url = "https://files.pythonhosted.org/packages/02/0a/96973bd325412feccf23cf3680fd2246aebf4b789122f938d5557c54a6b2/xxhash-3.5.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2f2c61bee5844d41c3eb015ac652a0229e901074951ae48581d58bfb2ba01be", size = 428326 }, - { url = "https://files.pythonhosted.org/packages/11/a7/81dba5010f7e733de88af9555725146fc133be97ce36533867f4c7e75066/xxhash-3.5.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d32a592cac88d18cc09a89172e1c32d7f2a6e516c3dfde1b9adb90ab5df54a6", size = 194380 }, - { url = "https://files.pythonhosted.org/packages/fb/7d/f29006ab398a173f4501c0e4977ba288f1c621d878ec217b4ff516810c04/xxhash-3.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:70dabf941dede727cca579e8c205e61121afc9b28516752fd65724be1355cc90", size = 207934 }, - { url = "https://files.pythonhosted.org/packages/8a/6e/6e88b8f24612510e73d4d70d9b0c7dff62a2e78451b9f0d042a5462c8d03/xxhash-3.5.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e5d0ddaca65ecca9c10dcf01730165fd858533d0be84c75c327487c37a906a27", size = 216301 }, - { url = "https://files.pythonhosted.org/packages/af/51/7862f4fa4b75a25c3b4163c8a873f070532fe5f2d3f9b3fc869c8337a398/xxhash-3.5.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3e5b5e16c5a480fe5f59f56c30abdeba09ffd75da8d13f6b9b6fd224d0b4d0a2", size = 203351 }, - { url = "https://files.pythonhosted.org/packages/22/61/8d6a40f288f791cf79ed5bb113159abf0c81d6efb86e734334f698eb4c59/xxhash-3.5.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:149b7914451eb154b3dfaa721315117ea1dac2cc55a01bfbd4df7c68c5dd683d", size = 210294 }, - { url = "https://files.pythonhosted.org/packages/17/02/215c4698955762d45a8158117190261b2dbefe9ae7e5b906768c09d8bc74/xxhash-3.5.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:eade977f5c96c677035ff39c56ac74d851b1cca7d607ab3d8f23c6b859379cab", size = 414674 }, - { url = "https://files.pythonhosted.org/packages/31/5c/b7a8db8a3237cff3d535261325d95de509f6a8ae439a5a7a4ffcff478189/xxhash-3.5.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:fa9f547bd98f5553d03160967866a71056a60960be00356a15ecc44efb40ba8e", size = 192022 }, - { url = "https://files.pythonhosted.org/packages/78/e3/dd76659b2811b3fd06892a8beb850e1996b63e9235af5a86ea348f053e9e/xxhash-3.5.0-cp312-cp312-win32.whl", hash = "sha256:f7b58d1fd3551b8c80a971199543379be1cee3d0d409e1f6d8b01c1a2eebf1f8", size = 30170 }, - { url = "https://files.pythonhosted.org/packages/d9/6b/1c443fe6cfeb4ad1dcf231cdec96eb94fb43d6498b4469ed8b51f8b59a37/xxhash-3.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:fa0cafd3a2af231b4e113fba24a65d7922af91aeb23774a8b78228e6cd785e3e", size = 30040 }, - { url = "https://files.pythonhosted.org/packages/0f/eb/04405305f290173acc0350eba6d2f1a794b57925df0398861a20fbafa415/xxhash-3.5.0-cp312-cp312-win_arm64.whl", hash = "sha256:586886c7e89cb9828bcd8a5686b12e161368e0064d040e225e72607b43858ba2", size = 26796 }, -] - -[[package]] -name = "yarl" -version = "1.18.3" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "idna" }, - { name = "multidict" }, - { name = "propcache" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/b7/9d/4b94a8e6d2b51b599516a5cb88e5bc99b4d8d4583e468057eaa29d5f0918/yarl-1.18.3.tar.gz", hash = "sha256:ac1801c45cbf77b6c99242eeff4fffb5e4e73a800b5c4ad4fc0be5def634d2e1", size = 181062 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/33/85/bd2e2729752ff4c77338e0102914897512e92496375e079ce0150a6dc306/yarl-1.18.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:1dd4bdd05407ced96fed3d7f25dbbf88d2ffb045a0db60dbc247f5b3c5c25d50", size = 142644 }, - { url = "https://files.pythonhosted.org/packages/ff/74/1178322cc0f10288d7eefa6e4a85d8d2e28187ccab13d5b844e8b5d7c88d/yarl-1.18.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7c33dd1931a95e5d9a772d0ac5e44cac8957eaf58e3c8da8c1414de7dd27c576", size = 94962 }, - { url = "https://files.pythonhosted.org/packages/be/75/79c6acc0261e2c2ae8a1c41cf12265e91628c8c58ae91f5ff59e29c0787f/yarl-1.18.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:25b411eddcfd56a2f0cd6a384e9f4f7aa3efee14b188de13048c25b5e91f1640", size = 92795 }, - { url = "https://files.pythonhosted.org/packages/6b/32/927b2d67a412c31199e83fefdce6e645247b4fb164aa1ecb35a0f9eb2058/yarl-1.18.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:436c4fc0a4d66b2badc6c5fc5ef4e47bb10e4fd9bf0c79524ac719a01f3607c2", size = 332368 }, - { url = "https://files.pythonhosted.org/packages/19/e5/859fca07169d6eceeaa4fde1997c91d8abde4e9a7c018e371640c2da2b71/yarl-1.18.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e35ef8683211db69ffe129a25d5634319a677570ab6b2eba4afa860f54eeaf75", size = 342314 }, - { url = "https://files.pythonhosted.org/packages/08/75/76b63ccd91c9e03ab213ef27ae6add2e3400e77e5cdddf8ed2dbc36e3f21/yarl-1.18.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:84b2deecba4a3f1a398df819151eb72d29bfeb3b69abb145a00ddc8d30094512", size = 341987 }, - { url = "https://files.pythonhosted.org/packages/1a/e1/a097d5755d3ea8479a42856f51d97eeff7a3a7160593332d98f2709b3580/yarl-1.18.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00e5a1fea0fd4f5bfa7440a47eff01d9822a65b4488f7cff83155a0f31a2ecba", size = 336914 }, - { url = "https://files.pythonhosted.org/packages/0b/42/e1b4d0e396b7987feceebe565286c27bc085bf07d61a59508cdaf2d45e63/yarl-1.18.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:d0e883008013c0e4aef84dcfe2a0b172c4d23c2669412cf5b3371003941f72bb", size = 325765 }, - { url = "https://files.pythonhosted.org/packages/7e/18/03a5834ccc9177f97ca1bbb245b93c13e58e8225276f01eedc4cc98ab820/yarl-1.18.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5a3f356548e34a70b0172d8890006c37be92995f62d95a07b4a42e90fba54272", size = 344444 }, - { url = "https://files.pythonhosted.org/packages/c8/03/a713633bdde0640b0472aa197b5b86e90fbc4c5bc05b727b714cd8a40e6d/yarl-1.18.3-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:ccd17349166b1bee6e529b4add61727d3f55edb7babbe4069b5764c9587a8cc6", size = 340760 }, - { url = "https://files.pythonhosted.org/packages/eb/99/f6567e3f3bbad8fd101886ea0276c68ecb86a2b58be0f64077396cd4b95e/yarl-1.18.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b958ddd075ddba5b09bb0be8a6d9906d2ce933aee81100db289badbeb966f54e", size = 346484 }, - { url = "https://files.pythonhosted.org/packages/8e/a9/84717c896b2fc6cb15bd4eecd64e34a2f0a9fd6669e69170c73a8b46795a/yarl-1.18.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c7d79f7d9aabd6011004e33b22bc13056a3e3fb54794d138af57f5ee9d9032cb", size = 359864 }, - { url = "https://files.pythonhosted.org/packages/1e/2e/d0f5f1bef7ee93ed17e739ec8dbcb47794af891f7d165fa6014517b48169/yarl-1.18.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:4891ed92157e5430874dad17b15eb1fda57627710756c27422200c52d8a4e393", size = 364537 }, - { url = "https://files.pythonhosted.org/packages/97/8a/568d07c5d4964da5b02621a517532adb8ec5ba181ad1687191fffeda0ab6/yarl-1.18.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ce1af883b94304f493698b00d0f006d56aea98aeb49d75ec7d98cd4a777e9285", size = 357861 }, - { url = "https://files.pythonhosted.org/packages/7d/e3/924c3f64b6b3077889df9a1ece1ed8947e7b61b0a933f2ec93041990a677/yarl-1.18.3-cp312-cp312-win32.whl", hash = "sha256:f91c4803173928a25e1a55b943c81f55b8872f0018be83e3ad4938adffb77dd2", size = 84097 }, - { url = "https://files.pythonhosted.org/packages/34/45/0e055320daaabfc169b21ff6174567b2c910c45617b0d79c68d7ab349b02/yarl-1.18.3-cp312-cp312-win_amd64.whl", hash = "sha256:7e2ee16578af3b52ac2f334c3b1f92262f47e02cc6193c598502bd46f5cd1477", size = 90399 }, - { url = "https://files.pythonhosted.org/packages/f5/4b/a06e0ec3d155924f77835ed2d167ebd3b211a7b0853da1cf8d8414d784ef/yarl-1.18.3-py3-none-any.whl", hash = "sha256:b57f4f58099328dfb26c6a771d09fb20dbbae81d20cfb66141251ea063bd101b", size = 45109 }, -] diff --git a/docker-compose.yml b/docker-compose.yml deleted file mode 100644 index f2893d3eef1fc6b5af918d0debd30e9c45de5733..0000000000000000000000000000000000000000 --- a/docker-compose.yml +++ /dev/null @@ -1,33 +0,0 @@ -services: - backend: - build: - context: ./backend - dockerfile: Dockerfile.dev - args: - - HF_TOKEN=${HF_TOKEN} - ports: - - "${BACKEND_PORT:-8000}:8000" - volumes: - - ./backend:/app - environment: - - ENVIRONMENT=${ENVIRONMENT:-development} - - HF_TOKEN=${HF_TOKEN} - - HF_HOME=${HF_HOME:-/.cache} - command: uvicorn app.asgi:app --host 0.0.0.0 --port 8000 --reload - - frontend: - build: - context: ./frontend - dockerfile: Dockerfile.dev - ports: - - "${FRONTEND_PORT:-7860}:7860" - volumes: - - ./frontend:/app - - /app/node_modules - environment: - - NODE_ENV=${ENVIRONMENT:-development} - - CHOKIDAR_USEPOLLING=true - - PORT=${FRONTEND_PORT:-7860} - command: npm start - stdin_open: true - tty: true \ No newline at end of file diff --git a/frontend/Dockerfile.dev b/frontend/Dockerfile.dev deleted file mode 100644 index 
259f7c9d8746db26bee8ee531d96cbe0d619321e..0000000000000000000000000000000000000000 --- a/frontend/Dockerfile.dev +++ /dev/null @@ -1,15 +0,0 @@ -FROM node:18 - -WORKDIR /app - -# Install required global dependencies -RUN npm install -g react-scripts - -# Copy package.json and package-lock.json -COPY package*.json ./ - -# Install project dependencies -RUN npm install - -# Volume will be mounted here, no need for COPY -CMD ["npm", "start"] \ No newline at end of file diff --git a/frontend/README.md b/frontend/README.md deleted file mode 100644 index 7ef4ff265f3c870efce128f47bdda8d266689a88..0000000000000000000000000000000000000000 --- a/frontend/README.md +++ /dev/null @@ -1,80 +0,0 @@ -# Frontend - Open LLM Leaderboard 🏆 - -React interface for exploring and comparing open-source language models. - -## 🏗 Architecture - -```mermaid -flowchart TD - Client(["User Browser"]) --> Components["React Components"] - - subgraph Frontend - Components --> Context["Context Layer
<br>• LeaderboardContext<br>• Global State"] - - API["API Layer<br>• /api/leaderboard/formatted<br>• TanStack Query"] --> |Data Feed| Context - - Context --> Hooks["Hooks Layer<br>• Data Processing<br>• Filtering<br>• Caching"] - - Hooks --> Features["Features<br>• Table Management<br>• Search & Filters<br>• Display Options"] - Features --> Cache["Cache Layer<br>• LocalStorage<br>
• URL State"] - end - - API --> Backend["Backend Server"] - - style Backend fill:#f96,stroke:#333,stroke-width:2px -``` - -## ✨ Core Features - -- 🔍 **Search & Filters**: Real-time filtering, regex search, advanced filters -- 📊 **Data Visualization**: Interactive table, customizable columns, sorting -- 🔄 **State Management**: URL sync, client-side caching (5min TTL) -- 📱 **Responsive Design**: Mobile-friendly, dark/light themes - -## 🛠 Tech Stack - -- React 18 + Material-UI -- TanStack Query & Table -- React Router v6 - -## 📁 Project Structure - -``` -src/ -├── pages/ -│ └── LeaderboardPage/ -│ ├── components/ # UI Components -│ ├── context/ # Global State -│ └── hooks/ # Data Processing -├── components/ # Shared Components -└── utils/ # Helper Functions -``` - -## 🚀 Development - -```bash -# Install dependencies -npm install - -# Start development server -npm start - -# Production build -npm run build -``` - -## 🔧 Environment Variables - -```env -# API Configuration -REACT_APP_API_URL=http://localhost:8000 -REACT_APP_CACHE_DURATION=300000 # 5 minutes -``` - -## 🔄 Data Flow - -1. API fetches leaderboard data from backend -2. Context stores and manages global state -3. Hooks handle data processing and filtering -4. Components render based on processed data -5. Cache maintains user preferences and URL state diff --git a/frontend/package.json b/frontend/package.json deleted file mode 100644 index 93de14fd49415a97be66fa06310e2a1249b85ad6..0000000000000000000000000000000000000000 --- a/frontend/package.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "name": "open-llm-leaderboard", - "version": "0.1.0", - "private": true, - "dependencies": { - "@emotion/react": "^11.13.3", - "@emotion/styled": "^11.13.0", - "@huggingface/hub": "^0.14.0", - "@mui/icons-material": "^6.1.7", - "@mui/lab": "^6.0.0-beta.16", - "@mui/material": "^6.1.6", - "@mui/x-data-grid": "^7.22.2", - "@tanstack/react-query": "^5.62.2", - "@tanstack/react-table": "^8.20.5", - "@tanstack/react-virtual": "^3.10.9", - "@testing-library/jest-dom": "^5.17.0", - "@testing-library/react": "^13.4.0", - "@testing-library/user-event": "^13.5.0", - "compression": "^1.7.4", - "cors": "^2.8.5", - "express": "^4.18.2", - "react": "^18.3.1", - "react-dom": "^18.3.1", - "react-router-dom": "^6.28.0", - "react-scripts": "5.0.1", - "serve-static": "^1.15.0", - "web-vitals": "^2.1.4" - }, - "scripts": { - "start": "react-scripts start", - "build": "react-scripts build", - "test": "react-scripts test", - "eject": "react-scripts eject", - "serve": "node server.js" - }, - "eslintConfig": { - "extends": [ - "react-app", - "react-app/jest" - ] - }, - "browserslist": { - "production": [ - ">0.2%", - "not dead", - "not op_mini all" - ], - "development": [ - "last 1 chrome version", - "last 1 firefox version", - "last 1 safari version" - ] - }, - "proxy": "http://backend:8000" -} diff --git a/frontend/public/index.html b/frontend/public/index.html deleted file mode 100644 index a8591a1fee67f55b23e147afb2b8a5e7afc5005a..0000000000000000000000000000000000000000 --- a/frontend/public/index.html +++ /dev/null @@ -1,96 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - Open LLM Leaderboard - Compare Open Source Large Language Models - - - - - - -
-
-
-
diff --git a/frontend/public/logo256.png b/frontend/public/logo256.png
deleted file mode 100644
index 58547e134af0ac1200a4608fb1c800b3e8e9ddf1..0000000000000000000000000000000000000000
Binary files a/frontend/public/logo256.png and /dev/null differ
diff --git a/frontend/public/logo32.png b/frontend/public/logo32.png
deleted file mode 100644
index 1b6e8fbd42dd1bcc599649bf6f230fde89a6908a..0000000000000000000000000000000000000000
Binary files a/frontend/public/logo32.png and /dev/null differ
diff --git a/frontend/public/og-image.jpg b/frontend/public/og-image.jpg
deleted file mode 100644
index 1d4a3f3cb7d838489ef0a5dde1ce7c493273f98d..0000000000000000000000000000000000000000
Binary files a/frontend/public/og-image.jpg and /dev/null differ
diff --git a/frontend/public/robots.txt b/frontend/public/robots.txt
deleted file mode 100644
index e9e57dc4d41b9b46e05112e9f45b7ea6ac0ba15e..0000000000000000000000000000000000000000
--- a/frontend/public/robots.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-# https://www.robotstxt.org/robotstxt.html
-User-agent: *
-Disallow:
diff --git a/frontend/server.js b/frontend/server.js
deleted file mode 100644
index 653befea69419568b117ce809871639d86d65581..0000000000000000000000000000000000000000
--- a/frontend/server.js
+++ /dev/null
@@ -1,85 +0,0 @@
-const express = require("express");
-const cors = require("cors");
-const compression = require("compression");
-const path = require("path");
-const serveStatic = require("serve-static");
-const { createProxyMiddleware } = require("http-proxy-middleware");
-
-const app = express();
-const port = process.env.PORT || 7860;
-const apiPort = process.env.INTERNAL_API_PORT || 7861;
-
-// Enable CORS for all routes
-app.use(cors());
-
-// Enable GZIP compression
-app.use(compression());
-
-// Proxy all API requests to the Python backend
-app.use(
-  "/api",
-  createProxyMiddleware({
-    target: `http://127.0.0.1:${apiPort}`,
-    changeOrigin: true,
-    onError: (err, req, res) => {
-      console.error("Proxy Error:", err);
-      res.status(500).json({ error: "Proxy Error", details: err.message });
-    },
-  })
-);
-
-// Serve static files from the build directory
-app.use(
-  express.static(path.join(__dirname, "build"), {
-    // Don't cache HTML files
-    setHeaders: (res, path) => {
-      if (path.endsWith(".html")) {
-        res.setHeader("Cache-Control", "no-cache, no-store, must-revalidate");
-        res.setHeader("Pragma", "no-cache");
-        res.setHeader("Expires", "0");
-      } else {
-        // Cache other static resources for 1 year
-        res.setHeader("Cache-Control", "public, max-age=31536000");
-      }
-    },
-  })
-);
-
-// Middleware to preserve URL parameters
-app.use((req, res, next) => {
-  // Don't interfere with API requests
-  if (req.url.startsWith("/api")) {
-    return next();
-  }
-
-  // Preserve original URL parameters
-  req.originalUrl = req.url;
-  next();
-});
-
-// Handle all other routes by serving index.html
-app.get("*", (req, res, next) => {
-  // Don't interfere with API requests
-  if (req.url.startsWith("/api")) {
-    return next();
-  }
-
-  // Headers for client-side routing
-  res.set({
-    "Cache-Control": "no-cache, no-store, must-revalidate",
-    Pragma: "no-cache",
-    Expires: "0",
-  });
-
-  // Send index.html for all other routes
-  res.sendFile(path.join(__dirname, "build", "index.html"));
-});
-
-app.listen(port, "0.0.0.0", () => {
-  console.log(
-    `Frontend server is running on port ${port} in ${
-      process.env.NODE_ENV || "development"
-    } mode`
-  );
-  console.log(`API proxy target: http://127.0.0.1:${apiPort}`);
-});
diff --git a/frontend/src/App.js b/frontend/src/App.js
deleted file mode 100644
index 2c406222601bba8f7fe2173564438908e55d5760..0000000000000000000000000000000000000000
--- a/frontend/src/App.js
+++ /dev/null
@@ -1,125 +0,0 @@
-import React, { useEffect } from "react";
-import {
-  HashRouter as Router,
-  Routes,
-  Route,
-  useSearchParams,
-  useLocation,
-} from "react-router-dom";
-import { ThemeProvider } from "@mui/material/styles";
-import { Box, CssBaseline } from "@mui/material";
-import Navigation from "./components/Navigation/Navigation";
-import LeaderboardPage from "./pages/LeaderboardPage/LeaderboardPage";
-import AddModelPage from "./pages/AddModelPage/AddModelPage";
-import QuotePage from "./pages/QuotePage/QuotePage";
-import VoteModelPage from "./pages/VoteModelPage/VoteModelPage";
-import Footer from "./components/Footer/Footer";
-import getTheme from "./config/theme";
-import { useThemeMode } from "./hooks/useThemeMode";
-import { QueryClient, QueryClientProvider } from "@tanstack/react-query";
-import LeaderboardProvider from "./pages/LeaderboardPage/components/Leaderboard/context/LeaderboardContext";
-
-const queryClient = new QueryClient({
-  defaultOptions: {
-    queries: {
-      retry: 1,
-      refetchOnWindowFocus: false,
-    },
-  },
-});
-
-function UrlHandler() {
-  const location = useLocation();
-  const [searchParams] = useSearchParams();
-
-  // Keep the URL in sync with the parent HF page
-  useEffect(() => {
-    // Check whether we are running inside an HF Space iframe
-    const isHFSpace = window.location !== window.parent.location;
-    if (!isHFSpace) return;
-
-    // Sync query and hash from this embedded app to the parent page URL
-    const queryString = window.location.search;
-    const hash = window.location.hash;
-
-    // HF Spaces' special message type to update the query string and the hash in the parent page URL
-    window.parent.postMessage(
-      {
-        queryString,
-        hash,
-      },
-      "https://huggingface.co"
-    );
-  }, [location, searchParams]);
-
-  // Read the updated hash reactively
-  useEffect(() => {
-    const handleHashChange = (event) => {
-      console.log("hash change event", event);
-    };
-
-    window.addEventListener("hashchange", handleHashChange);
-    return () => window.removeEventListener("hashchange", handleHashChange);
-  }, []);
-
-  return null;
-}
-
-function App() {
-  const { mode, toggleTheme } = useThemeMode();
-  const theme = getTheme(mode);
-
-  return (
-
- - - - - - - - - - - } /> - } /> - } /> - } /> - - -
- - - - - -
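For readers skimming the deleted `App.js` above: when the app detects it is embedded in a Hugging Face Space iframe, `UrlHandler` forwards the query string and hash to the parent page so deep links survive the embedding. The payload shape and target origin below are taken from that code; the concrete values are made-up examples:

```js
// Illustrative message, matching the shape UrlHandler posts from the iframe.
window.parent.postMessage(
  { queryString: "?pinned=true", hash: "#/vote" }, // example values only
  "https://huggingface.co"
);
```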
- ); -} - -export default App; diff --git a/frontend/src/components/Footer/Footer.js b/frontend/src/components/Footer/Footer.js deleted file mode 100644 index 2064e062f55de1cf477fd80211f2bd5d9835fb63..0000000000000000000000000000000000000000 --- a/frontend/src/components/Footer/Footer.js +++ /dev/null @@ -1,30 +0,0 @@ -import React from "react"; -import { Box, Typography, Link } from "@mui/material"; - -const Footer = () => { - return ( - - - © 2024 Hugging Face - Open LLM Leaderboard - Made with 🤗 by the HF team - -{" "} - - huggingface.co - - - - ); -}; - -export default Footer; diff --git a/frontend/src/components/Logo/HFLogo.js b/frontend/src/components/Logo/HFLogo.js deleted file mode 100644 index e49263da5f52e62f50db806f6f295d94e75be47f..0000000000000000000000000000000000000000 --- a/frontend/src/components/Logo/HFLogo.js +++ /dev/null @@ -1,19 +0,0 @@ -import React from 'react'; - -const HFLogo = () => ( - - hg-logo - - -); - -export default HFLogo; \ No newline at end of file diff --git a/frontend/src/components/Logo/Logo.js b/frontend/src/components/Logo/Logo.js deleted file mode 100644 index 55db4a876d67bdc378ac86c8a5aba2276ff6df33..0000000000000000000000000000000000000000 --- a/frontend/src/components/Logo/Logo.js +++ /dev/null @@ -1,56 +0,0 @@ -import React from "react"; -import { useNavigate, useSearchParams, useLocation } from "react-router-dom"; -import { Box } from "@mui/material"; -import HFLogo from "./HFLogo"; -import { useLeaderboard } from "../../pages/LeaderboardPage/components/Leaderboard/context/LeaderboardContext"; - -const Logo = ({ height = "40px" }) => { - const navigate = useNavigate(); - const [searchParams, setSearchParams] = useSearchParams(); - const location = useLocation(); - const { actions } = useLeaderboard(); - - const handleReset = () => { - // Reset all leaderboard state first - actions.resetAll(); - - // Then clean URL in one go - if ( - location.pathname !== "/" || - searchParams.toString() !== "" || - location.hash !== "" - ) { - window.history.replaceState(null, "", "/"); - navigate("/", { replace: true, state: { skipUrlSync: true } }); - setSearchParams({}, { replace: true, state: { skipUrlSync: true } }); - } - }; - - return ( - - - - - - ); -}; - -export default Logo; diff --git a/frontend/src/components/Navigation/Navigation.js b/frontend/src/components/Navigation/Navigation.js deleted file mode 100644 index aef6ce63105663596092a643387c2310f191a575..0000000000000000000000000000000000000000 --- a/frontend/src/components/Navigation/Navigation.js +++ /dev/null @@ -1,504 +0,0 @@ -import React, { useState } from "react"; -import { - AppBar, - Toolbar, - Box, - Link as MuiLink, - IconButton, - Tooltip, - ButtonBase, - Typography, -} from "@mui/material"; -import { useLocation, useNavigate, useSearchParams } from "react-router-dom"; -import OpenInNewIcon from "@mui/icons-material/OpenInNew"; -import LightModeOutlinedIcon from "@mui/icons-material/LightModeOutlined"; -import DarkModeOutlinedIcon from "@mui/icons-material/DarkModeOutlined"; -import { alpha } from "@mui/material/styles"; -import MenuIcon from "@mui/icons-material/Menu"; -import { Menu, MenuItem, useMediaQuery, useTheme } from "@mui/material"; - -const Navigation = ({ onToggleTheme, mode }) => { - const location = useLocation(); - const navigate = useNavigate(); - const [searchParams] = useSearchParams(); - const [anchorEl, setAnchorEl] = useState(null); - const theme = useTheme(); - const isMobile = useMediaQuery(theme.breakpoints.down("md")); - const [hasChanged, setHasChanged] = 
useState(false); - - const handleThemeToggle = () => { - setHasChanged(true); - onToggleTheme(); - }; - - const iconStyle = { - fontSize: "1.125rem", - ...(hasChanged && { - animation: "rotateIn 0.3s cubic-bezier(0.4, 0, 0.2, 1)", - "@keyframes rotateIn": { - "0%": { - opacity: 0, - transform: - mode === "light" - ? "rotate(-90deg) scale(0.8)" - : "rotate(90deg) scale(0.8)", - }, - "100%": { - opacity: 1, - transform: "rotate(0) scale(1)", - }, - }, - }), - }; - - // Function to sync URL with parent HF page - const syncUrlWithParent = (queryString, hash) => { - // Check if we're in an HF Space iframe - const isHFSpace = window.location !== window.parent.location; - if (isHFSpace) { - try { - // Build complete URL with hash - const fullPath = `${queryString}${hash ? "#" + hash : ""}`; - window.parent.postMessage( - { - type: "urlUpdate", - path: fullPath, - }, - "https://huggingface.co" - ); - } catch (e) { - console.warn("Unable to sync URL with parent:", e); - } - } - }; - - const linkStyle = (isActive = false) => ({ - textDecoration: "none", - color: isActive ? "text.primary" : "text.secondary", - fontSize: "0.8125rem", - opacity: isActive ? 1 : 0.8, - display: "flex", - alignItems: "center", - gap: 0.5, - paddingBottom: "2px", - cursor: "pointer", - position: "relative", - "&:hover": { - opacity: 1, - color: "text.primary", - }, - "&::after": isActive - ? { - content: '""', - position: "absolute", - bottom: "-4px", - left: "0", - width: "100%", - height: "2px", - backgroundColor: (theme) => - alpha( - theme.palette.text.primary, - theme.palette.mode === "dark" ? 0.3 : 0.2 - ), - borderRadius: "2px", - } - : {}, - }); - - const Separator = () => ( - ({ - width: "4px", - height: "4px", - borderRadius: "100%", - backgroundColor: alpha( - theme.palette.text.primary, - theme.palette.mode === "dark" ? 0.2 : 0.15 - ), - })} - /> - ); - - const handleNavigation = (path) => (e) => { - e.preventDefault(); - const searchString = searchParams.toString(); - const queryString = searchString ? `?${searchString}` : ""; - const newPath = `${path}${queryString}`; - - // Local navigation via React Router - navigate(newPath); - - // If in HF Space, sync with parent - if (window.location !== window.parent.location) { - syncUrlWithParent(queryString, newPath); - } - }; - - const handleMenuOpen = (event) => { - setAnchorEl(event.currentTarget); - }; - - const handleMenuClose = () => { - setAnchorEl(null); - }; - - return ( - - - {isMobile ? ( - - - - - - - `1px solid ${alpha(theme.palette.divider, 0.1)}`, - backgroundColor: (theme) => - theme.palette.mode === "dark" - ? alpha(theme.palette.background.paper, 0.8) - : theme.palette.background.paper, - backdropFilter: "blur(20px)", - "& .MuiList-root": { - py: 1, - }, - "& .MuiMenuItem-root": { - px: 2, - py: 1, - fontSize: "0.8125rem", - color: "text.secondary", - transition: "all 0.2s ease-in-out", - position: "relative", - "&:hover": { - backgroundColor: (theme) => - alpha( - theme.palette.text.primary, - theme.palette.mode === "dark" ? 0.1 : 0.06 - ), - color: "text.primary", - }, - "&.Mui-selected": { - backgroundColor: "transparent", - color: "text.primary", - "&::after": { - content: '""', - position: "absolute", - left: "8px", - width: "4px", - height: "100%", - top: "0", - backgroundColor: (theme) => - alpha( - theme.palette.text.primary, - theme.palette.mode === "dark" ? 0.3 : 0.2 - ), - borderRadius: "2px", - }, - "&:hover": { - backgroundColor: (theme) => - alpha( - theme.palette.text.primary, - theme.palette.mode === "dark" ? 
0.1 : 0.06 - ), - }, - }, - }, - }, - }} - transformOrigin={{ horizontal: "left", vertical: "top" }} - anchorOrigin={{ horizontal: "left", vertical: "bottom" }} - > - {/* Navigation Section */} - - - Navigation - - - { - handleNavigation("/")(e); - handleMenuClose(); - }} - selected={location.pathname === "/"} - > - Leaderboard - - { - handleNavigation("/add")(e); - handleMenuClose(); - }} - selected={location.pathname === "/add"} - > - Submit model - - { - handleNavigation("/vote")(e); - handleMenuClose(); - }} - selected={location.pathname === "/vote"} - > - Vote for next model - - { - handleNavigation("/quote")(e); - handleMenuClose(); - }} - selected={location.pathname === "/quote"} - > - Citations - - - {/* Separator */} - - `1px solid ${alpha(theme.palette.divider, 0.1)}`, - }} - /> - - {/* External Links Section */} - - - External links - - - - Compare models - - - - About - - - - - - ({ - color: "text.secondary", - borderRadius: "100%", - padding: 0, - width: "36px", - height: "36px", - display: "flex", - alignItems: "center", - justifyContent: "center", - transition: "all 0.2s ease-in-out", - "&:hover": { - color: "text.primary", - backgroundColor: alpha( - theme.palette.text.primary, - theme.palette.mode === "dark" ? 0.1 : 0.06 - ), - }, - "&.MuiButtonBase-root": { - overflow: "hidden", - }, - "& .MuiTouchRipple-root": { - color: alpha(theme.palette.text.primary, 0.3), - }, - })} - > - {mode === "light" ? ( - - ) : ( - - )} - - - - ) : ( - // Desktop version - - {/* Internal navigation */} - - - Leaderboard - - - Submit model - - - Vote for next model - - - Citations - - - - - - {/* External links */} - - - Compare models - - - - About - - - - - - - {/* Dark mode toggle */} - - ({ - color: "text.secondary", - borderRadius: "100%", - padding: 0, - width: "36px", - height: "36px", - display: "flex", - alignItems: "center", - justifyContent: "center", - transition: "all 0.2s ease-in-out", - "&:hover": { - color: "text.primary", - backgroundColor: alpha( - theme.palette.text.primary, - theme.palette.mode === "dark" ? 0.1 : 0.06 - ), - }, - "&.MuiButtonBase-root": { - overflow: "hidden", - }, - "& .MuiTouchRipple-root": { - color: alpha(theme.palette.text.primary, 0.3), - }, - })} - > - {mode === "light" ? 
( - - ) : ( - - )} - - - - )} - - - ); -}; - -export default Navigation; diff --git a/frontend/src/components/shared/AuthContainer.js b/frontend/src/components/shared/AuthContainer.js deleted file mode 100644 index 7b4955845c03b07e6918827808723186997938b4..0000000000000000000000000000000000000000 --- a/frontend/src/components/shared/AuthContainer.js +++ /dev/null @@ -1,139 +0,0 @@ -import React from "react"; -import { - Box, - Typography, - Button, - Chip, - Stack, - Paper, - CircularProgress, -} from "@mui/material"; -import HFLogo from "../Logo/HFLogo"; -import { useAuth } from "../../hooks/useAuth"; -import LogoutIcon from "@mui/icons-material/Logout"; -import { useNavigate } from "react-router-dom"; - -function AuthContainer({ actionText = "DO_ACTION" }) { - const { isAuthenticated, user, login, logout, loading } = useAuth(); - const navigate = useNavigate(); - - const handleLogout = () => { - if (isAuthenticated && logout) { - logout(); - navigate("/", { replace: true }); - window.location.reload(); - } - }; - - if (loading) { - return ( - - - - ); - } - - if (!isAuthenticated) { - return ( - - - Login to {actionText} - - - You need to be logged in with your Hugging Face account to{" "} - {actionText.toLowerCase()} - - - - ); - } - - return ( - - - - - Connected as {user?.username} - - - - - - - ); -} - -export default AuthContainer; diff --git a/frontend/src/components/shared/CodeBlock.js b/frontend/src/components/shared/CodeBlock.js deleted file mode 100644 index 6f06f6eed1f6a17dd70334d3a7bb4d0ab897355c..0000000000000000000000000000000000000000 --- a/frontend/src/components/shared/CodeBlock.js +++ /dev/null @@ -1,37 +0,0 @@ -import React from 'react'; -import { Box, IconButton } from '@mui/material'; -import ContentCopyIcon from '@mui/icons-material/ContentCopy'; - -const CodeBlock = ({ code }) => ( - - navigator.clipboard.writeText(code)} - sx={{ - position: 'absolute', - top: 8, - right: 8, - color: 'grey.500', - '&:hover': { color: 'grey.300' }, - }} - > - - - - {code} - - -); - -export default CodeBlock; \ No newline at end of file diff --git a/frontend/src/components/shared/FilterTag.js b/frontend/src/components/shared/FilterTag.js deleted file mode 100644 index 3cd154cb61a699bf94a2af0ba78286e3588aa754..0000000000000000000000000000000000000000 --- a/frontend/src/components/shared/FilterTag.js +++ /dev/null @@ -1,139 +0,0 @@ -import React from "react"; -import { Chip } from "@mui/material"; -import { useTheme } from "@mui/material/styles"; -import { alpha } from "@mui/material/styles"; -import CheckBoxOutlineBlankIcon from "@mui/icons-material/CheckBoxOutlineBlank"; -import CheckBoxOutlinedIcon from "@mui/icons-material/CheckBoxOutlined"; - -const FilterTag = ({ - label, - checked, - onChange, - count, - isHideFilter = false, - totalCount = 0, - variant = "tag", - showCheckbox = false, - stacked = false, - sx = {}, -}) => { - const theme = useTheme(); - - const formatCount = (count) => { - if (count === undefined) return ""; - return `${count}`; - }; - - const mainLabel = label; - const countLabel = count !== undefined ? formatCount(count) : ""; - - return ( - - ) : ( - - ) - ) : null - } - label={ - - {mainLabel} - {countLabel && ( - <> - - {countLabel} - - )} - - } - onClick={onChange} - variant="outlined" - color={ - checked - ? variant === "secondary" - ? "secondary" - : "primary" - : "default" - } - size="small" - data-checked={checked} - sx={{ - height: "32px", - fontWeight: 600, - opacity: checked ? 
1 : 0.8, - borderRadius: "5px", - borderWidth: "1px", - borderStyle: "solid", - cursor: "pointer", - pl: showCheckbox ? 0.5 : 0, - mr: 0.5, - mb: 0.5, - transition: "opacity 0.2s ease, border-color 0.2s ease", - "& .MuiChip-label": { - px: 0.75, - pl: showCheckbox ? 0.6 : 0.75, - }, - "& .MuiChip-icon": { - mr: 0.5, - pl: 0.2, - }, - "&:hover": { - opacity: 1, - backgroundColor: checked - ? alpha( - theme.palette[variant === "secondary" ? "secondary" : "primary"] - .main, - theme.palette.mode === "light" ? 0.08 : 0.16 - ) - : "action.hover", - borderWidth: "1px", - }, - backgroundColor: checked - ? alpha( - theme.palette[variant === "secondary" ? "secondary" : "primary"] - .main, - theme.palette.mode === "light" ? 0.08 : 0.16 - ) - : "background.paper", - borderColor: checked - ? variant === "secondary" - ? "secondary.main" - : "primary.main" - : "divider", - ...sx, - }} - /> - ); -}; - -export default FilterTag; diff --git a/frontend/src/components/shared/InfoIconWithTooltip.js b/frontend/src/components/shared/InfoIconWithTooltip.js deleted file mode 100644 index 2b307ccaf8d7bebb91c81b2ff7cf746a4fbac05e..0000000000000000000000000000000000000000 --- a/frontend/src/components/shared/InfoIconWithTooltip.js +++ /dev/null @@ -1,87 +0,0 @@ -import React from "react"; -import { Box, Tooltip, Portal, Backdrop } from "@mui/material"; -import InfoOutlinedIcon from "@mui/icons-material/InfoOutlined"; - -const InfoIconWithTooltip = ({ tooltip, iconProps = {}, sx = {} }) => { - const [open, setOpen] = React.useState(false); - - return ( - <> - setOpen(true)} - onClose={() => setOpen(false)} - componentsProps={{ - tooltip: { - sx: { - bgcolor: "rgba(33, 33, 33, 0.95)", - padding: "12px 16px", - maxWidth: "none !important", - width: "auto", - minWidth: "200px", - fontSize: "0.875rem", - lineHeight: 1.5, - position: "relative", - zIndex: 1501, - "& .MuiTooltip-arrow": { - color: "rgba(33, 33, 33, 0.95)", - }, - }, - }, - popper: { - sx: { - zIndex: 1501, - maxWidth: "min(600px, 90vw) !important", - '&[data-popper-placement*="bottom"] .MuiTooltip-tooltip': { - marginTop: "10px", - }, - '&[data-popper-placement*="top"] .MuiTooltip-tooltip': { - marginBottom: "10px", - }, - }, - }, - }} - > - - - - - {open && ( - - - - )} - - ); -}; - -export default InfoIconWithTooltip; diff --git a/frontend/src/components/shared/PageHeader.js b/frontend/src/components/shared/PageHeader.js deleted file mode 100644 index 5aada38223f216bc7df8c65725dda552947e757d..0000000000000000000000000000000000000000 --- a/frontend/src/components/shared/PageHeader.js +++ /dev/null @@ -1,29 +0,0 @@ -import React from "react"; -import { Box, Typography } from "@mui/material"; - -const PageHeader = ({ title, subtitle }) => { - return ( - - - {title} - - {subtitle && ( - - {subtitle} - - )} - - ); -}; - -export default PageHeader; diff --git a/frontend/src/config/auth.js b/frontend/src/config/auth.js deleted file mode 100644 index 250e7b0a8de7128983ac3e5f36f9fd1f82046122..0000000000000000000000000000000000000000 --- a/frontend/src/config/auth.js +++ /dev/null @@ -1,7 +0,0 @@ -export const HF_CONFIG = { - CLIENT_ID: "18fe6b93-6921-444c-9a20-5c22c578f2d8", - STORAGE_KEY: "hf_oauth", - SCOPE: "openid profile", - PROD_URL: "https://open-llm-leaderboard-open-llm-leaderboard.hf.space", - DEV_URL: "http://localhost:7860" -}; \ No newline at end of file diff --git a/frontend/src/config/theme.js b/frontend/src/config/theme.js deleted file mode 100644 index 4bd6e4ae0ac0810a89f7aafb480b3b12fbe0f524..0000000000000000000000000000000000000000 --- 
a/frontend/src/config/theme.js +++ /dev/null @@ -1,390 +0,0 @@ -import { createTheme, alpha } from "@mui/material/styles"; - -const getDesignTokens = (mode) => ({ - typography: { - fontFamily: [ - "-apple-system", - "BlinkMacSystemFont", - '"Segoe UI"', - "Roboto", - '"Helvetica Neue"', - "Arial", - "sans-serif", - ].join(","), - h1: { - fontFamily: '"Source Sans Pro", sans-serif', - }, - h2: { - fontFamily: '"Source Sans Pro", sans-serif', - }, - h3: { - fontFamily: '"Source Sans Pro", sans-serif', - }, - h4: { - fontFamily: '"Source Sans Pro", sans-serif', - }, - h5: { - fontFamily: '"Source Sans Pro", sans-serif', - }, - h6: { - fontFamily: '"Source Sans Pro", sans-serif', - }, - subtitle1: { - fontFamily: '"Source Sans Pro", sans-serif', - }, - subtitle2: { - fontFamily: '"Source Sans Pro", sans-serif', - }, - }, - palette: { - mode, - primary: { - main: "#4F86C6", - light: mode === "light" ? "#7BA7D7" : "#6B97D7", - dark: mode === "light" ? "#2B5C94" : "#3B6CA4", - 50: mode === "light" ? alpha("#4F86C6", 0.05) : alpha("#4F86C6", 0.15), - 100: mode === "light" ? alpha("#4F86C6", 0.1) : alpha("#4F86C6", 0.2), - 200: mode === "light" ? alpha("#4F86C6", 0.2) : alpha("#4F86C6", 0.3), - contrastText: "#fff", - }, - background: { - default: mode === "light" ? "#f8f9fa" : "#0a0a0a", - paper: mode === "light" ? "#fff" : "#1a1a1a", - subtle: mode === "light" ? "grey.100" : "grey.900", - hover: mode === "light" ? "action.hover" : alpha("#fff", 0.08), - tooltip: mode === "light" ? alpha("#212121", 0.9) : alpha("#fff", 0.9), - }, - text: { - primary: mode === "light" ? "rgba(0, 0, 0, 0.87)" : "#fff", - secondary: - mode === "light" ? "rgba(0, 0, 0, 0.6)" : "rgba(255, 255, 255, 0.7)", - disabled: - mode === "light" ? "rgba(0, 0, 0, 0.38)" : "rgba(255, 255, 255, 0.5)", - hint: - mode === "light" ? "rgba(0, 0, 0, 0.38)" : "rgba(255, 255, 255, 0.5)", - }, - divider: - mode === "light" ? "rgba(0, 0, 0, 0.12)" : "rgba(255, 255, 255, 0.12)", - action: { - active: - mode === "light" ? "rgba(0, 0, 0, 0.54)" : "rgba(255, 255, 255, 0.7)", - hover: - mode === "light" ? "rgba(0, 0, 0, 0.04)" : "rgba(255, 255, 255, 0.08)", - selected: - mode === "light" ? "rgba(0, 0, 0, 0.08)" : "rgba(255, 255, 255, 0.16)", - disabled: - mode === "light" ? "rgba(0, 0, 0, 0.26)" : "rgba(255, 255, 255, 0.3)", - disabledBackground: - mode === "light" ? "rgba(0, 0, 0, 0.12)" : "rgba(255, 255, 255, 0.12)", - }, - }, - shape: { - borderRadius: 8, - }, - components: { - MuiCssBaseline: { - styleOverrides: { - "html, body": { - backgroundColor: "background.default", - color: mode === "dark" ? "#fff" : "#000", - }, - body: { - "& *::-webkit-scrollbar": { - width: 8, - height: 8, - backgroundColor: "transparent", - }, - "& *::-webkit-scrollbar-thumb": { - borderRadius: 8, - backgroundColor: - mode === "light" ? alpha("#000", 0.2) : alpha("#fff", 0.1), - "&:hover": { - backgroundColor: - mode === "light" ? alpha("#000", 0.3) : alpha("#fff", 0.15), - }, - }, - }, - }, - }, - MuiButton: { - styleOverrides: { - root: { - borderRadius: 8, - }, - }, - }, - MuiPaper: { - defaultProps: { - elevation: 0, - }, - styleOverrides: { - root: { - backgroundImage: "none", - boxShadow: "none", - border: "1px solid", - borderColor: - mode === "light" - ? "rgba(0, 0, 0, 0.12)!important" - : "rgba(255, 255, 255, 0.25)!important", - }, - rounded: { - borderRadius: 12, - }, - }, - }, - - MuiTableCell: { - styleOverrides: { - root: { - borderColor: (theme) => - alpha( - theme.palette.divider, - theme.palette.mode === "dark" ? 
0.1 : 0.2 - ), - }, - head: { - backgroundColor: mode === "light" ? "grey.50" : "grey.900", - color: "text.primary", - fontWeight: 600, - }, - }, - }, - MuiTableRow: { - styleOverrides: { - root: { - backgroundColor: "transparent", - }, - }, - }, - MuiTableContainer: { - styleOverrides: { - root: { - backgroundColor: "background.paper", - borderRadius: 8, - border: "none", - boxShadow: "none", - }, - }, - }, - MuiSlider: { - styleOverrides: { - root: { - "& .MuiSlider-valueLabel": { - backgroundColor: "background.paper", - color: "text.primary", - border: "1px solid", - borderColor: "divider", - boxShadow: - mode === "light" - ? "0px 2px 4px rgba(0, 0, 0, 0.1)" - : "0px 2px 4px rgba(0, 0, 0, 0.3)", - }, - }, - thumb: { - "&:hover": { - boxShadow: (theme) => - `0px 0px 0px 8px ${alpha( - theme.palette.primary.main, - mode === "light" ? 0.08 : 0.16 - )}`, - }, - "&.Mui-active": { - boxShadow: (theme) => - `0px 0px 0px 12px ${alpha( - theme.palette.primary.main, - mode === "light" ? 0.08 : 0.16 - )}`, - }, - }, - track: { - border: "none", - }, - rail: { - opacity: mode === "light" ? 0.38 : 0.3, - }, - mark: { - backgroundColor: mode === "light" ? "grey.400" : "grey.600", - }, - markLabel: { - color: "text.secondary", - }, - }, - }, - MuiTextField: { - styleOverrides: { - root: { - "& .MuiOutlinedInput-root": { - borderRadius: 8, - }, - }, - }, - }, - MuiChip: { - styleOverrides: { - root: { - borderRadius: 8, - }, - outlinedInfo: { - borderWidth: 2, - fontWeight: 600, - bgcolor: "info.100", - borderColor: "info.400", - color: "info.700", - "& .MuiChip-label": { - px: 1.2, - }, - "&:hover": { - bgcolor: "info.200", - }, - }, - outlinedWarning: { - borderWidth: 2, - fontWeight: 600, - bgcolor: "warning.100", - borderColor: "warning.400", - color: "warning.700", - "& .MuiChip-label": { - px: 1.2, - }, - "&:hover": { - bgcolor: "warning.200", - }, - }, - outlinedSuccess: { - borderWidth: 2, - fontWeight: 600, - bgcolor: "success.100", - borderColor: "success.400", - color: "success.700", - "& .MuiChip-label": { - px: 1.2, - }, - "&:hover": { - bgcolor: "success.200", - }, - }, - outlinedError: { - borderWidth: 2, - fontWeight: 600, - bgcolor: "error.100", - borderColor: "error.400", - color: "error.700", - "& .MuiChip-label": { - px: 1.2, - }, - "&:hover": { - bgcolor: "error.200", - }, - }, - outlinedPrimary: { - borderWidth: 2, - fontWeight: 600, - bgcolor: "primary.100", - borderColor: "primary.400", - color: "primary.700", - "& .MuiChip-label": { - px: 1.2, - }, - "&:hover": { - bgcolor: "primary.200", - }, - }, - outlinedSecondary: { - borderWidth: 2, - fontWeight: 600, - bgcolor: "secondary.100", - borderColor: "secondary.400", - color: "secondary.700", - "& .MuiChip-label": { - px: 1.2, - }, - "&:hover": { - bgcolor: "secondary.200", - }, - }, - }, - }, - MuiIconButton: { - styleOverrides: { - root: { - borderRadius: 8, - padding: "8px", - "&.MuiIconButton-sizeSmall": { - padding: "4px", - borderRadius: 6, - }, - }, - }, - }, - MuiTooltip: { - styleOverrides: { - tooltip: { - backgroundColor: - mode === "light" ? alpha("#212121", 0.9) : alpha("#424242", 0.9), - color: "#fff", - fontSize: "0.875rem", - padding: "8px 12px", - maxWidth: 400, - borderRadius: 8, - lineHeight: 1.4, - border: "1px solid", - borderColor: - mode === "light" ? alpha("#fff", 0.1) : alpha("#fff", 0.05), - boxShadow: - mode === "light" - ? "0 2px 8px rgba(0, 0, 0, 0.15)" - : "0 2px 8px rgba(0, 0, 0, 0.5)", - "& b": { - fontWeight: 600, - color: "inherit", - }, - "& a": { - color: mode === "light" ? 
"#90caf9" : "#64b5f6", - textDecoration: "none", - "&:hover": { - textDecoration: "underline", - }, - }, - }, - arrow: { - color: - mode === "light" ? alpha("#212121", 0.9) : alpha("#424242", 0.9), - "&:before": { - border: "1px solid", - borderColor: - mode === "light" ? alpha("#fff", 0.1) : alpha("#fff", 0.05), - }, - }, - }, - defaultProps: { - arrow: true, - enterDelay: 400, - leaveDelay: 200, - }, - }, - MuiAppBar: { - styleOverrides: { - root: { - border: "none", - borderBottom: "none", - }, - }, - }, - }, - breakpoints: { - values: { - xs: 0, - sm: 600, - md: 900, - lg: 1240, - xl: 1536, - }, - }, -}); - -const getTheme = (mode) => { - const tokens = getDesignTokens(mode); - return createTheme(tokens); -}; - -export default getTheme; diff --git a/frontend/src/hooks/useAuth.js b/frontend/src/hooks/useAuth.js deleted file mode 100644 index 166d61aaaea425b8ec6e0c1d6bcf16311a94f369..0000000000000000000000000000000000000000 --- a/frontend/src/hooks/useAuth.js +++ /dev/null @@ -1,173 +0,0 @@ -import { useState, useEffect } from "react"; -import { useLocation, useNavigate } from "react-router-dom"; -import { oauthLoginUrl, oauthHandleRedirectIfPresent } from "@huggingface/hub"; -import { HF_CONFIG } from "../config/auth"; - -async function fetchUserInfo(token) { - const response = await fetch("https://huggingface.co/api/whoami-v2", { - headers: { - Authorization: `Bearer ${token}`, - }, - }); - if (!response.ok) { - throw new Error("Failed to fetch user info"); - } - return response.json(); -} - -export function useAuth() { - const [isAuthenticated, setIsAuthenticated] = useState(false); - const [user, setUser] = useState(null); - const [loading, setLoading] = useState(true); - const [error, setError] = useState(null); - const location = useLocation(); - const navigate = useNavigate(); - - // Initialisation de l'authentification - useEffect(() => { - let mounted = true; - const initAuth = async () => { - try { - console.group("Auth Initialization"); - setLoading(true); - - // Vérifier s'il y a une redirection OAuth d'abord - let oauthResult = await oauthHandleRedirectIfPresent(); - - // Si pas de redirection, vérifier le localStorage - if (!oauthResult) { - const storedAuth = localStorage.getItem(HF_CONFIG.STORAGE_KEY); - if (storedAuth) { - try { - oauthResult = JSON.parse(storedAuth); - console.log("Found existing auth"); - const userInfo = await fetchUserInfo(oauthResult.access_token); - if (mounted) { - setIsAuthenticated(true); - setUser({ - username: userInfo.name, - token: oauthResult.access_token, - }); - } - } catch (err) { - console.log("Invalid stored auth data, clearing...", err); - localStorage.removeItem(HF_CONFIG.STORAGE_KEY); - if (mounted) { - setIsAuthenticated(false); - setUser(null); - } - } - } - } else { - console.log("Processing OAuth redirect"); - const token = oauthResult.accessToken; - const userInfo = await fetchUserInfo(token); - - const authData = { - access_token: token, - username: userInfo.name, - }; - - localStorage.setItem(HF_CONFIG.STORAGE_KEY, JSON.stringify(authData)); - - if (mounted) { - setIsAuthenticated(true); - setUser({ - username: userInfo.name, - token: token, - }); - } - - // Rediriger vers la page d'origine - const returnTo = localStorage.getItem("auth_return_to"); - if (returnTo) { - navigate(returnTo); - localStorage.removeItem("auth_return_to"); - } - } - } catch (err) { - console.error("Auth initialization error:", err); - if (mounted) { - setError(err.message); - setIsAuthenticated(false); - setUser(null); - } - } finally { - if 
(mounted) { - setLoading(false); - } - console.groupEnd(); - } - }; - - initAuth(); - - return () => { - mounted = false; - }; - }, [navigate, location.pathname]); - - const login = async () => { - try { - console.group("Login Process"); - setLoading(true); - - // Sauvegarder la route actuelle pour la redirection post-auth - const currentRoute = window.location.hash.replace("#", "") || "/"; - localStorage.setItem("auth_return_to", currentRoute); - - // Déterminer l'URL de redirection en fonction de l'environnement - const redirectUrl = - window.location.hostname === "localhost" || - window.location.hostname === "127.0.0.1" - ? HF_CONFIG.DEV_URL - : HF_CONFIG.PROD_URL; - - console.log("Using redirect URL:", redirectUrl); - - // Générer l'URL de login et rediriger - const loginUrl = await oauthLoginUrl({ - clientId: HF_CONFIG.CLIENT_ID, - redirectUrl, - scope: HF_CONFIG.SCOPE, - }); - - window.location.href = loginUrl + "&prompt=consent"; - - console.groupEnd(); - } catch (err) { - console.error("Login error:", err); - setError(err.message); - setLoading(false); - console.groupEnd(); - } - }; - - const logout = () => { - console.group("Logout Process"); - setLoading(true); - try { - console.log("Clearing auth data..."); - localStorage.removeItem(HF_CONFIG.STORAGE_KEY); - localStorage.removeItem("auth_return_to"); - setIsAuthenticated(false); - setUser(null); - console.log("Logged out successfully"); - } catch (err) { - console.error("Logout error:", err); - setError(err.message); - } finally { - setLoading(false); - console.groupEnd(); - } - }; - - return { - isAuthenticated, - user, - loading, - error, - login, - logout, - }; -} diff --git a/frontend/src/hooks/useThemeMode.js b/frontend/src/hooks/useThemeMode.js deleted file mode 100644 index 93030109e2b32281c05178cc4207cb5544e94e4f..0000000000000000000000000000000000000000 --- a/frontend/src/hooks/useThemeMode.js +++ /dev/null @@ -1,28 +0,0 @@ -import { useState, useEffect } from 'react'; - -export const useThemeMode = () => { - // Get system preference - const getSystemPreference = () => { - return window.matchMedia('(prefers-color-scheme: dark)').matches ? 'dark' : 'light'; - }; - - // Initialize theme mode from system preference - const [mode, setMode] = useState(getSystemPreference); - - // Listen to system preference changes - useEffect(() => { - const mediaQuery = window.matchMedia('(prefers-color-scheme: dark)'); - const handleChange = (e) => { - setMode(e.matches ? 'dark' : 'light'); - }; - - mediaQuery.addEventListener('change', handleChange); - return () => mediaQuery.removeEventListener('change', handleChange); - }, []); - - const toggleTheme = () => { - setMode((prevMode) => (prevMode === 'light' ? 
'dark' : 'light')); - }; - - return { mode, toggleTheme }; -}; \ No newline at end of file diff --git a/frontend/src/index.js b/frontend/src/index.js deleted file mode 100644 index 8db5acb8fb94a08138a3901be0b5b810c9e50931..0000000000000000000000000000000000000000 --- a/frontend/src/index.js +++ /dev/null @@ -1,10 +0,0 @@ -import React from "react"; -import ReactDOM from "react-dom/client"; -import App from "./App"; - -const root = ReactDOM.createRoot(document.getElementById("root")); -root.render( - - - -); diff --git a/frontend/src/pages/AddModelPage/AddModelPage.js b/frontend/src/pages/AddModelPage/AddModelPage.js deleted file mode 100644 index 5cfb4c9b7af6e2f64b22f7d06cc82c940d981c84..0000000000000000000000000000000000000000 --- a/frontend/src/pages/AddModelPage/AddModelPage.js +++ /dev/null @@ -1,48 +0,0 @@ -import React from "react"; -import { Box, CircularProgress } from "@mui/material"; -import { useAuth } from "../../hooks/useAuth"; -import PageHeader from "../../components/shared/PageHeader"; -import EvaluationQueues from "./components/EvaluationQueues/EvaluationQueues"; -import ModelSubmissionForm from "./components/ModelSubmissionForm/ModelSubmissionForm"; -import SubmissionGuide from "./components/SubmissionGuide/SubmissionGuide"; - -function AddModelPage() { - const { isAuthenticated, loading, user } = useAuth(); - - if (loading) { - return ( - - - - ); - } - - return ( - - - Add your model to the Open - LLM Leaderboard - - } - /> - - - - - - - - ); -} - -export default AddModelPage; diff --git a/frontend/src/pages/AddModelPage/components/EvaluationQueues/EvaluationQueues.js b/frontend/src/pages/AddModelPage/components/EvaluationQueues/EvaluationQueues.js deleted file mode 100644 index 2c82c74741d1ce24dbe6e809f90c9da562a1b20b..0000000000000000000000000000000000000000 --- a/frontend/src/pages/AddModelPage/components/EvaluationQueues/EvaluationQueues.js +++ /dev/null @@ -1,718 +0,0 @@ -import React, { useState, useEffect, useRef } from "react"; -import { - Box, - Typography, - Table, - TableBody, - TableCell, - TableContainer, - TableHead, - TableRow, - Chip, - Link, - CircularProgress, - Alert, - Accordion, - AccordionSummary, - AccordionDetails, - Stack, - Tooltip, -} from "@mui/material"; -import AccessTimeIcon from "@mui/icons-material/AccessTime"; -import CheckCircleIcon from "@mui/icons-material/CheckCircle"; -import PendingIcon from "@mui/icons-material/Pending"; -import AutorenewIcon from "@mui/icons-material/Autorenew"; -import ExpandMoreIcon from "@mui/icons-material/ExpandMore"; -import OpenInNewIcon from "@mui/icons-material/OpenInNew"; -import { useVirtualizer } from "@tanstack/react-virtual"; - -// Function to format wait time -const formatWaitTime = (waitTimeStr) => { - const seconds = parseFloat(waitTimeStr.replace("s", "")); - - if (seconds < 60) { - return "just now"; - } - - const minutes = Math.floor(seconds / 60); - if (minutes < 60) { - return `${minutes}m ago`; - } - - const hours = Math.floor(minutes / 60); - if (hours < 24) { - return `${hours}h ago`; - } - - const days = Math.floor(hours / 24); - return `${days}d ago`; -}; - -// Column definitions with their properties -const columns = [ - { - id: "model", - label: "Model", - width: "35%", - align: "left", - }, - { - id: "submitter", - label: "Submitted by", - width: "15%", - align: "left", - }, - { - id: "wait_time", - label: "Submitted", - width: "12%", - align: "center", - }, - { - id: "precision", - label: "Precision", - width: "13%", - align: "center", - }, - { - id: "revision", - label: 
"Revision", - width: "12%", - align: "center", - }, - { - id: "status", - label: "Status", - width: "13%", - align: "center", - }, -]; - -const StatusChip = ({ status }) => { - const statusConfig = { - finished: { - icon: , - label: "Completed", - color: "success", - }, - evaluating: { - icon: , - label: "Evaluating", - color: "warning", - }, - pending: { icon: , label: "Pending", color: "info" }, - }; - - const config = statusConfig[status] || statusConfig.pending; - - return ( - - ); -}; - -const ModelTable = ({ models, emptyMessage, status }) => { - const parentRef = useRef(null); - const rowVirtualizer = useVirtualizer({ - count: models.length, - getScrollElement: () => parentRef.current, - estimateSize: () => 53, - overscan: 5, - }); - - if (models.length === 0) { - return ( - - {emptyMessage} - - ); - } - - return ( - - - - {columns.map((column) => ( - - ))} - - - - {columns.map((column, index) => ( - - {column.label} - - ))} - - - - - -
- {rowVirtualizer.getVirtualItems().map((virtualRow) => { - const model = models[virtualRow.index]; - const waitTime = formatWaitTime(model.wait_time); - - return ( - - - - {model.name} - - - - - {model.submitter} - - - - - - {waitTime} - - - - - - {model.precision} - - - - {model.revision.substring(0, 7)} - - - - - - ); - })} -
-
-
-
-
-
- ); -}; - -const QueueAccordion = ({ - title, - models, - status, - emptyMessage, - expanded, - onChange, - loading, -}) => ( - - }> - - {title} - - ({ - borderWidth: 2, - fontWeight: 600, - bgcolor: - status === "finished" - ? theme.palette.success[100] - : status === "evaluating" - ? theme.palette.warning[100] - : theme.palette.info[100], - borderColor: - status === "finished" - ? theme.palette.success[400] - : status === "evaluating" - ? theme.palette.warning[400] - : theme.palette.info[400], - color: - status === "finished" - ? theme.palette.success[700] - : status === "evaluating" - ? theme.palette.warning[700] - : theme.palette.info[700], - "& .MuiChip-label": { - px: 1.2, - }, - "&:hover": { - bgcolor: - status === "finished" - ? theme.palette.success[200] - : status === "evaluating" - ? theme.palette.warning[200] - : theme.palette.info[200], - }, - })} - /> - {loading && ( - - )} - - - - - - - - - -); - -const EvaluationQueues = ({ defaultExpanded = true }) => { - const [expanded, setExpanded] = useState(defaultExpanded); - const [expandedQueues, setExpandedQueues] = useState(new Set()); - const [models, setModels] = useState({ - pending: [], - evaluating: [], - finished: [], - }); - const [loading, setLoading] = useState(true); - const [error, setError] = useState(null); - - useEffect(() => { - const fetchModels = async () => { - try { - const response = await fetch("/api/models/status"); - if (!response.ok) { - throw new Error("Failed to fetch models"); - } - const data = await response.json(); - - // Sort models by submission date (most recent first) - const sortByDate = (models) => { - return [...models].sort((a, b) => { - const dateA = new Date(a.submission_time); - const dateB = new Date(b.submission_time); - return dateB - dateA; - }); - }; - - setModels({ - finished: sortByDate(data.finished), - evaluating: sortByDate(data.evaluating), - pending: sortByDate(data.pending), - }); - } catch (err) { - setError(err.message); - } finally { - setLoading(false); - } - }; - - fetchModels(); - const interval = setInterval(fetchModels, 30000); - return () => clearInterval(interval); - }, []); - - const handleMainAccordionChange = (panel) => (event, isExpanded) => { - setExpanded(isExpanded ? panel : false); - }; - - const handleQueueAccordionChange = (queueName) => (event, isExpanded) => { - setExpandedQueues((prev) => { - const newSet = new Set(prev); - if (isExpanded) { - newSet.add(queueName); - } else { - newSet.delete(queueName); - } - return newSet; - }); - }; - - if (error) { - return ( - - {error} - - ); - } - - return ( - - } - sx={{ - px: 3, - "& .MuiAccordionSummary-expandIconWrapper": { - color: "text.secondary", - transform: "rotate(0deg)", - transition: "transform 150ms", - "&.Mui-expanded": { - transform: "rotate(180deg)", - }, - }, - }} - > - - - Evaluation Status - - {!loading && ( - - - - - - )} - {loading && ( - - )} - - - - {loading ? 
( - - - - ) : ( - <> - - - - - - - )} - - - ); -}; - -export default EvaluationQueues; diff --git a/frontend/src/pages/AddModelPage/components/ModelSubmissionForm/ModelSubmissionForm.js b/frontend/src/pages/AddModelPage/components/ModelSubmissionForm/ModelSubmissionForm.js deleted file mode 100644 index d922d094163a2d2aa5f30fe81a7675575a9f44be..0000000000000000000000000000000000000000 --- a/frontend/src/pages/AddModelPage/components/ModelSubmissionForm/ModelSubmissionForm.js +++ /dev/null @@ -1,599 +0,0 @@ -import React, { useState } from "react"; -import { - Box, - Paper, - Typography, - TextField, - Button, - FormControl, - InputLabel, - Select, - MenuItem, - FormControlLabel, - Switch, - Stack, - Grid, - CircularProgress, - Alert, -} from "@mui/material"; -import RocketLaunchIcon from "@mui/icons-material/RocketLaunch"; -import CheckCircleOutlineIcon from "@mui/icons-material/CheckCircleOutline"; -import ThumbUpIcon from "@mui/icons-material/ThumbUp"; -import { alpha } from "@mui/material/styles"; -import InfoIconWithTooltip from "../../../../components/shared/InfoIconWithTooltip"; -import { MODEL_TYPES } from "../../../../pages/LeaderboardPage/components/Leaderboard/constants/modelTypes"; -import { SUBMISSION_PRECISIONS } from "../../../../pages/LeaderboardPage/components/Leaderboard/constants/defaults"; -import AuthContainer from "../../../../components/shared/AuthContainer"; - -const WEIGHT_TYPES = [ - { value: "Original", label: "Original" }, - { value: "Delta", label: "Delta" }, - { value: "Adapter", label: "Adapter" }, -]; - -const HELP_TEXTS = { - modelName: ( - - - Model Name on Hugging Face Hub - - - Your model must be public and loadable with AutoClasses without - trust_remote_code. The model should be in Safetensors format for better - safety and loading performance. Example: mistralai/Mistral-7B-v0.1 - - - ), - revision: ( - - - Model Revision - - - Git branch, tag or commit hash. The evaluation will be strictly tied to - this specific commit to ensure consistency. Make sure this version is - stable and contains all necessary files. - - - ), - modelType: ( - - - Model Category - - - 🟢 Pretrained: Base models trained on text using masked modeling 🟩 - Continuously Pretrained: Extended training on additional corpus 🔶 - Fine-tuned: Domain-specific optimization 💬 Chat: Models using RLHF, - DPO, or IFT for conversation 🤝 Merge: Combined weights without - additional training 🌸 Multimodal: Handles multiple input types - - - ), - baseModel: ( - - - Base Model Reference - - - Required for delta weights or adapters. This information is used to - identify the original model and calculate the total parameter count by - combining base model and adapter/delta parameters. - - - ), - precision: ( - - - Model Precision - - - Size limits vary by precision: • FP16/BF16: up to 100B parameters • - 8-bit: up to 280B parameters (2x) • 4-bit: up to 560B parameters (4x) - Choose carefully as incorrect precision can cause evaluation errors. - - - ), - weightsType: ( - - - Weights Format - - - Original: Complete model weights in safetensors format Delta: Weight - differences from base model (requires base model for size calculation) - Adapter: Lightweight fine-tuning layers (requires base model for size - calculation) - - - ), - chatTemplate: ( - - - Chat Template Support - - - Activates automatically for chat models. It uses the standardized Hugging - Face chat template for consistent prompt formatting during evaluation. - Required for models using RLHF, DPO, or instruction fine-tuning. 
- - - ), -}; - -// Convert MODEL_TYPES to format expected by Select component -const modelTypeOptions = Object.entries(MODEL_TYPES).map( - ([value, { icon, label }]) => ({ - value, - label: `${icon} ${label}`, - }) -); - -function ModelSubmissionForm({ user, isAuthenticated }) { - const [formData, setFormData] = useState({ - modelName: "", - revision: "main", - modelType: "fine-tuned", - isChatModel: false, - useChatTemplate: false, - precision: "float16", - weightsType: "Original", - baseModel: "", - }); - const [error, setError] = useState(null); - const [submitting, setSubmitting] = useState(false); - const [success, setSuccess] = useState(false); - const [submittedData, setSubmittedData] = useState(null); - - const handleChange = (event) => { - const { name, value, checked } = event.target; - setFormData((prev) => ({ - ...prev, - [name]: event.target.type === "checkbox" ? checked : value, - })); - }; - - const handleSubmit = async (e) => { - e.preventDefault(); - setError(null); - setSubmitting(true); - - try { - const response = await fetch("/api/models/submit", { - method: "POST", - headers: { - "Content-Type": "application/json", - }, - body: JSON.stringify({ - model_id: formData.modelName, - revision: formData.revision, - model_type: formData.modelType, - precision: formData.precision, - weight_type: formData.weightsType, - base_model: formData.baseModel, - use_chat_template: formData.useChatTemplate, - user_id: user.username, - }), - }); - - if (!response.ok) { - const error = await response.json(); - throw new Error(error.detail || "Failed to submit model"); - } - - setSubmittedData(formData); - setSuccess(true); - } catch (error) { - setError(error.message); - } finally { - setSubmitting(false); - } - }; - - if (success && submittedData) { - return ( - ({ - p: 6, - mb: 3, - bgcolor: alpha(theme.palette.success.main, 0.05), - borderColor: alpha(theme.palette.success.main, 0.2), - })} - > - - - - - Model submitted successfully! - - - - - Your model {submittedData.modelName} has been added - to the evaluation queue with the following parameters: - - - - - - - Model: - - - {submittedData.modelName} - - - - - Type: - - - {submittedData.modelType} - - - - - Revision: - - - {submittedData.revision} - - - - - Precision: - - - {submittedData.precision} - - - - - Weight type: - - - {submittedData.weightsType} - - - {submittedData.baseModel && ( - - - Base model: - - - {submittedData.baseModel} - - - )} - - - Chat template: - - - {submittedData.useChatTemplate ? "Yes" : "No"} - - - - - - - An automatic upvote has been added to your model to help with - prioritization. - - - - - - - - ); - } - - return ( - <> - {error && ( - - {error} - - )} - - {isAuthenticated && ( - - {/* Header */} - - theme.palette.mode === "dark" - ? alpha(theme.palette.divider, 0.1) - : "grey.200", - bgcolor: (theme) => - theme.palette.mode === "dark" - ? 
alpha(theme.palette.background.paper, 0.5) - : "grey.50", - }} - > - - Model Submission Form - - - - {/* Form Content */} - - - {/* Model Information */} - - - Model Information - - - - - - - ), - }} - /> - - - - - ), - }} - /> - - - {/* Model Configuration */} - - - Model Configuration - - - - - - Model Type - - - - - - - - } - label="Use Chat Template" - /> - - - - - - - Precision - - - - - - - Weights Type - - - - - {formData.weightsType !== "Original" && ( - - - ), - }} - /> - - )} - - {/* Submit Button */} - - - - All fields marked with * are required - - - - - - - - )} - - ); -} - -export default ModelSubmissionForm; diff --git a/frontend/src/pages/AddModelPage/components/SubmissionGuide/SubmissionGuide.js b/frontend/src/pages/AddModelPage/components/SubmissionGuide/SubmissionGuide.js deleted file mode 100644 index c023f0ba51929d7bc854df404d5766bba2d5a8ee..0000000000000000000000000000000000000000 --- a/frontend/src/pages/AddModelPage/components/SubmissionGuide/SubmissionGuide.js +++ /dev/null @@ -1,274 +0,0 @@ -import React, { useState, useEffect } from "react"; -import { useLocation, useNavigate } from "react-router-dom"; -import { Box, Paper, Typography, Button, Stack, Collapse } from "@mui/material"; -import ExpandMoreIcon from "@mui/icons-material/ExpandMore"; - -const DocLink = ({ href, children }) => ( - -); - -const StepNumber = ({ number }) => ( - - {number} - -); - -const TUTORIAL_STEPS = [ - { - title: "Model Information", - content: ( - - - Your model should be public on the Hub and follow the{" "} - username/model-id format (e.g. - mistralai/Mistral-7B-v0.1). Specify the revision{" "} - (commit hash or branch) and model type. - - - Model uploading guide - - - ), - }, - { - title: "Technical Details", - content: ( - - - Make sure your model can be loaded locally before - submitting: - - - theme.palette.mode === "dark" ? "grey.50" : "grey.900", - borderRadius: 1, - "& pre": { - m: 0, - p: 0, - fontFamily: "monospace", - fontSize: "0.875rem", - color: (theme) => - theme.palette.mode === "dark" ? "grey.900" : "grey.50", - }, - }} - > -
-            {`from transformers import AutoConfig, AutoModel, AutoTokenizer
-
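-# all three AutoClasses loads below must succeed without trust_remote_code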
-config = AutoConfig.from_pretrained("your-username/your-model", revision="main")
-model = AutoModel.from_pretrained("your-username/your-model", revision="main")
-tokenizer = AutoTokenizer.from_pretrained("your-username/your-model", revision="main")`}
-          
-
- - Transformers documentation - -
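Alongside the Python check above, a quick client-side sanity check against the public Hub REST API can confirm that the repo is public and ships safetensors weights. A minimal sketch, assuming only fetch and the https://huggingface.co/api/models/<model-id> route; the helper name is hypothetical:

// Resolves to true when the repo is reachable without auth (i.e. public)
// and lists at least one .safetensors file among its files.
async function looksSubmittable(modelId) {
  const res = await fetch(`https://huggingface.co/api/models/${modelId}`);
  if (!res.ok) return false; // private or missing repos respond with an error
  const info = await res.json();
  return (info.siblings ?? []).some((s) => s.rfilename?.endsWith(".safetensors"));
}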
- ), - }, - { - title: "License Requirements", - content: ( - - - A license tag is required.{" "} - Open licenses (Apache, MIT, etc) are strongly - recommended. - - - About model licenses - - - ), - }, - { - title: "Model Card Requirements", - content: ( - - - Your model card must include: architecture,{" "} - training details,{" "} - dataset information, intended use, limitations, and{" "} - performance metrics. - - - Model cards guide - - - ), - }, - { - title: "Final Checklist", - content: ( - - - Ensure your model is public, uses{" "} - safetensors format, has a{" "} - license tag, and loads correctly{" "} - with the provided code. - - - Sharing best practices - - - ), - }, -]; - -function SubmissionGuide() { - const location = useLocation(); - const navigate = useNavigate(); - - // Initialize state directly with URL value - const initialExpanded = !new URLSearchParams(location.search).get("guide"); - const [expanded, setExpanded] = useState(initialExpanded); - - // Sync expanded state with URL changes after initial render - useEffect(() => { - const guideOpen = !new URLSearchParams(location.search).get("guide"); - if (guideOpen !== expanded) { - setExpanded(guideOpen); - } - }, [location.search, expanded]); - - const handleAccordionChange = () => { - const newExpanded = !expanded; - setExpanded(newExpanded); - const params = new URLSearchParams(location.search); - if (newExpanded) { - params.delete("guide"); - } else { - params.set("guide", "closed"); - } - navigate({ search: params.toString() }, { replace: true }); - }; - - return ( - - theme.palette.mode === "dark" ? "grey.800" : "grey.200", - overflow: "hidden", - }} - > - - theme.palette.mode === "dark" ? "grey.900" : "grey.50", - borderBottom: "1px solid", - borderColor: (theme) => - expanded - ? theme.palette.mode === "dark" - ? "grey.800" - : "grey.200" - : "transparent", - }} - > - - Submission Guide - - - - - - - {TUTORIAL_STEPS.map((step, index) => ( - - - - - - {step.title} - - - {step.content} - - {index < TUTORIAL_STEPS.length - 1 && ( - - theme.palette.mode === "dark" ? 
"grey.800" : "grey.100", - }} - /> - )} - - ))} - - - - - ); -} - -export default SubmissionGuide; diff --git a/frontend/src/pages/LeaderboardPage/LeaderboardPage.js b/frontend/src/pages/LeaderboardPage/LeaderboardPage.js deleted file mode 100644 index 5c2ad4f7e94b6690ea314613633948b9bfd342be..0000000000000000000000000000000000000000 --- a/frontend/src/pages/LeaderboardPage/LeaderboardPage.js +++ /dev/null @@ -1,50 +0,0 @@ -import { useEffect } from "react"; -import Leaderboard from "./components/Leaderboard/Leaderboard"; -import { Box } from "@mui/material"; -import PageHeader from "../../components/shared/PageHeader"; -import Logo from "../../components/Logo/Logo"; -import { useLeaderboardData } from "../../pages/LeaderboardPage/components/Leaderboard/hooks/useLeaderboardData"; -import { useLeaderboard } from "../../pages/LeaderboardPage/components/Leaderboard/context/LeaderboardContext"; - -function LeaderboardPage() { - const { data, isLoading, error } = useLeaderboardData(); - const { actions } = useLeaderboard(); - - useEffect(() => { - if (data) { - actions.setModels(data); - } - actions.setLoading(isLoading); - actions.setError(error); - }, [data, isLoading, error, actions]); - - return ( - - - - - - Comparing Large Language Models in an{" "} - open and{" "} - reproducible way - - } - /> - - - ); -} - -export default LeaderboardPage; diff --git a/frontend/src/pages/LeaderboardPage/components/Leaderboard/Leaderboard.js b/frontend/src/pages/LeaderboardPage/components/Leaderboard/Leaderboard.js deleted file mode 100644 index 5c41ce7fa5eeeb9b00bc657c174c9653c5d31503..0000000000000000000000000000000000000000 --- a/frontend/src/pages/LeaderboardPage/components/Leaderboard/Leaderboard.js +++ /dev/null @@ -1,449 +0,0 @@ -import React, { useMemo, useEffect, useCallback } from "react"; -import { Box, Typography } from "@mui/material"; -import { useSearchParams } from "react-router-dom"; - -import { TABLE_DEFAULTS } from "./constants/defaults"; -import { useLeaderboard } from "./context/LeaderboardContext"; -import { useLeaderboardProcessing } from "./hooks/useLeaderboardData"; -import { useLeaderboardData } from "./hooks/useLeaderboardData"; - -import LeaderboardFilters from "./components/Filters/Filters"; -import LeaderboardTable from "./components/Table/Table"; -import SearchBar, { SearchBarSkeleton } from "./components/Filters/SearchBar"; -import PerformanceMonitor from "./components/PerformanceMonitor"; -import QuickFilters, { - QuickFiltersSkeleton, -} from "./components/Filters/QuickFilters"; - -const FilterAccordion = ({ expanded, quickFilters, advancedFilters }) => { - const advancedFiltersRef = React.useRef(null); - const quickFiltersRef = React.useRef(null); - const [height, setHeight] = React.useState("auto"); - const resizeTimeoutRef = React.useRef(null); - - const updateHeight = React.useCallback(() => { - if (expanded && advancedFiltersRef.current) { - setHeight(`${advancedFiltersRef.current.scrollHeight}px`); - } else if (!expanded && quickFiltersRef.current) { - setHeight(`${quickFiltersRef.current.scrollHeight}px`); - } - }, [expanded]); - - React.useEffect(() => { - // Initial height calculation - const timer = setTimeout(updateHeight, 100); - - // Resize handler with debounce - const handleResize = () => { - if (resizeTimeoutRef.current) { - clearTimeout(resizeTimeoutRef.current); - } - resizeTimeoutRef.current = setTimeout(updateHeight, 150); - }; - - window.addEventListener("resize", handleResize); - - return () => { - clearTimeout(timer); - 
window.removeEventListener("resize", handleResize); - if (resizeTimeoutRef.current) { - clearTimeout(resizeTimeoutRef.current); - } - }; - }, [updateHeight]); - - // Update height when expanded state changes - React.useEffect(() => { - updateHeight(); - }, [expanded, updateHeight]); - - return ( - - - {quickFilters} - - - {advancedFilters} - - - ); -}; - -const Leaderboard = () => { - const { state, actions } = useLeaderboard(); - const [searchParams, setSearchParams] = useSearchParams(); - const { - data, - isLoading: dataLoading, - error: dataError, - } = useLeaderboardData(); - const { - table, - filteredData, - error: processingError, - } = useLeaderboardProcessing(); - - // Memoize filtered data - const memoizedFilteredData = useMemo(() => filteredData, [filteredData]); - const memoizedTable = useMemo(() => table, [table]); - - // Memoize table options - const hasTableOptionsChanges = useMemo(() => { - return ( - state.display.rowSize !== TABLE_DEFAULTS.ROW_SIZE || - JSON.stringify(state.display.scoreDisplay) !== - JSON.stringify(TABLE_DEFAULTS.SCORE_DISPLAY) || - state.display.averageMode !== TABLE_DEFAULTS.AVERAGE_MODE || - state.display.rankingMode !== TABLE_DEFAULTS.RANKING_MODE - ); - }, [state.display]); - - const hasColumnFilterChanges = useMemo(() => { - return ( - JSON.stringify([...state.display.visibleColumns].sort()) !== - JSON.stringify([...TABLE_DEFAULTS.COLUMNS.DEFAULT_VISIBLE].sort()) - ); - }, [state.display.visibleColumns]); - - // Memoize callbacks - const onToggleFilters = useCallback(() => { - actions.toggleFiltersExpanded(); - }, [actions]); - - const onColumnVisibilityChange = useCallback( - (newVisibility) => { - actions.setDisplayOption( - "visibleColumns", - Object.keys(newVisibility).filter((key) => newVisibility[key]) - ); - }, - [actions] - ); - - const onRowSizeChange = useCallback( - (size) => { - actions.setDisplayOption("rowSize", size); - }, - [actions] - ); - - const onScoreDisplayChange = useCallback( - (display) => { - actions.setDisplayOption("scoreDisplay", display); - }, - [actions] - ); - - const onAverageModeChange = useCallback( - (mode) => { - actions.setDisplayOption("averageMode", mode); - }, - [actions] - ); - - const onRankingModeChange = useCallback( - (mode) => { - actions.setDisplayOption("rankingMode", mode); - }, - [actions] - ); - - const onPrecisionsChange = useCallback( - (precisions) => { - actions.setFilter("precisions", precisions); - }, - [actions] - ); - - const onTypesChange = useCallback( - (types) => { - actions.setFilter("types", types); - }, - [actions] - ); - - const onParamsRangeChange = useCallback( - (range) => { - actions.setFilter("paramsRange", range); - }, - [actions] - ); - - const onBooleanFiltersChange = useCallback( - (filters) => { - actions.setFilter("booleanFilters", filters); - }, - [actions] - ); - - const onReset = useCallback(() => { - actions.resetFilters(); - }, [actions]); - - // Memoize loading states - const loadingStates = useMemo(() => { - const isInitialLoading = dataLoading || !data; - const isProcessingData = !memoizedTable || !memoizedFilteredData; - const isApplyingFilters = state.models.length > 0 && !memoizedFilteredData; - const hasValidFilterCounts = - state.countsReady && - state.filterCounts && - state.filterCounts.normal && - state.filterCounts.officialOnly; - - return { - isInitialLoading, - isProcessingData, - isApplyingFilters, - showSearchSkeleton: isInitialLoading || !hasValidFilterCounts, - showFiltersSkeleton: isInitialLoading || !hasValidFilterCounts, - showTableSkeleton: - 
isInitialLoading || - isProcessingData || - isApplyingFilters || - !hasValidFilterCounts, - }; - }, [ - dataLoading, - data, - memoizedTable, - memoizedFilteredData, - state.models.length, - state.filterCounts, - state.countsReady, - ]); - - // Memoize child components - const memoizedSearchBar = useMemo( - () => ( - - ), - [ - onToggleFilters, - state.filtersExpanded, - loadingStates.showTableSkeleton, - memoizedFilteredData, - table, - ] - ); - - const memoizedQuickFilters = useMemo( - () => ( - - ), - [state.models.length, memoizedFilteredData, memoizedTable] - ); - - const memoizedLeaderboardFilters = useMemo( - () => ( - - ), - [ - memoizedFilteredData, - loadingStates.showFiltersSkeleton, - state.filters.precisions, - state.filters.types, - state.filters.paramsRange, - state.filters.booleanFilters, - onPrecisionsChange, - onTypesChange, - onParamsRangeChange, - onBooleanFiltersChange, - onReset, - ] - ); - - // No need to memoize LeaderboardTable as it handles its own sorting state - const tableComponent = ( - - ); - - // Update context with loaded data - useEffect(() => { - if (data) { - actions.setModels(data); - } - }, [data, actions]); - - // Log to understand loading state - useEffect(() => { - if (process.env.NODE_ENV === "development") { - console.log("Loading state:", { - dataLoading, - hasData: !!data, - hasTable: !!table, - hasFilteredData: !!filteredData, - filteredDataLength: filteredData?.length, - stateModelsLength: state.models.length, - hasFilters: Object.keys(state.filters).some((key) => { - if (Array.isArray(state.filters[key])) { - return state.filters[key].length > 0; - } - return !!state.filters[key]; - }), - }); - } - }, [ - dataLoading, - data, - table, - filteredData?.length, - state.models.length, - filteredData, - state.filters, - ]); - - // If an error occurred, display it - if (dataError || processingError) { - return ( - - - {(dataError || processingError)?.message || - "An error occurred while loading the data"} - - - ); - } - - return ( - - - - - {loadingStates.showSearchSkeleton ? ( - - ) : ( - memoizedSearchBar - )} - - {loadingStates.showFiltersSkeleton ? 
( - - ) : ( - - )} - - - - - - {tableComponent} - - - - - ); -}; - -export default Leaderboard; diff --git a/frontend/src/pages/LeaderboardPage/components/Leaderboard/components/ColumnSelector/ColumnSelector.js b/frontend/src/pages/LeaderboardPage/components/Leaderboard/components/ColumnSelector/ColumnSelector.js deleted file mode 100644 index 5a67cacd3d1d3343d22abcf7fd083440bcb94881..0000000000000000000000000000000000000000 --- a/frontend/src/pages/LeaderboardPage/components/Leaderboard/components/ColumnSelector/ColumnSelector.js +++ /dev/null @@ -1,217 +0,0 @@ -import React from "react"; -import { Box, Typography } from "@mui/material"; -import ViewColumnIcon from "@mui/icons-material/ViewColumn"; -import CloseIcon from "@mui/icons-material/Close"; -import FilterTag from "../../../../../../components/shared/FilterTag"; -import RestartAltIcon from "@mui/icons-material/RestartAlt"; -import { TABLE_DEFAULTS } from "../../constants/defaults"; -import DropdownButton from "../shared/DropdownButton"; -import InfoIconWithTooltip from "../../../../../../components/shared/InfoIconWithTooltip"; -import { UI_TOOLTIPS } from "../../constants/tooltips"; - -const FilterGroup = ({ title, children, count, total }) => ( - - - {title} - {count !== undefined && total !== undefined && ( - - ({count}/{total}) - - )} - - - {children} - - -); - -const ColumnSelector = ({ - table, - onReset, - hasChanges, - onColumnVisibilityChange, - loading = false, -}) => { - const { getState, setColumnVisibility } = table; - const { columnVisibility } = getState(); - - // Filter columns to only show filterable ones - const filterableColumns = [ - ...TABLE_DEFAULTS.COLUMNS.EVALUATION, - ...TABLE_DEFAULTS.COLUMNS.OPTIONAL, - ]; - - const handleReset = (e) => { - e.preventDefault(); - e.stopPropagation(); - - if (!hasChanges) return; - - // Call onReset first - onReset?.(); - - // Create object with all columns set to false by default - const defaultVisibility = {}; - - // Set to true all columns that should be visible by default - TABLE_DEFAULTS.COLUMNS.DEFAULT_VISIBLE.forEach((col) => { - defaultVisibility[col] = true; - }); - - onColumnVisibilityChange?.(defaultVisibility); - setColumnVisibility(defaultVisibility); - }; - - const toggleColumn = (columnId) => { - if (TABLE_DEFAULTS.COLUMNS.FIXED.includes(columnId)) return; - - const newVisibility = { - ...columnVisibility, - [columnId]: !columnVisibility[columnId], - }; - - setColumnVisibility(newVisibility); - onColumnVisibilityChange?.(newVisibility); - }; - - return ( - - - - - Column Visibility - - - - - - - Reset - - - - - {Object.entries(TABLE_DEFAULTS.COLUMNS.COLUMN_GROUPS).map( - ([groupTitle, columns]) => { - // Count the number of checked columns for the evaluation scores group - const isEvalGroup = groupTitle === "Evaluation Scores"; - const filteredColumns = columns.filter((col) => - filterableColumns.includes(col) - ); - const checkedCount = isEvalGroup - ? filteredColumns.filter((col) => columnVisibility[col]).length - : undefined; - const totalCount = isEvalGroup ? 
filteredColumns.length : undefined; - - return ( - - {filteredColumns.map((columnName) => { - const isFixed = - TABLE_DEFAULTS.COLUMNS.FIXED.includes(columnName); - return ( - toggleColumn(columnName)} - disabled={isFixed} - variant="tag" - /> - ); - })} - - ); - } - )} - - ); -}; - -export default ColumnSelector; diff --git a/frontend/src/pages/LeaderboardPage/components/Leaderboard/components/DisplayOptions/DisplayOptions.js b/frontend/src/pages/LeaderboardPage/components/Leaderboard/components/DisplayOptions/DisplayOptions.js deleted file mode 100644 index 8ec6c2bf0b68a6f2372d867a5a6487128956fb4c..0000000000000000000000000000000000000000 --- a/frontend/src/pages/LeaderboardPage/components/Leaderboard/components/DisplayOptions/DisplayOptions.js +++ /dev/null @@ -1,238 +0,0 @@ -import React from "react"; -import { Box, Typography } from "@mui/material"; -import TuneIcon from "@mui/icons-material/Tune"; -import CloseIcon from "@mui/icons-material/Close"; -import RestartAltIcon from "@mui/icons-material/RestartAlt"; -import FilterTag from "../../../../../../components/shared/FilterTag"; -import { - TABLE_DEFAULTS, - ROW_SIZES, - SCORE_DISPLAY_OPTIONS, - RANKING_MODE_OPTIONS, -} from "../../constants/defaults"; -import { UI_TOOLTIPS } from "../../constants/tooltips"; -import DropdownButton from "../shared/DropdownButton"; -import InfoIconWithTooltip from "../../../../../../components/shared/InfoIconWithTooltip"; - -const TableOptions = ({ - rowSize, - onRowSizeChange, - scoreDisplay = "normalized", - onScoreDisplayChange, - averageMode = "all", - onAverageModeChange, - rankingMode = "static", - onRankingModeChange, - hasChanges, - searchParams, - setSearchParams, - loading = false, -}) => { - const handleReset = () => { - onRowSizeChange(TABLE_DEFAULTS.ROW_SIZE); - onScoreDisplayChange(TABLE_DEFAULTS.SCORE_DISPLAY); - onAverageModeChange(TABLE_DEFAULTS.AVERAGE_MODE); - onRankingModeChange(TABLE_DEFAULTS.RANKING_MODE); - - const newParams = new URLSearchParams(searchParams); - ["rowSize", "scoreDisplay", "averageMode", "rankingMode"].forEach( - (param) => { - newParams.delete(param); - } - ); - setSearchParams(newParams); - }; - - return ( - - - - - Table Options - - - - - - - Reset - - - - - - - - - - {UI_TOOLTIPS.ROW_SIZE.title} - - - - - {Object.keys(ROW_SIZES).map((size) => ( - onRowSizeChange(size)} - variant="tag" - /> - ))} - - - - - - - {UI_TOOLTIPS.SCORE_DISPLAY.title} - - - - - {SCORE_DISPLAY_OPTIONS.map(({ value, label }) => ( - onScoreDisplayChange(value)} - variant="tag" - /> - ))} - - - - - - - {UI_TOOLTIPS.RANKING_MODE.title} - - - - - {RANKING_MODE_OPTIONS.map(({ value, label }) => ( - onRankingModeChange(value)} - variant="tag" - /> - ))} - - - - - - - {UI_TOOLTIPS.AVERAGE_SCORE.title} - - - - - onAverageModeChange("all")} - variant="tag" - /> - onAverageModeChange("visible")} - variant="tag" - /> - - - - - - ); -}; - -export default TableOptions; diff --git a/frontend/src/pages/LeaderboardPage/components/Leaderboard/components/Filters/FilteredModelCount.js b/frontend/src/pages/LeaderboardPage/components/Leaderboard/components/Filters/FilteredModelCount.js deleted file mode 100644 index f35223166eb572d3d09527bd60129a006d85f7c8..0000000000000000000000000000000000000000 --- a/frontend/src/pages/LeaderboardPage/components/Leaderboard/components/Filters/FilteredModelCount.js +++ /dev/null @@ -1,246 +0,0 @@ -import React from "react"; -import { Box, Typography, Skeleton } from "@mui/material"; -import { useMemo } from "react"; -import { useLeaderboard } from 
"../../context/LeaderboardContext"; - -const useModelCount = ({ totalCount, filteredCount, data, table, loading }) => { - const { state } = useLeaderboard(); - const isOfficialProviderActive = state.filters.isOfficialProviderActive; - const { officialOnly: officialOnlyCounts } = state.filterCounts; - - return useMemo(() => { - if (loading) { - return { - displayCount: 0, - currentFilteredCount: 0, - totalPinnedCount: 0, - filteredPinnedCount: 0, - isOfficialProviderActive, - }; - } - const displayCount = isOfficialProviderActive - ? officialOnlyCounts.officialProviders - : totalCount; - - // Calculate total number of pinned models - const totalPinnedCount = - data?.filter((model) => model.isPinned)?.length || 0; - - // Get current filter criteria - const filterConfig = { - selectedPrecisions: state.filters.precisions, - selectedTypes: state.filters.types, - paramsRange: state.filters.paramsRange, - searchValue: state.filters.search, - selectedBooleanFilters: state.filters.booleanFilters, - isOfficialProviderActive: state.filters.isOfficialProviderActive, - }; - - // Check each pinned model if it would pass filters without its pinned status - const filteredPinnedCount = - data?.filter((model) => { - if (!model.isPinned) return false; - - // Check each filter criteria - - // Filter by official providers - if (filterConfig.isOfficialProviderActive) { - if ( - !model.features?.is_official_provider && - !model.metadata?.is_official_provider - ) { - return false; - } - } - - // Filter by precision - if (filterConfig.selectedPrecisions.length > 0) { - if ( - !filterConfig.selectedPrecisions.includes(model.model.precision) - ) { - return false; - } - } - - // Filter by type - if (filterConfig.selectedTypes.length > 0) { - const modelType = model.model.type?.toLowerCase().trim(); - if ( - !filterConfig.selectedTypes.some((type) => - modelType?.includes(type) - ) - ) { - return false; - } - } - - // Filter by parameters - const params = model.metadata.params_billions; - if ( - params < filterConfig.paramsRange[0] || - params >= filterConfig.paramsRange[1] - ) { - return false; - } - - // Filter by search - if (filterConfig.searchValue) { - const searchLower = filterConfig.searchValue.toLowerCase(); - const modelName = model.model.name.toLowerCase(); - if (!modelName.includes(searchLower)) { - return false; - } - } - - // Filter by boolean flags - if (filterConfig.selectedBooleanFilters.length > 0) { - if ( - !filterConfig.selectedBooleanFilters.every((filter) => { - const filterValue = - typeof filter === "object" ? 
filter.value : filter; - - // Maintainer's Highlight keeps positive logic - if (filterValue === "is_official_provider") { - return model.features[filterValue]; - } - - // For all other filters, invert the logic - if (filterValue === "is_not_available_on_hub") { - return model.features[filterValue]; - } - - return !model.features[filterValue]; - }) - ) { - return false; - } - } - - // If we get here, the model passes all filters - return true; - })?.length || 0; - - return { - displayCount, - currentFilteredCount: filteredCount, - totalPinnedCount, - filteredPinnedCount, - isOfficialProviderActive, - }; - }, [ - loading, - totalCount, - filteredCount, - data, - state.filters, - isOfficialProviderActive, - officialOnlyCounts.officialProviders, - ]); -}; - -const CountTypography = ({ - value, - color = "text.primary", - loading = false, - pinnedCount = 0, - filteredPinnedCount = 0, - showPinned = false, -}) => { - if (loading) { - return ( - - ); - } - - return ( - - - {value} - - {showPinned && pinnedCount > 0 && ( - - {`+${pinnedCount}`} - - )} - - ); -}; - -const FilteredModelCount = React.memo( - ({ - totalCount = 0, - filteredCount = 0, - hasFilterChanges = false, - loading = false, - data = [], - table = null, - }) => { - const { - displayCount, - currentFilteredCount, - totalPinnedCount, - filteredPinnedCount, - isOfficialProviderActive, - } = useModelCount({ - totalCount, - filteredCount, - data, - table, - loading, - }); - - const shouldHighlight = - !loading && hasFilterChanges && currentFilteredCount !== displayCount; - - // Always show pinned models when they exist - const pinnedToShow = totalPinnedCount; - - return ( - - 0} - /> - - - - ); - } -); - -FilteredModelCount.displayName = "FilteredModelCount"; - -export default FilteredModelCount; diff --git a/frontend/src/pages/LeaderboardPage/components/Leaderboard/components/Filters/Filters.js b/frontend/src/pages/LeaderboardPage/components/Leaderboard/components/Filters/Filters.js deleted file mode 100644 index 1fa0572d69fee9212d4bcd01058fc7acdb4d1de2..0000000000000000000000000000000000000000 --- a/frontend/src/pages/LeaderboardPage/components/Leaderboard/components/Filters/Filters.js +++ /dev/null @@ -1,850 +0,0 @@ -import React, { - useState, - useEffect, - useMemo, - useRef, - forwardRef, - useCallback, -} from "react"; -import { - Box, - Typography, - Collapse, - Slider, - Grid, - Accordion, - AccordionDetails, - alpha, - useTheme, - TextField, -} from "@mui/material"; -import { - TABLE_DEFAULTS, - BOOLEAN_FILTER_OPTIONS, - FILTER_PRECISIONS, -} from "../../constants/defaults"; -import FilterTag from "../../../../../../components/shared/FilterTag"; -import { MODEL_TYPE_ORDER, MODEL_TYPES } from "../../constants/modelTypes"; -import { useLeaderboard } from "../../context/LeaderboardContext"; -import InfoIconWithTooltip from "../../../../../../components/shared/InfoIconWithTooltip"; -import { COLUMN_TOOLTIPS } from "../../constants/tooltips"; - -const getTooltipContent = (title) => { - switch (title) { - case "Model Type": - return COLUMN_TOOLTIPS.ARCHITECTURE; - case "Precision format": - return COLUMN_TOOLTIPS.PRECISION; - case "Flags": - return COLUMN_TOOLTIPS.FLAGS; - case "Parameters": - return COLUMN_TOOLTIPS.PARAMETERS; - default: - return null; - } -}; - -const FilterGroup = ({ - title, - tooltip, - children, - paramsRange, - onParamsRangeChange, -}) => { - const theme = useTheme(); - const [localParamsRange, setLocalParamsRange] = useState(paramsRange); - const stableTimerRef = useRef(null); - - // Handle local range 
change - const handleLocalRangeChange = useCallback((event, newValue) => { - setLocalParamsRange(newValue); - }, []); - - // Handle input change - const handleInputChange = useCallback( - (index) => (event) => { - const value = event.target.value === "" ? "" : Number(event.target.value); - if (value === "" || (value >= -1 && value <= 140)) { - const newRange = [...localParamsRange]; - newRange[index] = value; - setLocalParamsRange(newRange); - } - }, - [localParamsRange] - ); - - // Sync local state with props - useEffect(() => { - setLocalParamsRange(paramsRange); - }, [paramsRange]); - - // Propagate changes to parent after delay - useEffect(() => { - if (stableTimerRef.current) { - clearTimeout(stableTimerRef.current); - } - - stableTimerRef.current = setTimeout(() => { - if (Array.isArray(localParamsRange) && localParamsRange.length === 2) { - onParamsRangeChange(localParamsRange); - } - }, 300); - - return () => { - if (stableTimerRef.current) { - clearTimeout(stableTimerRef.current); - } - }; - }, [localParamsRange, onParamsRangeChange]); - - const renderContent = () => { - if (title === "Parameters") { - return ( - - - - - - - - (value === -1 ? "All" : `${value}B`)} - sx={{ - "& .MuiSlider-rail": { - height: 10, - backgroundColor: "background.paper", - border: "1px solid", - borderColor: "divider", - opacity: 1, - }, - "& .MuiSlider-track": { - height: 10, - border: "1px solid", - borderColor: (theme) => - alpha( - theme.palette.primary.main, - theme.palette.mode === "light" ? 0.3 : 0.5 - ), - backgroundColor: (theme) => - alpha( - theme.palette.primary.main, - theme.palette.mode === "light" ? 0.1 : 0.2 - ), - }, - "& .MuiSlider-thumb": { - width: 20, - height: 20, - backgroundColor: "background.paper", - border: "1px solid", - borderColor: "primary.main", - "&:hover, &.Mui-focusVisible": { - boxShadow: (theme) => - `0 0 0 8px ${alpha( - theme.palette.primary.main, - theme.palette.mode === "light" ? 0.08 : 0.16 - )}`, - }, - "&.Mui-active": { - boxShadow: (theme) => - `0 0 0 12px ${alpha( - theme.palette.primary.main, - theme.palette.mode === "light" ? 0.08 : 0.16 - )}`, - }, - }, - "& .MuiSlider-valueLabel": { - backgroundColor: theme.palette.primary.main, - }, - "& .MuiSlider-mark": { - width: 2, - height: 10, - backgroundColor: "divider", - }, - "& .MuiSlider-markLabel": { - fontSize: "0.875rem", - "&::after": { - content: '"B"', - marginLeft: "1px", - opacity: 0.5, - }, - '&[data-index="0"]::after': { - content: '""', - }, - }, - }} - /> - - ); - } - return ( - - {children} - - ); - }; - - return ( - - - - {title} - - - - {renderContent()} - - ); -}; - -const CustomCollapse = forwardRef((props, ref) => { - const { children, style = {}, ...other } = props; - const collapsedHeight = "0px"; - const timeout = 300; - - const wrapperRef = useRef(null); - const [animatedHeight, setAnimatedHeight] = useState( - props.in ? "auto" : collapsedHeight - ); - - useEffect(() => { - if (!wrapperRef.current) return; - - if (props.in) { - const contentHeight = wrapperRef.current.scrollHeight; - setAnimatedHeight(`${contentHeight}px`); - } else { - setAnimatedHeight(collapsedHeight); - } - }, [props.in, children]); - - const handleEntered = (node) => { - setAnimatedHeight("auto"); - if (props.onEntered) { - props.onEntered(node); - } - }; - - return ( - -
{children}
-
- ); -}); - -const LeaderboardFilters = ({ - selectedPrecisions = FILTER_PRECISIONS, - onPrecisionsChange = () => {}, - selectedTypes = MODEL_TYPE_ORDER, - onTypesChange = () => {}, - paramsRange = [-1, 140], - onParamsRangeChange = () => {}, - selectedBooleanFilters = [], - onBooleanFiltersChange = () => {}, - data = [], - expanded, - onToggleExpanded, - loading = false, -}) => { - const [localParamsRange, setLocalParamsRange] = useState(paramsRange); - const stableTimerRef = useRef(null); - const { state, actions } = useLeaderboard(); - const { normal: filterCounts, officialOnly: officialOnlyCounts } = - state.filterCounts; - const isOfficialProviderActive = state.filters.isOfficialProviderActive; - const currentCounts = useMemo( - () => (isOfficialProviderActive ? officialOnlyCounts : filterCounts), - [isOfficialProviderActive, officialOnlyCounts, filterCounts] - ); - - useEffect(() => { - setLocalParamsRange(paramsRange); - }, [paramsRange]); - - // Clean up timer when component unmounts - useEffect(() => { - return () => { - if (stableTimerRef.current) { - clearTimeout(stableTimerRef.current); - } - }; - }, []); - - const handleParamsRangeChange = (event, newValue) => { - setLocalParamsRange(newValue); - }; - - const handleParamsRangeChangeCommitted = (event, newValue) => { - // Reset timer on each change - if (stableTimerRef.current) { - clearTimeout(stableTimerRef.current); - } - - // Update URL immediately - onParamsRangeChange(newValue); - - // Trigger data update after debounce - stableTimerRef.current = setTimeout(() => { - actions.updateFilteredData(); - }, TABLE_DEFAULTS.DEBOUNCE.SEARCH); - }; - - const handlePrecisionToggle = (precision) => { - const newPrecisions = selectedPrecisions.includes(precision) - ? selectedPrecisions.filter((p) => p !== precision) - : [...selectedPrecisions, precision]; - onPrecisionsChange(newPrecisions); - }; - - const handleBooleanFilterToggle = (filter) => { - const newFilters = selectedBooleanFilters.includes(filter) - ? selectedBooleanFilters.filter((f) => f !== filter) - : [...selectedBooleanFilters, filter]; - onBooleanFiltersChange(newFilters); - }; - - // Filter options based on their hide property - const showFilterOptions = BOOLEAN_FILTER_OPTIONS.filter( - (option) => !option.hide - ); - const hideFilterOptions = BOOLEAN_FILTER_OPTIONS.filter( - (option) => option.hide - ); - - const handleOfficialProviderToggle = () => { - actions.toggleOfficialProvider(); - }; - - return loading ? null : ( - - - - - - - - alpha(theme.palette.primary.main, 0.02), - border: "1px solid", - borderColor: (theme) => - alpha(theme.palette.primary.main, 0.2), - borderRadius: 1, - p: 3, - position: "relative", - width: "100%", - display: "flex", - flexDirection: "column", - "&:hover": { - borderColor: (theme) => - alpha(theme.palette.primary.main, 0.3), - backgroundColor: (theme) => - alpha(theme.palette.primary.main, 0.03), - }, - transition: (theme) => - theme.transitions.create( - ["border-color", "background-color"], - { - duration: theme.transitions.duration.short, - } - ), - }} - > - - Advanced Filters - - - - - - - {FILTER_PRECISIONS.map((precision) => ( - - handlePrecisionToggle(precision) - } - count={currentCounts.precisions[precision]} - showCheckbox={true} - /> - ))} - - - - - - - - - - - alpha( - theme.palette.primary.main, - theme.palette.mode === "light" - ? 0.3 - : 0.5 - ), - backgroundColor: (theme) => - alpha( - theme.palette.primary.main, - theme.palette.mode === "light" - ? 
0.1 - : 0.2 - ), - }, - "& .MuiSlider-thumb": { - width: 20, - height: 20, - backgroundColor: "background.paper", - border: "1px solid", - borderColor: "primary.main", - "&:hover, &.Mui-focusVisible": { - boxShadow: (theme) => - `0 0 0 8px ${alpha( - theme.palette.primary.main, - theme.palette.mode === "light" - ? 0.08 - : 0.16 - )}`, - }, - "&.Mui-active": { - boxShadow: (theme) => - `0 0 0 12px ${alpha( - theme.palette.primary.main, - theme.palette.mode === "light" - ? 0.08 - : 0.16 - )}`, - }, - }, - "& .MuiSlider-mark": { - backgroundColor: "text.disabled", - height: 2, - width: 2, - borderRadius: "50%", - }, - "& .MuiSlider-markLabel": { - color: "text.secondary", - }, - }} - /> - - - - - - - {/* Second row */} - - - - {MODEL_TYPE_ORDER.sort( - (a, b) => - MODEL_TYPES[a].order - MODEL_TYPES[b].order - ).map((type) => ( - { - const newTypes = selectedTypes.includes(type) - ? selectedTypes.filter((t) => t !== type) - : [...selectedTypes, type]; - onTypesChange(newTypes); - }} - count={currentCounts.modelTypes[type]} - variant="tag" - showCheckbox={true} - /> - ))} - - - - - - - - {hideFilterOptions.map((filter) => ( - { - const newFilters = - selectedBooleanFilters.includes( - filter.value - ) - ? selectedBooleanFilters.filter( - (f) => f !== filter.value - ) - : [ - ...selectedBooleanFilters, - filter.value, - ]; - onBooleanFiltersChange(newFilters); - }} - count={ - filter.value === "is_moe" - ? currentCounts.mixtureOfExperts - : filter.value === "is_flagged" - ? currentCounts.flagged - : filter.value === "is_merged" - ? currentCounts.merged - : filter.value === "is_not_available_on_hub" - ? currentCounts.notOnHub - : 0 - } - isHideFilter={false} - totalCount={data.length} - showCheckbox={true} - /> - ))} - - - - - - - - - - - alpha(theme.palette.secondary.main, 0.02), - border: "1px solid", - borderColor: (theme) => - alpha(theme.palette.secondary.main, 0.15), - borderRadius: 1, - p: 3, - position: "relative", - width: "100%", - display: "flex", - flexDirection: "column", - alignItems: "center", - justifyContent: "center", - textAlign: "center", - minHeight: "100%", - "&:hover": { - borderColor: (theme) => - alpha(theme.palette.secondary.main, 0.25), - backgroundColor: (theme) => - alpha(theme.palette.secondary.main, 0.03), - }, - transition: (theme) => - theme.transitions.create( - ["border-color", "background-color"], - { - duration: theme.transitions.duration.short, - } - ), - }} - > - - - Official Models - - - Show only models that are officially provided and - maintained by their original creators. - - - {showFilterOptions.map((filter) => ( - - handleBooleanFilterToggle(filter.value) - } - count={ - filter.value === "is_official_provider" - ? currentCounts.officialProviders - : 0 - } - showCheckbox={true} - variant="secondary" - /> - - - {( - filter.value === "is_official_provider" - ? isOfficialProviderActive - : selectedBooleanFilters.includes(filter.value) - ) - ? 
"Filter active" - : "Filter inactive"} - - - ))} - - - - - - - - - - ); -}; - -export default LeaderboardFilters; diff --git a/frontend/src/pages/LeaderboardPage/components/Leaderboard/components/Filters/QuickFilters.js b/frontend/src/pages/LeaderboardPage/components/Leaderboard/components/Filters/QuickFilters.js deleted file mode 100644 index 91d074c6375e8129eda09cea299b6aa36e26c3f9..0000000000000000000000000000000000000000 --- a/frontend/src/pages/LeaderboardPage/components/Leaderboard/components/Filters/QuickFilters.js +++ /dev/null @@ -1,226 +0,0 @@ -import React, { useCallback, useMemo } from "react"; -import { Box, Typography, Skeleton } from "@mui/material"; -import { alpha } from "@mui/material/styles"; -import { QUICK_FILTER_PRESETS } from "../../constants/quickFilters"; -import FilterTag from "../../../../../../components/shared/FilterTag"; -import { useLeaderboard } from "../../context/LeaderboardContext"; -import InfoIconWithTooltip from "../../../../../../components/shared/InfoIconWithTooltip"; -import { UI_TOOLTIPS } from "../../constants/tooltips"; - -const QuickFiltersTitle = ({ sx = {} }) => ( - - - Quick Filters - - - -); - -export const QuickFiltersSkeleton = () => ( - - ({ - xs: alpha(theme.palette.primary.main, 0.02), - lg: "transparent", - }), - borderColor: (theme) => ({ - xs: alpha(theme.palette.primary.main, 0.2), - lg: "transparent", - }), - border: "1px solid", - borderRadius: 1, - p: 3, - display: "flex", - flexDirection: { xs: "column", md: "column", lg: "row" }, - gap: 2, - mb: 2, - width: "100%", - }} - > - - - {[1, 2, 3, 4].map((i) => ( - - ))} - - - -); - -const QuickFilters = ({ totalCount = 0, loading = false }) => { - const { state, actions } = useLeaderboard(); - const { normal: filterCounts, officialOnly: officialOnlyCounts } = - state.filterCounts; - const isOfficialProviderActive = state.filters.isOfficialProviderActive; - const currentParams = state.filters.paramsRange; - - const currentCounts = useMemo( - () => (isOfficialProviderActive ? 
officialOnlyCounts : filterCounts), - [isOfficialProviderActive, officialOnlyCounts, filterCounts] - ); - - const modelSizePresets = useMemo( - () => - QUICK_FILTER_PRESETS.filter( - (preset) => preset.id !== "official_providers" - ), - [] - ); - - const officialProvidersPreset = useMemo( - () => - QUICK_FILTER_PRESETS.find((preset) => preset.id === "official_providers"), - [] - ); - - const handleSizePresetClick = useCallback( - (preset) => { - const isActive = - currentParams[0] === preset.filters.paramsRange[0] && - currentParams[1] === preset.filters.paramsRange[1]; - - if (isActive) { - actions.setFilter("paramsRange", [-1, 140]); // Reset to default - } else { - actions.setFilter("paramsRange", preset.filters.paramsRange); - } - }, - [currentParams, actions] - ); - - const getPresetCount = useCallback( - (preset) => { - const range = preset.id.split("_")[0]; - return currentCounts.parameterRanges[range] || 0; - }, - [currentCounts] - ); - - const handleOfficialProviderToggle = useCallback(() => { - actions.toggleOfficialProvider(); - }, [actions]); - - if (loading) { - return ; - } - - return ( - - ({ - xs: alpha(theme.palette.primary.main, 0.02), - lg: "transparent", - }), - borderColor: (theme) => ({ - xs: alpha(theme.palette.primary.main, 0.2), - lg: "transparent", - }), - border: "1px solid", - borderRadius: 1, - p: 3, - display: "flex", - flexDirection: { xs: "column", lg: "row" }, - alignItems: "center", - gap: 2, - width: "100%", - }} - > - - - - div": { - width: { xs: "100%", md: 0, lg: "auto" }, - flex: { - xs: "auto", - md: "1 1 0", - lg: "0 0 auto", - }, - }, - }} - > - {modelSizePresets.map((preset) => ( - handleSizePresetClick(preset)} - count={getPresetCount(preset)} - totalCount={totalCount} - /> - ))} - - - - {officialProvidersPreset && ( - - )} - - - - ); -}; - -QuickFilters.displayName = "QuickFilters"; - -export default React.memo(QuickFilters); diff --git a/frontend/src/pages/LeaderboardPage/components/Leaderboard/components/Filters/SearchBar.js b/frontend/src/pages/LeaderboardPage/components/Leaderboard/components/Filters/SearchBar.js deleted file mode 100644 index c32cd8f8640b0d2e8fa7c1928f76fcd8d53fe494..0000000000000000000000000000000000000000 --- a/frontend/src/pages/LeaderboardPage/components/Leaderboard/components/Filters/SearchBar.js +++ /dev/null @@ -1,329 +0,0 @@ -import React, { useState, useEffect } from "react"; -import { Box, InputBase, Typography, Paper, Skeleton } from "@mui/material"; - -import SearchIcon from "@mui/icons-material/Search"; -import FilterListIcon from "@mui/icons-material/FilterList"; -import RestartAltIcon from "@mui/icons-material/RestartAlt"; -import { useTheme } from "@mui/material/styles"; -import { generateSearchDescription } from "../../utils/searchUtils"; -import { - HIGHLIGHT_COLORS, - TABLE_DEFAULTS, - FILTER_PRECISIONS, -} from "../../constants/defaults"; -import { MODEL_TYPE_ORDER } from "../../constants/modelTypes"; -import { alpha } from "@mui/material/styles"; -import FilteredModelCount from "./FilteredModelCount"; -import { useLeaderboard } from "../../context/LeaderboardContext"; -import InfoIconWithTooltip from "../../../../../../components/shared/InfoIconWithTooltip"; -import { UI_TOOLTIPS } from "../../constants/tooltips"; - -export const SearchBarSkeleton = () => ( - - alpha(theme.palette.background.paper, 0.8), - borderRadius: 1, - border: (theme) => - `1px solid ${alpha( - theme.palette.divider, - theme.palette.mode === "dark" ? 
0.05 : 0.1 - )}`, - display: "flex", - alignItems: "center", - px: 2, - gap: 2, - }} - > - - - - - - - - - - - Supports strict search and regex • Use semicolons for multiple terms - - - -); - -const SearchDescription = ({ searchValue }) => { - const searchGroups = generateSearchDescription(searchValue); - - if (!searchGroups || searchGroups.length === 0) return null; - - return ( - - - Showing models matching: - - {searchGroups.map(({ text, index }, i) => ( - - {i > 0 && ( - - and - - )} - - theme.palette.getContrastText( - HIGHLIGHT_COLORS[index % HIGHLIGHT_COLORS.length] - ), - padding: "2px 4px", - borderRadius: "4px", - fontSize: "0.85rem", - fontWeight: 500, - }} - > - {text} - - - ))} - - ); -}; - -const SearchBar = ({ - onToggleFilters, - filtersOpen, - loading = false, - data = [], - table = null, -}) => { - const theme = useTheme(); - const { state, actions } = useLeaderboard(); - const [localValue, setLocalValue] = useState(state.filters.search); - - useEffect(() => { - setLocalValue(state.filters.search); - }, [state.filters.search]); - - useEffect(() => { - const timer = setTimeout(() => { - if (localValue !== state.filters.search) { - actions.setFilter("search", localValue); - } - }, TABLE_DEFAULTS.DEBOUNCE.SEARCH); - - return () => clearTimeout(timer); - }, [localValue, state.filters.search, actions]); - - const handleLocalChange = (e) => { - setLocalValue(e.target.value); - }; - - const hasActiveFilters = - Object.values(state.filters.booleanFilters).some((value) => value) || - state.filters.precisions.length !== FILTER_PRECISIONS.length || - state.filters.types.length !== MODEL_TYPE_ORDER.length || - state.filters.paramsRange[0] !== -1 || - state.filters.paramsRange[1] !== 140 || - state.filters.isOfficialProviderActive; - - const shouldShowReset = localValue || hasActiveFilters; - - return ( - - - - - {!loading && ( - - )} - - {shouldShowReset && ( - { - setLocalValue(""); - actions.resetFilters(); - }} - sx={{ - display: "flex", - alignItems: "center", - gap: 0.5, - cursor: "pointer", - color: "text.secondary", - backgroundColor: "transparent", - border: "1px solid", - borderColor: "divider", - borderRadius: 1, - padding: "4px 8px", - "&:hover": { - backgroundColor: "action.hover", - color: "text.primary", - }, - userSelect: "none", - transition: "all 0.2s ease", - }} - > - - - Reset - - - )} - - - - Advanced Filters - - - - - - - {localValue ? 
( - - ) : ( - - - Supports strict search and regex • Use semicolons for multiple - terms - - - )} - - - ); -}; - -export default SearchBar; diff --git a/frontend/src/pages/LeaderboardPage/components/Leaderboard/components/Filters/hooks/useOfficialProvidersMode.js b/frontend/src/pages/LeaderboardPage/components/Leaderboard/components/Filters/hooks/useOfficialProvidersMode.js deleted file mode 100644 index 729129cb3081bb525bcae2fc707f70658f74e778..0000000000000000000000000000000000000000 --- a/frontend/src/pages/LeaderboardPage/components/Leaderboard/components/Filters/hooks/useOfficialProvidersMode.js +++ /dev/null @@ -1,130 +0,0 @@ -import { useCallback, useState, useEffect, useRef } from "react"; -import { useSearchParams } from "react-router-dom"; - -const useRouterSearchParams = () => { - try { - return useSearchParams(); - } catch { - return [null, () => {}]; - } -}; - -export const useOfficialProvidersMode = () => { - const [isOfficialProviderActive, setIsOfficialProviderActive] = - useState(false); - const [searchParams, setSearchParams] = useRouterSearchParams(); - const normalFiltersRef = useRef(null); - const isInitialLoadRef = useRef(true); - const lastToggleSourceRef = useRef(null); - - // Effect to handle initial state and updates - useEffect(() => { - if (!searchParams) return; - - const filters = searchParams.get("filters"); - const isHighlighted = - filters?.includes("is_official_provider") || false; - - // On initial load - if (isInitialLoadRef.current) { - isInitialLoadRef.current = false; - - // If official mode is active at start, store filters without the highlightFilter - if (isHighlighted && filters) { - const initialNormalFilters = filters - .split(",") - .filter((f) => f !== "is_official_provider" && f !== "") - .filter(Boolean); - if (initialNormalFilters.length > 0) { - normalFiltersRef.current = initialNormalFilters.join(","); - } - } - - // Update state without triggering URL change - setIsOfficialProviderActive(isHighlighted); - return; - } - - // For subsequent changes - if (!isHighlighted && filters) { - normalFiltersRef.current = filters; - } - - setIsOfficialProviderActive(isHighlighted); - }, [searchParams]); - - const toggleOfficialProviderMode = useCallback( - (source = null) => { - if (!searchParams || !setSearchParams) return; - - // If source is the same as last time and last change was less than 100ms ago, ignore - const now = Date.now(); - if ( - source && - source === lastToggleSourceRef.current?.source && - now - (lastToggleSourceRef.current?.timestamp || 0) < 100 - ) { - return; - } - - const currentFiltersStr = searchParams.get("filters"); - const currentFilters = - currentFiltersStr?.split(",").filter(Boolean) || []; - const highlightFilter = "is_official_provider"; - const newSearchParams = new URLSearchParams(searchParams); - - if (currentFilters.includes(highlightFilter)) { - // Deactivating official provider mode - if (normalFiltersRef.current) { - const normalFilters = normalFiltersRef.current - .split(",") - .filter((f) => f !== highlightFilter && f !== "") - .filter(Boolean); - - if (normalFilters.length > 0) { - newSearchParams.set("filters", normalFilters.join(",")); - } else { - newSearchParams.delete("filters"); - } - } else { - const newFilters = currentFilters.filter( - (f) => f !== highlightFilter && f !== "" - ); - if (newFilters.length === 0) { - newSearchParams.delete("filters"); - } else { - newSearchParams.set("filters", newFilters.join(",")); - } - } - } else { - // Activating official provider mode - if (currentFiltersStr) 
{ - normalFiltersRef.current = currentFiltersStr; - } - - const filtersToSet = [ - ...new Set([...currentFilters, highlightFilter]), - ].filter(Boolean); - newSearchParams.set("filters", filtersToSet.join(",")); - } - - // Update state immediately - setIsOfficialProviderActive(!currentFilters.includes(highlightFilter)); - - // Save source and timestamp of last change - lastToggleSourceRef.current = { - source, - timestamp: now, - }; - - // Update search params and let HashRouter handle the URL - setSearchParams(newSearchParams); - }, - [searchParams, setSearchParams] - ); - - return { - isOfficialProviderActive, - toggleOfficialProviderMode, - }; -}; diff --git a/frontend/src/pages/LeaderboardPage/components/Leaderboard/components/Filters/hooks/usePresets.js b/frontend/src/pages/LeaderboardPage/components/Leaderboard/components/Filters/hooks/usePresets.js deleted file mode 100644 index 35e17e54b0e1978635440908d3de6c742b37a856..0000000000000000000000000000000000000000 --- a/frontend/src/pages/LeaderboardPage/components/Leaderboard/components/Filters/hooks/usePresets.js +++ /dev/null @@ -1,98 +0,0 @@ -import { useCallback } from "react"; -import { QUICK_FILTER_PRESETS } from "../../../constants/quickFilters"; -import { TABLE_DEFAULTS } from "../../../constants/defaults"; - -const DEFAULT_FILTERS = { - searchValue: "", - selectedPrecisions: TABLE_DEFAULTS.SEARCH.PRECISIONS, - selectedTypes: TABLE_DEFAULTS.SEARCH.TYPES, - paramsRange: TABLE_DEFAULTS.SEARCH.PARAMS_RANGE, - selectedBooleanFilters: [], -}; - -export const usePresets = (searchFilters) => { - const handlePresetChange = useCallback( - (preset) => { - if (!searchFilters?.batchUpdateState) return; - - if (preset === null) { - // Reset with default values - searchFilters.batchUpdateState(DEFAULT_FILTERS, true); - return; - } - - // Apply preset with default values as base - const updates = { - ...DEFAULT_FILTERS, - ...preset.filters, - }; - - // Apply all changes at once - searchFilters.batchUpdateState(updates, true); - }, - [searchFilters] - ); - - const resetPreset = useCallback(() => { - handlePresetChange(null); - }, [handlePresetChange]); - - const getActivePreset = useCallback(() => { - // If searchFilters is not initialized yet, return null - if (!searchFilters) return null; - - // Dynamic detection of preset matching current filters - const currentParamsRange = Array.isArray(searchFilters.paramsRange) - ? searchFilters.paramsRange - : DEFAULT_FILTERS.paramsRange; - const currentBooleanFilters = Array.isArray( - searchFilters.selectedBooleanFilters - ) - ? searchFilters.selectedBooleanFilters - : DEFAULT_FILTERS.selectedBooleanFilters; - const currentPrecisions = Array.isArray(searchFilters.selectedPrecisions) - ? searchFilters.selectedPrecisions - : DEFAULT_FILTERS.selectedPrecisions; - const currentTypes = Array.isArray(searchFilters.selectedTypes) - ? searchFilters.selectedTypes - : DEFAULT_FILTERS.selectedTypes; - - return ( - QUICK_FILTER_PRESETS.find((preset) => { - const presetParamsRange = Array.isArray(preset.filters.paramsRange) - ? preset.filters.paramsRange - : DEFAULT_FILTERS.paramsRange; - const presetBooleanFilters = Array.isArray( - preset.filters.selectedBooleanFilters - ) - ? 
preset.filters.selectedBooleanFilters - : DEFAULT_FILTERS.selectedBooleanFilters; - - const paramsMatch = - JSON.stringify(presetParamsRange) === - JSON.stringify(currentParamsRange); - const booleanFiltersMatch = - JSON.stringify(presetBooleanFilters.sort()) === - JSON.stringify(currentBooleanFilters.sort()); - - // Check if other filters match default values - const precisionMatch = - JSON.stringify(currentPrecisions.sort()) === - JSON.stringify(DEFAULT_FILTERS.selectedPrecisions.sort()); - const typesMatch = - JSON.stringify(currentTypes.sort()) === - JSON.stringify(DEFAULT_FILTERS.selectedTypes.sort()); - - return ( - paramsMatch && booleanFiltersMatch && precisionMatch && typesMatch - ); - })?.id || null - ); - }, [searchFilters]); - - return { - activePreset: getActivePreset(), - handlePresetChange, - resetPreset, - }; -}; diff --git a/frontend/src/pages/LeaderboardPage/components/Leaderboard/components/PerformanceMonitor.js b/frontend/src/pages/LeaderboardPage/components/Leaderboard/components/PerformanceMonitor.js deleted file mode 100644 index d3a20d28639f0d84835d854fe405795e14499d01..0000000000000000000000000000000000000000 --- a/frontend/src/pages/LeaderboardPage/components/Leaderboard/components/PerformanceMonitor.js +++ /dev/null @@ -1,570 +0,0 @@ -import React, { useEffect, useState, useRef } from "react"; -import { Box, Typography, Tooltip, useTheme } from "@mui/material"; -import NetworkCheckIcon from "@mui/icons-material/NetworkCheck"; -import MemoryIcon from "@mui/icons-material/Memory"; -import SpeedIcon from "@mui/icons-material/Speed"; -import GpuIcon from "@mui/icons-material/Memory"; -import InfoOutlinedIcon from "@mui/icons-material/InfoOutlined"; - -const getGPUStats = () => { - try { - const canvas = document.createElement("canvas"); - const gl = - canvas.getContext("webgl") || canvas.getContext("experimental-webgl"); - - if (!gl) { - canvas.remove(); - return null; - } - - // Try to get GPU info extensions - const debugInfo = gl.getExtension("WEBGL_debug_renderer_info"); - - // Estimate GPU memory usage (very approximate) - let usedMemoryEstimate = 0; - - try { - // Create test texture - const testTexture = gl.createTexture(); - gl.bindTexture(gl.TEXTURE_2D, testTexture); - - // Test size: 1024x1024 RGBA - const testSize = 1024; - const pixels = new Uint8Array(testSize * testSize * 4); - gl.texImage2D( - gl.TEXTURE_2D, - 0, - gl.RGBA, - testSize, - testSize, - 0, - gl.RGBA, - gl.UNSIGNED_BYTE, - pixels - ); - - // Estimate memory usage (very approximate) - usedMemoryEstimate = (testSize * testSize * 4) / (1024 * 1024); // In MB - - gl.deleteTexture(testTexture); - gl.getExtension("WEBGL_lose_context")?.loseContext(); - } catch (e) { - console.warn("GPU memory estimation failed:", e); - } finally { - // Cleanup WebGL resources - const loseContext = gl.getExtension("WEBGL_lose_context"); - if (loseContext) loseContext.loseContext(); - gl.canvas.remove(); - } - - return { - vendor: debugInfo - ? gl.getParameter(debugInfo.UNMASKED_VENDOR_WEBGL) - : "Unknown", - renderer: debugInfo - ? gl.getParameter(debugInfo.UNMASKED_RENDERER_WEBGL) - : "Unknown", - usedMemory: Math.round(usedMemoryEstimate), - }; - } catch (e) { - return null; - } -}; - -const MetricBox = ({ icon, label, value, tooltip }) => { - const theme = useTheme(); - return ( - - {icon} - - - {label} - - - {React.isValidElement(value) ? 
value : {value}} - - {tooltip && ( - - - - - - )} - - ); -}; - -const formatNumber = (num) => { - return num.toString().replace(/\B(?=(\d{3})+(?!\d))/g, " "); -}; - -const PerformanceMonitor = () => { - const theme = useTheme(); - - const [stats, setStats] = useState({ - fps: 0, - memory: { - usedJSHeapSize: 0, - totalJSHeapSize: 0, - }, - renders: 0, - network: { - transferSize: 0, - decodedBodySize: 0, - compressionRatio: 0, - }, - gpu: getGPUStats(), - fcp: null, - }); - const [isVisible, setIsVisible] = useState( - process.env.NODE_ENV === "development" - ); - const renderCountRef = useRef(0); - const originalCreateElementRef = useRef(null); - - useEffect(() => { - const handleKeyDown = (event) => { - // Ignore if user is in an input field - if ( - event.target.tagName === "INPUT" || - event.target.tagName === "TEXTAREA" - ) { - return; - } - - if (event.key === "p" || event.key === "P") { - setIsVisible((prev) => !prev); - } - }; - - window.addEventListener("keydown", handleKeyDown); - return () => window.removeEventListener("keydown", handleKeyDown); - }, []); - - useEffect(() => { - let frameCount = 0; - let lastTime = performance.now(); - let animationFrameId; - - const getNetworkStats = () => { - const resources = performance.getEntriesByType("resource"); - const navigation = performance.getEntriesByType("navigation")[0]; - - let totalTransferSize = navigation ? navigation.transferSize : 0; - let totalDecodedSize = navigation ? navigation.decodedBodySize : 0; - - resources.forEach((resource) => { - totalTransferSize += resource.transferSize || 0; - totalDecodedSize += resource.decodedBodySize || 0; - }); - - const compressionRatio = totalDecodedSize - ? Math.round((1 - totalTransferSize / totalDecodedSize) * 100) - : 0; - - return { - transferSize: Math.round(totalTransferSize / 1024), - decodedBodySize: Math.round(totalDecodedSize / 1024), - compressionRatio, - }; - }; - - // Save original function - originalCreateElementRef.current = React.createElement; - - // Replace createElement - React.createElement = function (...args) { - renderCountRef.current++; - return originalCreateElementRef.current.apply(this, args); - }; - - const updateStats = () => { - frameCount++; - const now = performance.now(); - const delta = now - lastTime; - - if (delta >= 1000) { - const fps = Math.round((frameCount * 1000) / delta); - - const memory = window.performance?.memory - ? 
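The updateStats loop above is a standard requestAnimationFrame frame counter that flushes once per second; the same technique in isolation:

// Count frames per second: increment on every animation frame and
// flush the counter once at least a second has elapsed.
function startFpsMeter(onFps) {
  let frames = 0;
  let last = performance.now();
  let id;
  const tick = () => {
    frames++;
    const now = performance.now();
    const delta = now - last;
    if (delta >= 1000) {
      onFps(Math.round((frames * 1000) / delta));
      frames = 0;
      last = now;
    }
    id = requestAnimationFrame(tick);
  };
  id = requestAnimationFrame(tick);
  return () => cancelAnimationFrame(id); // stop function, as in the effect cleanup above
}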
{ - usedJSHeapSize: Math.round( - window.performance.memory.usedJSHeapSize / 1048576 - ), - totalJSHeapSize: Math.round( - window.performance.memory.totalJSHeapSize / 1048576 - ), - } - : null; - - const network = getNetworkStats(); - const gpu = getGPUStats(); - - setStats((prev) => ({ - ...prev, - fps, - memory: memory || prev.memory, - renders: renderCountRef.current, - network, - gpu, - })); - - frameCount = 0; - lastTime = now; - } - - animationFrameId = requestAnimationFrame(updateStats); - }; - - updateStats(); - - return () => { - cancelAnimationFrame(animationFrameId); - // Restore original function - if (originalCreateElementRef.current) { - React.createElement = originalCreateElementRef.current; - } - // Clean up counters - renderCountRef.current = 0; - delete window.__REACT_RENDERS__; - }; - }, []); - - useEffect(() => { - // Add FCP observer - if (window.PerformanceObserver) { - try { - const fcpObserver = new PerformanceObserver((entryList) => { - const entries = entryList.getEntries(); - if (entries.length > 0) { - const fcp = entries[0].startTime; - setStats((prev) => ({ - ...prev, - fcp, - })); - } - }); - - fcpObserver.observe({ entryTypes: ["paint"] }); - return () => fcpObserver.disconnect(); - } catch (e) { - console.warn("FCP observation failed:", e); - } - } - }, []); - - const getFpsColor = (fps) => { - if (fps >= 55) return "#4CAF50"; - if (fps >= 30) return "#FFC107"; - return "#F44336"; - }; - - return isVisible ? ( - - - - Performances{" "} - dev only - - - {/* Performance Metrics */} - - - } - label="FPS" - value={ - - {stats.fps} - - } - tooltip="Frames Per Second - Indicates how smooth the UI is running" - /> - - {stats.fcp !== null && ( - - } - label="FCP" - value={ - - {Math.round(stats.fcp)}ms - - } - tooltip="First Contentful Paint - Time until first content is rendered" - /> - )} - - ⚛️ - - } - label="React" - value={ - - {formatNumber(stats.renders)} - cycles - - } - tooltip="Total number of React render cycles" - /> - - - {/* Memory Metrics */} - - {window.performance?.memory && ( - } - label="Mem" - value={ - - {stats.memory.usedJSHeapSize} - / - {stats.memory.totalJSHeapSize} - MB - - } - tooltip="JavaScript heap memory usage (Used / Total)" - /> - )} - {stats.gpu && ( - } - label="GPU" - value={ - - {stats.gpu.usedMemory} - MB - - } - tooltip="Estimated GPU memory usage" - /> - )} - - - {/* Network Metrics */} - - - } - label="Net" - value={ - - {stats.network.transferSize} - KB - - } - tooltip="Network data transferred" - /> - } - label="Size" - value={ - - {formatNumber(stats.network.decodedBodySize)} - KB - 0 ? "#81C784" : "inherit", - fontSize: "0.7rem", - opacity: 0.8, - ml: 1, - }} - > - (-{stats.network.compressionRatio}%) - - - } - tooltip="Total decoded size and compression ratio" - /> - - - Press "P" to show/hide - - - -
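The paint observer above reports the first "paint" entry it sees; a sketch of the same measurement that instead targets the first-contentful-paint entry by name, using the buffered flag so a paint that happened before subscription is still delivered:

// Report First Contentful Paint once, then stop observing.
function observeFCP(report) {
  if (!("PerformanceObserver" in window)) return;
  const observer = new PerformanceObserver((list) => {
    for (const entry of list.getEntriesByName("first-contentful-paint")) {
      report(entry.startTime);
      observer.disconnect();
    }
  });
  observer.observe({ type: "paint", buffered: true });
}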
- ) : null; -}; - -export default React.memo(PerformanceMonitor); diff --git a/frontend/src/pages/LeaderboardPage/components/Leaderboard/components/Table/Table.js b/frontend/src/pages/LeaderboardPage/components/Leaderboard/components/Table/Table.js deleted file mode 100644 index b9279247881135a2d4cf2122ed542474fc20f6be..0000000000000000000000000000000000000000 --- a/frontend/src/pages/LeaderboardPage/components/Leaderboard/components/Table/Table.js +++ /dev/null @@ -1,720 +0,0 @@ -import React, { useRef, useCallback, useMemo } from "react"; -import { - Paper, - Table, - TableContainer, - TableHead, - TableBody, - TableRow, - TableCell, - Box, - Typography, - Skeleton, -} from "@mui/material"; -import { flexRender } from "@tanstack/react-table"; -import { useVirtualizer } from "@tanstack/react-virtual"; -import KeyboardArrowUpIcon from "@mui/icons-material/KeyboardArrowUp"; -import KeyboardArrowDownIcon from "@mui/icons-material/KeyboardArrowDown"; -import UnfoldMoreIcon from "@mui/icons-material/UnfoldMore"; -import SearchOffIcon from "@mui/icons-material/SearchOff"; -import { - TABLE_DEFAULTS, - ROW_SIZES, - SKELETON_COLUMNS, -} from "../../constants/defaults"; -import { alpha } from "@mui/material/styles"; -import TableOptions from "../DisplayOptions/DisplayOptions"; -import ColumnSelector from "../ColumnSelector/ColumnSelector"; - -const NoResultsFound = () => ( - - - - No models found - - - Try modifying your filters or search to see more models. - - -); - -const TableSkeleton = ({ rowSize = "normal" }) => { - const currentRowHeight = Math.floor(ROW_SIZES[rowSize]); - const headerHeight = Math.floor(currentRowHeight * 1.25); - const skeletonRows = 10; - - return ( - - `1px solid ${alpha( - theme.palette.divider, - theme.palette.mode === "dark" ? 0.05 : 0.1 - )}`, - borderRadius: 1, - }} - > - - - - {SKELETON_COLUMNS.map((width, index) => ( - 3 ? "right" : "left", - borderRight: (theme) => `1px solid ${theme.palette.divider}`, - "&:last-child": { - borderRight: "none", - }, - position: "sticky", - top: 0, - backgroundColor: (theme) => theme.palette.background.paper, - zIndex: 2, - }} - /> - ))} - - - - {[...Array(skeletonRows)].map((_, index) => ( - - index % 2 === 0 ? "transparent" : theme.palette.action.hover, - }} - > - {SKELETON_COLUMNS.map((width, cellIndex) => ( - - `1px solid ${theme.palette.divider}`, - "&:last-child": { - borderRight: "none", - }, - }} - > - 3 ? "auto" : 0, - backgroundColor: (theme) => - alpha(theme.palette.text.primary, 0.11), - "&::after": { - background: (theme) => - `linear-gradient(90deg, ${alpha( - theme.palette.text.primary, - 0.11 - )}, ${alpha( - theme.palette.text.primary, - 0.14 - )}, ${alpha(theme.palette.text.primary, 0.11)})`, - }, - }} - /> - - ))} - - ))} - -
-
- ); -}; - -const TableControls = React.memo( - ({ - loading, - rowSize, - onRowSizeChange, - scoreDisplay, - onScoreDisplayChange, - averageMode, - onAverageModeChange, - rankingMode, - onRankingModeChange, - hasTableOptionsChanges, - searchParams, - setSearchParams, - table, - handleColumnReset, - hasColumnFilterChanges, - onColumnVisibilityChange, - }) => ( - - - - - ) -); - -TableControls.displayName = "TableControls"; - -const LeaderboardTable = ({ - table, - rowSize = "normal", - loading = false, - hasTableOptionsChanges, - hasColumnFilterChanges, - onColumnVisibilityChange, - scoreDisplay, - onScoreDisplayChange, - averageMode, - onAverageModeChange, - rankingMode, - onRankingModeChange, - onRowSizeChange, - searchParams, - setSearchParams, - pinnedModels = [], -}) => { - const { rows } = table.getRowModel(); - const parentRef = useRef(null); - - const currentRowHeight = useMemo(() => ROW_SIZES[rowSize], [rowSize]); - const headerHeight = useMemo( - () => Math.floor(currentRowHeight * 1.25), - [currentRowHeight] - ); - - // Separate pinned rows from normal rows while preserving original order - const pinnedRows = useMemo(() => { - const pinnedModelRows = rows.filter((row) => row.original.isPinned); - // Sort pinned models according to their original order in pinnedModels - return pinnedModelRows.sort((a, b) => { - const aIndex = pinnedModels.indexOf(a.original.id); - const bIndex = pinnedModels.indexOf(b.original.id); - return aIndex - bIndex; - }); - }, [rows, pinnedModels]); - - const unpinnedRows = useMemo( - () => rows.filter((row) => !row.original.isPinned), - [rows] - ); - const pinnedHeight = useMemo( - () => pinnedRows.length * currentRowHeight, - [pinnedRows.length, currentRowHeight] - ); - - const virtualizerOptions = useMemo( - () => ({ - count: unpinnedRows.length, - getScrollElement: () => parentRef.current, - estimateSize: () => currentRowHeight, - overscan: 15, - scrollMode: "sync", - scrollPaddingStart: pinnedHeight, - scrollPaddingEnd: 0, - initialRect: { width: 0, height: currentRowHeight * 15 }, - }), - [currentRowHeight, unpinnedRows.length, pinnedHeight] - ); - - const rowVirtualizer = useVirtualizer(virtualizerOptions); - - const virtualRows = rowVirtualizer.getVirtualItems(); - - // Adjust paddings to account for pinned rows - const paddingTop = virtualRows.length > 0 ? virtualRows[0].start : 0; - const paddingBottom = - virtualRows.length > 0 - ? unpinnedRows.length * currentRowHeight - - virtualRows[virtualRows.length - 1].end - : 0; - - // Handle column reset - const handleColumnReset = useCallback(() => { - onColumnVisibilityChange(TABLE_DEFAULTS.COLUMNS.DEFAULT_VISIBLE); - }, [onColumnVisibilityChange]); - - const cellStyles = (theme) => ({ - borderRight: `1px solid ${alpha( - theme.palette.divider, - theme.palette.mode === "dark" ? 0.05 : 0.1 - )}`, - "&:last-child": { - borderRight: "none", - }, - whiteSpace: "nowrap", - overflow: "hidden", - textOverflow: "ellipsis", - padding: "8px 16px", - }); - - const headerCellStyles = (theme) => ({ - ...cellStyles(theme), - padding: "6px 16px", - height: "36px", - position: "sticky !important", - top: 0, - zIndex: 10, - "& > .header-content": { - display: "flex", - alignItems: "center", - width: "100%", - gap: "4px", - flexDirection: "row", - }, - }); - - const getSortingIcon = (column) => { - if ( - column.id === "rank" || - column.id === "model_type" || - column.id === "isPinned" - ) { - return null; - } - - if (!column.getIsSorted()) { - return ; - } - return column.getIsSorted() === "desc" ? 
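The paddingTop/paddingBottom computation above is the core of virtualizing a plain table: two spacer rows stand in for all off-screen rows. Condensed, with virtual row objects carrying start/end offsets as provided by @tanstack/react-virtual:

// Spacer heights for a virtualized table body: everything before the
// first rendered row becomes top padding, everything after the last
// rendered row becomes bottom padding.
function getSpacerHeights(virtualRows, rowCount, rowHeight) {
  if (virtualRows.length === 0) return { top: 0, bottom: 0 };
  const top = virtualRows[0].start;
  const bottom = rowCount * rowHeight - virtualRows[virtualRows.length - 1].end;
  return { top, bottom };
}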
( - - ) : ( - - ); - }; - - const renderHeaderContent = (header) => { - const sortIcon = getSortingIcon(header.column); - return ( - - {flexRender(header.column.columnDef.header, header.getContext())} - - {sortIcon || } - - - ); - }; - - const renderRow = (row, isSticky = false, stickyIndex = 0) => { - // Get row index in the sorted data model - const sortedIndex = table - .getSortedRowModel() - .rows.findIndex((r) => r.id === row.id); - - return ( - ({ - height: `${currentRowHeight}px !important`, - backgroundColor: isSticky - ? theme.palette.background.paper - : (sortedIndex + 1) % 2 === 0 - ? "transparent" - : alpha(theme.palette.mode === "dark" ? "#fff" : "#000", 0.02), - position: isSticky ? "sticky" : "relative", - top: isSticky - ? `${headerHeight + stickyIndex * currentRowHeight}px` - : "auto", - zIndex: isSticky ? 2 : 1, - boxShadow: isSticky - ? `0 1px 1px ${alpha( - theme.palette.common.black, - theme.palette.mode === "dark" ? 0.1 : 0.05 - )}` - : "none", - "&::after": isSticky - ? { - content: '""', - position: "absolute", - left: 0, - right: 0, - height: "1px", - bottom: -1, - backgroundColor: alpha( - theme.palette.divider, - theme.palette.mode === "dark" ? 0.1 : 0.2 - ), - zIndex: 1, - } - : {}, - })} - > - {row.getVisibleCells().map((cell) => ( - ({ - width: `${cell.column.columnDef.size}px !important`, - minWidth: `${cell.column.columnDef.size}px !important`, - height: `${currentRowHeight}px`, - backgroundColor: isSticky - ? theme.palette.background.paper - : "inherit", - borderBottom: isSticky - ? "none" - : `1px solid ${theme.palette.divider}`, - ...cellStyles(theme), - ...(cell.column.columnDef.meta?.cellStyle?.(cell.getValue()) || - {}), - "& .MuiBox-root": { - overflow: "visible", - }, - })} - > - {flexRender(cell.column.columnDef.cell, cell.getContext())} - - ))} - - ); - }; - - if (!loading && (!rows || rows.length === 0)) { - return ( - - - - - - - ); - } - - if (loading) { - return ( - - - - - - - ); - } - - return ( - - - - ({ - height: "100%", - overflow: "auto", - border: "none", - boxShadow: "none", - "&::-webkit-scrollbar": { - width: "8px", - height: "8px", - }, - "&::-webkit-scrollbar-thumb": { - backgroundColor: alpha( - theme.palette.common.black, - theme.palette.mode === "dark" ? 0.4 : 0.2 - ), - borderRadius: "4px", - }, - "&::-webkit-scrollbar-corner": { - backgroundColor: theme.palette.background.paper, - }, - willChange: "transform", - transform: "translateZ(0)", - WebkitOverflowScrolling: "touch", - scrollBehavior: "auto", - })} - > - 0 ? "fixed" : "fixed", - border: "none", - "& td, & th": - pinnedRows.length > 0 - ? { - width: `${100 / table.getAllColumns().length}%`, - } - : {}, - }} - > - - {table.getAllColumns().map((column, index) => ( - - ))} - - - theme.palette.background.paper, - "& th": { - backgroundColor: (theme) => theme.palette.background.paper, - }, - }} - > - {table.getHeaderGroups().map((headerGroup) => ( - - {headerGroup.headers.map((header) => ( - ({ - cursor: header.column.getCanSort() - ? "pointer" - : "default", - width: header.column.columnDef.size, - minWidth: header.column.columnDef.size, - ...headerCellStyles(theme), - textAlign: "left", - fontWeight: header.column.getIsSorted() ? 
700 : 400, - userSelect: "none", - height: `${headerHeight}px`, - padding: `${headerHeight * 0.25}px 16px`, - backgroundColor: theme.palette.background.paper, - })} - > - {renderHeaderContent(header)} - - ))} - - ))} - - - - {/* Pinned rows */} - {pinnedRows.map((row, index) => renderRow(row, true, index))} - - {/* Padding for virtualized rows */} - {paddingTop > 0 && ( - - - - )} - - {/* Virtualized unpinned rows */} - {virtualRows.map((virtualRow) => { - const row = unpinnedRows[virtualRow.index]; - if (!row) return null; - return renderRow(row); - })} - - {/* Bottom padding */} - {paddingBottom > 0 && ( - - - - )} - -
-
-
-
- ); -}; - -export { TableSkeleton }; -export default LeaderboardTable; diff --git a/frontend/src/pages/LeaderboardPage/components/Leaderboard/components/Table/hooks/useDataProcessing.js b/frontend/src/pages/LeaderboardPage/components/Leaderboard/components/Table/hooks/useDataProcessing.js deleted file mode 100644 index 6f5463755578ae260d6639403706e5f6071eb614..0000000000000000000000000000000000000000 --- a/frontend/src/pages/LeaderboardPage/components/Leaderboard/components/Table/hooks/useDataProcessing.js +++ /dev/null @@ -1,161 +0,0 @@ -import { useMemo } from "react"; -import { - useReactTable, - getSortedRowModel, - getCoreRowModel, - getFilteredRowModel, -} from "@tanstack/react-table"; -import { createColumns } from "../../../utils/columnUtils"; -import { - useAverageRange, - useColorGenerator, - useProcessedData, - useFilteredData, - useColumnVisibility, -} from "../../../hooks/useDataUtils"; - -export const useDataProcessing = ( - data, - searchValue, - selectedPrecisions, - selectedTypes, - paramsRange, - selectedBooleanFilters, - sorting, - rankingMode, - averageMode, - visibleColumns, - scoreDisplay, - pinnedModels, - onTogglePin, - setSorting, - isOfficialProviderActive -) => { - // Call hooks directly at root level - const { minAverage, maxAverage } = useAverageRange(data); - const getColorForValue = useColorGenerator(minAverage, maxAverage); - const processedData = useProcessedData(data, averageMode, visibleColumns); - const columnVisibility = useColumnVisibility(visibleColumns); - - // Memoize filters - const filterConfig = useMemo( - () => ({ - selectedPrecisions, - selectedTypes, - paramsRange, - searchValue, - selectedBooleanFilters, - rankingMode, - pinnedModels, - isOfficialProviderActive, - }), - [ - selectedPrecisions, - selectedTypes, - paramsRange, - searchValue, - selectedBooleanFilters, - rankingMode, - pinnedModels, - isOfficialProviderActive, - ] - ); - - // Call useFilteredData at root level - const filteredData = useFilteredData( - processedData, - filterConfig.selectedPrecisions, - filterConfig.selectedTypes, - filterConfig.paramsRange, - filterConfig.searchValue, - filterConfig.selectedBooleanFilters, - filterConfig.rankingMode, - filterConfig.pinnedModels, - filterConfig.isOfficialProviderActive - ); - - // Memoize columns creation - const columns = useMemo( - () => - createColumns( - getColorForValue, - scoreDisplay, - columnVisibility, - data.length, - averageMode, - searchValue, - rankingMode, - onTogglePin - ), - [ - getColorForValue, - scoreDisplay, - columnVisibility, - data.length, - averageMode, - searchValue, - rankingMode, - onTogglePin, - ] - ); - - // Memoize table configuration - const tableConfig = useMemo( - () => ({ - data: filteredData, - columns, - state: { - sorting: Array.isArray(sorting) ? 
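The typeColumnSort comparator above follows TanStack Table's custom sorting-function contract ((rowA, rowB, columnId) => number); wiring it into a column definition would look roughly like this (column shape illustrative, not from this repo):

// Hypothetical column definition: arrays sort before scalars, and
// arrays compare by their first element, per typeColumnSort above.
const modelTypeColumn = {
  accessorKey: "model_type",
  enableSorting: true,
  sortingFn: typeColumnSort,
};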
sorting : [], - columnVisibility, - }, - getCoreRowModel: getCoreRowModel(), - getFilteredRowModel: getFilteredRowModel(), - getSortedRowModel: getSortedRowModel(), - onSortingChange: setSorting, - enableColumnVisibility: true, - defaultColumn: { - sortingFn: (rowA, rowB, columnId) => { - const isDesc = sorting?.[0]?.desc; - - if (rowA.original.isPinned && rowB.original.isPinned) { - return ( - pinnedModels.indexOf(rowA.original.id) - - pinnedModels.indexOf(rowB.original.id) - ); - } - - if (isDesc) { - if (rowA.original.isPinned) return -1; - if (rowB.original.isPinned) return 1; - } else { - if (rowA.original.isPinned) return -1; - if (rowB.original.isPinned) return 1; - } - - const aValue = rowA.getValue(columnId); - const bValue = rowB.getValue(columnId); - - if (typeof aValue === "number" && typeof bValue === "number") { - return aValue - bValue; - } - - return String(aValue).localeCompare(String(bValue)); - }, - }, - }), - [filteredData, columns, sorting, columnVisibility, pinnedModels, setSorting] - ); - - const table = useReactTable(tableConfig); - - return { - table, - minAverage, - maxAverage, - getColorForValue, - processedData, - filteredData, - columns, - columnVisibility, - }; -}; diff --git a/frontend/src/pages/LeaderboardPage/components/Leaderboard/components/Table/hooks/useSorting.js b/frontend/src/pages/LeaderboardPage/components/Leaderboard/components/Table/hooks/useSorting.js deleted file mode 100644 index b6e24b528b4938ecd52e2a61624e028d3ffc8dc0..0000000000000000000000000000000000000000 --- a/frontend/src/pages/LeaderboardPage/components/Leaderboard/components/Table/hooks/useSorting.js +++ /dev/null @@ -1,16 +0,0 @@ -export const typeColumnSort = (rowA, rowB) => { - const aValue = rowA.getValue("model_type"); - const bValue = rowB.getValue("model_type"); - - // If both values are arrays, compare their first elements - if (Array.isArray(aValue) && Array.isArray(bValue)) { - return String(aValue[0] || "").localeCompare(String(bValue[0] || "")); - } - - // If one is array and other isn't, array comes first - if (Array.isArray(aValue)) return -1; - if (Array.isArray(bValue)) return 1; - - // If neither is array, compare as strings - return String(aValue || "").localeCompare(String(bValue || "")); -}; diff --git a/frontend/src/pages/LeaderboardPage/components/Leaderboard/components/shared/DropdownButton.js b/frontend/src/pages/LeaderboardPage/components/Leaderboard/components/shared/DropdownButton.js deleted file mode 100644 index 2badebd0fb115b1a0f78ff81abd41c2b384c9233..0000000000000000000000000000000000000000 --- a/frontend/src/pages/LeaderboardPage/components/Leaderboard/components/shared/DropdownButton.js +++ /dev/null @@ -1,137 +0,0 @@ -import React, { useState } from "react"; -import { Box, Popover, Portal, Typography, Skeleton } from "@mui/material"; -import { useTheme } from "@mui/material/styles"; -import { commonStyles } from "../../styles/common"; - -const DropdownButton = ({ - label, - icon: Icon, - closeIcon: CloseIcon, - hasChanges = false, - children, - defaultWidth = 340, - paperProps = {}, - buttonSx = {}, - loading = false, -}) => { - const theme = useTheme(); - const [anchorEl, setAnchorEl] = useState(null); - - const handleClick = (event) => { - event.stopPropagation(); - setAnchorEl(event.currentTarget); - }; - - const handleClose = (event) => { - if (event) { - event.stopPropagation(); - } - setAnchorEl(null); - }; - - if (loading) { - return ( - - ); - } - - return ( - - - {Boolean(anchorEl) && CloseIcon ? 
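The defaultColumn sortingFn deleted above reads more easily once one notices that both branches of its isDesc conditional are identical, so pinned rows sort first in either direction. A condensed restatement of the same comparator:

// Equivalent comparator: pinned rows always come first (in their
// pinnedModels order), then numbers compare numerically, else as strings.
function defaultSort(rowA, rowB, columnId, pinnedModels) {
  if (rowA.original.isPinned && rowB.original.isPinned) {
    return (
      pinnedModels.indexOf(rowA.original.id) -
      pinnedModels.indexOf(rowB.original.id)
    );
  }
  if (rowA.original.isPinned) return -1;
  if (rowB.original.isPinned) return 1;
  const a = rowA.getValue(columnId);
  const b = rowB.getValue(columnId);
  return typeof a === "number" && typeof b === "number"
    ? a - b
    : String(a).localeCompare(String(b));
}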
( - - ) : ( - - )} - - {label} - - - - - theme.palette.mode === "light" - ? "rgba(0, 0, 0, 0.12)" - : "rgba(255, 255, 255, 0.12)", - borderRadius: 1, - position: "relative", - boxShadow: (theme) => - `0px 4px 20px ${ - theme.palette.mode === "light" - ? "rgba(0, 0, 0, 0.1)" - : "rgba(255, 255, 255, 0.1)" - }`, - ...paperProps.sx, - }, - ...paperProps, - }} - anchorOrigin={{ - vertical: "bottom", - horizontal: "right", - }} - transformOrigin={{ - vertical: "top", - horizontal: "right", - }} - slotProps={{ - backdrop: { - sx: { - backgroundColor: "transparent", - }, - }, - }} - > - {children} - - - - ); -}; - -export default DropdownButton; diff --git a/frontend/src/pages/LeaderboardPage/components/Leaderboard/constants/defaults.js b/frontend/src/pages/LeaderboardPage/components/Leaderboard/constants/defaults.js deleted file mode 100644 index edfa7be429120862896260cdecd4b7a752e90e5a..0000000000000000000000000000000000000000 --- a/frontend/src/pages/LeaderboardPage/components/Leaderboard/constants/defaults.js +++ /dev/null @@ -1,380 +0,0 @@ -import { MODEL_TYPE_ORDER } from "./modelTypes"; - -// Time constants (in milliseconds) -const TIME = { - CACHE_DURATION: 5 * 60 * 1000, // 5 minutes - DEBOUNCE: { - URL_PARAMS: 100, - SEARCH: 150, - RANGE_PICKER: 350, - }, -}; - -// Display constants -const DISPLAY = { - ROW_SIZES: { - normal: 45, - large: 60, - }, - SCORE_DISPLAY_OPTIONS: [ - { value: "normalized", label: "Normalized" }, - { value: "raw", label: "Raw" }, - ], - RANKING_MODE_OPTIONS: [ - { value: "static", label: "Static" }, - { value: "dynamic", label: "Dynamic" }, - ], -}; - -// Filter constants -const FILTERS = { - PRECISIONS: ["bfloat16", "float16", "4bit"], - SUBMISSION_PRECISIONS: [ - { value: "float16", label: "float16" }, - { value: "bfloat16", label: "bfloat16" }, - { value: "8bit", label: "8-bit" }, - { value: "4bit", label: "4-bit" }, - { value: "gptq", label: "GPTQ" }, - ], - PARAMS_RANGE: [-1, 140], - BOOLEAN_OPTIONS: [ - { - value: "is_moe", - label: "Mixture of Experts", - hide: true, - }, - { - value: "is_merged", - label: "Merged model", - hide: true, - }, - { - value: "is_flagged", - label: "Potentially contaminated model", - hide: true, - }, - { - value: "is_not_available_on_hub", - label: "Unavailable model", - hide: true, - }, - { - value: "is_official_provider", - label: "Only Official Providers", - hide: false, - }, - ], - HIGHLIGHT_OPTIONS: [ - { - value: "is_official_provider", - label: "Only Official Providers", - }, - ], -}; - -// Column size constants -const COLUMN_SIZES = { - RANK: 65, - TYPE_ICON: 65, - MODEL: 400, - AVERAGE_SCORE: 150, - BENCHMARK: 110, - CO2_COST: 140, - HUB_HEARTS: 140, - ARCHITECTURE: 210, - PRECISION: 140, - PARAMS: 160, - LICENSE: 160, - UPLOAD_DATE: 160, - SUBMISSION_DATE: 200, - GENERATION: 160, - BASE_MODEL: 390, - HUB_AVAILABILITY: 180, - OFFICIAL_PROVIDER: 240, - MOE: 200, - FLAG_STATUS: 160, - CHAT_TEMPLATE: 140, -}; - -// Column definitions with organized structure -const COLUMNS = { - FIXED: { - rank: { - group: "fixed", - size: COLUMN_SIZES.RANK, - defaultVisible: true, - label: "Rank", - }, - "model.type_icon": { - group: "fixed", - size: COLUMN_SIZES.TYPE_ICON, - defaultVisible: true, - label: "Type", - }, - id: { - group: "fixed", - size: COLUMN_SIZES.MODEL, - defaultVisible: true, - label: "Model", - }, - "model.average_score": { - group: "fixed", - size: COLUMN_SIZES.AVERAGE_SCORE, - defaultVisible: true, - label: "Average Score", - }, - }, - EVALUATION: { - "evaluations.ifeval.normalized_score": { - group: "evaluation", 
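The TIME.DEBOUNCE values defined above are plain millisecond budgets meant for debounced handlers; a typical consumer might look like this, with the debounce helper itself illustrative rather than from this repo:

// Generic trailing-edge debounce, applied with the SEARCH timing.
function debounce(fn, waitMs) {
  let timer;
  return (...args) => {
    clearTimeout(timer);
    timer = setTimeout(() => fn(...args), waitMs);
  };
}
const onSearchInput = debounce((value) => {
  console.log("search:", value);
}, 150); // TIME.DEBOUNCE.SEARCH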
- size: COLUMN_SIZES.BENCHMARK, - defaultVisible: true, - label: "IFEval", - }, - "evaluations.bbh.normalized_score": { - group: "evaluation", - size: COLUMN_SIZES.BENCHMARK, - defaultVisible: true, - label: "BBH", - }, - "evaluations.math.normalized_score": { - group: "evaluation", - size: COLUMN_SIZES.BENCHMARK, - defaultVisible: true, - label: "MATH", - }, - "evaluations.gpqa.normalized_score": { - group: "evaluation", - size: COLUMN_SIZES.BENCHMARK, - defaultVisible: true, - label: "GPQA", - }, - "evaluations.musr.normalized_score": { - group: "evaluation", - size: COLUMN_SIZES.BENCHMARK, - defaultVisible: true, - label: "MUSR", - }, - "evaluations.mmlu_pro.normalized_score": { - group: "evaluation", - size: COLUMN_SIZES.BENCHMARK, - defaultVisible: true, - label: "MMLU-PRO", - }, - }, - MODEL_INFO: { - "metadata.co2_cost": { - group: "model_info", - size: COLUMN_SIZES.CO2_COST, - defaultVisible: true, - label: "CO₂ Cost (kg)", - }, - "metadata.hub_hearts": { - group: "model_info", - size: COLUMN_SIZES.HUB_HEARTS, - defaultVisible: false, - label: "Hub ❤️", - }, - "model.architecture": { - group: "model_info", - size: COLUMN_SIZES.ARCHITECTURE, - defaultVisible: false, - label: "Architecture", - }, - "model.precision": { - group: "model_info", - size: COLUMN_SIZES.PRECISION, - defaultVisible: false, - label: "Precision", - }, - "metadata.params_billions": { - group: "model_info", - size: COLUMN_SIZES.PARAMS, - defaultVisible: false, - label: "Parameters (B)", - }, - "metadata.hub_license": { - group: "model_info", - size: COLUMN_SIZES.LICENSE, - defaultVisible: false, - label: "License", - }, - "model.has_chat_template": { - group: "model_info", - size: COLUMN_SIZES.CHAT_TEMPLATE, - defaultVisible: false, - label: "Chat Template", - }, - }, - ADDITIONAL_INFO: { - "metadata.upload_date": { - group: "additional_info", - size: COLUMN_SIZES.UPLOAD_DATE, - defaultVisible: false, - label: "Upload Date", - }, - "metadata.submission_date": { - group: "additional_info", - size: COLUMN_SIZES.SUBMISSION_DATE, - defaultVisible: false, - label: "Submission Date", - }, - "metadata.generation": { - group: "additional_info", - size: COLUMN_SIZES.GENERATION, - defaultVisible: false, - label: "Generation", - }, - "metadata.base_model": { - group: "additional_info", - size: COLUMN_SIZES.BASE_MODEL, - defaultVisible: false, - label: "Base Model", - }, - "features.is_not_available_on_hub": { - group: "additional_info", - size: COLUMN_SIZES.HUB_AVAILABILITY, - defaultVisible: false, - label: "Hub Availability", - }, - "features.is_official_provider": { - group: "additional_info", - size: COLUMN_SIZES.OFFICIAL_PROVIDER, - defaultVisible: false, - label: "Only Official Providers", - }, - "features.is_moe": { - group: "additional_info", - size: COLUMN_SIZES.MOE, - defaultVisible: false, - label: "Mixture of Experts", - }, - "features.is_flagged": { - group: "additional_info", - size: COLUMN_SIZES.FLAG_STATUS, - defaultVisible: false, - label: "Flag Status", - }, - }, -}; - -// Combine all columns for backward compatibility -const ALL_COLUMNS = { - ...COLUMNS.FIXED, - ...COLUMNS.EVALUATION, - ...COLUMNS.MODEL_INFO, - ...COLUMNS.ADDITIONAL_INFO, -}; - -// Column definitions for external use (maintaining the same interface) -const COLUMN_DEFINITIONS = { - ALL_COLUMNS, - COLUMN_GROUPS: { - "Evaluation Scores": Object.keys(COLUMNS.EVALUATION), - "Model Information": Object.keys(COLUMNS.MODEL_INFO), - "Additional Information": Object.keys(COLUMNS.ADDITIONAL_INFO), - }, - COLUMN_LABELS: 
Object.entries(ALL_COLUMNS).reduce((acc, [key, value]) => { - acc[key] = value.label; - return acc; - }, {}), - DEFAULT_VISIBLE: Object.entries(ALL_COLUMNS) - .filter(([_, value]) => value.defaultVisible) - .map(([key]) => key), - - // Restore the necessary getters - get FIXED() { - return Object.entries(ALL_COLUMNS) - .filter(([_, def]) => def.group === "fixed") - .map(([key]) => key); - }, - - get EVALUATION() { - return Object.entries(ALL_COLUMNS) - .filter(([_, def]) => def.group === "evaluation") - .map(([key]) => key); - }, - - get OPTIONAL() { - return Object.entries(ALL_COLUMNS) - .filter(([_, def]) => def.group !== "fixed" && def.group !== "evaluation") - .map(([key]) => key); - }, - - get COLUMN_SIZES() { - return Object.entries(ALL_COLUMNS).reduce( - (acc, [key, def]) => ({ - ...acc, - [key]: def.size, - }), - {} - ); - }, -}; - -// Export constants maintaining the same interface -export const FILTER_PRECISIONS = FILTERS.PRECISIONS; -export const SUBMISSION_PRECISIONS = FILTERS.SUBMISSION_PRECISIONS; -export const PARAMS_RANGE = FILTERS.PARAMS_RANGE; -export const CACHE_SETTINGS = { DURATION: TIME.CACHE_DURATION }; -export const PINNED_MODELS = []; -export const DEBOUNCE_TIMINGS = TIME.DEBOUNCE; -export const ROW_SIZES = DISPLAY.ROW_SIZES; -export const SCORE_DISPLAY_OPTIONS = DISPLAY.SCORE_DISPLAY_OPTIONS; -export const RANKING_MODE_OPTIONS = DISPLAY.RANKING_MODE_OPTIONS; -export const BOOLEAN_FILTER_OPTIONS = FILTERS.BOOLEAN_OPTIONS; -export const HIGHLIGHT_FILTER_OPTIONS = FILTERS.HIGHLIGHT_OPTIONS; -export { COLUMN_DEFINITIONS }; - -// Export defaults for backward compatibility -export const TABLE_DEFAULTS = { - ROW_SIZE: "normal", - SCORE_DISPLAY: "normalized", - AVERAGE_MODE: "all", - RANKING_MODE: "static", - SEARCH: { - PRECISIONS: FILTERS.PRECISIONS, - TYPES: MODEL_TYPE_ORDER, - PARAMS_RANGE: FILTERS.PARAMS_RANGE, - }, - DEFAULT_SELECTED: { - searchValue: "", - selectedPrecisions: FILTERS.PRECISIONS, - selectedTypes: MODEL_TYPE_ORDER, - paramsRange: FILTERS.PARAMS_RANGE, - selectedBooleanFilters: [], - }, - DEBOUNCE: TIME.DEBOUNCE, - COLUMNS: COLUMN_DEFINITIONS, - PINNED_MODELS: [], - CACHE_DURATION: TIME.CACHE_DURATION, -}; - -// Highlight colors for search and table -export const HIGHLIGHT_COLORS = [ - "#1f77b4", // blue - "#ff7f0e", // orange - "#2ca02c", // green - "#d62728", // red - "#9467bd", // purple - "#8c564b", // brown - "#e377c2", // pink - "#7f7f7f", // gray - "#bcbd22", // olive - "#17becf", // cyan -]; - -// Skeleton column widths (in pixels) -export const SKELETON_COLUMNS = [ - 40, // Checkbox - COLUMN_SIZES.RANK, // Rank - COLUMN_SIZES.TYPE_ICON, // Type icon - COLUMN_SIZES.MODEL, // Model name - COLUMN_SIZES.AVERAGE_SCORE, // Average score - COLUMN_SIZES.BENCHMARK, // Benchmark 1 - COLUMN_SIZES.BENCHMARK, // Benchmark 2 - COLUMN_SIZES.BENCHMARK, // Benchmark 3 - COLUMN_SIZES.BENCHMARK, // Benchmark 4 - COLUMN_SIZES.BENCHMARK, // Benchmark 5 - COLUMN_SIZES.BENCHMARK, // Benchmark 6 -]; diff --git a/frontend/src/pages/LeaderboardPage/components/Leaderboard/constants/modelTypes.js b/frontend/src/pages/LeaderboardPage/components/Leaderboard/constants/modelTypes.js deleted file mode 100644 index 46683b1e6d3a8b20e364260f579ce559a71e3e8b..0000000000000000000000000000000000000000 --- a/frontend/src/pages/LeaderboardPage/components/Leaderboard/constants/modelTypes.js +++ /dev/null @@ -1,79 +0,0 @@ -export const MODEL_TYPE_ORDER = [ - 'pretrained', - 'continuously pretrained', - 'fine-tuned', - 'chat', - 'merge', - 'multimodal' -]; - -export const 
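One wrinkle in the COLUMN_DEFINITIONS object above: DEFAULT_VISIBLE is computed once at module load, while FIXED, EVALUATION, OPTIONAL, and COLUMN_SIZES are getters that re-derive their result from ALL_COLUMNS on every property access. A quick usage sketch:

// Destructuring runs the getters at that moment.
const { DEFAULT_VISIBLE, FIXED, COLUMN_SIZES } = COLUMN_DEFINITIONS;
console.log(DEFAULT_VISIBLE.includes("rank")); // true: rank is defaultVisible
console.log(FIXED); // ["rank", "model.type_icon", "id", "model.average_score"]
console.log(COLUMN_SIZES["id"]); // 400, the MODEL column width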
MODEL_TYPES = { - 'pretrained': { - icon: '🟢', - label: 'Pretrained', - description: 'Base models trained on raw text data using self-supervised learning objectives', - order: 0 - }, - 'continuously pretrained': { - icon: '🟩', - label: 'Continuously Pretrained', - description: 'Base models with extended pretraining on additional data while maintaining original architecture', - order: 1 - }, - 'fine-tuned': { - icon: '🔶', - label: 'Fine-tuned', - description: 'Models specialized through task-specific training on curated datasets', - order: 2 - }, - 'chat': { - icon: '💬', - label: 'Chat', - description: 'Models optimized for conversation using various techniques: RLHF, DPO, IFT, SFT', - order: 3 - }, - 'merge': { - icon: '🤝', - label: 'Merge', - description: 'Models created by combining weights from multiple models', - order: 4 - }, - 'multimodal': { - icon: '🌸', - label: 'Multimodal', - description: 'Models capable of processing multiple types of input', - order: 5 - } -}; - -export const getModelTypeIcon = (type) => { - const cleanType = type.toLowerCase().trim(); - const matchedType = Object.entries(MODEL_TYPES).find(([key]) => - cleanType.includes(key) - ); - return matchedType ? matchedType[1].icon : '❓'; -}; - -export const getModelTypeLabel = (type) => { - const cleanType = type.toLowerCase().trim(); - const matchedType = Object.entries(MODEL_TYPES).find(([key]) => - cleanType.includes(key) - ); - return matchedType ? matchedType[1].label : type; -}; - -export const getModelTypeDescription = (type) => { - const cleanType = type.toLowerCase().trim(); - const matchedType = Object.entries(MODEL_TYPES).find(([key]) => - cleanType.includes(key) - ); - return matchedType ? matchedType[1].description : 'Unknown model type'; -}; - -export const getModelTypeOrder = (type) => { - const cleanType = type.toLowerCase().trim(); - const matchedType = Object.entries(MODEL_TYPES).find(([key]) => - cleanType.includes(key) - ); - return matchedType ? matchedType[1].order : Infinity; -}; \ No newline at end of file diff --git a/frontend/src/pages/LeaderboardPage/components/Leaderboard/constants/quickFilters.js b/frontend/src/pages/LeaderboardPage/components/Leaderboard/constants/quickFilters.js deleted file mode 100644 index de74e7065becab032d996c91b858746f9e38247f..0000000000000000000000000000000000000000 --- a/frontend/src/pages/LeaderboardPage/components/Leaderboard/constants/quickFilters.js +++ /dev/null @@ -1,51 +0,0 @@ -export const QUICK_FILTER_PRESETS = [ - { - id: 'edge_device', - label: 'For Edge Devices', - shortDescription: 'Tiny models: Up to 3B parameters', - description: 'Lightweight models optimized for edge devices with limited resources. Ideal for mobile deployment or edge computing environments.', - filters: { - paramsRange: [0, 3], - selectedBooleanFilters: ['is_for_edge_devices'] - } - }, - { - id: 'small_models', - label: 'For Consumers', - shortDescription: 'Smol-LMs: 3-7B parameters', - description: 'Lightweight models optimized for consumer hardware with up to one GPU. 
Ideal for running privately on your own hardware.', - filters: { - paramsRange: [3, 7], - selectedBooleanFilters: ['is_for_edge_devices'] - } - }, - { - id: 'medium_models', - label: 'Mid-range', - shortDescription: 'Medium-sized models: 7B-65B parameters', - description: 'Overall balance between performance and required resources.', - filters: { - paramsRange: [7, 65], - selectedBooleanFilters: [] - } - }, - { - id: 'large_models', - label: 'For the GPU-rich', - shortDescription: 'Large models: 65B+ parameters', - description: 'Large-scale models offering (in theory) the best performance but requiring significant resources. They require dedicated infrastructure.', - filters: { - paramsRange: [65, 141], - selectedBooleanFilters: [] - } - }, - { - id: 'official_providers', - label: 'Only Official Providers', - shortDescription: 'Officially provided models', - description: 'Models that are provided and maintained by their original creators or organizations.', - filters: { - selectedBooleanFilters: ['is_official_provider'] - } - } -]; \ No newline at end of file diff --git a/frontend/src/pages/LeaderboardPage/components/Leaderboard/constants/tooltips.js b/frontend/src/pages/LeaderboardPage/components/Leaderboard/constants/tooltips.js deleted file mode 100644 index 06f311739cb6a46f0477746bf47ae59732252b44..0000000000000000000000000000000000000000 --- a/frontend/src/pages/LeaderboardPage/components/Leaderboard/constants/tooltips.js +++ /dev/null @@ -1,386 +0,0 @@ -import { Box, Typography } from "@mui/material"; - -const createTooltipContent = (title, items) => ( - - - {title} - - - {items.map(({ label, description, subItems }, index) => (
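Tying these presets back to the usePresets hook deleted earlier, applying one is a lookup plus a call (illustrative usage, names as defined above):

// Find a preset by id and hand it to the hook's handler;
// passing null resets the filters to their defaults.
const edgePreset = QUICK_FILTER_PRESETS.find((p) => p.id === "edge_device");
handlePresetChange(edgePreset ?? null);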
  • - - {label}: {description} - {subItems && ( - - {subItems.map((item, subIndex) => ( -
  • - - {item} - -
  • - ))} -
    - )} - - - ))} -
   - -); - -export const COLUMN_TOOLTIPS = { - AVERAGE: createTooltipContent("Average score across all benchmarks:", [ - { - label: "Calculation", - description: "Weighted average of normalized scores from all benchmarks", - subItems: [ - "Each benchmark is normalized to a 0-100 scale", - "All normalized benchmarks are then averaged together", - ], - }, - ]), - - IFEVAL: createTooltipContent("Instruction-Following Evaluation (IFEval):", [ - { - label: "Purpose", - description: - "Tests model's ability to follow explicit formatting instructions", - subItems: ["Instruction following", "Formatting", "Generation"], - }, - { - label: "Scoring: Accuracy", - description: "Was the requested format strictly respected?", - }, - ]), - - BBH: createTooltipContent("Big Bench Hard (BBH):", [ - { - label: "Overview", - description: "Collection of tasks that are challenging for LLMs, across domains, for example:", - subItems: [ - "Language understanding", - "Mathematical reasoning", - "Common sense and world knowledge", - ], - }, - { - label: "Scoring: Accuracy", - description: - "Was the correct choice selected among the options?", - }, - ]), - - MATH: createTooltipContent( - "Mathematics Aptitude Test of Heuristics (MATH), level 5:", - [ - { - label: "Content", - description: "High-school-level competition mathematics problems", - subItems: ["Complex algebra", "Geometry problems", "Advanced calculus"], - }, - { - label: "Scoring: Exact match", - description: - "Was the generated solution correct and in the expected format?", - }, - ] - ), - - GPQA: createTooltipContent("Graduate-Level Google-Proof Q&A (GPQA):", [ - { - label: "Focus", - description: "PhD-level knowledge multiple choice questions in science", - subItems: [ - "Chemistry", - "Biology", - "Physics", - ], - }, - { - label: "Scoring: Accuracy", - description: - "Was the correct choice selected among the options?", - }, - ]), - - MUSR: createTooltipContent("Multistep Soft Reasoning (MuSR):", [ - { - label: "Scope", - description: "Reasoning over and understanding of long texts", - subItems: [ - "Language understanding", - "Reasoning capabilities", - "Long context reasoning", - ], - }, - { - label: "Scoring: Accuracy", - description: - "Was the correct choice selected among the options?", - }, - ]), - - MMLU_PRO: createTooltipContent( - "Massive Multitask Language Understanding - Professional (MMLU-Pro):", - [ - { - label: "Coverage", - description: "Expertly reviewed multiple-choice questions across domains, for example:", - subItems: [ - "Medicine and healthcare", - "Law and ethics", - "Engineering", - "Mathematics", - ], - }, - { - label: "Scoring: Accuracy", - description: - "Was the correct choice selected among the options?", - }, - ] - ), - - ARCHITECTURE: createTooltipContent("Model Architecture Information:", [ - { - label: "Definition", - description: "The fundamental structure and design of the model", - subItems: [ - "Pretrained: Foundational models, initially trained on large datasets without task-specific tuning, serving as a versatile base for further development.", - "Continuously Pretrained: Base models trained with a data mix evolving as the model is trained, with the addition of specialized data during the last training steps.", - "Fine-tuned: Base models, fine-tuned on specialized domain data (legal, medical, ...), and optimized for particular tasks.", - "Chat: Models fine-tuned with IFT, RLHF, DPO, and other techniques, to handle conversational contexts effectively.", - "Merged: Combining multiple models through weights averaging or 
similar methods.", - "Multimodal: Models which can handle several modalities (text & image/audio/video/...). We only evaluate the text capabilities.", - ], - }, - { - label: "Impact", - description: "How architecture affects model capabilities", - subItems: [ - "Base models are expected to perform less well on instruction-following evaluations, like IFEval.", - "Fine-tuned and chat models can be more verbose and more chatty than base models.", - "Merged models tend to exhibit good performance on benchmarks, which does not translate to real-world situations.", - ], - }, - ]), - - PRECISION: createTooltipContent("Numerical Precision Format:", [ - { - label: "Overview", - description: - "Data format used to store model weights and perform computations", - subItems: [ - "bfloat16: Half precision (Brain Float format), good for stability", - "float16: Half precision", - "8bit/4bit: Quantized formats, for efficiency", - "GPTQ/AWQ: Quantized methods", - ], - }, - { - label: "Impact", - description: "How precision affects model deployment", - subItems: [ - "Higher precision = better accuracy but more memory usage", - "Lower precision = faster inference and smaller size", - "Trade-off between model quality and resource usage", - ], - }, - ]), - - FLAGS: createTooltipContent("Model Flags and Special Features:", [ - { - label: "Filters", - subItems: [ - "Mixture of Experts: Uses a MoE architecture", - "Merged models: Created by averaging other models", - "Contaminated: Flagged by users from the community for (possibly accidental) cheating", - "Unavailable: No longer on the hub (private, deleted) or missing a license tag", - ], - }, - { - label: "Purpose", - description: "Why do people want to hide these models?", - subItems: [ - "Mixture of Experts: These models can be too parameter heavy", - "Merged models: Performance on benchmarks tends to be inflated compared to real-life usage", - "Contaminated: Performance on benchmarks is inflated and does not reflect real-life usage", - ], - }, - ]), - - PARAMETERS: createTooltipContent("Model Parameters:", [ - { - label: "Measurement", - description: "Total number of trainable parameters in billions", - subItems: [ - "Indicates model capacity and complexity", - "Correlates with computational requirements", - "Influences memory usage and inference speed", - ], - }, - ]), - - LICENSE: createTooltipContent("Model License Information:", [ - { - label: "Importance", - description: "Legal terms governing model usage and distribution", - subItems: [ - "Commercial vs non-commercial use", - "Attribution requirements", - "Modification and redistribution rights", - "Liability and warranty terms", - ], - }, - ]), - - CO2_COST: createTooltipContent("Carbon Dioxide Emissions:", [ - { - label: "What is it?", - description: "CO₂ emissions of the model evaluation", - subItems: [ - "Only focuses on model inference for our specific setup", - "Considers data center location and energy mix", - "Allows equivalent comparison of models on our use case", - ], - }, - { - label: "Why it matters", - description: "Environmental impact of AI model training", - subItems: [ - "Large models can have significant carbon footprints", - "Helps make informed choices about model selection", - ], - }, - { - label: "Learn more", - description: - "For detailed information about our CO₂ calculation methodology, visit:", - subItems: [ - - Carbon Emissions Documentation ↗ - , - ], - }, - ]), -}; - -export const UI_TOOLTIPS = { - COLUMN_SELECTOR: "Choose which columns to display in the table", - 
DISPLAY_OPTIONS: createTooltipContent("Table Display Options", [ - { - label: "Overview", - description: "Configure how the table displays data and information", - subItems: [ - "Row size and layout", - "Score display format", - "Ranking calculation", - "Average score computation", - ], - }, - ]), - SEARCH_BAR: createTooltipContent("Advanced Model Search", [ - { - label: "Name Search", - description: "Search directly by model name", - subItems: [ - "Supports regular expressions (e.g., ^mistral.*7b)", - "Case sensitive", - ], - }, - { - label: "Field Search", - description: "Use @field:value syntax for precise filtering", - subItems: [ - "@architecture:llama - Filter by architecture", - "@license:mit - Filter by license", - "@precision:float16 - Filter by precision", - "@type:chat - Filter by model type", - ], - }, - { - label: "Multiple Searches", - description: "Combine multiple criteria using semicolons", - subItems: [ - "meta @license:mit; @architecture:llama", - "^mistral.*7b; @precision:float16", - ], - }, - ]), - QUICK_FILTERS: createTooltipContent( - "Filter models based on their size and applicable hardware:", - [ - { - label: "Edge devices (Up to 3B)", - description: - "Efficient models for edge devices, optimized for blazing fast inference.", - }, - { - label: "Smol Models (3B-7B)", - description: - "Efficient models for consumer hardware, optimized for fast inference.", - }, - { - label: "Mid-range models (7B-65B)", - description: - "A bit of everything here, with overall balanced performance and resource usage around 30B.", - }, - { - label: "GPU-rich models (65B+)", - description: - "State-of-the-art performance for complex tasks, requires significant computing power.", - }, - { - label: "Official Providers", - description: - "Models directly maintained by their original creators, ensuring reliability and up-to-date performance.", - }, - ] - ), - ROW_SIZE: { - title: "Row Size", - description: - "Adjust the height of table rows. Compact is ideal for viewing more data at once, while Large provides better readability and touch targets.", - }, - SCORE_DISPLAY: { - title: "Score Display", - description: - "Choose between normalized scores (0-100% scale for easy comparison) or raw scores (actual benchmark results). Normalized scores help compare performance across different benchmarks, while raw scores show actual benchmark outputs.", - }, - RANKING_MODE: { - title: "Ranking Mode", - description: - "Choose between static ranking (original position in the full leaderboard) or dynamic ranking (position based on current filters and sorting).", - }, - AVERAGE_SCORE: { - title: "Average Score Calculation", - description: - "Define how the average score is calculated. 'All Scores' uses all benchmarks, while 'Visible Only' calculates the average using only the visible benchmark columns.", - }, -}; - -export const getTooltipStyle = {}; - -export const TABLE_TOOLTIPS = { - HUB_LINK: (modelName) => `View ${modelName} on Hugging Face Hub`, - EVAL_RESULTS: (modelName) => - `View detailed evaluation results for ${modelName}`, - POSITION_CHANGE: (change) => - `${Math.abs(change)} position${Math.abs(change) > 1 ? "s" : ""} ${ - change > 0 ? 
"up" : "down" - }`, - METADATA: { - TYPE: (type) => type || "-", - ARCHITECTURE: (arch) => arch || "-", - PRECISION: (precision) => precision || "-", - LICENSE: (license) => license || "-", - UPLOAD_DATE: (date) => date || "-", - SUBMISSION_DATE: (date) => date || "-", - BASE_MODEL: (model) => model || "-", - }, -}; diff --git a/frontend/src/pages/LeaderboardPage/components/Leaderboard/context/LeaderboardContext.js b/frontend/src/pages/LeaderboardPage/components/Leaderboard/context/LeaderboardContext.js deleted file mode 100644 index e41599900865195536321cd8ae121d0d794cb94a..0000000000000000000000000000000000000000 --- a/frontend/src/pages/LeaderboardPage/components/Leaderboard/context/LeaderboardContext.js +++ /dev/null @@ -1,760 +0,0 @@ -import React, { - createContext, - useContext, - useReducer, - useEffect, - useMemo, - useCallback, -} from "react"; -import { useSearchParams, useLocation } from "react-router-dom"; -import { MODEL_TYPE_ORDER } from "../constants/modelTypes"; -import { FILTER_PRECISIONS, TABLE_DEFAULTS } from "../constants/defaults"; - -// Create context -const LeaderboardContext = createContext(); - -// Define default filter values -const DEFAULT_FILTERS = { - search: "", - precisions: FILTER_PRECISIONS, - types: MODEL_TYPE_ORDER, - paramsRange: [-1, 140], - booleanFilters: [], - isOfficialProviderActive: false, -}; - -// Define default display values -const DEFAULT_DISPLAY = { - rowSize: TABLE_DEFAULTS.ROW_SIZE, - scoreDisplay: TABLE_DEFAULTS.SCORE_DISPLAY, - averageMode: TABLE_DEFAULTS.AVERAGE_MODE, - rankingMode: TABLE_DEFAULTS.RANKING_MODE, - visibleColumns: TABLE_DEFAULTS.COLUMNS.DEFAULT_VISIBLE, -}; - -// Create initial counter structure -const createInitialCounts = () => { - const modelTypes = {}; - MODEL_TYPE_ORDER.forEach((type) => { - modelTypes[type] = 0; - }); - - const precisions = {}; - FILTER_PRECISIONS.forEach((precision) => { - precisions[precision] = 0; - }); - - return { - modelTypes, - precisions, - officialProviders: 0, - mixtureOfExperts: 0, - flagged: 0, - merged: 0, - notOnHub: 0, - parameterRanges: { - edge: 0, - small: 0, - medium: 0, - large: 0, - }, - }; -}; - -// Define initial state -const initialState = { - models: [], - loading: true, - countsReady: false, - error: null, - filters: DEFAULT_FILTERS, - display: DEFAULT_DISPLAY, - filtersExpanded: false, - pinnedModels: [], - filterCounts: { - normal: createInitialCounts(), - officialOnly: createInitialCounts(), - }, -}; - -// Function to normalize parameter value -const normalizeParams = (params) => { - const numParams = Number(params); - if (isNaN(numParams)) return null; - return Math.round(numParams * 100) / 100; -}; - -// Function to check if a parameter count is within a range -const isInParamRange = (params, range) => { - if (range[0] === -1 && range[1] === 140) return true; - const normalizedParams = normalizeParams(params); - if (normalizedParams === null) return false; - return normalizedParams >= range[0] && normalizedParams < range[1]; -}; - -// Function to check if a model matches filter criteria -const modelMatchesFilters = (model, filters) => { - // Filter by precision - if ( - filters.precisions.length > 0 && - !filters.precisions.includes(model.model.precision) - ) { - return false; - } - - // Filter by type - if (filters.types.length > 0) { - const modelType = model.model.type?.toLowerCase().trim(); - if (!filters.types.some((type) => modelType?.includes(type))) { - return false; - } - } - - // Filter by parameters - const params = Number( - model.metadata?.params_billions 
|| model.features?.params_billions - ); - if (!isInParamRange(params, filters.paramsRange)) return false; - - // Filter by search - if (filters.search) { - const searchLower = filters.search.toLowerCase(); - const modelName = model.model.name.toLowerCase(); - if (!modelName.includes(searchLower)) return false; - } - - // Boolean filters - if (filters.booleanFilters.length > 0) { - return filters.booleanFilters.every((filter) => { - const filterValue = typeof filter === "object" ? filter.value : filter; - - // Maintainer's Highlight keeps positive logic - if (filterValue === "is_official_provider") { - return model.features[filterValue]; - } - - // For all other filters, invert the logic - if (filterValue === "is_not_available_on_hub") { - return model.features[filterValue]; - } - - return !model.features[filterValue]; - }); - } - - return true; -}; - -// Function to calculate filtered model counts -const calculateFilteredCounts = ( - allRows, - totalPinnedCount, - filters, - filteredCount -) => { - // If no table, use raw filteredCount - if (!allRows) { - return { - currentFilteredCount: - typeof filteredCount === "number" ? filteredCount : 0, - totalPinnedCount: totalPinnedCount || 0, - }; - } - - // 1. Total number of rows (models matching filters) - const totalFilteredCount = allRows.length; - - // 2. Number of pinned models that also match filters - // These models are already included in totalFilteredCount, so we need to subtract them - // to avoid counting them twice - const pinnedMatchingFilters = allRows.filter((row) => { - const model = row.original; - return model.isPinned && modelMatchesFilters(model, filters); - }).length; - - return { - // Subtract pinned models that match filters - // as they are already displayed separately with "+X" - currentFilteredCount: totalFilteredCount - pinnedMatchingFilters, - totalPinnedCount: totalPinnedCount || 0, - }; -}; - -// Function to calculate counters -const calculateModelCounts = (models) => { - const normalCounts = createInitialCounts(); - const officialOnlyCounts = createInitialCounts(); - - models.forEach((model) => { - const isOfficial = - model.features?.is_official_provider || - model.metadata?.is_official_provider; - const countsToUpdate = [normalCounts]; - - if (isOfficial) { - countsToUpdate.push(officialOnlyCounts); - } - - countsToUpdate.forEach((counts) => { - // Model type - if (model.model?.type) { - const cleanType = model.model.type.toLowerCase().trim(); - const matchedType = MODEL_TYPE_ORDER.find((key) => - cleanType.includes(key) - ); - if (matchedType) { - counts.modelTypes[matchedType]++; - } - } - - // Precision - if (model.model?.precision) { - counts.precisions[model.model.precision]++; - } - - // Boolean filters - if ( - model.features?.is_official_provider || - model.metadata?.is_official_provider - ) - counts.officialProviders++; - if (model.features?.is_moe || model.metadata?.is_moe) - counts.mixtureOfExperts++; - if (model.features?.is_flagged || model.metadata?.is_flagged) - counts.flagged++; - if (model.features?.is_merged || model.metadata?.is_merged) - counts.merged++; - if ( - !( - model.features?.is_not_available_on_hub || - model.metadata?.is_not_available_on_hub - ) - ) - counts.notOnHub++; - - // Parameter ranges - const params = Number( - model.metadata?.params_billions || model.features?.params_billions - ); - if (!isNaN(params)) { - if (isInParamRange(params, [0, 3])) counts.parameterRanges.edge++; - if (isInParamRange(params, [3, 7])) counts.parameterRanges.small++; - if (isInParamRange(params, 
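A few worked cases for the isInParamRange/normalizeParams pair defined above, since the semantics are easy to misread: values are rounded to two decimals first, the interval is half-open, and [-1, 140] acts as a "no filter" sentinel:

// Behavior per the definitions above.
isInParamRange(2.99, [0, 3]);  // true: 2.99 lies within [0, 3)
isInParamRange(3, [0, 3]);     // false: the upper bound is excluded
isInParamRange(2.999, [0, 3]); // false: rounds to 3.00 before comparing
isInParamRange(70, [-1, 140]); // true: the sentinel range matches everything
isInParamRange("n/a", [0, 3]); // false: non-numeric params normalize to null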
[7, 65])) counts.parameterRanges.medium++; - if (isInParamRange(params, [65, 141])) counts.parameterRanges.large++; - } - }); - }); - - return { - normal: normalCounts, - officialOnly: officialOnlyCounts, - }; -}; - -// Define reducer -const reducer = (state, action) => { - switch (action.type) { - case "SET_MODELS": - const newCounts = calculateModelCounts(action.payload); - return { - ...state, - models: action.payload, - filterCounts: newCounts, - countsReady: true, - loading: false, - }; - - case "SET_LOADING": - return { - ...state, - loading: action.payload, - ...(action.payload ? { countsReady: false } : {}), - }; - - case "SET_ERROR": - return { - ...state, - error: action.payload, - loading: false, - }; - - case "SET_FILTER": - return { - ...state, - filters: { - ...state.filters, - [action.key]: action.value, - }, - }; - - case "SET_DISPLAY_OPTION": - return { - ...state, - display: { - ...state.display, - [action.key]: action.value, - }, - }; - - case "TOGGLE_PINNED_MODEL": - const modelKey = action.payload; - const pinnedModels = [...state.pinnedModels]; - const modelIndex = pinnedModels.indexOf(modelKey); - - if (modelIndex === -1) { - pinnedModels.push(modelKey); - } else { - pinnedModels.splice(modelIndex, 1); - } - - return { - ...state, - pinnedModels, - }; - - case "SET_PINNED_MODELS": - return { - ...state, - pinnedModels: action.payload, - }; - - case "TOGGLE_FILTERS_EXPANDED": - return { - ...state, - filtersExpanded: !state.filtersExpanded, - }; - - case "TOGGLE_OFFICIAL_PROVIDER": - return { - ...state, - filters: { - ...state.filters, - isOfficialProviderActive: !state.filters.isOfficialProviderActive, - }, - }; - - case "RESET_FILTERS": - return { - ...state, - filters: DEFAULT_FILTERS, - }; - - case "RESET_ALL": - return { - ...state, - filters: DEFAULT_FILTERS, - display: DEFAULT_DISPLAY, - pinnedModels: [], - }; - - default: - return state; - } -}; - -// Provider component -const LeaderboardProvider = ({ children }) => { - const [state, dispatch] = useReducer(reducer, initialState); - const [searchParams, setSearchParams] = useSearchParams(); - const location = useLocation(); - - // Effect to load initial values from URL - useEffect(() => { - // Skip URL sync if we're resetting - if (location.state?.skipUrlSync) return; - - const loadFromUrl = () => { - // Load filters - const searchFromUrl = searchParams.get("search"); - if (searchFromUrl) { - dispatch({ type: "SET_FILTER", key: "search", value: searchFromUrl }); - } - - const paramsFromUrl = searchParams.get("params")?.split(",").map(Number); - if (paramsFromUrl?.length === 2) { - dispatch({ - type: "SET_FILTER", - key: "paramsRange", - value: paramsFromUrl, - }); - } - - const filtersFromUrl = - searchParams.get("filters")?.split(",").filter(Boolean) || []; - if (filtersFromUrl.length > 0) { - dispatch({ - type: "SET_FILTER", - key: "booleanFilters", - value: filtersFromUrl, - }); - } - - const precisionsFromUrl = searchParams - .get("precision") - ?.split(",") - .filter(Boolean); - if (precisionsFromUrl) { - dispatch({ - type: "SET_FILTER", - key: "precisions", - value: precisionsFromUrl, - }); - } - - const typesFromUrl = searchParams - .get("types") - ?.split(",") - .filter(Boolean); - if (typesFromUrl) { - dispatch({ type: "SET_FILTER", key: "types", value: typesFromUrl }); - } - - const officialFromUrl = searchParams.get("official") === "true"; - if (officialFromUrl) { - dispatch({ - type: "SET_FILTER", - key: "isOfficialProviderActive", - value: true, - }); - } - - // Load pinned models - const 
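The calculateModelCounts function above updates one or two accumulators per model in a single pass rather than filtering the list twice; the core pattern in isolation (names illustrative):

// Single-pass aggregation into parallel "all models" and
// "official only" accumulators, mirroring countsToUpdate above.
function countByBucket(models, isOfficial, bucketOf) {
  const normal = {};
  const officialOnly = {};
  for (const model of models) {
    const targets = isOfficial(model) ? [normal, officialOnly] : [normal];
    const bucket = bucketOf(model);
    for (const counts of targets) {
      counts[bucket] = (counts[bucket] ?? 0) + 1;
    }
  }
  return { normal, officialOnly };
}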
pinnedFromUrl = - searchParams.get("pinned")?.split(",").filter(Boolean) || []; - if (pinnedFromUrl.length > 0) { - dispatch({ type: "SET_PINNED_MODELS", payload: pinnedFromUrl }); - } - - // Load visible columns - const columnsFromUrl = searchParams - .get("columns") - ?.split(",") - .filter(Boolean); - if (columnsFromUrl) { - dispatch({ - type: "SET_DISPLAY_OPTION", - key: "visibleColumns", - value: columnsFromUrl, - }); - } - - // Load table options - const rowSizeFromUrl = searchParams.get("rowSize"); - if (rowSizeFromUrl) { - dispatch({ - type: "SET_DISPLAY_OPTION", - key: "rowSize", - value: rowSizeFromUrl, - }); - } - - const scoreDisplayFromUrl = searchParams.get("scoreDisplay"); - if (scoreDisplayFromUrl) { - dispatch({ - type: "SET_DISPLAY_OPTION", - key: "scoreDisplay", - value: scoreDisplayFromUrl, - }); - } - - const averageModeFromUrl = searchParams.get("averageMode"); - if (averageModeFromUrl) { - dispatch({ - type: "SET_DISPLAY_OPTION", - key: "averageMode", - value: averageModeFromUrl, - }); - } - - const rankingModeFromUrl = searchParams.get("rankingMode"); - if (rankingModeFromUrl) { - dispatch({ - type: "SET_DISPLAY_OPTION", - key: "rankingMode", - value: rankingModeFromUrl, - }); - } - }; - - loadFromUrl(); - }, [searchParams, location.state]); - - // Effect to synchronize filters with URL - useEffect(() => { - // Skip URL sync if we're resetting - if (location.state?.skipUrlSync) return; - - const newSearchParams = new URLSearchParams(searchParams); - const currentParams = searchParams.get("params")?.split(",").map(Number); - const currentFilters = - searchParams.get("filters")?.split(",").filter(Boolean) || []; - const currentSearch = searchParams.get("search"); - const currentPinned = - searchParams.get("pinned")?.split(",").filter(Boolean) || []; - const currentColumns = - searchParams.get("columns")?.split(",").filter(Boolean) || []; - const currentRowSize = searchParams.get("rowSize"); - const currentScoreDisplay = searchParams.get("scoreDisplay"); - const currentAverageMode = searchParams.get("averageMode"); - const currentRankingMode = searchParams.get("rankingMode"); - const currentOfficialProvider = searchParams.get("official") === "true"; - const currentPrecisions = - searchParams.get("precision")?.split(",").filter(Boolean) || []; - const currentTypes = - searchParams.get("types")?.split(",").filter(Boolean) || []; - - // Only update URL if values have changed - const paramsChanged = - !currentParams || - currentParams[0] !== state.filters.paramsRange[0] || - currentParams[1] !== state.filters.paramsRange[1]; - - const filtersChanged = - state.filters.booleanFilters.length !== currentFilters.length || - state.filters.booleanFilters.some((f) => !currentFilters.includes(f)); - - const searchChanged = state.filters.search !== currentSearch; - - const pinnedChanged = - state.pinnedModels.length !== currentPinned.length || - state.pinnedModels.some((m) => !currentPinned.includes(m)); - - const columnsChanged = - state.display.visibleColumns.length !== currentColumns.length || - state.display.visibleColumns.some((c) => !currentColumns.includes(c)); - - const rowSizeChanged = state.display.rowSize !== currentRowSize; - const scoreDisplayChanged = - state.display.scoreDisplay !== currentScoreDisplay; - const averageModeChanged = state.display.averageMode !== currentAverageMode; - const rankingModeChanged = state.display.rankingMode !== currentRankingMode; - const officialProviderChanged = - state.filters.isOfficialProviderActive !== currentOfficialProvider; - const 
precisionsChanged = - state.filters.precisions.length !== currentPrecisions.length || - state.filters.precisions.some((p) => !currentPrecisions.includes(p)); - const typesChanged = - state.filters.types.length !== currentTypes.length || - state.filters.types.some((t) => !currentTypes.includes(t)); - - if (paramsChanged) { - if ( - state.filters.paramsRange[0] !== -1 || - state.filters.paramsRange[1] !== 140 - ) { - newSearchParams.set("params", state.filters.paramsRange.join(",")); - } else { - newSearchParams.delete("params"); - } - } - - if (filtersChanged) { - if (state.filters.booleanFilters.length > 0) { - newSearchParams.set("filters", state.filters.booleanFilters.join(",")); - } else { - newSearchParams.delete("filters"); - } - } - - if (searchChanged) { - if (state.filters.search) { - newSearchParams.set("search", state.filters.search); - } else { - newSearchParams.delete("search"); - } - } - - if (pinnedChanged) { - if (state.pinnedModels.length > 0) { - newSearchParams.set("pinned", state.pinnedModels.join(",")); - } else { - newSearchParams.delete("pinned"); - } - } - - if (columnsChanged) { - if ( - JSON.stringify([...state.display.visibleColumns].sort()) !== - JSON.stringify([...TABLE_DEFAULTS.COLUMNS.DEFAULT_VISIBLE].sort()) - ) { - newSearchParams.set("columns", state.display.visibleColumns.join(",")); - } else { - newSearchParams.delete("columns"); - } - } - - if (rowSizeChanged) { - if (state.display.rowSize !== TABLE_DEFAULTS.ROW_SIZE) { - newSearchParams.set("rowSize", state.display.rowSize); - } else { - newSearchParams.delete("rowSize"); - } - } - - if (scoreDisplayChanged) { - if (state.display.scoreDisplay !== TABLE_DEFAULTS.SCORE_DISPLAY) { - newSearchParams.set("scoreDisplay", state.display.scoreDisplay); - } else { - newSearchParams.delete("scoreDisplay"); - } - } - - if (averageModeChanged) { - if (state.display.averageMode !== TABLE_DEFAULTS.AVERAGE_MODE) { - newSearchParams.set("averageMode", state.display.averageMode); - } else { - newSearchParams.delete("averageMode"); - } - } - - if (rankingModeChanged) { - if (state.display.rankingMode !== TABLE_DEFAULTS.RANKING_MODE) { - newSearchParams.set("rankingMode", state.display.rankingMode); - } else { - newSearchParams.delete("rankingMode"); - } - } - - if (officialProviderChanged) { - if (state.filters.isOfficialProviderActive) { - newSearchParams.set("official", "true"); - } else { - newSearchParams.delete("official"); - } - } - - if (precisionsChanged) { - if ( - JSON.stringify([...state.filters.precisions].sort()) !== - JSON.stringify([...FILTER_PRECISIONS].sort()) - ) { - newSearchParams.set("precision", state.filters.precisions.join(",")); - } else { - newSearchParams.delete("precision"); - } - } - - if (typesChanged) { - if ( - JSON.stringify([...state.filters.types].sort()) !== - JSON.stringify([...MODEL_TYPE_ORDER].sort()) - ) { - newSearchParams.set("types", state.filters.types.join(",")); - } else { - newSearchParams.delete("types"); - } - } - - if ( - paramsChanged || - filtersChanged || - searchChanged || - pinnedChanged || - columnsChanged || - rowSizeChanged || - scoreDisplayChanged || - averageModeChanged || - rankingModeChanged || - officialProviderChanged || - precisionsChanged || - typesChanged - ) { - // Update search params and let HashRouter handle the URL - setSearchParams(newSearchParams); - } - }, [state, searchParams, location.state]); - - const actions = useMemo( - () => ({ - setModels: (models) => dispatch({ type: "SET_MODELS", payload: models }), - setLoading: (loading) => - dispatch({ 
type: "SET_LOADING", payload: loading }), - setError: (error) => dispatch({ type: "SET_ERROR", payload: error }), - setFilter: (key, value) => dispatch({ type: "SET_FILTER", key, value }), - setDisplayOption: (key, value) => - dispatch({ type: "SET_DISPLAY_OPTION", key, value }), - togglePinnedModel: (modelKey) => - dispatch({ type: "TOGGLE_PINNED_MODEL", payload: modelKey }), - toggleOfficialProvider: () => - dispatch({ type: "TOGGLE_OFFICIAL_PROVIDER" }), - toggleFiltersExpanded: () => - dispatch({ type: "TOGGLE_FILTERS_EXPANDED" }), - resetFilters: () => { - dispatch({ type: "RESET_FILTERS" }); - const newParams = new URLSearchParams(searchParams); - [ - "filters", - "params", - "precision", - "types", - "official", - "search", - ].forEach((param) => { - newParams.delete(param); - }); - setSearchParams(newParams); - }, - resetAll: () => { - // Reset all state - dispatch({ type: "RESET_ALL" }); - // Clear all URL params with skipUrlSync flag - setSearchParams({}, { state: { skipUrlSync: true } }); - }, - }), - [searchParams, setSearchParams] - ); - - // Function to calculate counts (exposed via context) - const getFilteredCounts = useCallback( - (allRows, totalPinnedCount, filteredCount) => { - return calculateFilteredCounts( - allRows, - totalPinnedCount, - state.filters, - filteredCount - ); - }, - [state.filters] - ); - - // Also expose filtering function for reuse elsewhere - const checkModelMatchesFilters = useCallback( - (model) => { - return modelMatchesFilters(model, state.filters); - }, - [state.filters] - ); - - const value = useMemo( - () => ({ - state: { - ...state, - loading: state.loading || !state.countsReady, - }, - actions, - utils: { - getFilteredCounts, - checkModelMatchesFilters, - }, - }), - [state, actions, getFilteredCounts, checkModelMatchesFilters] - ); - - return ( - - {children} - - ); -}; - -// Hook to use context -const useLeaderboard = () => { - const context = useContext(LeaderboardContext); - if (!context) { - throw new Error("useLeaderboard must be used within a LeaderboardProvider"); - } - return context; -}; - -export { useLeaderboard }; -export default LeaderboardProvider; diff --git a/frontend/src/pages/LeaderboardPage/components/Leaderboard/hooks/useBatchedState.js b/frontend/src/pages/LeaderboardPage/components/Leaderboard/hooks/useBatchedState.js deleted file mode 100644 index ad11c91393ca9e413853ae440154b948293103e9..0000000000000000000000000000000000000000 --- a/frontend/src/pages/LeaderboardPage/components/Leaderboard/hooks/useBatchedState.js +++ /dev/null @@ -1,31 +0,0 @@ -import { useState, useCallback, useTransition } from 'react'; - -export const useBatchedState = (initialState, options = {}) => { - const { batchDelay = 0, useTransitions = false } = options; - const [state, setState] = useState(typeof initialState === 'function' ? 
initialState() : initialState); - const [isPending, startTransition] = useTransition(); - - const setBatchedState = useCallback((newState) => { - if (useTransitions) { - startTransition(() => { - if (batchDelay > 0) { - setTimeout(() => { - setState(newState); - }, batchDelay); - } else { - setState(newState); - } - }); - } else { - if (batchDelay > 0) { - setTimeout(() => { - setState(newState); - }, batchDelay); - } else { - setState(newState); - } - } - }, [batchDelay, useTransitions]); - - return [state, setBatchedState, isPending]; -}; \ No newline at end of file diff --git a/frontend/src/pages/LeaderboardPage/components/Leaderboard/hooks/useDataUtils.js b/frontend/src/pages/LeaderboardPage/components/Leaderboard/hooks/useDataUtils.js deleted file mode 100644 index 53ba5a3f704f1e86835dc62543c886523880ea80..0000000000000000000000000000000000000000 --- a/frontend/src/pages/LeaderboardPage/components/Leaderboard/hooks/useDataUtils.js +++ /dev/null @@ -1,306 +0,0 @@ -import { useMemo } from "react"; -import { - looksLikeRegex, - parseSearchQuery, - getValueByPath, -} from "../utils/searchUtils"; - -// Calculate min/max averages -export const useAverageRange = (data) => { - return useMemo(() => { - const averages = data.map((item) => item.model.average_score); - return { - minAverage: Math.min(...averages), - maxAverage: Math.max(...averages), - }; - }, [data]); -}; - -// Generate colors for scores -export const useColorGenerator = (minAverage, maxAverage) => { - return useMemo(() => { - const colorCache = new Map(); - return (value) => { - const cached = colorCache.get(value); - if (cached) return cached; - - const normalizedValue = (value - minAverage) / (maxAverage - minAverage); - const red = Math.round(255 * (1 - normalizedValue) * 1); - const green = Math.round(255 * normalizedValue) * 1; - const color = `rgba(${red}, ${green}, 0, 1)`; - colorCache.set(value, color); - return color; - }; - }, [minAverage, maxAverage]); -}; - -// Process data with boolean standardization -export const useProcessedData = (data, averageMode, visibleColumns) => { - return useMemo(() => { - let processed = data.map((item) => { - const evaluationScores = Object.entries(item.evaluations) - .filter(([key]) => { - if (averageMode === "all") return true; - return visibleColumns.includes(`evaluations.${key}.normalized_score`); - }) - .map(([, value]) => value.normalized_score); - - const average = - evaluationScores.length > 0 - ? evaluationScores.reduce((a, b) => a + b, 0) / - evaluationScores.length - : averageMode === "visible" - ? 
null - : 0; - - // Boolean standardization - const standardizedFeatures = { - ...item.features, - is_moe: Boolean(item.features.is_moe), - is_flagged: Boolean(item.features.is_flagged), - is_official_provider: Boolean( - item.features.is_official_provider - ), - is_merged: Boolean(item.features.is_merged), - is_not_available_on_hub: Boolean(item.features.is_not_available_on_hub), - }; - - return { - ...item, - features: standardizedFeatures, - model: { - ...item.model, - has_chat_template: Boolean(item.model.has_chat_template), - average_score: average, - }, - }; - }); - - processed.sort((a, b) => { - if (a.model.average_score === null && b.model.average_score === null) - return 0; - if (a.model.average_score === null) return 1; - if (b.model.average_score === null) return -1; - return b.model.average_score - a.model.average_score; - }); - - return processed.map((item, index) => ({ - ...item, - static_rank: index + 1, - })); - }, [data, averageMode, visibleColumns]); -}; - -// Common filtering logic -export const useFilteredData = ( - processedData, - selectedPrecisions, - selectedTypes, - paramsRange, - searchValue, - selectedBooleanFilters, - rankingMode, - pinnedModels = [], - isOfficialProviderActive = false -) => { - return useMemo(() => { - const pinnedData = processedData.filter((row) => { - return pinnedModels.includes(row.id); - }); - const unpinnedData = processedData.filter((row) => { - return !pinnedModels.includes(row.id); - }); - - let filteredUnpinned = unpinnedData; - - // Filter by official providers - if (isOfficialProviderActive) { - filteredUnpinned = filteredUnpinned.filter( - (row) => - row.features?.is_official_provider || - row.metadata?.is_official_provider - ); - } - - // Filter by precision - if (selectedPrecisions.length > 0) { - filteredUnpinned = filteredUnpinned.filter((row) => - selectedPrecisions.includes(row.model.precision) - ); - } - - // Filter by type - if (selectedTypes.length > 0) { - filteredUnpinned = filteredUnpinned.filter((row) => { - const modelType = row.model.type?.toLowerCase().trim(); - return selectedTypes.some((type) => modelType?.includes(type)); - }); - } - - // Filter by parameters - filteredUnpinned = filteredUnpinned.filter((row) => { - // Skip parameter filtering if no filter is active - if (paramsRange[0] === -1 && paramsRange[1] === 140) return true; - - const params = - row.metadata?.params_billions || row.features?.params_billions; - if (params === undefined || params === null) return false; - return params >= paramsRange[0] && params < paramsRange[1]; - }); - - // Filter by search - if (searchValue) { - const searchQueries = searchValue - .split(";") - .map((q) => q.trim()) - .filter((q) => q); - if (searchQueries.length > 0) { - filteredUnpinned = filteredUnpinned.filter((row) => { - return searchQueries.some((query) => { - const { specialSearches, textSearch } = parseSearchQuery(query); - - const specialSearchMatch = specialSearches.every( - ({ field, value }) => { - const fieldValue = getValueByPath(row, field) - ?.toString() - .toLowerCase(); - return fieldValue?.includes(value.toLowerCase()); - } - ); - - if (!specialSearchMatch) return false; - if (!textSearch) return true; - - const modelName = row.model.name.toLowerCase(); - const searchLower = textSearch.toLowerCase(); - - if (looksLikeRegex(textSearch)) { - try { - const regex = new RegExp(textSearch, "i"); - return regex.test(modelName); - } catch (e) { - return modelName.includes(searchLower); - } - } else { - return modelName.includes(searchLower); - } - }); - }); 
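- // Note: ";"-separated queries are OR-ed together; within a query, every - // @field:value term must match, and free text containing regex - // metacharacters is tried as a case-insensitive pattern, falling back to a - // plain substring match if the pattern does not compile.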
- } - } - - // Filter by booleans - if (selectedBooleanFilters.length > 0) { - filteredUnpinned = filteredUnpinned.filter((row) => { - return selectedBooleanFilters.every((filter) => { - const filterValue = - typeof filter === "object" ? filter.value : filter; - - // Maintainer's Highlight keeps positive logic - if (filterValue === "is_official_provider") { - return row.features[filterValue]; - } - - // "Not available on hub" also keeps positive logic; all other filters are inverted - if (filterValue === "is_not_available_on_hub") { - return row.features[filterValue]; - } - - return !row.features[filterValue]; - }); - }); - } - - // Create ordered array of pinned models respecting pinnedModels order - const orderedPinnedData = pinnedModels - .map((pinnedModelId) => - pinnedData.find((item) => item.id === pinnedModelId) - ) - .filter(Boolean); - - // Combine all filtered data - const allFilteredData = [...filteredUnpinned, ...orderedPinnedData]; - - // Sort all data by average_score for dynamic_rank - const sortedByScore = [...allFilteredData].sort((a, b) => { - // If average scores differ, sort by score - if (a.model.average_score !== b.model.average_score) { - if (a.model.average_score === null && b.model.average_score === null) - return 0; - if (a.model.average_score === null) return 1; - if (b.model.average_score === null) return -1; - return b.model.average_score - a.model.average_score; - } - - // If scores are equal, compare model name and submission date - if (a.model.name === b.model.name) { - // Same name: sort by submission date (most recent first) - const dateA = new Date(a.metadata?.submission_date || 0); - const dateB = new Date(b.metadata?.submission_date || 0); - return dateB - dateA; - } - - // Different names: sort alphabetically - return a.model.name.localeCompare(b.model.name); - }); - - // Create Map to store dynamic_ranks - const dynamicRankMap = new Map(); - sortedByScore.forEach((item, index) => { - dynamicRankMap.set(item.id, index + 1); - }); - - // Add ranks to final data - const finalData = [...orderedPinnedData, ...filteredUnpinned].map( - (item) => { - return { - ...item, - dynamic_rank: dynamicRankMap.get(item.id), - rank: item.isPinned - ? pinnedModels.indexOf(item.id) + 1 - : rankingMode === "static" - ? item.static_rank - : dynamicRankMap.get(item.id), - isPinned: pinnedModels.includes(item.id), - }; - } - ); - - return finalData; - }, [ - processedData, - selectedPrecisions, - selectedTypes, - paramsRange, - searchValue, - selectedBooleanFilters, - rankingMode, - pinnedModels, - isOfficialProviderActive, - ]); -}; - -// Column visibility management -export const useColumnVisibility = (visibleColumns = []) => { - // Create secure visibility object - const columnVisibility = useMemo(() => { - // Check visible columns - const safeVisibleColumns = Array.isArray(visibleColumns) - ? 
visibleColumns - : []; - - const visibility = {}; - try { - safeVisibleColumns.forEach((columnKey) => { - if (typeof columnKey === "string") { - visibility[columnKey] = true; - } - }); - } catch (error) { - console.warn("Error in useColumnVisibility:", error); - } - return visibility; - }, [visibleColumns]); - - return columnVisibility; -}; diff --git a/frontend/src/pages/LeaderboardPage/components/Leaderboard/hooks/useLeaderboardData.js b/frontend/src/pages/LeaderboardPage/components/Leaderboard/hooks/useLeaderboardData.js deleted file mode 100644 index dd63c69dd2b253c21852438618e285ef2615a84f..0000000000000000000000000000000000000000 --- a/frontend/src/pages/LeaderboardPage/components/Leaderboard/hooks/useLeaderboardData.js +++ /dev/null @@ -1,133 +0,0 @@ -import { useMemo, useRef, useState } from "react"; -import { useQuery, useQueryClient } from "@tanstack/react-query"; -import { useSearchParams } from "react-router-dom"; -import { useLeaderboard } from "../context/LeaderboardContext"; -import { useDataProcessing } from "../components/Table/hooks/useDataProcessing"; - -const CACHE_KEY = "leaderboardData"; -const CACHE_DURATION = 5 * 60 * 1000; // 5 minutes - -export const useLeaderboardData = () => { - const queryClient = useQueryClient(); - const [searchParams] = useSearchParams(); - const isInitialLoadRef = useRef(true); - - const { data, isLoading, error } = useQuery({ - queryKey: ["leaderboard"], - queryFn: async () => { - try { - const cachedData = localStorage.getItem(CACHE_KEY); - if (cachedData) { - const { data: cached, timestamp } = JSON.parse(cachedData); - const age = Date.now() - timestamp; - if (age < CACHE_DURATION) { - return cached; - } - } - - const response = await fetch("/api/leaderboard/formatted"); - if (!response.ok) { - throw new Error(`HTTP error! 
status: ${response.status}`); - } - - const newData = await response.json(); - localStorage.setItem( - CACHE_KEY, - JSON.stringify({ - data: newData, - timestamp: Date.now(), - }) - ); - - return newData; - } catch (error) { - console.error("Detailed error:", error); - throw error; - } - }, - staleTime: CACHE_DURATION, - cacheTime: CACHE_DURATION * 2, - refetchOnWindowFocus: false, - enabled: isInitialLoadRef.current || !!searchParams.toString(), - }); - - useMemo(() => { - if (data && isInitialLoadRef.current) { - isInitialLoadRef.current = false; - } - }, [data]); - - return { - data, - isLoading, - error, - refetch: () => queryClient.invalidateQueries(["leaderboard"]), - }; -}; - -export const useLeaderboardProcessing = () => { - const { state, actions } = useLeaderboard(); - const [sorting, setSorting] = useState([ - { id: "model.average_score", desc: true }, - ]); - - const memoizedData = useMemo(() => state.models, [state.models]); - const memoizedFilters = useMemo( - () => ({ - search: state.filters.search, - precisions: state.filters.precisions, - types: state.filters.types, - paramsRange: state.filters.paramsRange, - booleanFilters: state.filters.booleanFilters, - isOfficialProviderActive: state.filters.isOfficialProviderActive, - }), - [ - state.filters.search, - state.filters.precisions, - state.filters.types, - state.filters.paramsRange, - state.filters.booleanFilters, - state.filters.isOfficialProviderActive, - ] - ); - - const { - table, - minAverage, - maxAverage, - getColorForValue, - processedData, - filteredData, - columns, - columnVisibility, - } = useDataProcessing( - memoizedData, - memoizedFilters.search, - memoizedFilters.precisions, - memoizedFilters.types, - memoizedFilters.paramsRange, - memoizedFilters.booleanFilters, - sorting, - state.display.rankingMode, - state.display.averageMode, - state.display.visibleColumns, - state.display.scoreDisplay, - state.pinnedModels, - actions.togglePinnedModel, - setSorting, - memoizedFilters.isOfficialProviderActive - ); - - return { - table, - minAverage, - maxAverage, - getColorForValue, - processedData, - filteredData, - columns, - columnVisibility, - loading: state.loading, - error: state.error, - }; -}; diff --git a/frontend/src/pages/LeaderboardPage/components/Leaderboard/styles/common.js b/frontend/src/pages/LeaderboardPage/components/Leaderboard/styles/common.js deleted file mode 100644 index 06648e526979fd7c992ea3f3721468e261448593..0000000000000000000000000000000000000000 --- a/frontend/src/pages/LeaderboardPage/components/Leaderboard/styles/common.js +++ /dev/null @@ -1,153 +0,0 @@ -import { alpha } from "@mui/material"; - -export const commonStyles = { - // Tooltips - tooltip: { - sx: { - bgcolor: "background.tooltip", - "& .MuiTooltip-arrow": { - color: "background.tooltip", - }, - padding: "12px 16px", - maxWidth: 300, - fontSize: "0.875rem", - lineHeight: 1.4, - }, - }, - - // Progress bars - progressBar: { - position: "absolute", - left: -16, - top: -8, - height: "calc(100% + 16px)", - opacity: (theme) => (theme.palette.mode === "light" ? 0.1 : 0.2), - transition: "width 0.3s ease", - zIndex: 0, - }, - - // Cell containers - cellContainer: { - display: "flex", - alignItems: "center", - height: "100%", - width: "100%", - position: "relative", - }, - - // Hover effects - hoverEffect: (theme, isActive = false) => ({ - backgroundColor: isActive - ? alpha( - theme.palette.primary.main, - theme.palette.mode === "light" ? 0.08 : 0.16 - ) - : theme.palette.action.hover, - "& .MuiTypography-root": { - color: isActive ? 
"primary.main" : "text.primary", - }, - "& .MuiSvgIcon-root": { - color: isActive ? "primary.main" : "text.primary", - }, - }), - - // Filter groups - filterGroup: { - title: { - mb: 1, - fontSize: "0.8rem", - fontWeight: 700, - color: "text.primary", - display: "flex", - alignItems: "center", - gap: 0.5, - }, - container: { - display: "flex", - flexWrap: "wrap", - gap: 0.5, - alignItems: "center", - }, - }, - - // Option buttons (like in DisplayOptions) - optionButton: { - display: "flex", - alignItems: "center", - gap: 0.8, - cursor: "pointer", - padding: "4px 10px", - borderRadius: 1, - height: "32px", - "& .MuiSvgIcon-root": { - fontSize: "0.9rem", - }, - "& .MuiTypography-root": { - fontSize: "0.85rem", - }, - }, - - // Score indicators - scoreIndicator: { - dot: { - width: 10, - height: 10, - borderRadius: "50%", - marginLeft: -1, - }, - bar: { - position: "absolute", - left: -16, - top: -8, - height: "calc(100% + 16px)", - opacity: (theme) => (theme.palette.mode === "light" ? 0.1 : 0.2), - transition: "width 0.3s ease", - }, - }, - - // Popover content - popoverContent: { - p: 3, - width: 280, - maxHeight: 400, - overflowY: "auto", - }, -}; - -// Composant styles -export const componentStyles = { - // Table header cell - headerCell: { - borderRight: (theme) => - `1px solid ${alpha( - theme.palette.divider, - theme.palette.mode === "dark" ? 0.05 : 0.1 - )}`, - "&:last-child": { - borderRight: "none", - }, - whiteSpace: "nowrap", - overflow: "hidden", - textOverflow: "ellipsis", - padding: "8px 16px", - backgroundColor: (theme) => theme.palette.background.paper, - position: "sticky !important", - top: 0, - zIndex: 10, - }, - - // Table cell - tableCell: { - borderRight: (theme) => - `1px solid ${alpha( - theme.palette.divider, - theme.palette.mode === "dark" ? 
0.05 : 0.1 - )}`, - "&:last-child": { - borderRight: "none", - }, - whiteSpace: "nowrap", - overflow: "hidden", - textOverflow: "ellipsis", - }, -}; diff --git a/frontend/src/pages/LeaderboardPage/components/Leaderboard/utils/columnUtils.js b/frontend/src/pages/LeaderboardPage/components/Leaderboard/utils/columnUtils.js deleted file mode 100644 index 526c015c6684569bc6581e1931b28e7dd09b3bac..0000000000000000000000000000000000000000 --- a/frontend/src/pages/LeaderboardPage/components/Leaderboard/utils/columnUtils.js +++ /dev/null @@ -1,1073 +0,0 @@ -import React from "react"; -import { Box, Typography, Link, Tooltip, IconButton } from "@mui/material"; -import { getModelTypeIcon } from "../constants/modelTypes"; -import TrendingUpIcon from "@mui/icons-material/TrendingUp"; -import TrendingDownIcon from "@mui/icons-material/TrendingDown"; -import RemoveIcon from "@mui/icons-material/Remove"; -import PushPinIcon from "@mui/icons-material/PushPin"; -import PushPinOutlinedIcon from "@mui/icons-material/PushPinOutlined"; -import { TABLE_DEFAULTS, HIGHLIGHT_COLORS } from "../constants/defaults"; -import { looksLikeRegex, extractTextSearch } from "./searchUtils"; -import { commonStyles } from "../styles/common"; -import { typeColumnSort } from "../components/Table/hooks/useSorting"; -import { - COLUMN_TOOLTIPS, - getTooltipStyle, - TABLE_TOOLTIPS, -} from "../constants/tooltips"; -import OpenInNewIcon from "@mui/icons-material/OpenInNew"; -import { alpha } from "@mui/material/styles"; -import InfoIconWithTooltip from "../../../../../components/shared/InfoIconWithTooltip"; - -const DatabaseIcon = () => ( - -); - -const HighlightedText = ({ text, searchValue }) => { - if (!searchValue) return text; - - const searches = searchValue - .split(";") - .map((s) => s.trim()) - .filter(Boolean); - let result = text; - let fragments = [{ text: result, isMatch: false }]; - - searches.forEach((search, searchIndex) => { - if (!search) return; - - try { - let regex; - if (looksLikeRegex(search)) { - regex = new RegExp(search, "gi"); - } else { - regex = new RegExp(search.replace(/[.*+?^${}()|[\]\\]/g, "\\$&"), "gi"); - } - - const newFragments = []; - fragments.forEach((fragment) => { - if (fragment.isMatch) { - newFragments.push(fragment); - return; - } - - const parts = fragment.text.split(regex); - const matches = fragment.text.match(regex); - - if (!matches) { - newFragments.push(fragment); - return; - } - - parts.forEach((part, i) => { - if (part) newFragments.push({ text: part, isMatch: false }); - if (i < parts.length - 1) { - newFragments.push({ - text: matches[i], - isMatch: true, - colorIndex: searchIndex % HIGHLIGHT_COLORS.length, - }); - } - }); - }); - - fragments = newFragments; - } catch (e) { - console.warn("Invalid regex:", search); - } - }); - - return ( - <> - {fragments.map((fragment, i) => - fragment.isMatch ? 
( - - theme.palette.getContrastText( - HIGHLIGHT_COLORS[fragment.colorIndex] - ), - fontWeight: 500, - px: 0.5, - py: "2px", - borderRadius: "3px", - mx: "1px", - overflow: "visible", - display: "inline-block", - }} - > - {fragment.text} - - ) : ( - {fragment.text} - ) - )} - - ); -}; - -const MEDAL_STYLES = { - 1: { - color: "#B58A1B", - background: "linear-gradient(135deg, #FFF7E0 0%, #FFD700 100%)", - borderColor: "rgba(212, 160, 23, 0.35)", - shadowColor: "rgba(212, 160, 23, 0.8)", - }, - 2: { - color: "#667380", - background: "linear-gradient(135deg, #FFFFFF 0%, #D8E3ED 100%)", - borderColor: "rgba(124, 139, 153, 0.35)", - shadowColor: "rgba(124, 139, 153, 0.8)", - }, - 3: { - color: "#B85C2F", - background: "linear-gradient(135deg, #FDF0E9 0%, #FFBC8C 100%)", - borderColor: "rgba(204, 108, 61, 0.35)", - shadowColor: "rgba(204, 108, 61, 0.8)", - }, -}; - -const getMedalStyle = (rank) => { - if (rank <= 3) { - const medalStyle = MEDAL_STYLES[rank]; - return { - color: medalStyle.color, - fontWeight: 900, - fontStretch: "150%", - fontFamily: '"Inter", -apple-system, sans-serif', - width: "24px", - height: "24px", - background: medalStyle.background, - border: "1px solid", - borderColor: medalStyle.borderColor, - borderRadius: "50%", - display: "flex", - alignItems: "center", - justifyContent: "center", - fontSize: "0.95rem", - lineHeight: 1, - padding: 0, - boxShadow: `1px 1px 0 ${medalStyle.shadowColor}`, - position: "relative", - }; - } - return { - color: "inherit", - fontWeight: rank <= 10 ? 600 : 400, - }; -}; - -const getRankStyle = (rank) => getMedalStyle(rank); - -const RankIndicator = ({ rank, previousRank, mode }) => { - const rankChange = previousRank ? previousRank - rank : 0; - - const RankChangeIndicator = ({ change }) => { - if (!change || mode === "dynamic") return null; - - const getChangeColor = (change) => { - if (change > 0) return "success.main"; - if (change < 0) return "error.main"; - return "grey.500"; - }; - - const getChangeIcon = (change) => { - if (change > 0) return ; - if (change < 0) return ; - return ; - }; - - return ( - 1 ? "s" : "" - } ${change > 0 ? "up" : "down"}`} - arrow - placement="right" - > - - {getChangeIcon(change)} - - - ); - }; - - return ( - - - {rank <= 3 ? ( - <> - - {rank} - - - - ) : ( - <> - - {rank} - - - - )} - - - ); -}; - -const getDetailsUrl = (modelName) => { - const formattedName = modelName.replace("/", "__"); - return `https://huggingface.co/datasets/open-llm-leaderboard/${formattedName}-details`; -}; - -const HeaderLabel = ({ label, tooltip, className, isSorted }) => ( - - - {label} - - -); - -const InfoIcon = ({ tooltip }) => ( - - - -); - -const createHeaderCell = (label, tooltip) => (header) => - ( - - - - - {tooltip && } - - - ); - -const createModelHeader = - (totalModels, officialProvidersCount = 0, isOfficialProviderActive = false) => - ({ table }) => { - return ( - - - - Model - - - - ); - }; - -const BooleanValue = ({ value }) => { - if (value === null || value === undefined) - return -; - - return ( - ({ - display: "flex", - alignItems: "center", - justifyContent: "center", - borderRadius: "4px", - px: 1, - py: 0.5, - backgroundColor: value - ? theme.palette.mode === "dark" - ? alpha(theme.palette.success.main, 0.1) - : alpha(theme.palette.success.main, 0.1) - : theme.palette.mode === "dark" - ? alpha(theme.palette.error.main, 0.1) - : alpha(theme.palette.error.main, 0.1), - })} - > - ({ - color: value - ? theme.palette.mode === "dark" - ? 
theme.palette.success.light - : theme.palette.success.dark - : theme.palette.mode === "dark" - ? theme.palette.error.light - : theme.palette.error.dark, - })} - > - {value ? "Yes" : "No"} - - - ); -}; - -export const createColumns = ( - getColorForValue, - scoreDisplay = "normalized", - columnVisibility = {}, - totalModels, - averageMode = "all", - searchValue = "", - rankingMode = "static", - onTogglePin, - hasPinnedRows = false -) => { - // Adjust column sizes based on whether pinned rows are present - const getColumnSize = (defaultSize) => - hasPinnedRows ? "auto" : `${defaultSize}px`; - - const baseColumns = [ - { - accessorKey: "isPinned", - header: () => null, - cell: ({ row }) => ( - - { - e.stopPropagation(); - e.preventDefault(); - onTogglePin(row.original.id); - }} - sx={{ - padding: 0.5, - color: row.original.isPinned ? "primary.main" : "grey.400", - "&:hover": { - color: "primary.main", - }, - }} - > - {row.original.isPinned ? ( - - ) : ( - - )} - - - ), - enableSorting: false, - size: getColumnSize(40), - }, - { - accessorKey: "rank", - header: createHeaderCell("Rank"), - cell: ({ row }) => { - const rank = - rankingMode === "static" - ? row.original.static_rank - : row.original.dynamic_rank; - - return ( - - ); - }, - size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES["rank"], - }, - { - id: "model_type", - accessorFn: (row) => row.model.type, - header: createHeaderCell("Type"), - sortingFn: typeColumnSort, - cell: ({ row }) => ( - - - - {getModelTypeIcon(row.original.model.type)} - - - - ), - size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES["model.type_icon"], - }, - { - accessorKey: "id", - header: createModelHeader(totalModels), - cell: ({ row }) => { - const textSearch = extractTextSearch(searchValue); - const modelName = row.original.model.name; - - return ( - - - - theme.palette.mode === "dark" - ? theme.palette.info.light - : theme.palette.info.dark, - "& svg": { - opacity: 0.8, - }, - }, - overflow: "hidden", - textOverflow: "ellipsis", - whiteSpace: "nowrap", - flex: 1, - minWidth: 0, - fontWeight: row.original.static_rank <= 3 ? 600 : "inherit", - }} - > - - - - - - - - - ); - }, - size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES["id"], - }, - { - accessorKey: "model.average_score", - header: createHeaderCell("Average", COLUMN_TOOLTIPS.AVERAGE), - cell: ({ row, getValue }) => - createScoreCell(getValue, row, "model.average_score"), - size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES["model.average_score"], - meta: { - headerStyle: { - borderLeft: (theme) => - `2px solid ${alpha( - theme.palette.divider, - theme.palette.mode === "dark" ? 0.1 : 0.2 - )}`, - borderRight: (theme) => - `2px solid ${alpha( - theme.palette.divider, - theme.palette.mode === "dark" ? 0.1 : 0.2 - )}`, - }, - cellStyle: (value) => ({ - position: "relative", - overflow: "hidden", - padding: "8px 16px", - borderLeft: (theme) => - `2px solid ${alpha( - theme.palette.divider, - theme.palette.mode === "dark" ? 0.1 : 0.2 - )}`, - borderRight: (theme) => - `2px solid ${alpha( - theme.palette.divider, - theme.palette.mode === "dark" ? 0.1 : 0.2 - )}`, - }), - }, - }, - ]; - const createScoreCell = (getValue, row, field) => { - const value = getValue(); - const rawValue = field.includes("normalized") - ? row.original.evaluations[field.split(".")[1]]?.value - : value; - - const isAverageColumn = field === "model.average_score"; - const hasNoValue = value === null || value === undefined; - - return ( - - {!hasNoValue && (scoreDisplay === "normalized" || isAverageColumn) && ( - (theme.palette.mode === "light" ? 
0.1 : 0.2), - transition: "width 0.3s ease", - zIndex: 0, - }} - /> - )} - - {isAverageColumn && !hasNoValue && ( - - )} - - {hasNoValue ? ( - "-" - ) : ( - <> - {isAverageColumn ? ( - <> - {value.toFixed(2)} - % - - ) : scoreDisplay === "normalized" ? ( - <> - {value.toFixed(2)} - % - - ) : ( - <>{rawValue.toFixed(2)} - )} - - )} - - - - ); - }; - - const evaluationColumns = [ - { - accessorKey: "evaluations.ifeval.normalized_score", - header: createHeaderCell("IFEval", COLUMN_TOOLTIPS.IFEVAL), - cell: ({ row, getValue }) => - createScoreCell(getValue, row, "evaluations.ifeval.normalized_score"), - size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES[ - "evaluations.ifeval.normalized_score" - ], - }, - { - accessorKey: "evaluations.bbh.normalized_score", - header: createHeaderCell("BBH", COLUMN_TOOLTIPS.BBH), - cell: ({ row, getValue }) => - createScoreCell(getValue, row, "evaluations.bbh.normalized_score"), - size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES[ - "evaluations.bbh.normalized_score" - ], - }, - { - accessorKey: "evaluations.math.normalized_score", - header: createHeaderCell("MATH", COLUMN_TOOLTIPS.MATH), - cell: ({ row, getValue }) => - createScoreCell(getValue, row, "evaluations.math.normalized_score"), - size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES[ - "evaluations.math.normalized_score" - ], - }, - { - accessorKey: "evaluations.gpqa.normalized_score", - header: createHeaderCell("GPQA", COLUMN_TOOLTIPS.GPQA), - cell: ({ row, getValue }) => - createScoreCell(getValue, row, "evaluations.gpqa.normalized_score"), - size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES[ - "evaluations.gpqa.normalized_score" - ], - }, - { - accessorKey: "evaluations.musr.normalized_score", - header: createHeaderCell("MUSR", COLUMN_TOOLTIPS.MUSR), - cell: ({ row, getValue }) => - createScoreCell(getValue, row, "evaluations.musr.normalized_score"), - size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES[ - "evaluations.musr.normalized_score" - ], - }, - { - accessorKey: "evaluations.mmlu_pro.normalized_score", - header: createHeaderCell("MMLU-PRO", COLUMN_TOOLTIPS.MMLU_PRO), - cell: ({ row, getValue }) => - createScoreCell(getValue, row, "evaluations.mmlu_pro.normalized_score"), - size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES[ - "evaluations.mmlu_pro.normalized_score" - ], - }, - ]; - - const optionalColumns = [ - { - accessorKey: "model.architecture", - header: createHeaderCell("Architecture", COLUMN_TOOLTIPS.ARCHITECTURE), - accessorFn: (row) => row.model.architecture, - cell: ({ row }) => ( - - {row.original.model.architecture || "-"} - - ), - size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES["model.architecture"], - }, - { - accessorKey: "model.precision", - header: createHeaderCell("Precision", COLUMN_TOOLTIPS.PRECISION), - accessorFn: (row) => row.model.precision, - cell: ({ row }) => ( - - {row.original.model.precision || "-"} - - ), - size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES["model.precision"], - }, - { - accessorKey: "metadata.params_billions", - header: createHeaderCell("Parameters", COLUMN_TOOLTIPS.PARAMETERS), - cell: ({ row }) => ( - - - {row.original.metadata.params_billions} - B - - - ), - size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES["metadata.params_billions"], - }, - { - accessorKey: "metadata.hub_license", - header: createHeaderCell("License", COLUMN_TOOLTIPS.LICENSE), - cell: ({ row }) => ( - - - {row.original.metadata.hub_license || "-"} - - - ), - size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES["metadata.hub_license"], - }, - { - accessorKey: "metadata.hub_hearts", - header: createHeaderCell( - "Hub ❤️", - "Number of likes received on the 
Hugging Face Hub" - ), - cell: ({ row }) => ( - - {row.original.metadata.hub_hearts} - - ), - size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES["metadata.hub_hearts"], - }, - { - accessorKey: "metadata.upload_date", - header: createHeaderCell( - "Upload Date", - "Date when the model was uploaded to the Hugging Face Hub" - ), - cell: ({ row }) => ( - - - {row.original.metadata.upload_date || "-"} - - - ), - size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES["metadata.upload_date"], - }, - { - accessorKey: "metadata.submission_date", - header: createHeaderCell( - "Submission Date", - "Date when the model was submitted to the leaderboard" - ), - cell: ({ row }) => ( - - - {row.original.metadata.submission_date || "-"} - - - ), - size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES["metadata.submission_date"], - }, - { - accessorKey: "metadata.generation", - header: createHeaderCell( - "Generation", - "The generation or version number of the model" - ), - cell: ({ row }) => ( - - {row.original.metadata.generation} - - ), - size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES["metadata.generation"], - }, - { - accessorKey: "metadata.base_model", - header: createHeaderCell( - "Base Model", - "The original model this model was derived from" - ), - cell: ({ row }) => ( - - - {row.original.metadata.base_model || "-"} - - - ), - size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES["metadata.base_model"], - }, - { - accessorKey: "metadata.co2_cost", - header: createHeaderCell("CO₂ Cost", COLUMN_TOOLTIPS.CO2_COST), - cell: ({ row }) => ( - - - {row.original.metadata.co2_cost?.toFixed(2) || "0"} - kg - - - ), - size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES["metadata.co2_cost"], - }, - { - accessorKey: "model.has_chat_template", - header: createHeaderCell( - "Chat Template", - "Whether this model has a chat template defined" - ), - cell: ({ row }) => ( - - ), - size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES["model.has_chat_template"], - }, - { - accessorKey: "features.is_not_available_on_hub", - header: createHeaderCell( - "Hub Availability", - "Whether the model is available on the Hugging Face Hub" - ), - cell: ({ row }) => ( - - ), - size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES[ - "features.is_not_available_on_hub" - ], - }, - { - accessorKey: "features.is_official_provider", - header: createHeaderCell( - "Official Providers", - "Models that are officially provided and maintained by their original creators or organizations" - ), - cell: ({ row }) => ( - - ), - size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES[ - "features.is_official_provider" - ], - enableSorting: true, - }, - { - accessorKey: "features.is_moe", - header: createHeaderCell( - "Mixture of Experts", - "Whether this model uses a Mixture of Experts architecture" - ), - cell: ({ row }) => , - size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES["features.is_moe"], - }, - { - accessorKey: "features.is_flagged", - header: createHeaderCell( - "Flag Status", - "Whether this model has been flagged for any issues" - ), - cell: ({ row }) => ( - - ), - size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES["features.is_flagged"], - }, - ]; - - // Utiliser directement columnVisibility - const finalColumns = [ - ...baseColumns, - ...evaluationColumns.filter((col) => columnVisibility[col.accessorKey]), - ...optionalColumns - .filter((col) => columnVisibility[col.accessorKey]) - .sort((a, b) => { - // Définir l'ordre personnalisé des colonnes - const order = { - "model.architecture": 1, - "model.precision": 2, - "metadata.params_billions": 3, - "metadata.hub_license": 4, - "metadata.co2_cost": 5, - "metadata.hub_hearts": 6, - 
"metadata.upload_date": 7, - "metadata.submission_date": 8, - "metadata.generation": 9, - "metadata.base_model": 10, - "model.has_chat_template": 11, - "features.is_not_available_on_hub": 12, - "features.is_official_provider": 13, - "features.is_moe": 14, - "features.is_flagged": 15, - }; - return order[a.accessorKey] - order[b.accessorKey]; - }), - ]; - - return finalColumns; -}; diff --git a/frontend/src/pages/LeaderboardPage/components/Leaderboard/utils/searchUtils.js b/frontend/src/pages/LeaderboardPage/components/Leaderboard/utils/searchUtils.js deleted file mode 100644 index 091796b7a7a3721b4d7f790f0fda75ca151a838d..0000000000000000000000000000000000000000 --- a/frontend/src/pages/LeaderboardPage/components/Leaderboard/utils/searchUtils.js +++ /dev/null @@ -1,92 +0,0 @@ -// Utility function to detect if a string looks like a regex -export const looksLikeRegex = (str) => { - const regexSpecialChars = /[\\^$.*+?()[\]{}|]/; - return regexSpecialChars.test(str); -}; - -// Function to map search fields to correct paths -const getFieldPath = (field) => { - const fieldMappings = { - precision: "model.precision", - architecture: "model.architecture", - license: "metadata.hub_license", - type: "model.type", - }; - return fieldMappings[field] || field; -}; - -// Function to extract special searches and normal text -export const parseSearchQuery = (query) => { - const specialSearches = []; - let remainingText = query; - - // Look for all @field:value patterns - const prefixRegex = /@\w+:/g; - const matches = query.match(prefixRegex) || []; - - matches.forEach((prefix) => { - const regex = new RegExp(`${prefix}([^\\s@]+)`, "g"); - remainingText = remainingText.replace(regex, (match, value) => { - const field = prefix.slice(1, -1); - specialSearches.push({ - field: getFieldPath(field), - displayField: field, - value, - }); - return ""; - }); - }); - - return { - specialSearches, - textSearch: remainingText.trim(), - }; -}; - -// Function to extract simple text search -export const extractTextSearch = (searchValue) => { - return searchValue - .split(";") - .map((query) => { - const { textSearch } = parseSearchQuery(query); - return textSearch; - }) - .filter(Boolean) - .join(";"); -}; - -// Utility function to access nested object properties -export const getValueByPath = (obj, path) => { - return path.split(".").reduce((acc, part) => acc?.[part], obj); -}; - -// Function to generate natural language description of the search -export const generateSearchDescription = (searchValue) => { - if (!searchValue) return null; - - const searchGroups = searchValue - .split(";") - .map((group) => group.trim()) - .filter(Boolean); - - return searchGroups.map((group, index) => { - const { specialSearches, textSearch } = parseSearchQuery(group); - - let parts = []; - if (textSearch) { - parts.push(textSearch); - } - - if (specialSearches.length > 0) { - const specialParts = specialSearches.map( - ({ displayField, value }) => `@${displayField}:${value}` - ); - parts = parts.concat(specialParts); - } - - return { - text: parts.join(" "), - index, - }; - }); -}; diff --git a/frontend/src/pages/QuotePage/QuotePage.js b/frontend/src/pages/QuotePage/QuotePage.js deleted file mode 100644 index 16ba846ed19ec76a731e4addcca3a1cd845b29dc..0000000000000000000000000000000000000000 --- a/frontend/src/pages/QuotePage/QuotePage.js +++ /dev/null @@ -1,278 +0,0 @@ -import React from "react"; -import { - Box, - Typography, - Paper, - IconButton, - Tooltip, - Alert, - Link, -} from "@mui/material"; -import ContentCopyIcon from 
"@mui/icons-material/ContentCopy"; -import PageHeader from "../../components/shared/PageHeader"; - -const citations = [ - { - title: "Open LLM Leaderboard v2", - authors: - "Clémentine Fourrier, Nathan Habib, Alina Lozovskaya, Konrad Szafer, Thomas Wolf", - citation: `@misc{open-llm-leaderboard-v2, - author = {Clémentine Fourrier and Nathan Habib and Alina Lozovskaya and Konrad Szafer and Thomas Wolf}, - title = {Open LLM Leaderboard v2}, - year = {2024}, - publisher = {Hugging Face}, - howpublished = "\\url{https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard}", -}`, - type: "main", - }, - { - title: "Evaluation Framework", - authors: "Leo Gao et al.", - citation: `@software{eval-harness, - author = {Gao, Leo and Tow, Jonathan and Biderman, Stella and Black, Sid and DiPofi, Anthony and Foster, Charles and Golding, Laurence and Hsu, Jeffrey and McDonell, Kyle and Muennighoff, Niklas and Phang, Jason and Reynolds, Laria and Tang, Eric and Thite, Anish and Wang, Ben and Wang, Kevin and Zou, Andy}, - title = {A framework for few-shot language model evaluation}, - month = sep, - year = 2021, - publisher = {Zenodo}, - version = {v0.0.1}, - doi = {10.5281/zenodo.5371628}, - url = {https://doi.org/10.5281/zenodo.5371628}, -}`, - url: "https://doi.org/10.5281/zenodo.5371628", - }, -]; - -const priorWork = [ - { - title: "Open LLM Leaderboard v1", - authors: - "Edward Beeching, Clémentine Fourrier, Nathan Habib, Sheon Han, Nathan Lambert, Nazneen Rajani, Omar Sanseviero, Lewis Tunstall, Thomas Wolf", - citation: `@misc{open-llm-leaderboard-v1, - author = {Edward Beeching and Clémentine Fourrier and Nathan Habib and Sheon Han and Nathan Lambert and Nazneen Rajani and Omar Sanseviero and Lewis Tunstall and Thomas Wolf}, - title = {Open LLM Leaderboard (2023-2024)}, - year = {2023}, - publisher = {Hugging Face}, - howpublished = "\\url{https://huggingface.co/spaces/open-llm-leaderboard-old/open_llm_leaderboard}" -}`, - type: "main", - }, -]; - -const benchmarks = [ - { - title: "IFEval: Instruction-Following Evaluation", - authors: "Zhou et al.", - citation: `@misc{zhou2023instructionfollowingevaluationlargelanguage, - title={Instruction-Following Evaluation for Large Language Models}, - author={Jeffrey Zhou and Tianjian Lu and Swaroop Mishra and Siddhartha Brahma and Sujoy Basu and Yi Luan and Denny Zhou and Le Hou}, - year={2023}, - eprint={2311.07911}, - archivePrefix={arXiv}, - primaryClass={cs.CL}, - url={https://arxiv.org/abs/2311.07911}, -}`, - url: "https://arxiv.org/abs/2311.07911", - }, - { - title: "BBH: Big-Bench Hard", - authors: "Suzgun et al.", - citation: `@misc{suzgun2022challengingbigbenchtaskschainofthought, - title={Challenging BIG-Bench Tasks and Whether Chain-of-Thought Can Solve Them}, - author={Mirac Suzgun and Nathan Scales and Nathanael Schärli and Sebastian Gehrmann and Yi Tay and Hyung Won Chung and Aakanksha Chowdhery and Quoc V. Le and Ed H. 
Chi and Denny Zhou and Jason Wei}, - year={2022}, - eprint={2210.09261}, - archivePrefix={arXiv}, - primaryClass={cs.CL}, - url={https://arxiv.org/abs/2210.09261}, -}`, - url: "https://arxiv.org/abs/2210.09261", - }, - { - title: "MATH: Mathematics Aptitude Test of Heuristics - Level 5", - authors: "Hendrycks et al.", - citation: `@misc{hendrycks2021measuringmathematicalproblemsolving, - title={Measuring Mathematical Problem Solving With the MATH Dataset}, - author={Dan Hendrycks and Collin Burns and Saurav Kadavath and Akul Arora and Steven Basart and Eric Tang and Dawn Song and Jacob Steinhardt}, - year={2021}, - eprint={2103.03874}, - archivePrefix={arXiv}, - primaryClass={cs.LG}, - url={https://arxiv.org/abs/2103.03874}, -}`, - url: "https://arxiv.org/abs/2103.03874", - }, - { - title: "GPQA: Graduate-Level Google-Proof Q&A", - authors: "Rein et al.", - citation: `@misc{rein2023gpqagraduatelevelgoogleproofqa, - title={GPQA: A Graduate-Level Google-Proof Q&A Benchmark}, - author={David Rein and Betty Li Hou and Asa Cooper Stickland and Jackson Petty and Richard Yuanzhe Pang and Julien Dirani and Julian Michael and Samuel R. Bowman}, - year={2023}, - eprint={2311.12022}, - archivePrefix={arXiv}, - primaryClass={cs.AI}, - url={https://arxiv.org/abs/2311.12022}, -}`, - url: "https://arxiv.org/abs/2311.12022", - }, - { - title: "MuSR: Multistep Soft Reasoning", - authors: "Sprague et al.", - citation: `@misc{sprague2024musrtestinglimitschainofthought, - title={MuSR: Testing the Limits of Chain-of-thought with Multistep Soft Reasoning}, - author={Zayne Sprague and Xi Ye and Kaj Bostrom and Swarat Chaudhuri and Greg Durrett}, - year={2024}, - eprint={2310.16049}, - archivePrefix={arXiv}, - primaryClass={cs.CL}, - url={https://arxiv.org/abs/2310.16049}, -}`, - url: "https://arxiv.org/abs/2310.16049", - }, - { - title: "MMLU-Pro: Massive Multitask Language Understanding Professional", - authors: "Wang et al.", - citation: `@misc{wang2024mmluprorobustchallengingmultitask, - title={MMLU-Pro: A More Robust and Challenging Multi-Task Language Understanding Benchmark}, - author={Yubo Wang and Xueguang Ma and Ge Zhang and Yuansheng Ni and Abhranil Chandra and Shiguang Guo and Weiming Ren and Aaran Arulraj and Xuan He and Ziyan Jiang and Tianle Li and Max Ku and Kai Wang and Alex Zhuang and Rongqi Fan and Xiang Yue and Wenhu Chen}, - year={2024}, - eprint={2406.01574}, - archivePrefix={arXiv}, - primaryClass={cs.CL}, - url={https://arxiv.org/abs/2406.01574}, -}`, - url: "https://arxiv.org/abs/2406.01574", - }, -]; - -const CitationBlock = ({ citation, title, authors, url, type }) => { - const handleCopy = () => { - navigator.clipboard.writeText(citation); - }; - - return ( - - - - {title} - - - {authors} - - {url && ( - - View paper → - - )} - - - - - - - - - {citation} - - - - ); -}; - -function QuotePage() { - return ( - - - - - - The citations below include both the leaderboard itself and the - individual benchmarks used in our evaluation suite. 
- - - - - - Leaderboard - - - {citations.map((citation, index) => ( - - ))} - - - - - - Benchmarks - - - {benchmarks.map((benchmark, index) => ( - - ))} - - - - - - Prior Work - - - {priorWork.map((citation, index) => ( - - ))} - - - - ); -} - -export default QuotePage; diff --git a/frontend/src/pages/VoteModelPage/VoteModelPage.js b/frontend/src/pages/VoteModelPage/VoteModelPage.js deleted file mode 100644 index c65684104e818d1b65df936c5e40e0044c75c3b2..0000000000000000000000000000000000000000 --- a/frontend/src/pages/VoteModelPage/VoteModelPage.js +++ /dev/null @@ -1,713 +0,0 @@ -import React, { useState, useEffect } from "react"; -import { - Box, - Typography, - Paper, - Button, - Alert, - List, - ListItem, - CircularProgress, - Chip, - Divider, - IconButton, - Stack, - Link, -} from "@mui/material"; -import AccessTimeIcon from "@mui/icons-material/AccessTime"; -import PersonIcon from "@mui/icons-material/Person"; -import OpenInNewIcon from "@mui/icons-material/OpenInNew"; -import HowToVoteIcon from "@mui/icons-material/HowToVote"; -import { useAuth } from "../../hooks/useAuth"; -import PageHeader from "../../components/shared/PageHeader"; -import AuthContainer from "../../components/shared/AuthContainer"; -import { alpha } from "@mui/material/styles"; -import CheckIcon from "@mui/icons-material/Check"; - -const NoModelsToVote = () => ( - - - - No Models to Vote - - - There are currently no models waiting for votes. -
    - Check back later! -
    -
    -); - -function VoteModelPage() { - const { isAuthenticated, user, loading } = useAuth(); - const [pendingModels, setPendingModels] = useState([]); - const [loadingModels, setLoadingModels] = useState(true); - const [error, setError] = useState(null); - const [userVotes, setUserVotes] = useState(new Set()); - - const formatWaitTime = (submissionTime) => { - if (!submissionTime) return "N/A"; - - const now = new Date(); - const submitted = new Date(submissionTime); - const diffInHours = Math.floor((now - submitted) / (1000 * 60 * 60)); - - // Less than 24 hours: show in hours - if (diffInHours < 24) { - return `${diffInHours}h`; - } - - // Less than 7 days: show in days - const diffInDays = Math.floor(diffInHours / 24); - if (diffInDays < 7) { - return `${diffInDays}d`; - } - - // More than 7 days: show in weeks - const diffInWeeks = Math.floor(diffInDays / 7); - return `${diffInWeeks}w`; - }; - - // Fetch user's votes - useEffect(() => { - const fetchUserVotes = async () => { - if (!isAuthenticated || !user) return; - - try { - // Get votes from localStorage - const localVotes = JSON.parse( - localStorage.getItem(`votes_${user.username}`) || "[]" - ); - const localVotesSet = new Set(localVotes); - - // Fetch votes from the server - const response = await fetch(`/api/votes/user/${user.username}`); - if (!response.ok) { - throw new Error("Failed to fetch user votes"); - } - const data = await response.json(); - - // Merge server votes with local votes - const votedModels = new Set([ - ...data.map((vote) => vote.model), - ...localVotesSet, - ]); - setUserVotes(votedModels); - } catch (err) { - console.error("Error fetching user votes:", err); - } - }; - - fetchUserVotes(); - }, [isAuthenticated, user]); - - useEffect(() => { - const fetchModels = async () => { - try { - const response = await fetch("/api/models/pending"); - if (!response.ok) { - throw new Error("Failed to fetch pending models"); - } - const data = await response.json(); - - // Fetch votes for each model - const modelsWithVotes = await Promise.all( - data.map(async (model) => { - const [provider, modelName] = model.name.split("/"); - const votesResponse = await fetch( - `/api/votes/model/${provider}/${modelName}` - ); - const votesData = await votesResponse.json(); - - // Calculate total vote score from votes_by_revision - const totalScore = Object.values( - votesData.votes_by_revision || {} - ).reduce((a, b) => a + b, 0); - - // Calculate wait time based on submission_time from model data - const waitTimeDisplay = formatWaitTime(model.submission_time); - - return { - ...model, - votes: totalScore, - votes_by_revision: votesData.votes_by_revision, - wait_time: waitTimeDisplay, - hasVoted: userVotes.has(model.name), - }; - }) - ); - - // Sort models by vote score in descending order - const sortedModels = modelsWithVotes.sort((a, b) => b.votes - a.votes); - - setPendingModels(sortedModels); - } catch (err) { - setError(err.message); - } finally { - setLoadingModels(false); - } - }; - - fetchModels(); - }, [userVotes]); - - const handleVote = async (modelName) => { - if (!isAuthenticated) return; - - try { - // Disable the button immediately by adding the model to userVotes - setUserVotes((prev) => { - const newSet = new Set([...prev, modelName]); - // Save to localStorage - if (user) { - const localVotes = JSON.parse( - localStorage.getItem(`votes_${user.username}`) || "[]" - ); - if (!localVotes.includes(modelName)) { - localVotes.push(modelName); - localStorage.setItem( - 
`votes_${user.username}`, - JSON.stringify(localVotes) - ); - } - } - return newSet; - }); - - // Split modelName into provider and model - const [provider, model] = modelName.split("/"); - - const response = await fetch( - `/api/votes/${modelName}?vote_type=up&user_id=${user.username}`, - { - method: "POST", - headers: { - "Content-Type": "application/json", - }, - } - ); - - if (!response.ok) { - // Si le vote échoue, on retire le vote du localStorage et du state - setUserVotes((prev) => { - const newSet = new Set(prev); - newSet.delete(modelName); - if (user) { - const localVotes = JSON.parse( - localStorage.getItem(`votes_${user.username}`) || "[]" - ); - const updatedVotes = localVotes.filter( - (vote) => vote !== modelName - ); - localStorage.setItem( - `votes_${user.username}`, - JSON.stringify(updatedVotes) - ); - } - return newSet; - }); - throw new Error("Failed to submit vote"); - } - - // Refresh votes for this model - const votesResponse = await fetch( - `/api/votes/model/${provider}/${model}` - ); - const votesData = await votesResponse.json(); - - // Calculate total vote score from votes_by_revision - const totalScore = Object.values( - votesData.votes_by_revision || {} - ).reduce((a, b) => a + b, 0); - - // Update model and resort the list - setPendingModels((models) => { - const updatedModels = models.map((model) => - model.name === modelName - ? { - ...model, - votes: totalScore, - votes_by_revision: votesData.votes_by_revision, - } - : model - ); - return updatedModels.sort((a, b) => b.votes - a.votes); - }); - } catch (err) { - setError(err.message); - } - }; - - if (loading) { - return ( - - - - ); - } - - return ( - - - Help us prioritize which - models to evaluate next - - } - /> - - {error && ( - - {error} - - )} - - {/* Auth Status */} - {/* - {isAuthenticated ? ( - - - - - Connected as {user?.username} - - - - - - - ) : ( - - - Login to Vote - - - You need to be logged in with your Hugging Face account to vote - for models - - - - )} - */} - - - {/* Models List */} - - {/* Header - Always visible */} - - theme.palette.mode === "dark" - ? alpha(theme.palette.divider, 0.1) - : "grey.200", - bgcolor: (theme) => - theme.palette.mode === "dark" - ? alpha(theme.palette.background.paper, 0.5) - : "grey.50", - }} - > - - Models Pending Evaluation - - - - {/* Table Header */} - - - - Model - - - - - Votes - - - - - Priority - - - - - {/* Content */} - {loadingModels ? ( - - - - ) : pendingModels.length === 0 && !loadingModels ? ( - - ) : ( - - {pendingModels.map((model, index) => { - const isTopThree = index < 3; - return ( - - {index > 0 && } - - {/* Left side - Model info */} - - - {/* Model name and link */} - - - {model.name} - - - - - - {/* Metadata row */} - - - - - {model.wait_time} - - - - - - {model.submitter} - - - - - - - {/* Vote Column */} - - - - - - + - - - {model.votes > 999 ? "999" : model.votes} - - - - votes - - - - - - - {/* Priority Column */} - - - {isTopThree && ( - - HIGH - - )} - - #{index + 1} - - - } - size="medium" - variant={isTopThree ? "filled" : "outlined"} - sx={{ - height: 36, - minWidth: "100px", - bgcolor: isTopThree - ? (theme) => alpha(theme.palette.primary.main, 0.1) - : "transparent", - borderColor: isTopThree ? 
"primary.main" : "grey.300", - borderWidth: 2, - "& .MuiChip-label": { - px: 2, - fontSize: "0.95rem", - }, - }} - /> - - - - ); - })} - - )} - - - ); -} - -export default VoteModelPage; diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..8121475b780942f6009177dba13ff1270a4c74af --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,49 @@ +[project] +name = "open-llm-leaderboard" +version = "2.0" +description = "" +authors = [] +readme = "README.md" +requires-python = "==3.12.1" + +dependencies = [ + "apscheduler>=3.10.4", + "black>=24.8.0", + "click>=8.1.7", + "datasets>=3.0.0", + "huggingface-hub>=0.24.7", + "pandas>=2.2.2", + "python-dateutil>=2.9.0", + "sentencepiece>=0.2.0", + "transformers==4.44.2", + "tokenizers>=0.19.0", + "gradio-space-ci @ git+https://huggingface.co/spaces/Wauplin/gradio-space-ci@0.2.3", + "isort>=5.13.2", + "ruff>=0.6.4", + "gradio-leaderboard==0.0.11", + "gradio[oauth]==4.44.0", + "schedule>=1.2.2", + "pigar>=2.1.6", +] + +[tool.ruff] +line-length = 120 +target-version = "py312" +include = ["*.py", "*.pyi", "**/pyproject.toml", "*.ipynb"] +ignore=["I","EM","FBT","TRY003","S101","D101","D102","D103","D104","D105","G004","D107","FA102"] +fixable=["ALL"] +select=["ALL"] + + [tool.ruff.lint] +select = ["E", "F"] +fixable = ["ALL"] +ignore = ["E501"] # line too long (black is taking care of this) + + [tool.isort] +profile = "black" + +[tool.black] +line-length = 119 + +[tool.hatch.metadata] +allow-direct-references = true diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..6a7cb702ad2bcae082554af261f320f2936a5905 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,17 @@ +APScheduler==3.10.4 +black==24.8.0 +click==8.1.7 +datasets==3.0.0 +huggingface-hub>=0.24.7 +pandas==2.2.2 +python-dateutil==2.9.0 +sentencepiece==0.2.0 +transformers==4.44.2 +tokenizers>=0.19.0 +gradio-space-ci @ git+https://huggingface.co/spaces/Wauplin/gradio-space-ci@0.2.3 # CI !!! +isort==5.13.2 +ruff===0.6.4 +gradio==4.44.0 +gradio[oauth] +gradio_leaderboard==0.0.11 +schedule == 1.2.2 \ No newline at end of file diff --git a/src/display/about.py b/src/display/about.py new file mode 100644 index 0000000000000000000000000000000000000000..89aaeb70400dc590285bc3664ccffcd293299ac2 --- /dev/null +++ b/src/display/about.py @@ -0,0 +1,332 @@ +from src.display.utils import ModelType + +TITLE = """
<h1 align="center" id="space-title">🤗 Open LLM Leaderboard</h1>
    Eleuther AI Language Model Evaluation Harness , a unified framework to test generative language models on a large number of different evaluation tasks. + +- AI2 Reasoning Challenge (25-shot) - a set of grade-school science questions. +- HellaSwag (10-shot) - a test of commonsense inference, which is easy for humans (~95%) but challenging for SOTA models. +- MMLU (5-shot) - a test to measure a text model's multitask accuracy. The test covers 57 tasks including elementary mathematics, US history, computer science, law, and more. +- TruthfulQA (0-shot) - a test to measure a model's propensity to reproduce falsehoods commonly found online. Note: TruthfulQA is technically a 6-shot task in the Harness because each example is prepended with 6 Q/A pairs, even in the 0-shot setting. +- Winogrande (5-shot) - an adversarial and difficult Winograd benchmark at scale, for commonsense reasoning. +- GSM8k (5-shot) - diverse grade school math word problems to measure a model's ability to solve multi-step mathematical reasoning problems. + +For all these evaluations, a higher score is a better score. +We chose these benchmarks as they test a variety of reasoning and general knowledge across a wide variety of fields in 0-shot and few-shot settings. + +### Results +You can find: +- detailed numerical results in the `results` Hugging Face dataset: https://huggingface.co/datasets/open-llm-leaderboard/results +- details on the input/outputs for the models in the `details` of each model, which you can access by clicking the 📄 emoji after the model name +- community queries and running status in the `requests` Hugging Face dataset: https://huggingface.co/datasets/open-llm-leaderboard/requests + +If a model's name contains "Flagged", this indicates it has been flagged by the community, and should probably be ignored! Clicking the link will redirect you to the discussion about the model. + +--------------------------- + +## REPRODUCIBILITY +To reproduce our results, here are the commands you can run, using [this version](https://github.com/EleutherAI/lm-evaluation-harness/tree/b281b0921b636bc36ad05c0b0b0763bd6dd43463) of the Eleuther AI Harness: +`python main.py --model=hf-causal-experimental --model_args="pretrained=,use_accelerate=True,revision="` +` --tasks= --num_fewshot= --batch_size=1 --output_path=` + +``` +python main.py --model=hf-causal-experimental \ + --model_args="pretrained=,use_accelerate=True,revision=" \ + --tasks= \ + --num_fewshot= \ + --batch_size=1 \ + --output_path= +``` + +**Note:** We evaluate all models on a single node of 8 H100s, so the global batch size is 8 for each evaluation. If you don't use parallelism, adapt your batch size to fit. 
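For instance, a fully specified single-task run (the model name, revision, and output path here are illustrative placeholders, not an official reference command) might look like:

```
python main.py --model=hf-causal-experimental \
    --model_args="pretrained=mistralai/Mistral-7B-v0.1,use_accelerate=True,revision=main" \
    --tasks=arc_challenge \
    --num_fewshot=25 \
    --batch_size=1 \
    --output_path=./eval-results/mistral-7b-arc
```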
+*You can expect results to vary slightly for different batch sizes because of padding.* + +The tasks and few shots parameters are: +- ARC: 25-shot, *arc-challenge* (`acc_norm`) +- HellaSwag: 10-shot, *hellaswag* (`acc_norm`) +- TruthfulQA: 0-shot, *truthfulqa-mc* (`mc2`) +- MMLU: 5-shot, *hendrycksTest-abstract_algebra,hendrycksTest-anatomy,hendrycksTest-astronomy,hendrycksTest-business_ethics,hendrycksTest-clinical_knowledge,hendrycksTest-college_biology,hendrycksTest-college_chemistry,hendrycksTest-college_computer_science,hendrycksTest-college_mathematics,hendrycksTest-college_medicine,hendrycksTest-college_physics,hendrycksTest-computer_security,hendrycksTest-conceptual_physics,hendrycksTest-econometrics,hendrycksTest-electrical_engineering,hendrycksTest-elementary_mathematics,hendrycksTest-formal_logic,hendrycksTest-global_facts,hendrycksTest-high_school_biology,hendrycksTest-high_school_chemistry,hendrycksTest-high_school_computer_science,hendrycksTest-high_school_european_history,hendrycksTest-high_school_geography,hendrycksTest-high_school_government_and_politics,hendrycksTest-high_school_macroeconomics,hendrycksTest-high_school_mathematics,hendrycksTest-high_school_microeconomics,hendrycksTest-high_school_physics,hendrycksTest-high_school_psychology,hendrycksTest-high_school_statistics,hendrycksTest-high_school_us_history,hendrycksTest-high_school_world_history,hendrycksTest-human_aging,hendrycksTest-human_sexuality,hendrycksTest-international_law,hendrycksTest-jurisprudence,hendrycksTest-logical_fallacies,hendrycksTest-machine_learning,hendrycksTest-management,hendrycksTest-marketing,hendrycksTest-medical_genetics,hendrycksTest-miscellaneous,hendrycksTest-moral_disputes,hendrycksTest-moral_scenarios,hendrycksTest-nutrition,hendrycksTest-philosophy,hendrycksTest-prehistory,hendrycksTest-professional_accounting,hendrycksTest-professional_law,hendrycksTest-professional_medicine,hendrycksTest-professional_psychology,hendrycksTest-public_relations,hendrycksTest-security_studies,hendrycksTest-sociology,hendrycksTest-us_foreign_policy,hendrycksTest-virology,hendrycksTest-world_religions* (average of all the results `acc`) +- Winogrande: 5-shot, *winogrande* (`acc`) +- GSM8k: 5-shot, *gsm8k* (`acc`) + +Side note on the baseline scores: +- for log-likelihood evaluation, we select the random baseline +- for GSM8K, we select the score obtained in the paper after finetuning a 6B model on the full GSM8K training set for 50 epochs + +--------------------------- + +## RESOURCES + +### Quantization +To get more information about quantization, see: +- 8 bits: [blog post](https://huggingface.co/blog/hf-bitsandbytes-integration), [paper](https://arxiv.org/abs/2208.07339) +- 4 bits: [blog post](https://huggingface.co/blog/4bit-transformers-bitsandbytes), [paper](https://arxiv.org/abs/2305.14314) + +### Useful links +- [Community resources](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard/discussions/174) +- [Collection of best models](https://huggingface.co/collections/open-llm-leaderboard/llm-leaderboard-best-models-652d6c7965a4619fb5c27a03) + +### Other cool leaderboards: +- [LLM safety](https://huggingface.co/spaces/AI-Secure/llm-trustworthy-leaderboard) +- [LLM performance](https://huggingface.co/spaces/optimum/llm-perf-leaderboard) + + +""" + +FAQ_TEXT = """ + +## SUBMISSIONS +My model requires `trust_remote_code=True`, can I submit it? 
+- *We only support models that have been integrated into a stable version of the `transformers` library for automatic submission, as we don't want to run possibly unsafe code on our cluster.* + +What about models of type X? +- *We only support models that have been integrated into a stable version of the `transformers` library for automatic submission.* + +How can I follow when my model is launched? +- *You can look for its request file [here](https://huggingface.co/datasets/open-llm-leaderboard/requests) and follow the status evolution, or directly in the queues above the submit form.* + +My model disappeared from all the queues, what happened? +- *A model disappearing from all the queues usually means that there has been a failure. You can check if that is the case by looking for your model [here](https://huggingface.co/datasets/open-llm-leaderboard/requests).* + +What causes an evaluation failure? +- *Most of the failures we get come from problems in the submissions (corrupted files, config problems, wrong parameters selected for eval ...), so we'll be grateful if you first make sure you have followed the steps in `About`. However, from time to time, we have failures on our side (hardware/node failures, problems with an update of our backend, connectivity problems ending up in the results not being saved, ...).* + +How can I report an evaluation failure? +- *As we store the logs for all models, feel free to create an issue, **where you link to the requests file of your model** (look for it [here](https://huggingface.co/datasets/open-llm-leaderboard/requests/tree/main)), so we can investigate! If the model failed due to a problem on our side, we'll relaunch it right away!* +*Note: Please do not re-upload your model under a different name, it will not help* + +--------------------------- + +## RESULTS +What kind of information can I find? +- *Let's imagine you are interested in the Yi-34B results. You have access to 3 different information categories:* + - *The [request file](https://huggingface.co/datasets/open-llm-leaderboard/requests/blob/main/01-ai/Yi-34B_eval_request_False_bfloat16_Original.json): it gives you information about the status of the evaluation* + - *The [aggregated results folder](https://huggingface.co/datasets/open-llm-leaderboard/results/tree/main/01-ai/Yi-34B): it gives you aggregated scores, per experimental run* + - *The [details dataset](https://huggingface.co/datasets/open-llm-leaderboard/details_01-ai__Yi-34B/tree/main): it gives you the full details (scores and examples for each task and a given model)* + + +Why do models appear several times in the leaderboard? +- *We run evaluations with user-selected precision and model commit. Sometimes, users submit specific models at different commits and at different precisions (for example, in float16 and 4bit to see how quantization affects performance). You should be able to verify this by displaying the `precision` and `model sha` columns in the display. If, however, you see models appearing several times with the same precision and hash commit, this is not normal.* + +What is this concept of "flagging"? +- *This mechanism allows users to report models that have unfair performance on the leaderboard. This contains several categories: exceedingly good results on the leaderboard because the model was (maybe accidentally) trained on the evaluation data, models that are copies of other models not attributed properly, etc.* + +My model has been flagged improperly, what can I do? 
+- *Every flagged model has a discussion associated with it - feel free to plead your case there, and we'll see what to do together with the community.* + +--------------------------- + +## HOW TO SEARCH FOR A MODEL +Search for models in the leaderboard by: +1. Name, e.g., *model_name* +2. Multiple names, separated by `;`, e.g., *model_name1;model_name2* +3. License, prefix with `Hub License:...`, e.g., *Hub License: MIT* +4. Combination of name and license, order is irrelevant, e.g., *model_name; Hub License: cc-by-sa-4.0* + +--------------------------- + +## EDITING SUBMISSIONS +I upgraded my model and want to re-submit, how can I do that? +- *Please open an issue with the precise name of your model, and we'll remove your model from the leaderboard so you can resubmit. You can also resubmit directly with the new commit hash!* + +I need to rename my model, how can I do that? +- *You can use @Weyaxi 's [super cool tool](https://huggingface.co/spaces/Weyaxi/open-llm-leaderboard-renamer) to request model name changes, then open a discussion where you link to the created pull request, and we'll check them and merge them as needed.* + +--------------------------- + +## OTHER +Why do you differentiate between pretrained, continuously pretrained, fine-tuned, merges, etc? +- *These different models do not play in the same categories, and therefore need to be separated for fair comparison. Base pretrained models are the most interesting for the community, as they are usually good models to fine-tune later on - any jump in performance from a pretrained model represents a true improvement on the SOTA. +Fine-tuned and IFT/RLHF/chat models usually have better performance, but the latter might be more sensitive to system prompts, which we do not cover at the moment in the Open LLM Leaderboard. +Merges and moerges have artificially inflated performance on test sets, which is not always explainable, and does not always apply to real-world situations.* + +What should I use the leaderboard for? +- *We recommend using the leaderboard for 3 use cases: 1) getting an idea of the state of open pretrained models, by looking only at the ranks and score of this category; 2) experimenting with different fine-tuning methods, datasets, quantization techniques, etc, and comparing their score in a reproducible setup, and 3) checking the performance of a model of interest to you, wrt to other models of its category.* + +Why don't you display closed-source model scores? +- *This is a leaderboard for Open models, both for philosophical reasons (openness is cool) and for practical reasons: we want to ensure that the results we display are accurate and reproducible, but 1) commercial closed models can change their API thus rendering any scoring at a given time incorrect 2) we re-run everything on our cluster to ensure all models are run on the same setup and you can't do that for these models.* + +I have an issue with accessing the leaderboard through the Gradio API +- *Since this is not the recommended way to access the leaderboard, we won't provide support for this, but you can look at tools provided by the community for inspiration!* + +I have another problem, help! +- *Please open an issue in the discussion tab, and we'll do our best to help you in a timely manner :) * +""" + + +EVALUATION_QUEUE_TEXT = f""" +# Evaluation Queue for the 🤗 Open LLM Leaderboard + +Models added here will be automatically evaluated on the 🤗 cluster. 
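If you prefer to track a submission programmatically rather than through the queue tables, a minimal sketch along these lines should work (the model path is a hypothetical placeholder; request files follow the `<org>/<model>_eval_request_*.json` naming used in the requests dataset):

```python
import json

from huggingface_hub import hf_hub_download

# Hypothetical example: download the request file for "my-org/my-model"
# from the public requests dataset and inspect its current status.
path = hf_hub_download(
    repo_id="open-llm-leaderboard/requests",
    filename="my-org/my-model_eval_request_False_bfloat16_Original.json",
    repo_type="dataset",
)
with open(path) as f:
    print(json.load(f)["status"])  # e.g. PENDING, RUNNING, FINISHED
```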
+ +> **Important:** Don't forget to read the [FAQ](https://huggingface.co/docs/leaderboards/open_llm_leaderboard/faq) and [documentation](https://huggingface.co/docs/leaderboards/open_llm_leaderboard/about) for more information! 📄 + +## Submission Disclaimer +**By submitting a model, you acknowledge that:** +- We store information about who submitted each model in [Requests dataset](https://huggingface.co/datasets/open-llm-leaderboard/requests). +- This practice helps maintain the integrity of our leaderboard, prevent spam, and ensure responsible submissions. +- Your submission will be visible to the community and you may be contacted regarding your model. +- Please submit carefully and responsibly 💛 + +## First Steps Before Submitting a Model + +### 1. Ensure Your Model Loads with AutoClasses +Verify that you can load your model and tokenizer using AutoClasses: + +```python +from transformers import AutoConfig, AutoModel, AutoTokenizer +config = AutoConfig.from_pretrained("your model name", revision=revision) +model = AutoModel.from_pretrained("your model name", revision=revision) +tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision) +``` +Note: +- If this step fails, debug your model before submitting. +- Ensure your model is public. +- We are working on adding support for models requiring `use_remote_code=True`. + +### 2. Convert Weights to Safetensors +[Safetensors](https://huggingface.co/docs/safetensors/index) is a new format for storing weights which is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`! + +### 3. Verify Your Model Open License +This is a leaderboard for Open LLMs, and we'd love for as many people as possible to know they can use your model 🤗 + +### 4. Complete Your Model Card +When we add extra information about models to the leaderboard, it will be automatically taken from the model card + +### 5. Select Correct Precision +Choose the right precision to avoid evaluation errors: +- Not all models convert properly from float16 to bfloat16. +- Incorrect precision can cause issues (e.g., loading a bf16 model in fp16 may generate NaNs). + +> **Important:** When submitting, git branches and tags will be strictly tied to the specific commit present at the time of submission to ensure revision consistency. 
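Because submissions are pinned this way, you can check in advance exactly which commit will be evaluated. A small sketch, assuming a hypothetical repo id:

```python
from huggingface_hub import HfApi

# Resolve a branch or tag to the commit SHA that a submission
# made right now would be tied to.
info = HfApi().model_info(repo_id="my-org/my-model", revision="main")
print(info.sha)
```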
+ +## Model types +{icons} +""" + +CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results" +CITATION_BUTTON_TEXT = r""" +@misc{open-llm-leaderboard-v2, + author = {Clémentine Fourrier and Nathan Habib and Alina Lozovskaya and Konrad Szafer and Thomas Wolf}, + title = {Open LLM Leaderboard v2}, + year = {2024}, + publisher = {Hugging Face}, + howpublished = "\url{https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard}", +} + +@software{eval-harness, + author = {Gao, Leo and + Tow, Jonathan and + Biderman, Stella and + Black, Sid and + DiPofi, Anthony and + Foster, Charles and + Golding, Laurence and + Hsu, Jeffrey and + McDonell, Kyle and + Muennighoff, Niklas and + Phang, Jason and + Reynolds, Laria and + Tang, Eric and + Thite, Anish and + Wang, Ben and + Wang, Kevin and + Zou, Andy}, + title = {A framework for few-shot language model evaluation}, + month = sep, + year = 2021, + publisher = {Zenodo}, + version = {v0.0.1}, + doi = {10.5281/zenodo.5371628}, + url = {https://doi.org/10.5281/zenodo.5371628}, +} + +@misc{zhou2023instructionfollowingevaluationlargelanguage, + title={Instruction-Following Evaluation for Large Language Models}, + author={Jeffrey Zhou and Tianjian Lu and Swaroop Mishra and Siddhartha Brahma and Sujoy Basu and Yi Luan and Denny Zhou and Le Hou}, + year={2023}, + eprint={2311.07911}, + archivePrefix={arXiv}, + primaryClass={cs.CL}, + url={https://arxiv.org/abs/2311.07911}, +} + +@misc{suzgun2022challengingbigbenchtaskschainofthought, + title={Challenging BIG-Bench Tasks and Whether Chain-of-Thought Can Solve Them}, + author={Mirac Suzgun and Nathan Scales and Nathanael Schärli and Sebastian Gehrmann and Yi Tay and Hyung Won Chung and Aakanksha Chowdhery and Quoc V. Le and Ed H. Chi and Denny Zhou and Jason Wei}, + year={2022}, + eprint={2210.09261}, + archivePrefix={arXiv}, + primaryClass={cs.CL}, + url={https://arxiv.org/abs/2210.09261}, +} + +@misc{hendrycks2021measuringmathematicalproblemsolving, + title={Measuring Mathematical Problem Solving With the MATH Dataset}, + author={Dan Hendrycks and Collin Burns and Saurav Kadavath and Akul Arora and Steven Basart and Eric Tang and Dawn Song and Jacob Steinhardt}, + year={2021}, + eprint={2103.03874}, + archivePrefix={arXiv}, + primaryClass={cs.LG}, + url={https://arxiv.org/abs/2103.03874}, +} + +@misc{rein2023gpqagraduatelevelgoogleproofqa, + title={GPQA: A Graduate-Level Google-Proof Q&A Benchmark}, + author={David Rein and Betty Li Hou and Asa Cooper Stickland and Jackson Petty and Richard Yuanzhe Pang and Julien Dirani and Julian Michael and Samuel R. 
Bowman}, + year={2023}, + eprint={2311.12022}, + archivePrefix={arXiv}, + primaryClass={cs.AI}, + url={https://arxiv.org/abs/2311.12022}, +} + +@misc{sprague2024musrtestinglimitschainofthought, + title={MuSR: Testing the Limits of Chain-of-thought with Multistep Soft Reasoning}, + author={Zayne Sprague and Xi Ye and Kaj Bostrom and Swarat Chaudhuri and Greg Durrett}, + year={2024}, + eprint={2310.16049}, + archivePrefix={arXiv}, + primaryClass={cs.CL}, + url={https://arxiv.org/abs/2310.16049}, +} + +@misc{wang2024mmluprorobustchallengingmultitask, + title={MMLU-Pro: A More Robust and Challenging Multi-Task Language Understanding Benchmark}, + author={Yubo Wang and Xueguang Ma and Ge Zhang and Yuansheng Ni and Abhranil Chandra and Shiguang Guo and Weiming Ren and Aaran Arulraj and Xuan He and Ziyan Jiang and Tianle Li and Max Ku and Kai Wang and Alex Zhuang and Rongqi Fan and Xiang Yue and Wenhu Chen}, + year={2024}, + eprint={2406.01574}, + archivePrefix={arXiv}, + primaryClass={cs.CL}, + url={https://arxiv.org/abs/2406.01574}, +} + +@misc{open-llm-leaderboard-v1, + author = {Edward Beeching and Clémentine Fourrier and Nathan Habib and Sheon Han and Nathan Lambert and Nazneen Rajani and Omar Sanseviero and Lewis Tunstall and Thomas Wolf}, + title = {Open LLM Leaderboard (2023-2024)}, + year = {2023}, + publisher = {Hugging Face}, + howpublished = "\url{https://huggingface.co/spaces/open-llm-leaderboard-old/open_llm_leaderboard}" +} + +""" diff --git a/src/display/css_html_js.py b/src/display/css_html_js.py new file mode 100644 index 0000000000000000000000000000000000000000..780f0c18de4b07a2a0f9765ab6d7e7e79363b28a --- /dev/null +++ b/src/display/css_html_js.py @@ -0,0 +1,115 @@ +custom_css = """ +/* Limit the width of the first AutoEvalColumn so that names don't expand too much */ +table td:first-child, +table th:first-child { + max-width: 400px; + overflow: auto; + white-space: nowrap; +} + +/* Full width space */ +.gradio-container { + max-width: 95% !important; +} + +/* Text style and margins */ +.markdown-text { + font-size: 16px !important; +} + +#models-to-add-text { + font-size: 18px !important; +} + +#citation-button span { + font-size: 16px !important; +} + +#citation-button textarea { + font-size: 16px !important; +} + +#citation-button > label > button { + margin: 6px; + transform: scale(1.3); +} + +#search-bar-table-box > div:first-child { + background: none; + border: none; +} + +#search-bar { + padding: 0px; +} + +.tab-buttons button { + font-size: 20px; +} + +/* Filters style */ +#filter_type { + border: 0; + padding-left: 0; + padding-top: 0; +} +#filter_type label { + display: flex; +} +#filter_type label > span { + margin-top: var(--spacing-lg); + margin-right: 0.5em; +} +#filter_type label > .wrap { + width: 103px; +} +#filter_type label > .wrap .wrap-inner { + padding: 2px; +} +#filter_type label > .wrap .wrap-inner input { + width: 1px; +} +#filter-columns-type { + border: 0; + padding: 0.5; +} +#filter-columns-size { + border: 0; + padding: 0.5; +} +#box-filter > .form { + border: 0; +} + +/* Header styles */ +#header-title { + text-align: left; + display: inline-block; +} + +#header-row { + display: flex; + justify-content: space-between; + align-items: center; +} + +#header-row .gradio-html { + flex-grow: 1; +} + +#oauth-button { + height: auto; + min-width: max-content; + white-space: nowrap; + padding: 10px 20px; + border-radius: 4px; +} +""" + +get_window_url_params = """ + function(url_params) { + const params = new URLSearchParams(window.location.search); + 
url_params = Object.fromEntries(params); + return url_params; + } + """ \ No newline at end of file diff --git a/src/display/formatting.py b/src/display/formatting.py new file mode 100644 index 0000000000000000000000000000000000000000..86a8690d9352f3cefb8c03a19dcd61171dc93efc --- /dev/null +++ b/src/display/formatting.py @@ -0,0 +1,36 @@ +from huggingface_hub import HfApi + +API = HfApi() + + +def model_hyperlink(link, model_name): + return f'<a target="_blank" href="{link}">{model_name}</a>' + + +def make_clickable_model(model_name): + link = f"https://huggingface.co/{model_name}" + + details_model_name = model_name.replace("/", "__") + details_link = f"https://huggingface.co/datasets/open-llm-leaderboard/{details_model_name}-details" + + return model_hyperlink(link, model_name) + " " + model_hyperlink(details_link, "📑") + + +def styled_error(error): + return f"<p style='color: red; font-size: 20px; text-align: center;'>{error}</p>" + + +def styled_warning(warn): + return f"<p style='color: orange; font-size: 20px; text-align: center;'>{warn}</p>" + + +def styled_message(message): + return f"<p style='color: green; font-size: 20px; text-align: center;'>{message}</p>
    " + + +def has_no_nan_values(df, columns): + return df[columns].notna().all(axis=1) + + +def has_nan_values(df, columns): + return df[columns].isna().any(axis=1) diff --git a/src/display/utils.py b/src/display/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..bea63472f7759dd0cf9cc8599f0dd12ad8e1dd33 --- /dev/null +++ b/src/display/utils.py @@ -0,0 +1,268 @@ +from dataclasses import dataclass, make_dataclass +from datasets import load_dataset +from enum import Enum +import json +import logging +from datetime import datetime +import pandas as pd + +from src.envs import MAINTAINERS_HIGHLIGHT_REPO + +# Configure logging +logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s") + +dataset = load_dataset(MAINTAINERS_HIGHLIGHT_REPO) +curated_authors = dataset["train"][0]["CURATED_SET"] + +# Convert ISO 8601 dates to datetime objects for comparison +def parse_iso8601_datetime(date_str): + if date_str.endswith('Z'): + date_str = date_str[:-1] + '+00:00' + return datetime.fromisoformat(date_str) + +def parse_datetime(datetime_str): + formats = [ + "%Y-%m-%dT%H-%M-%S.%f", # Format with dashes + "%Y-%m-%dT%H:%M:%S.%f", # Standard format with colons + "%Y-%m-%dT%H %M %S.%f", # Spaces as separator + ] + + for fmt in formats: + try: + return datetime.strptime(datetime_str, fmt) + except ValueError: + continue + # in rare cases set unix start time for files with incorrect time (legacy files) + logging.error(f"No valid date format found for: {datetime_str}") + return datetime(1970, 1, 1) + + +def load_json_data(file_path): + """Safely load JSON data from a file.""" + try: + with open(file_path, "r") as file: + return json.load(file) + except json.JSONDecodeError: + print(f"Error reading JSON from {file_path}") + return None # Or raise an exception + + +def fields(raw_class): + return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"] + + +@dataclass +class Task: + benchmark: str + metric: str + col_name: str + + +class Tasks(Enum): + ifeval = Task("leaderboard_ifeval", "strict_acc,none", "IFEval") + ifeval_raw = Task("leaderboard_ifeval", "strict_acc,none", "IFEval Raw") + + bbh = Task("leaderboard_bbh", "acc_norm,none", "BBH") + bbh_raw = Task("leaderboard_bbh", "acc_norm,none", "BBH Raw") + + math = Task("leaderboard_math_hard", "exact_match,none", "MATH Lvl 5") + math_raw = Task("leaderboard_math_hard", "exact_match,none", "MATH Lvl 5 Raw") + + gpqa = Task("leaderboard_gpqa", "acc_norm,none", "GPQA") + gpqa_raw = Task("leaderboard_gpqa", "acc_norm,none", "GPQA Raw") + + musr = Task("leaderboard_musr", "acc_norm,none", "MUSR") + musr_raw = Task("leaderboard_musr", "acc_norm,none", "MUSR Raw") + + mmlu_pro = Task("leaderboard_mmlu_pro", "acc,none", "MMLU-PRO") + mmlu_pro_raw = Task("leaderboard_mmlu_pro", "acc,none", "MMLU-PRO Raw") + + +# These classes are for user facing column names, +# to avoid having to change them all around the code +# when a modif is needed +@dataclass(frozen=True) +class ColumnContent: + name: str + type: str + displayed_by_default: bool + hidden: bool = False + never_hidden: bool = False + dummy: bool = False + + +auto_eval_column_dict = [] +# Init +auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)]) +auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)]) +# Scores +auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", 
True)]) +for task in Tasks: + displayed_by_default = not task.name.endswith("_raw") + auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", displayed_by_default=displayed_by_default)]) +# Model information +auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)]) +auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)]) +auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)]) +auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False)]) +auto_eval_column_dict.append(["merged", ColumnContent, ColumnContent("Not_Merged", "bool", False)]) +auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False)]) +auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)]) +auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)]) +auto_eval_column_dict.append( + ["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False, hidden=True)] +) +auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)]) +auto_eval_column_dict.append(["not_flagged", ColumnContent, ColumnContent("Flagged", "bool", False, hidden=True)]) +auto_eval_column_dict.append(["moe", ColumnContent, ColumnContent("MoE", "bool", False, hidden=True)]) + +auto_eval_column_dict.append(["submission_date", ColumnContent, ColumnContent("Submission Date", "bool", False, hidden=False)]) +auto_eval_column_dict.append(["upload_to_hub", ColumnContent, ColumnContent("Upload To Hub Date", "bool", False, hidden=False)]) + +auto_eval_column_dict.append(["use_chat_template", ColumnContent, ColumnContent("Chat Template", "bool", False)]) +auto_eval_column_dict.append(["maintainers_highlight", ColumnContent, ColumnContent("Maintainer's Highlight", "bool", False, hidden=True)]) + +# fullname structure: / +auto_eval_column_dict.append(["fullname", ColumnContent, ColumnContent("fullname", "str", False, dummy=True)]) + +auto_eval_column_dict.append(["generation", ColumnContent, ColumnContent("Generation", "number", False)]) +auto_eval_column_dict.append(["base_model", ColumnContent, ColumnContent("Base Model", "str", False)]) + +# We use make dataclass to dynamically fill the scores from Tasks +AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True) + + +@dataclass(frozen=True) +class EvalQueueColumn: # Queue column + model_link = ColumnContent("model_link", "markdown", True) + model_name = ColumnContent("model_name", "str", True) + revision = ColumnContent("revision", "str", True) + #private = ColumnContent("private", "bool", True) # Should not be displayed + precision = ColumnContent("precision", "str", True) + #weight_type = ColumnContent("weight_type", "str", "Original") # Might be confusing, to think about + status = ColumnContent("status", "str", True) + + +# baseline_row = { +# AutoEvalColumn.model.name: "
Baseline
    ", +# AutoEvalColumn.revision.name: "N/A", +# AutoEvalColumn.precision.name: None, +# AutoEvalColumn.merged.name: False, +# AutoEvalColumn.average.name: 31.0, +# AutoEvalColumn.arc.name: 25.0, +# AutoEvalColumn.hellaswag.name: 25.0, +# AutoEvalColumn.mmlu.name: 25.0, +# AutoEvalColumn.truthfulqa.name: 25.0, +# AutoEvalColumn.winogrande.name: 50.0, +# AutoEvalColumn.gsm8k.name: 0.21, +# AutoEvalColumn.fullname.name: "baseline", +# AutoEvalColumn.model_type.name: "", +# AutoEvalColumn.not_flagged.name: False, +# } + +# Average ⬆️ human baseline is 0.897 (source: averaging human baselines below) +# ARC human baseline is 0.80 (source: https://lab42.global/arc/) +# HellaSwag human baseline is 0.95 (source: https://deepgram.com/learn/hellaswag-llm-benchmark-guide) +# MMLU human baseline is 0.898 (source: https://openreview.net/forum?id=d7KBjmI3GmQ) +# TruthfulQA human baseline is 0.94(source: https://arxiv.org/pdf/2109.07958.pdf) +# Winogrande: https://leaderboard.allenai.org/winogrande/submissions/public +# GSM8K: paper +# Define the human baselines +# human_baseline_row = { +# AutoEvalColumn.model.name: "
Human performance
    ", +# AutoEvalColumn.revision.name: "N/A", +# AutoEvalColumn.precision.name: None, +# AutoEvalColumn.average.name: 92.75, +# AutoEvalColumn.merged.name: False, +# AutoEvalColumn.arc.name: 80.0, +# AutoEvalColumn.hellaswag.name: 95.0, +# AutoEvalColumn.mmlu.name: 89.8, +# AutoEvalColumn.truthfulqa.name: 94.0, +# AutoEvalColumn.winogrande.name: 94.0, +# AutoEvalColumn.gsm8k.name: 100, +# AutoEvalColumn.fullname.name: "human_baseline", +# AutoEvalColumn.model_type.name: "", +# AutoEvalColumn.not_flagged.name: False, +# } + + +@dataclass +class ModelDetails: + name: str + symbol: str = "" # emoji, only for the model type + + +class ModelType(Enum): + PT = ModelDetails(name="🟢 pretrained", symbol="🟢") + CPT = ModelDetails(name="🟩 continuously pretrained", symbol="🟩") + FT = ModelDetails(name="🔶 fine-tuned on domain-specific datasets", symbol="🔶") + chat = ModelDetails(name="💬 chat models (RLHF, DPO, IFT, ...)", symbol="💬") + merges = ModelDetails(name="🤝 base merges and moerges", symbol="🤝") + Unknown = ModelDetails(name="❓ other", symbol="❓") + + def to_str(self, separator=" "): + return f"{self.value.symbol}{separator}{self.value.name}" + + @staticmethod + def from_str(m_type): + if any([k for k in m_type if k in ["fine-tuned","🔶", "finetuned"]]): + return ModelType.FT + if "continuously pretrained" in m_type or "🟩" in m_type: + return ModelType.CPT + if "pretrained" in m_type or "🟢" in m_type: + return ModelType.PT + if any([k in m_type for k in ["instruction-tuned", "RL-tuned", "chat", "🟦", "⭕", "💬"]]): + return ModelType.chat + if "merge" in m_type or "🤝" in m_type: + return ModelType.merges + return ModelType.Unknown + + +class WeightType(Enum): + Adapter = ModelDetails("Adapter") + Original = ModelDetails("Original") + Delta = ModelDetails("Delta") + + +class Precision(Enum): + float16 = ModelDetails("float16") + bfloat16 = ModelDetails("bfloat16") + qt_8bit = ModelDetails("8bit") + qt_4bit = ModelDetails("4bit") + qt_GPTQ = ModelDetails("GPTQ") + Unknown = ModelDetails("?") + + @staticmethod + def from_str(precision): + if precision in ["torch.float16", "float16"]: + return Precision.float16 + if precision in ["torch.bfloat16", "bfloat16"]: + return Precision.bfloat16 + if precision in ["8bit"]: + return Precision.qt_8bit + if precision in ["4bit"]: + return Precision.qt_4bit + if precision in ["GPTQ", "None"]: + return Precision.qt_GPTQ + return Precision.Unknown + + +# Column selection +COLS = [c.name for c in fields(AutoEvalColumn)] +TYPES = [c.type for c in fields(AutoEvalColumn)] + +EVAL_COLS = [c.name for c in fields(EvalQueueColumn)] +EVAL_TYPES = [c.type for c in fields(EvalQueueColumn)] + +BENCHMARK_COLS = [t.value.col_name for t in Tasks] + +NUMERIC_INTERVALS = { + "?": pd.Interval(-1, 0, closed="right"), + "~1.5": pd.Interval(0, 2, closed="right"), + "~3": pd.Interval(2, 4, closed="right"), + "~7": pd.Interval(4, 9, closed="right"), + "~13": pd.Interval(9, 20, closed="right"), + "~35": pd.Interval(20, 45, closed="right"), + "~60": pd.Interval(45, 70, closed="right"), + "70+": pd.Interval(70, 10000, closed="right"), +} diff --git a/src/envs.py b/src/envs.py new file mode 100644 index 0000000000000000000000000000000000000000..79cafa6698489dd430352932f3923814850e2a7d --- /dev/null +++ b/src/envs.py @@ -0,0 +1,33 @@ +import os +from huggingface_hub import HfApi + +# clone / pull the lmeh eval data +HF_TOKEN = os.environ.get("HF_TOKEN", None) + +REPO_ID = "open-llm-leaderboard/open_llm_leaderboard" +QUEUE_REPO = "open-llm-leaderboard/requests" +AGGREGATED_REPO = 
"open-llm-leaderboard/contents" +VOTES_REPO = "open-llm-leaderboard/votes" +MAINTAINERS_HIGHLIGHT_REPO = "open-llm-leaderboard/maintainers-highlight" + +HF_HOME = os.getenv("HF_HOME", ".") + +# Check HF_HOME write access +print(f"Initial HF_HOME set to: {HF_HOME}") + +if not os.access(HF_HOME, os.W_OK): + print(f"No write access to HF_HOME: {HF_HOME}. Resetting to current directory.") + HF_HOME = "." + os.environ["HF_HOME"] = HF_HOME +else: + print("Write access confirmed for HF_HOME") + +VOTES_PATH = os.path.join(HF_HOME, "model-votes") +EVAL_REQUESTS_PATH = os.path.join(HF_HOME, "eval-queue") + +# Rate limit variables +RATE_LIMIT_PERIOD = 7 +RATE_LIMIT_QUOTA = 5 +HAS_HIGHER_RATE_LIMIT = [] + +API = HfApi(token=HF_TOKEN) diff --git a/src/leaderboard/filter_models.py b/src/leaderboard/filter_models.py new file mode 100644 index 0000000000000000000000000000000000000000..2d3d396d324d9aa86de3a35befffc27e6170f8e0 --- /dev/null +++ b/src/leaderboard/filter_models.py @@ -0,0 +1,75 @@ +from src.display.formatting import model_hyperlink +from src.display.utils import AutoEvalColumn + + +# Models which have been flagged by users as being problematic for a reason or another +# (Model name to forum discussion link) +# None for the v2 so far! +FLAGGED_MODELS = {} + +# Models which have been requested by orgs to not be submitted on the leaderboard +DO_NOT_SUBMIT_MODELS = [ + "Voicelab/trurl-2-13b", # trained on MMLU + "TigerResearch/tigerbot-70b-chat", # per authors request + "TigerResearch/tigerbot-70b-chat-v2", # per authors request + "TigerResearch/tigerbot-70b-chat-v4-4k", # per authors request +] + + +def flag_models(leaderboard_data: list[dict]): + """Flags models based on external criteria or flagged status.""" + for model_data in leaderboard_data: + # Skip flagging if maintainers_highlight is True + if model_data.get(AutoEvalColumn.maintainers_highlight.name, False): + model_data[AutoEvalColumn.not_flagged.name] = True + continue + + # If a model is not flagged, use its "fullname" as a key + if model_data[AutoEvalColumn.not_flagged.name]: + flag_key = model_data[AutoEvalColumn.fullname.name] + else: + flag_key = None + + # Reverse the logic: Check for non-flagged models instead + if flag_key in FLAGGED_MODELS: + issue_num = FLAGGED_MODELS[flag_key].split("/")[-1] + issue_link = model_hyperlink( + FLAGGED_MODELS[flag_key], + f"See discussion #{issue_num}", + ) + model_data[AutoEvalColumn.model.name] = ( + f"{model_data[AutoEvalColumn.model.name]} has been flagged! {issue_link}" + ) + model_data[AutoEvalColumn.not_flagged.name] = False + else: + model_data[AutoEvalColumn.not_flagged.name] = True + + +def remove_forbidden_models(leaderboard_data: list[dict]): + """Removes models from the leaderboard based on the DO_NOT_SUBMIT list.""" + indices_to_remove = [] + for ix, model in enumerate(leaderboard_data): + if model[AutoEvalColumn.fullname.name] in DO_NOT_SUBMIT_MODELS: + indices_to_remove.append(ix) + + # Remove the models from the list + for ix in reversed(indices_to_remove): + leaderboard_data.pop(ix) + return leaderboard_data + +""" +def remove_forbidden_models(leaderboard_data): + #Removes models from the leaderboard based on the DO_NOT_SUBMIT list. 
+ indices_to_remove = [] + for ix, row in leaderboard_data.iterrows(): + if row[AutoEvalColumn.fullname.name] in DO_NOT_SUBMIT_MODELS: + indices_to_remove.append(ix) + + # Remove the models from the list + return leaderboard_data.drop(indices_to_remove) +""" + + +def filter_models_flags(leaderboard_data: list[dict]): + leaderboard_data = remove_forbidden_models(leaderboard_data) + flag_models(leaderboard_data) diff --git a/src/populate.py b/src/populate.py new file mode 100644 index 0000000000000000000000000000000000000000..41e5f8ce6ff2782071696d1b53648008584a5a65 --- /dev/null +++ b/src/populate.py @@ -0,0 +1,54 @@ +import pathlib +import pandas as pd +from datasets import Dataset +from src.display.formatting import has_no_nan_values, make_clickable_model +from src.display.utils import AutoEvalColumn, EvalQueueColumn +from src.leaderboard.filter_models import filter_models_flags +from src.display.utils import load_json_data + + +def _process_model_data(entry, model_name_key="model", revision_key="revision"): + """Enrich model data with clickable links and revisions.""" + entry[EvalQueueColumn.model_name.name] = entry.get(model_name_key, "") + entry[EvalQueueColumn.model_link.name] = make_clickable_model(entry.get(model_name_key, "")) + entry[EvalQueueColumn.revision.name] = entry.get(revision_key, "main") + return entry + + +def get_evaluation_queue_df(save_path, cols): + """Generate dataframes for pending, running, and finished evaluation entries.""" + save_path = pathlib.Path(save_path) + all_evals = [] + + for path in save_path.rglob("*.json"): + data = load_json_data(path) + if data: + all_evals.append(_process_model_data(data)) + + # Organizing data by status + status_map = { + "PENDING": ["PENDING", "RERUN"], + "RUNNING": ["RUNNING"], + "FINISHED": ["FINISHED", "PENDING_NEW_EVAL"], + } + status_dfs = {status: [] for status in status_map} + for eval_data in all_evals: + for status, extra_statuses in status_map.items(): + if eval_data["status"] in extra_statuses: + status_dfs[status].append(eval_data) + + return tuple(pd.DataFrame(status_dfs[status], columns=cols) for status in ["FINISHED", "RUNNING", "PENDING"]) + + +def get_leaderboard_df(leaderboard_dataset: Dataset, cols: list, benchmark_cols: list): + """Retrieve and process leaderboard data.""" + all_data_json = leaderboard_dataset.to_dict() + num_items = leaderboard_dataset.num_rows + all_data_json_list = [{k: all_data_json[k][ix] for k in all_data_json.keys()} for ix in range(num_items)] + filter_models_flags(all_data_json_list) + + df = pd.DataFrame.from_records(all_data_json_list) + df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False) + df = df[cols].round(decimals=2) + df = df[has_no_nan_values(df, benchmark_cols)] + return df \ No newline at end of file diff --git a/src/submission/check_validity.py b/src/submission/check_validity.py new file mode 100644 index 0000000000000000000000000000000000000000..0eeb01891dd19042136430b89897a24a02bf7489 --- /dev/null +++ b/src/submission/check_validity.py @@ -0,0 +1,209 @@ +import json +import os +import re +import logging +from collections import defaultdict +from datetime import datetime, timedelta, timezone + +import huggingface_hub +from huggingface_hub import ModelCard +from huggingface_hub.hf_api import ModelInfo, get_safetensors_metadata, parse_safetensors_file_metadata +from transformers import AutoConfig, AutoTokenizer + +from src.display.utils import parse_iso8601_datetime, curated_authors +from src.envs import HAS_HIGHER_RATE_LIMIT + + +# ht to @Wauplin, 
thank you for the snippet! +# See https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard/discussions/317 +def check_model_card(repo_id: str) -> tuple[bool, str]: + # Returns operation status, and error message + try: + card = ModelCard.load(repo_id) + except huggingface_hub.utils.EntryNotFoundError: + return False, "Please add a model card to your model to explain how you trained/fine-tuned it.", None + + # Enforce license metadata + if card.data.license is None and not ("license_name" in card.data and "license_link" in card.data): + return ( + False, + ( + "License not found. Please add a license to your model card using the `license` metadata or a" + " `license_name`/`license_link` pair." + ), + None, + ) + + # Enforce card content + if len(card.text) < 200: + return False, "Please add a description to your model card, it is too short.", None + + return True, "", card + + +def is_model_on_hub( + model_name: str, revision: str, token: str | None = None, trust_remote_code: bool = False, test_tokenizer: bool = False, +) -> tuple[bool, str, AutoConfig]: + try: + config = AutoConfig.from_pretrained( + model_name, revision=revision, trust_remote_code=trust_remote_code, token=token, force_download=True) + if test_tokenizer: + try: + AutoTokenizer.from_pretrained( + model_name, revision=revision, trust_remote_code=trust_remote_code, token=token, + ) + except ValueError as e: + return (False, f"uses a tokenizer which is not in a transformers release: {e}", None) + except Exception: + return ( + False, + "'s tokenizer cannot be loaded. Is your tokenizer class in a stable transformers release, and correctly configured?", + None, + ) + except Exception: + return ( + False, + "'s tokenizer cannot be loaded. Is your tokenizer class in a stable transformers release, and correctly configured?", + None, + ) + return True, None, config + + except ValueError: + return ( + False, + "needs to be launched with `trust_remote_code=True`. For safety reason, we do not allow these models to be automatically submitted to the leaderboard.", + None, + ) + + except Exception as e: + if "You are trying to access a gated repo." in str(e): + return True, "uses a gated model.", None + return False, f"was not found or misconfigured on the hub! Error raised was {e.args[0]}", None + + +def get_model_size(model_info: ModelInfo, precision: str, base_model: str| None) -> tuple[float | None, str]: + size_pattern = re.compile(r"(\d+\.)?\d+(b|m)") + safetensors = None + adapter_safetensors = None + # hack way to check that model is adapter + is_adapter = "adapter_config.json" in (s.rfilename for s in model_info.siblings) + + try: + if is_adapter: + if not base_model: + return None, "Adapter model submission detected. Please ensure the base model information is provided." 
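# For adapter submissions, fetch safetensors metadata for both the adapter
# file itself and the declared base model, so the size estimate below can
# account for base weights as well as the adapter's own parameters.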
+ + adapter_safetensors = parse_safetensors_file_metadata(model_info.id, "adapter_model.safetensors") + safetensors = get_safetensors_metadata(base_model) + else: + safetensors = get_safetensors_metadata(model_info.id) + except Exception as e: + logging.warning(f"Failed to get safetensors metadata for model {model_info.id}: {e!s}") + + if safetensors is not None: + model_size = sum(safetensors.parameter_count.values()) + if adapter_safetensors is not None: + model_size += sum(safetensors.parameter_count.values()) + model_size = round(model_size / 1e9, 3) + else: + try: + size_match = re.search(size_pattern, model_info.id.lower()) + if size_match: + model_size = size_match.group(0) + model_size = round(float(model_size[:-1]) if model_size[-1] == "b" else float(model_size[:-1]) / 1e3, 3) + else: + return None, "Unknown model size" + except AttributeError: + logging.warning(f"Unable to parse model size from ID: {model_info.id}") + return None, "Unknown model size" + + size_factor = 8 if (precision == "GPTQ" or "gptq" in model_info.id.lower()) else 1 + model_size = size_factor * model_size + + return model_size, "" + +def get_model_arch(model_info: ModelInfo): + return model_info.config.get("architectures", "Unknown") + + +def user_submission_permission(org_or_user, users_to_submission_dates, rate_limit_period, rate_limit_quota): + # No limit for curated authors + if org_or_user in curated_authors: + return True, "" + # Increase quota first if user has higher limits + if org_or_user in HAS_HIGHER_RATE_LIMIT: + rate_limit_quota *= 2 + + if org_or_user not in users_to_submission_dates: + return True, "" + + submission_dates = sorted(users_to_submission_dates[org_or_user]) + time_limit = datetime.now(timezone.utc) - timedelta(days=rate_limit_period) + + submissions_after_timelimit = [ + parse_iso8601_datetime(d) for d in submission_dates + if parse_iso8601_datetime(d) > time_limit + ] + + num_models_submitted_in_period = len(submissions_after_timelimit) + + # Use >= to correctly enforce the rate limit + if num_models_submitted_in_period >= rate_limit_quota: + error_msg = f"Organisation or user `{org_or_user}` already has {num_models_submitted_in_period} model requests submitted in the last {rate_limit_period} days.\n" + error_msg += "Please wait a couple of days before resubmitting, so that everybody can enjoy using the leaderboard 🤗" + return False, error_msg + + return True, "" + + +def already_submitted_models(requested_models_dir: str) -> set[str]: + depth = 1 + file_names = [] + users_to_submission_dates = defaultdict(list) + + for root, _, files in os.walk(requested_models_dir): + current_depth = root.count(os.sep) - requested_models_dir.count(os.sep) + if current_depth == depth: + for file in files: + if not file.endswith(".json"): + continue + with open(os.path.join(root, file), "r") as f: + info = json.load(f) + file_names.append(f"{info['model']}_{info['revision']}_{info['precision']}") + + # Select organisation + if info["model"].count("/") == 0 or "submitted_time" not in info: + continue + organisation, _ = info["model"].split("/") + users_to_submission_dates[organisation].append(info["submitted_time"]) + + return set(file_names), users_to_submission_dates + + +def get_model_tags(model_card, model: str): + is_merge_from_metadata = False + is_moe_from_metadata = False + + tags = [] + if model_card is None: + return tags + if model_card.data.tags: + is_merge_from_metadata = any( + [tag in model_card.data.tags for tag in ["merge", "moerge", "mergekit", "lazymergekit"]] + ) + 
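# Card metadata tags are only one signal: free text in the model card and
# the model name itself are also checked below for merge/MoE hints.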
is_moe_from_metadata = any([tag in model_card.data.tags for tag in ["moe", "moerge"]]) + + is_merge_from_model_card = any( + keyword in model_card.text.lower() for keyword in ["merged model", "merge model", "moerge"] + ) + if is_merge_from_model_card or is_merge_from_metadata: + tags.append("merge") + is_moe_from_model_card = any(keyword in model_card.text.lower() for keyword in ["moe", "mixtral"]) + # Hardcoding because of gating problem + if "Qwen/Qwen1.5-32B" in model: + is_moe_from_model_card = False + is_moe_from_name = "moe" in model.lower().replace("/", "-").replace("_", "-").split("-") + if is_moe_from_model_card or is_moe_from_name or is_moe_from_metadata: + tags.append("moe") + + return tags diff --git a/src/submission/submit.py b/src/submission/submit.py new file mode 100644 index 0000000000000000000000000000000000000000..8fad0900d65256b09295e5a6d95d543f129af5db --- /dev/null +++ b/src/submission/submit.py @@ -0,0 +1,220 @@ +import json +import os +import gradio as gr +from datetime import datetime, timezone + +from dataclasses import dataclass +from transformers import AutoConfig + +from src.display.formatting import styled_error, styled_message, styled_warning +from src.envs import ( + API, + EVAL_REQUESTS_PATH, + HF_TOKEN, + QUEUE_REPO, + RATE_LIMIT_PERIOD, + RATE_LIMIT_QUOTA, + VOTES_REPO, + VOTES_PATH, +) +from src.leaderboard.filter_models import DO_NOT_SUBMIT_MODELS +from src.submission.check_validity import ( + already_submitted_models, + check_model_card, + get_model_size, + is_model_on_hub, + user_submission_permission, +) + +from src.voting.vote_system import VoteManager + +REQUESTED_MODELS = None +USERS_TO_SUBMISSION_DATES = None + +vote_manager = VoteManager(VOTES_PATH, EVAL_REQUESTS_PATH, VOTES_REPO) + +@dataclass +class ModelSizeChecker: + model: str + precision: str + model_size_in_b: float + + def get_precision_factor(self): + if self.precision in ["float16", "bfloat16"]: + return 1 + elif self.precision == "8bit": + return 2 + elif self.precision == "4bit": + return 4 + elif self.precision == "GPTQ": + config = AutoConfig.from_pretrained(self.model) + num_bits = int(config.quantization_config["bits"]) + bits_to_precision_factor = {2: 8, 3: 6, 4: 4, 8: 2} + return bits_to_precision_factor.get(num_bits, 1) + else: + raise Exception(f"Unknown precision {self.precision}.") + + def can_evaluate(self): + precision_factor = self.get_precision_factor() + return self.model_size_in_b <= 140 * precision_factor + +def add_new_eval( + model: str, + base_model: str, + revision: str, + precision: str, + weight_type: str, + model_type: str, + use_chat_template: bool, + profile: gr.OAuthProfile | None, + requested_models: set[str] = None, + users_to_submission_dates: dict[str, list[str]] = None, +): + # Login required + if profile is None: + return styled_error("Hub Login Required") + + # Name of the actual user who sent the request + username = profile.username + + # Initialize the requested_models and users_to_submission_dates variables + # If the caller did not provide these values, fetch them from the EVAL_REQUESTS_PATH + if requested_models is None or users_to_submission_dates is None: + requested_models, users_to_submission_dates = already_submitted_models(EVAL_REQUESTS_PATH) + + org_or_user = "" + model_path = model + if "/" in model: + org_or_user = model.split("/")[0] + model_path = model.split("/")[1] + + precision = precision.split(" ")[0] + current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ") + + if model_type is None or model_type == "": + 
return styled_error("Please select a model type.") + + # Is the user rate limited? + if org_or_user != "": + user_can_submit, error_msg = user_submission_permission( + org_or_user, users_to_submission_dates, RATE_LIMIT_PERIOD, RATE_LIMIT_QUOTA + ) + if not user_can_submit: + return styled_error(error_msg) + + # Did the model authors forbid its submission to the leaderboard? + if model in DO_NOT_SUBMIT_MODELS or base_model in DO_NOT_SUBMIT_MODELS: + return styled_warning("Model authors have requested that their model be not submitted on the leaderboard.") + + # Does the model actually exist? + if revision == "": + revision = "main" + try: + model_info = API.model_info(repo_id=model, revision=revision) + except Exception as e: + return styled_error("Could not get your model information. Please fill it up properly.") + + model_key = f"{model}_{model_info.sha}_{precision}" + if model_key in requested_models: + return styled_error(f"The model '{model}' with revision '{model_info.sha}' and precision '{precision}' has already been submitted.") + + # Check model size early + model_size, error_text = get_model_size(model_info=model_info, precision=precision, base_model=base_model) + if model_size is None: + return styled_error(error_text) + + # First check: Absolute size limit for float16 and bfloat16 + if precision in ["float16", "bfloat16"] and model_size > 100: + return styled_error(f"Sadly, models larger than 100B parameters cannot be submitted in {precision} precision at this time. " + f"Your model size: {model_size:.2f}B parameters.") + + # Second check: Precision-adjusted size limit for 8bit, 4bit, and GPTQ + if precision in ["8bit", "4bit", "GPTQ"]: + size_checker = ModelSizeChecker(model=model, precision=precision, model_size_in_b=model_size) + + if not size_checker.can_evaluate(): + precision_factor = size_checker.get_precision_factor() + max_size = 140 * precision_factor + return styled_error(f"Sadly, models this big ({model_size:.2f}B parameters) cannot be evaluated automatically " + f"at the moment on our cluster. The maximum size for {precision} precision is {max_size:.2f}B parameters.") + + architecture = "?" + # Is the model on the hub? + if weight_type in ["Delta", "Adapter"]: + base_model_on_hub, error, _ = is_model_on_hub( + model_name=base_model, revision="main", token=HF_TOKEN, test_tokenizer=True + ) + if not base_model_on_hub: + return styled_error(f'Base model "{base_model}" {error}') + if not weight_type == "Adapter": + model_on_hub, error, model_config = is_model_on_hub(model_name=model, revision=model_info.sha, test_tokenizer=True) + if not model_on_hub or model_config is None: + return styled_error(f'Model "{model}" {error}') + if model_config is not None: + architectures = getattr(model_config, "architectures", None) + if architectures: + architecture = ";".join(architectures) + # Were the model card and license filled? 
+    try:
+        model_info.cardData["license"]
+    except Exception:
+        return styled_error("Please select a license for your model")
+
+    modelcard_OK, error_msg, model_card = check_model_card(model)
+    if not modelcard_OK:
+        return styled_error(error_msg)
+
+    # Seems good, creating the eval
+    print("Adding new eval")
+    eval_entry = {
+        "model": model,
+        "base_model": base_model,
+        "revision": model_info.sha,  # force to use the exact model commit
+        "precision": precision,
+        "params": model_size,
+        "architectures": architecture,
+        "weight_type": weight_type,
+        "status": "PENDING",
+        "submitted_time": current_time,
+        "model_type": model_type,
+        "job_id": -1,
+        "job_start_time": None,
+        "use_chat_template": use_chat_template,
+        "sender": username,
+    }
+
+    print("Creating eval file")
+    OUT_DIR = f"{EVAL_REQUESTS_PATH}/{org_or_user}"
+    os.makedirs(OUT_DIR, exist_ok=True)
+    out_path = f"{OUT_DIR}/{model_path}_eval_request_False_{precision}_{weight_type}.json"
+
+    with open(out_path, "w") as f:
+        f.write(json.dumps(eval_entry))
+
+    print("Uploading eval file")
+    print(eval_entry)
+    API.upload_file(
+        path_or_fileobj=out_path,
+        path_in_repo=out_path.split("eval-queue/")[1],
+        repo_id=QUEUE_REPO,
+        repo_type="dataset",
+        commit_message=f"Add {model} to eval queue",
+    )
+
+    # Remove the local file
+    os.remove(out_path)
+
+    # Always add a vote for the submitted model
+    vote_manager.add_vote(selected_model=model, pending_models_df=None, profile=profile)
+    print(f"Automatically added a vote for {model} submitted by {username}")
+
+    # Upload votes to the repository
+    vote_manager.upload_votes()
+
+    return styled_message(
+        "Your request and vote have been submitted to the evaluation queue!\nPlease wait for up to an hour for the model to show in the PENDING list."
+    )
\ No newline at end of file
diff --git a/src/tools/create_request_file.py b/src/tools/create_request_file.py
new file mode 100644
index 0000000000000000000000000000000000000000..c1aea64f3fdd25fa5ea087cf02709359c5b5892d
--- /dev/null
+++ b/src/tools/create_request_file.py
@@ -0,0 +1,92 @@
+import json
+import os
+import pprint
+from datetime import datetime, timezone
+
+import click
+from colorama import Fore
+from huggingface_hub import HfApi, snapshot_download
+
+from src.display.utils import ModelType, WeightType
+from src.submission.check_validity import get_model_size
+
+EVAL_REQUESTS_PATH = "eval-queue"
+QUEUE_REPO = "open-llm-leaderboard/requests"
+
+precisions = ("float16", "bfloat16", "8bit (LLM.int8)", "4bit (QLoRA / FP4)", "GPTQ")
+model_types = [e.name for e in ModelType]
+weight_types = [e.name for e in WeightType]
+
+
+def main():
+    api = HfApi()
+    current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+    snapshot_download(repo_id=QUEUE_REPO, revision="main", local_dir=EVAL_REQUESTS_PATH, repo_type="dataset")
+
+    model_name = click.prompt("Enter model name")
+    revision = click.prompt("Enter revision", default="main")
+    precision = click.prompt("Enter precision", default="float16", type=click.Choice(precisions))
+    model_type = click.prompt("Enter model type", type=click.Choice(model_types))
+    weight_type = click.prompt("Enter weight type", default="Original", type=click.Choice(weight_types))
+    base_model = click.prompt("Enter base model", default="")
+    status = click.prompt("Enter status", default="FINISHED")
+
+    try:
+        model_info = api.model_info(repo_id=model_name, revision=revision)
+    except Exception as e:
+        print(f"{Fore.RED}Could not find model info for {model_name} on the Hub\n{e}{Fore.RESET}")
+        return 1
+
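+    # NOTE: get_model_size returns a (size_in_b, error_text) tuple (see its
+    # usage in src/submission/submit.py), so it must be unpacked here as well.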
+    model_size, _ = get_model_size(model_info=model_info, precision=precision, base_model=base_model)
+
+    try:
+        license = model_info.cardData["license"]
+    except Exception:
+        license = "?"
+
+    eval_entry = {
+        "model": model_name,
+        "base_model": base_model,
+        "revision": model_info.sha,  # force to use the exact model commit
+        "private": False,
+        "precision": precision,
+        "weight_type": weight_type,
+        "status": status,
+        "submitted_time": current_time,
+        "model_type": model_type,
+        "likes": model_info.likes,
+        "params": model_size,
+        "license": license,
+    }
+
+    user_name = ""
+    model_path = model_name
+    if "/" in model_name:
+        user_name = model_name.split("/")[0]
+        model_path = model_name.split("/")[1]
+
+    pprint.pprint(eval_entry)
+
+    if click.confirm("Do you want to continue? This request file will be pushed to the hub"):
+        click.echo("continuing...")
+
+        out_dir = f"{EVAL_REQUESTS_PATH}/{user_name}"
+        os.makedirs(out_dir, exist_ok=True)
+        out_path = f"{out_dir}/{model_path}_eval_request_{False}_{precision}_{weight_type}.json"
+
+        with open(out_path, "w") as f:
+            f.write(json.dumps(eval_entry))
+
+        api.upload_file(
+            path_or_fileobj=out_path,
+            path_in_repo=out_path.split(f"{EVAL_REQUESTS_PATH}/")[1],
+            repo_id=QUEUE_REPO,
+            repo_type="dataset",
+            commit_message=f"Add {model_name} to eval queue",
+        )
+    else:
+        click.echo("aborting...")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/src/tools/model_backlinks.py b/src/tools/model_backlinks.py
new file mode 100644
index 0000000000000000000000000000000000000000..c1afa53b0549a15d7f384621e3c0655aad67f67e
--- /dev/null
+++ b/src/tools/model_backlinks.py
@@ -0,0 +1,1309 @@
+models = [
+    "uni-tianyan/Uni-TianYan",
+    "fangloveskari/ORCA_LLaMA_70B_QLoRA",
+    "garage-bAInd/Platypus2-70B-instruct",
+    "upstage/Llama-2-70b-instruct-v2",
+    "fangloveskari/Platypus_QLoRA_LLaMA_70b",
+    "yeontaek/llama-2-70B-ensemble-v5",
+    "TheBloke/Genz-70b-GPTQ",
+    "TheBloke/Platypus2-70B-Instruct-GPTQ",
+    "psmathur/model_007",
+    "yeontaek/llama-2-70B-ensemble-v4",
+    "psmathur/orca_mini_v3_70b",
+    "ehartford/Samantha-1.11-70b",
+    "MayaPH/GodziLLa2-70B",
+    "psmathur/model_007_v2",
+    "chargoddard/MelangeA-70b",
+    "ehartford/Samantha-1.1-70b",
+    "psmathur/model_009",
+    "upstage/Llama-2-70b-instruct",
+    "yeontaek/llama-2-70B-ensemble-v7",
+    "yeontaek/llama-2-70B-ensemble-v6",
+    "chargoddard/MelangeB-70b",
+    "yeontaek/llama-2-70B-ensemble-v3",
+    "chargoddard/MelangeC-70b",
+    "garage-bAInd/Camel-Platypus2-70B",
+    "yeontaek/llama-2-70B-ensemble-v2",
+    "garage-bAInd/Camel-Platypus2-70B",
+    "migtissera/Synthia-70B-v1.2",
+    "v2ray/LLaMA-2-Wizard-70B-QLoRA",
+    "quantumaikr/llama-2-70b-fb16-orca-chat-10k",
+    "v2ray/LLaMA-2-Wizard-70B-QLoRA",
+    "stabilityai/StableBeluga2",
+    "quantumaikr/llama-2-70b-fb16-guanaco-1k",
+    "garage-bAInd/Camel-Platypus2-70B",
+    "migtissera/Synthia-70B-v1.1",
+    "migtissera/Synthia-70B",
+    "psmathur/model_101",
+    "augtoma/qCammel70",
+    "augtoma/qCammel-70",
+    "augtoma/qCammel-70v1",
+    "augtoma/qCammel-70x",
+    "augtoma/qCammel-70-x",
+    "jondurbin/airoboros-l2-70b-gpt4-1.4.1",
+    "dfurman/llama-2-70b-dolphin-peft",
+    "jondurbin/airoboros-l2-70b-2.1",
+    "TheBloke/llama-2-70b-Guanaco-QLoRA-fp16",
+    "quantumaikr/QuantumLM-llama2-70B-Korean-LoRA",
+    "quantumaikr/quantumairk-llama-2-70B-instruct",
+    "psmathur/model_420",
+    "psmathur/model_51",
+    "garage-bAInd/Camel-Platypus2-70B",
+    "TheBloke/Airoboros-L2-70B-2.1-GPTQ",
+    "OpenAssistant/llama2-70b-oasst-sft-v10",
+    "garage-bAInd/Platypus2-70B",
+    "liuxiang886/llama2-70B-qlora-gpt4",
+    "upstage/llama-65b-instruct",
"quantumaikr/llama-2-70b-fb16-korean", + "NousResearch/Nous-Hermes-Llama2-70b", + "v2ray/LLaMA-2-Jannie-70B-QLoRA", + "jondurbin/airoboros-l2-70b-gpt4-m2.0", + "jondurbin/airoboros-l2-70b-gpt4-m2.0", + "OpenAssistant/llama2-70b-oasst-sft-v10", + "yeontaek/llama-2-70B-ensemble-v8", + "jondurbin/airoboros-l2-70b-gpt4-2.0", + "jarradh/llama2_70b_chat_uncensored", + "WizardLM/WizardMath-70B-V1.0", + "jordiclive/Llama-2-70b-oasst-1-200", + "WizardLM/WizardMath-70B-V1.0", + "jondurbin/airoboros-l2-70b-gpt4-2.0", + "OpenLemur/lemur-70b-chat-v1", + "tiiuae/falcon-180B", + "tiiuae/falcon-180B", + "stabilityai/StableBeluga1-Delta", + "psmathur/model_42_70b", + "psmathur/test_42_70b", + "TheBloke/fiction.live-Kimiko-V2-70B-fp16", + "tiiuae/falcon-180B", + "WizardLM/WizardMath-70B-V1.0", + "tiiuae/falcon-180B-chat", + "jondurbin/airoboros-l2-70b-gpt4-2.0", + "ehartford/samantha-1.1-llama-33b", + "ajibawa-2023/scarlett-33b", + "ddobokki/Llama-2-70b-orca-200k", + "TheBloke/gpt4-alpaca-lora_mlp-65B-HF", + "tiiuae/falcon-180B-chat", + "tiiuae/falcon-180B-chat", + "tiiuae/falcon-180B", + "TheBloke/Lemur-70B-Chat-v1-GPTQ", + "NousResearch/Nous-Puffin-70B", + "WizardLM/WizardLM-70B-V1.0", + "WizardLM/WizardMath-70B-V1.0", + "meta-llama/Llama-2-70b-hf", + "TheBloke/Llama-2-70B-fp16", + "Weyaxi/llama-2-alpacagpt4-1000step", + "WizardLM/WizardLM-70B-V1.0", + "simsim314/WizardLM-70B-V1.0-HF", + "simsim314/WizardLM-70B-V1.0-HF", + "WizardLM/WizardLM-70B-V1.0", + "openbmb/UltraLM-65b", + "psmathur/model_420_preview", + "WizardLM/WizardLM-70B-V1.0", + "simsim314/WizardLM-70B-V1.0-HF", + "OpenBuddy/openbuddy-llama2-70b-v10.1-bf16", + "upstage/llama-30b-instruct-2048", + "jondurbin/airoboros-65b-gpt4-1.2", + "TheBloke/guanaco-65B-HF", + "jondurbin/airoboros-65b-gpt4-1.3", + "meta-llama/Llama-2-70b-chat-hf", + "ValiantLabs/ShiningValiant", + "Faradaylab/Aria-70B", + "lilloukas/GPlatty-30B", + "TheBloke/VicUnlocked-alpaca-65B-QLoRA-fp16", + "jondurbin/airoboros-65b-gpt4-1.4-peft", + "jondurbin/airoboros-65b-gpt4-1.4", + "jondurbin/airoboros-65b-gpt4-2.0", + "TheBloke/WizardLM-70B-V1.0-GPTQ", + "TheBloke/WizardLM-70B-V1.0-GPTQ", + "ariellee/SuperPlatty-30B", + "jondurbin/airoboros-65b-gpt4-1.4", + "jondurbin/airoboros-65b-gpt4-2.0", + "yeontaek/llama-2-70b-IA3-guanaco", + "CalderaAI/30B-Lazarus", + "Aspik101/trurl-2-13b-pl-instruct_unload", + "ehartford/WizardLM-33B-V1.0-Uncensored", + "ehartford/WizardLM-33B-V1.0-Uncensored", + "OpenBuddy/openbuddy-llama-65b-v8-bf16", + "Aspik101/llama-30b-instruct-2048-PL-lora", + "h2oai/h2ogpt-research-oasst1-llama-65b", + "Aspik101/llama-30b-instruct-2048-PL-lora", + "CalderaAI/30B-Epsilon", + "Aspik101/llama-30b-2048-instruct-PL-lora_unload", + "jondurbin/airoboros-65b-gpt4-m2.0", + "jondurbin/airoboros-65b-gpt4-m2.0", + "Aeala/Alpaca-elina-65b", + "TheBloke/robin-65b-v2-fp16", + "TheBloke/gpt4-alpaca-lora-30b-HF", + "TheBloke/Llama-2-70B-chat-GPTQ", + "upstage/llama-30b-instruct", + "OpenLemur/lemur-70b-v1", + "lmsys/vicuna-33b-v1.3", + "ausboss/llama-30b-supercot", + "ai-business/Luban-13B", + "Henk717/airochronos-33B", + "lmsys/vicuna-33b-v1.3", + "Henk717/airochronos-33B", + "bavest/fin-llama-33b-merged", + "jondurbin/airoboros-33b-gpt4-1.4", + "YeungNLP/firefly-llama-30b", + "Aspik101/30B-Lazarus-instruct-PL-lora_unload", + "uukuguy/speechless-llama2-luban-orca-platypus-13b", + "xxyyy123/test_merge_p_ov1_w0.66_w0.5_n1", + "jondurbin/airoboros-33b-gpt4-1.2", + "TheBloke/alpaca-lora-65B-HF", + "bofenghuang/vigogne-33b-instruct", + "yeontaek/llama-2-13B-ensemble-v5", + 
"garage-bAInd/Platypus-30B", + "Open-Orca/OpenOrca-Platypus2-13B", + "kajdun/viwaai-30b_v4", + "lilloukas/Platypus-30B", + "Open-Orca/OpenOrca-Platypus2-13B", + "Henk717/chronoboros-33B", + "jondurbin/airoboros-33b-2.1", + "HiTZ/alpaca-lora-65b-en-pt-es-ca", + "quantumaikr/QuantumLM-70B-hf", + "uukuguy/speechless-llama2-13b", + "uukuguy/speechless-llama2-hermes-orca-platypus-13b", + "openaccess-ai-collective/manticore-30b-chat-pyg-alpha", + "LLMs/WizardLM-30B-V1.0", + "TheBloke/WizardLM-30B-fp16", + "openaccess-ai-collective/hippogriff-30b-chat", + "concedo/Vicuzard-30B-Uncensored", + "TFLai/OpenOrca-Platypus2-13B-QLoRA-0.80-epoch", + "huggingface/llama-65b", + "huggyllama/llama-65b", + "gaodrew/gaodrew-llama-30b-instruct-2048-Open-Platypus-100steps", + "uukuguy/speechless-llama2-hermes-orca-platypus-wizardlm-13b", + "Sao10K/Mythical-Destroyer-V2-L2-13B", + "camel-ai/CAMEL-33B-Combined-Data", + "dsvv-cair/alpaca-cleaned-llama-30b-bf16", + "MetaIX/GPT4-X-Alpasta-30b", + "garage-bAInd/Stable-Platypus2-13B", + "TFLai/Luban-Platypus2-13B-QLora-0.80-epoch", + "TheBloke/OpenOrca-Platypus2-13B-GPTQ", + "IkariDev/Athena-tmp", + "OpenBuddyEA/openbuddy-llama-30b-v7.1-bf16", + "OpenBuddyEA/openbuddy-llama-30b-v7.1-bf16", + "Open-Orca/OpenOrcaxOpenChat-Preview2-13B", + "psmathur/model_007_13b_v2", + "Aspik101/Vicuzard-30B-Uncensored-instruct-PL-lora_unload", + "jondurbin/airoboros-33b-gpt4-m2.0", + "Sao10K/Mythical-Destroyer-L2-13B", + "TheBloke/Wizard-Vicuna-30B-Uncensored-fp16", + "ehartford/Wizard-Vicuna-30B-Uncensored", + "TFLai/Nova-13B", + "TheBloke/robin-33B-v2-fp16", + "totally-not-an-llm/PuddleJumper-13b", + "Aeala/VicUnlocked-alpaca-30b", + "Yhyu13/oasst-rlhf-2-llama-30b-7k-steps-hf", + "jondurbin/airoboros-33b-gpt4", + "jondurbin/airoboros-33b-gpt4-m2.0", + "tiiuae/falcon-40b-instruct", + "psmathur/orca_mini_v3_13b", + "Aeala/GPT4-x-AlpacaDente-30b", + "MayaPH/GodziLLa-30B", + "jondurbin/airoboros-33b-gpt4-m2.0", + "TFLai/SpeechlessV1-Nova-13B", + "yeontaek/llama-2-13B-ensemble-v4", + "ajibawa-2023/carl-33b", + "jondurbin/airoboros-33b-gpt4-2.0", + "TFLai/Stable-Platypus2-13B-QLoRA-0.80-epoch", + "jondurbin/airoboros-33b-gpt4-1.3", + "TehVenom/oasst-sft-6-llama-33b-xor-MERGED-16bit", + "TFLai/OrcaMini-Platypus2-13B-QLoRA-0.80-epoch", + "jondurbin/airoboros-33b-gpt4-2.0", + "chargoddard/Chronorctypus-Limarobormes-13b", + "jondurbin/airoboros-33b-gpt4-1.3", + "Open-Orca/OpenOrca-Platypus2-13B", + "FelixChao/vicuna-33b-coder", + "FelixChao/vicuna-33b-coder", + "Gryphe/MythoMix-L2-13b", + "Aeala/Enterredaas-33b", + "yeontaek/llama-2-13B-ensemble-v1", + "TFLai/OpenOrcaPlatypus2-Platypus2-13B-QLora-0.80-epoch", + "TFLai/Ensemble5-Platypus2-13B-QLora-0.80-epoch", + "yeontaek/llama-2-13B-ensemble-v3", + "TFLai/MythoMix-Platypus2-13B-QLoRA-0.80-epoch", + "yihan6324/llama2-13b-instructmining-40k-sharegpt", + "timdettmers/guanaco-33b-merged", + "TFLai/EnsembleV5-Nova-13B", + "circulus/Llama-2-13b-orca-v1", + "Undi95/ReMM-SLERP-L2-13B", + "Gryphe/MythoMax-L2-13b", + "stabilityai/StableBeluga-13B", + "circulus/Llama-2-13b-orca-v1", + "ehartford/WizardLM-30B-Uncensored", + "The-Face-Of-Goonery/huginnv1.2", + "TheBloke/OpenOrcaxOpenChat-Preview2-13B-GPTQ", + "Sao10K/Stheno-L2-13B", + "bofenghuang/vigogne-2-13b-instruct", + "The-Face-Of-Goonery/Huginn-13b-FP16", + "grimpep/L2-MythoMax22b-instruct-Falseblock", + "TFLai/Nous-Hermes-Platypus2-13B-QLoRA-0.80-epoch", + "yeontaek/Platypus2xOpenOrca-13B-IA3-v4", + "yeontaek/Platypus2xOpenOrca-13B-IA3", + "yeontaek/Platypus2xOpenOrca-13B-IA3-ensemble", + 
"Open-Orca/LlongOrca-13B-16k", + "Sao10K/Stheno-Inverted-L2-13B", + "garage-bAInd/Camel-Platypus2-13B", + "digitous/Alpacino30b", + "NousResearch/Nous-Hermes-Llama2-13b", + "yeontaek/Platypus2xOpenOrca-13B-IA3-v3", + "TFLai/MythicalDestroyerV2-Platypus2-13B-QLora-0.80-epoch", + "TheBloke/VicUnlocked-30B-LoRA-HF", + "Undi95/Nous-Hermes-13B-Code", + "The-Face-Of-Goonery/Chronos-Beluga-v2-13bfp16", + "NousResearch/Nous-Hermes-Llama2-13b", + "Monero/WizardLM-Uncensored-SuperCOT-StoryTelling-30b", + "TheBloke/Wizard-Vicuna-30B-Uncensored-GPTQ", + "Open-Orca/OpenOrcaxOpenChat-Preview2-13B", + "Austism/chronos-hermes-13b-v2", + "yeontaek/Platypus2xOpenOrca-13B-IA3-v2.1", + "yeontaek/Platypus2xOpenOrca-13B-IA3-v2", + "Gryphe/MythoLogic-L2-13b", + "augtoma/qCammel-13", + "YeungNLP/firefly-llama2-13b-v1.2", + "Aspik101/StableBeluga-13B-instruct-PL-lora_unload", + "andreaskoepf/llama2-13b-megacode2_min100", + "rombodawg/LosslessMegaCoder-llama2-13b-mini", + "yulan-team/YuLan-Chat-2-13b-fp16", + "elinas/chronos-33b", + "YeungNLP/firefly-llama2-13b", + "Sao10K/Medusa-13b", + "OptimalScale/robin-65b-v2-delta", + "minlik/chinese-alpaca-33b-merged", + "OpenAssistant/llama2-13b-megacode2-oasst", + "TheBloke/OpenAssistant-SFT-7-Llama-30B-HF", + "Undi95/UndiMix-v1-13b", + "ehartford/Samantha-1.11-13b", + "beaugogh/Llama2-13b-sharegpt4", + "Aeala/GPT4-x-AlpacaDente2-30b", + "luffycodes/nash-vicuna-13b-v1dot5-ep2-w-rag-w-simple", + "WizardLM/WizardLM-13B-V1.1", + "uukuguy/speechless-orca-platypus-coig-lite-2k-0.6e-13b", + "huggyllama/llama-30b", + "Undi95/ReMM-L2-13B-PIPPA", + "Undi95/ReMM-L2-13B", + "gaodrew/gaodrew-gorgonzola-13b", + "lmsys/vicuna-13b-v1.5", + "yeontaek/Platypus2xOpenOrca-13B-LoRa", + "Yhyu13/llama-30B-hf-openassitant", + "huggingface/llama-30b", + "lmsys/vicuna-13b-v1.5", + "TFLai/Athena-Platypus2-13B-QLora-0.80-epoch", + "TheBloke/dromedary-65b-lora-HF", + "yeontaek/llama-2-13b-Beluga-QLoRA", + "The-Face-Of-Goonery/Huginn-13b-V4", + "The-Face-Of-Goonery/Huginn-13b-v4.5", + "The-Face-Of-Goonery/Huginn-v3-13b", + "tiiuae/falcon-40b", + "WhoTookMyAmogusNickname/NewHope_HF_not_official", + "gaodrew/OpenOrca-Platypus2-13B-thera-1250", + "SLAM-group/NewHope", + "garage-bAInd/Platypus2-13B", + "migtissera/Synthia-13B", + "elinas/chronos-13b-v2", + "mosaicml/mpt-30b-chat", + "CHIH-HUNG/llama-2-13b-OpenOrca_5w", + "uukuguy/speechless-hermes-coig-lite-13b", + "TheBloke/tulu-30B-fp16", + "uukuguy/speechless-hermes-coig-lite-13b", + "xDAN-AI/xDAN_13b_l2_lora", + "lmsys/vicuna-13b-v1.5-16k", + "openchat/openchat_v3.1", + "CHIH-HUNG/llama-2-13b-dolphin_5w", + "Aspik101/vicuna-13b-v1.5-PL-lora_unload", + "Undi95/MLewd-L2-13B", + "ehartford/minotaur-llama2-13b-qlora", + "kajdun/iubaris-13b-v3", + "TFLai/Limarp-Platypus2-13B-QLoRA-0.80-epoch", + "openchat/openchat_v3.1", + "uukuguy/speechless-orca-platypus-coig-lite-4k-0.6e-13b", + "ziqingyang/chinese-alpaca-2-13b", + "TFLai/Airboros2.1-Platypus2-13B-QLora-0.80-epoch", + "yeontaek/llama-2-13b-Guanaco-QLoRA", + "lmsys/vicuna-13b-v1.5-16k", + "ehartford/based-30b", + "kingbri/airolima-chronos-grad-l2-13B", + "openchat/openchat_v3.2", + "uukuguy/speechless-orca-platypus-coig-lite-4k-0.5e-13b", + "yeontaek/Platypus2-13B-LoRa", + "kingbri/chronolima-airo-grad-l2-13B", + "openchat/openchat_v3.2", + "TFLai/PuddleJumper-Platypus2-13B-QLoRA-0.80-epoch", + "shareAI/llama2-13b-Chinese-chat", + "ehartford/WizardLM-1.0-Uncensored-Llama2-13b", + "Aspik101/Redmond-Puffin-13B-instruct-PL-lora_unload", + "yeontaek/llama-2-13B-ensemble-v6", + "WizardLM/WizardLM-13B-V1.2", + 
"TheBloke/WizardLM-13B-V1.1-GPTQ", + "bhenrym14/airophin-13b-pntk-16k-fp16", + "ehartford/WizardLM-1.0-Uncensored-Llama2-13b", + "Mikael110/llama-2-13b-guanaco-fp16", + "yeontaek/airoboros-2.1-llama-2-13B-QLoRa", + "CalderaAI/13B-Legerdemain-L2", + "grimpep/llama2-22b-wizard_vicuna", + "grimpep/llama2-22B-GPLATTY", + "bhenrym14/airophin-13b-pntk-16k-fp16", + "yeontaek/llama-2-13b-QLoRA", + "OpenAssistant/llama2-13b-orca-8k-3319", + "TheBloke/WizardLM-13B-V1-1-SuperHOT-8K-fp16", + "duliadotio/dulia-13b-8k-alpha", + "Undi95/LewdEngine", + "OpenBuddy/openbuddy-llama2-13b-v8.1-fp16", + "CHIH-HUNG/llama-2-13b-open_orca_20w", + "bhenrym14/airoboros-33b-gpt4-1.4.1-lxctx-PI-16384-fp16", + "FlagAlpha/Llama2-Chinese-13b-Chat", + "LLMs/WizardLM-13B-V1.0", + "chansung/gpt4-alpaca-lora-13b-decapoda-1024", + "TheBloke/wizardLM-13B-1.0-fp16", + "digitous/13B-Chimera", + "yeontaek/Platypus2xOpenOrcaxGuanaco-13B-LoRa", + "jondurbin/airoboros-l2-13b-2.1", + "Monero/WizardLM-30B-Uncensored-Guanaco-SuperCOT-30b", + "TheBloke/UltraLM-13B-fp16", + "openaccess-ai-collective/minotaur-13b-fixed", + "NousResearch/Redmond-Puffin-13B", + "KoboldAI/LLaMA2-13B-Holomax", + "Lajonbot/WizardLM-13B-V1.2-PL-lora_unload", + "yeontaek/Platypus2-13B-LoRa-v2", + "TheBloke/airoboros-13B-HF", + "jondurbin/airoboros-13b", + "jjaaaww/posi_13b", + "CoolWP/llama-2-13b-guanaco-fp16", + "yeontaek/Platypus2-13B-QLoRa", + "h2oai/h2ogpt-research-oig-oasst1-512-30b", + "dfurman/llama-2-13b-guanaco-peft", + "NousResearch/Redmond-Puffin-13B", + "pe-nlp/llama-2-13b-platypus-vicuna-wizard", + "CHIH-HUNG/llama-2-13b-dolphin_20w", + "NousResearch/Nous-Hermes-13b", + "NobodyExistsOnTheInternet/GiftedConvo13bLoraNoEconsE4", + "ehartford/Wizard-Vicuna-13B-Uncensored", + "TheBloke/Wizard-Vicuna-13B-Uncensored-HF", + "openchat/openchat_v3.2_super", + "bhenrym14/airophin-v2-13b-PI-8k-fp16", + "openaccess-ai-collective/manticore-13b", + "The-Face-Of-Goonery/Huginn-22b-Prototype", + "jphme/Llama-2-13b-chat-german", + "grimpep/llama2-28B-Airo03", + "TheBloke/Kimiko-v2-13B-fp16", + "FPHam/Free_Sydney_13b_HF", + "lmsys/vicuna-13b-v1.3", + "FelixChao/llama2-13b-math1.1", + "CalderaAI/13B-BlueMethod", + "meta-llama/Llama-2-13b-chat-hf", + "deepse/CodeUp-Llama-2-13b-chat-hf", + "WizardLM/WizardMath-13B-V1.0", + "WizardLM/WizardMath-13B-V1.0", + "HyperbeeAI/Tulpar-7b-v0", + "xxyyy123/test_qkvo_adptor", + "xxyyy123/mc_data_30k_from_platpus_orca_7b_10k_v1_lora_qkvo_rank14_v2", + "openchat/openchat_v2_w", + "FelixChao/llama2-13b-math1.1", + "psmathur/orca_mini_v3_7b", + "TehVenom/Metharme-13b-Merged", + "xxyyy123/10k_v1_lora_qkvo_rank14_v3", + "OpenAssistant/llama2-13b-orca-v2-8k-3166", + "openaccess-ai-collective/wizard-mega-13b", + "jondurbin/airoboros-13b-gpt4-1.4", + "jondurbin/airoboros-13b-gpt4-1.4-fp16", + "Monero/Manticore-13b-Chat-Pyg-Guanaco", + "FelixChao/llama2-13b-math1.2", + "chargoddard/platypus-2-22b-relora", + "FelixChao/llama2-13b-math1.2", + "Gryphe/MythoBoros-13b", + "CalderaAI/13B-Ouroboros", + "OpenAssistant/llama2-13b-orca-v2-8k-3166", + "heegyu/LIMA2-13b-hf", + "digitous/13B-HyperMantis", + "Gryphe/MythoLogic-13b", + "TheBloke/Airoboros-L2-13B-2.1-GPTQ", + "chargoddard/platypus2-22b-relora", + "openchat/openchat_v2", + "yeontaek/Platypus2-13B-IA3", + "stabilityai/StableBeluga-7B", + "circulus/Llama-2-7b-orca-v1", + "budecosystem/genz-13b-v2", + "TheBloke/gpt4-x-vicuna-13B-HF", + "NobodyExistsOnTheInternet/GiftedConvo13bLoraNoEcons", + "zarakiquemparte/zarafusionex-1.1-l2-7b", + "Lajonbot/tableBeluga-7B-instruct-pl-lora_unload", + 
"jondurbin/airoboros-13b-gpt4", + "gaodrew/gaodrew-gorgonzola-13b", + "jondurbin/airoboros-13b-gpt4-1.1", + "TheBloke/gpt4-alpaca-lora-13B-HF", + "zarakiquemparte/zarablendex-vq-l2-7b", + "openaccess-ai-collective/manticore-13b-chat-pyg", + "Lajonbot/Llama-2-13b-hf-instruct-pl-lora_unload", + "NobodyExistsOnTheInternet/PuffedLIMA13bQLORA", + "xxyyy123/10k_v1_lora_qkvo_rank28_v2", + "jondurbin/airoboros-l2-13b-gpt4-1.4.1", + "dhmeltzer/Llama-2-13b-hf-eli5-wiki-1024_r_64_alpha_16", + "NobodyExistsOnTheInternet/PuffedConvo13bLoraE4", + "yihan6324/llama2-7b-instructmining-40k-sharegpt", + "CHIH-HUNG/llama-2-13b-Open_Platypus_and_ccp_2.6w", + "Aeala/GPT4-x-Alpasta-13b", + "psmathur/orca_mini_v2_13b", + "YeungNLP/firefly-llama-13b", + "psmathur/orca_mini_v2_13b", + "zarakiquemparte/zarafusionix-l2-7b", + "yihan6324/llama2-7b-instructmining-60k-sharegpt", + "yihan6324/llama-2-7b-instructmining-60k-sharegpt", + "layoric/llama-2-13b-code-alpaca", + "bofenghuang/vigogne-13b-instruct", + "Lajonbot/vicuna-13b-v1.3-PL-lora_unload", + "lvkaokao/llama2-7b-hf-chat-lora-v3", + "ehartford/dolphin-llama-13b", + "YeungNLP/firefly-llama-13b-v1.2", + "TheBloke/Kimiko-13B-fp16", + "kevinpro/Vicuna-13B-CoT", + "eachadea/vicuna-13b-1.1", + "pillowtalks-ai/delta13b", + "TheBloke/vicuna-13B-1.1-HF", + "TheBloke/Vicuna-13B-CoT-fp16", + "lmsys/vicuna-13b-delta-v1.1", + "lmsys/vicuna-13b-v1.1", + "xxyyy123/20k_v1_lora_qkvo_rank14_v2", + "TheBloke/guanaco-13B-HF", + "TheBloke/vicuna-13b-v1.3.0-GPTQ", + "edor/Stable-Platypus2-mini-7B", + "totally-not-an-llm/EverythingLM-13b-V2-16k", + "zarakiquemparte/zaraxe-l2-7b", + "beaugogh/Llama2-7b-openorca-mc-v2", + "TheBloke/Nous-Hermes-13B-SuperHOT-8K-fp16", + "quantumaikr/QuantumLM", + "jondurbin/airoboros-13b-gpt4-1.2", + "TheBloke/robin-13B-v2-fp16", + "TFLai/llama-2-13b-4bit-alpaca-gpt4", + "yihan6324/llama2-7b-instructmining-orca-40k", + "dvruette/oasst-llama-13b-2-epochs", + "Open-Orca/LlongOrca-7B-16k", + "Aspik101/Nous-Hermes-13b-pl-lora_unload", + "ehartford/Samantha-1.11-CodeLlama-34b", + "nkpz/llama2-22b-chat-wizard-uncensored", + "bofenghuang/vigogne-13b-chat", + "beaugogh/Llama2-7b-openorca-mc-v1", + "OptimalScale/robin-13b-v2-delta", + "pe-nlp/llama-2-13b-vicuna-wizard", + "chargoddard/llama2-22b", + "gywy/llama2-13b-chinese-v1", + "frank098/Wizard-Vicuna-13B-juniper", + "IGeniusDev/llama13B-quant8-testv1-openorca-customdataset", + "CHIH-HUNG/llama-2-13b-huangyt_Fintune_1_17w-gate_up_down_proj", + "eachadea/vicuna-13b", + "yihan6324/llama2-7b-instructmining-orca-90k", + "chargoddard/llama2-22b-blocktriangular", + "luffycodes/mcq-vicuna-13b-v1.5", + "Yhyu13/chimera-inst-chat-13b-hf", + "luffycodes/mcq-vicuna-13b-v1.5", + "chargoddard/ypotryll-22b-epoch2-qlora", + "totally-not-an-llm/EverythingLM-13b-16k", + "luffycodes/mcq-hal-vicuna-13b-v1.5", + "openaccess-ai-collective/minotaur-13b", + "IGeniusDev/llama13B-quant8-testv1-openorca-customdataset", + "chargoddard/llama2-22b-blocktriangular", + "TFLai/Platypus2-13B-QLoRA-0.80-epoch", + "meta-llama/Llama-2-13b-hf", + "CHIH-HUNG/llama-2-13b-huangyt_FINETUNE2_3w-gate_up_down_proj", + "luffycodes/mcq-hal-vicuna-13b-v1.5", + "TheBloke/Llama-2-13B-fp16", + "TaylorAI/Flash-Llama-13B", + "shareAI/bimoGPT-llama2-13b", + "wahaha1987/llama_13b_sharegpt94k_fastchat", + "openchat/openchat_8192", + "CHIH-HUNG/llama-2-13b-huangyt_Fintune_1_17w-q_k_v_o_proj", + "dvruette/llama-13b-pretrained-sft-do2", + "CHIH-HUNG/llama-2-13b-alpaca-test", + "OpenBuddy/openbuddy-llama2-13b-v11.1-bf16", + "CHIH-HUNG/llama-2-13b-FINETUNE2_TEST_2.2w", + 
"project-baize/baize-v2-13b", + "jondurbin/airoboros-l2-13b-gpt4-m2.0", + "yeontaek/Platypus2xOpenOrca-13B-LoRa-v2", + "CHIH-HUNG/llama-2-13b-huangyt_FINETUNE2_3w", + "xzuyn/Alpacino-SuperCOT-13B", + "jondurbin/airoboros-l2-13b-gpt4-2.0", + "aiplanet/effi-13b", + "clibrain/Llama-2-13b-ft-instruct-es", + "CHIH-HUNG/llama-2-13b-huangyt_Fintune_1_17w", + "bofenghuang/vigogne-2-7b-instruct", + "CHIH-HUNG/llama-2-13b-huangyt_FINETUNE2_3w-q_k_v_o_proj", + "bofenghuang/vigogne-2-7b-chat", + "aiplanet/effi-13b", + "haonan-li/bactrian-x-llama-13b-merged", + "beaugogh/Llama2-7b-sharegpt4", + "HWERI/Llama2-7b-sharegpt4", + "jondurbin/airoboros-13b-gpt4-1.3", + "jondurbin/airoboros-c34b-2.1", + "junelee/wizard-vicuna-13b", + "TheBloke/wizard-vicuna-13B-HF", + "Open-Orca/OpenOrca-Preview1-13B", + "TheBloke/h2ogpt-oasst1-512-30B-HF", + "TheBloke/Llama-2-13B-GPTQ", + "camel-ai/CAMEL-13B-Combined-Data", + "lmsys/vicuna-7b-v1.5", + "lmsys/vicuna-7b-v1.5-16k", + "lmsys/vicuna-7b-v1.5", + "ausboss/llama-13b-supercot", + "TheBloke/tulu-13B-fp16", + "NousResearch/Nous-Hermes-llama-2-7b", + "jlevin/guanaco-13b-llama-2", + "lmsys/vicuna-7b-v1.5-16k", + "dvruette/llama-13b-pretrained", + "nkpz/llama2-22b-daydreamer-v3", + "dvruette/llama-13b-pretrained-dropout", + "jondurbin/airoboros-l2-13b-2.1", + "LLMs/Stable-Vicuna-13B", + "64bits/LexPodLM-13B", + "lizhuang144/llama_mirror_13b_v1.0", + "TheBloke/stable-vicuna-13B-HF", + "zarakiquemparte/zaraxls-l2-7b", + "TheBloke/Llama-2-13B-GPTQ", + "Kiddyz/testlm-3", + "migtissera/Synthia-7B", + "zarakiquemparte/zarablend-l2-7b", + "mosaicml/mpt-30b-instruct", + "PocketDoc/Dans-PileOfSets-Mk1-llama-13b-merged", + "vonjack/Qwen-LLaMAfied-HFTok-7B-Chat", + "l3utterfly/llama2-7b-layla", + "Lajonbot/vicuna-7b-v1.5-PL-lora_unload", + "heegyu/LIMA-13b-hf", + "frank098/WizardLM_13B_juniper", + "ashercn97/manatee-7b", + "chavinlo/gpt4-x-alpaca", + "PocketDoc/Dans-PersonalityEngine-13b", + "ehartford/WizardLM-1.0-Uncensored-CodeLlama-34b", + "digitous/Alpacino13b", + "edor/Hermes-Platypus2-mini-7B", + "lvkaokao/llama2-7b-hf-chat-lora-v2", + "Kiddyz/testlm-1-1", + "Kiddyz/testlm", + "Kiddyz/testlm-1", + "Kiddyz/testlm2", + "radm/Philosophy-Platypus2-13b", + "aiplanet/effi-13b", + "Harshvir/Llama-2-7B-physics", + "YeungNLP/firefly-ziya-13b", + "LinkSoul/Chinese-Llama-2-7b", + "PeanutJar/LLaMa-2-PeanutButter_v10-7B", + "OpenBuddy/openbuddy-llama2-13b-v11-bf16", + "StudentLLM/Alpagasus-2-13B-QLoRA-pipeline", + "meta-llama/Llama-2-13b-hf", + "WizardLM/WizardCoder-Python-34B-V1.0", + "dvruette/llama-13b-pretrained-sft-epoch-1", + "camel-ai/CAMEL-13B-Role-Playing-Data", + "ziqingyang/chinese-llama-2-13b", + "rombodawg/LosslessMegaCoder-llama2-7b-mini", + "TheBloke/koala-13B-HF", + "lmsys/vicuna-7b-delta-v1.1", + "eachadea/vicuna-7b-1.1", + "Ejafa/vicuna_7B_vanilla_1.1", + "lvkaokao/llama2-7b-hf-chat-lora", + "OpenBuddy/openbuddy-atom-13b-v9-bf16", + "Norquinal/llama-2-7b-claude-chat-rp", + "Danielbrdz/Barcenas-7b", + "heegyu/WizardVicuna2-13b-hf", + "meta-llama/Llama-2-7b-chat-hf", + "PeanutJar/LLaMa-2-PeanutButter_v14-7B", + "PeanutJar/LLaMa-2-PeanutButter_v4-7B", + "davzoku/cria-llama2-7b-v1.3", + "OpenBuddy/openbuddy-atom-13b-v9-bf16", + "lvkaokao/llama2-7b-hf-instruction-lora", + "Tap-M/Luna-AI-Llama2-Uncensored", + "ehartford/Samantha-1.11-7b", + "WizardLM/WizardCoder-Python-34B-V1.0", + "TheBloke/Manticore-13B-Chat-Pyg-Guanaco-SuperHOT-8K-GPTQ", + "Mikael110/llama-2-7b-guanaco-fp16", + "garage-bAInd/Platypus2-7B", + "PeanutJar/LLaMa-2-PeanutButter_v18_B-7B", + "mosaicml/mpt-30b", + 
"garage-bAInd/Platypus2-7B", + "huggingface/llama-13b", + "dvruette/oasst-llama-13b-1000-steps", + "jordiclive/gpt4all-alpaca-oa-codealpaca-lora-13b", + "huggyllama/llama-13b", + "Voicelab/trurl-2-7b", + "TFLai/llama-13b-4bit-alpaca", + "gywy/llama2-13b-chinese-v2", + "lmsys/longchat-13b-16k", + "Aspik101/trurl-2-7b-pl-instruct_unload", + "WizardLM/WizardMath-7B-V1.0", + "Norquinal/llama-2-7b-claude-chat", + "TheTravellingEngineer/llama2-7b-chat-hf-dpo", + "open-llm-leaderboard/starchat-beta", + "joehuangx/spatial-vicuna-7b-v1.5-LoRA", + "conceptofmind/LLongMA-2-13b-16k", + "tianyil1/denas-llama2", + "lmsys/vicuna-7b-v1.3", + "conceptofmind/LLongMA-2-13b-16k", + "openchat/opencoderplus", + "ajibawa-2023/scarlett-7b", + "dhmeltzer/llama-7b-SFT_eli5_wiki65k_1024_r_64_alpha_16_merged", + "psyche/kollama2-7b-v2", + "heegyu/LIMA2-7b-hf", + "dhmeltzer/llama-7b-SFT-qlora-eli5-wiki_DPO_ds_RM_top_2_1024_r_64_alpha_16", + "abhishek/llama2guanacotest", + "jondurbin/airoboros-l2-7b-2.1", + "llama-anon/instruct-13b", + "FelixChao/vicuna-7B-physics", + "Aspik101/Llama-2-7b-hf-instruct-pl-lora_unload", + "shibing624/chinese-alpaca-plus-13b-hf", + "davzoku/cria-llama2-7b-v1.3_peft", + "quantumaikr/llama-2-7b-hf-guanaco-1k", + "togethercomputer/Llama-2-7B-32K-Instruct", + "sia-ai/llama-2-7b-1-percent-open-orca-1000-steps-v0", + "TheTravellingEngineer/llama2-7b-hf-guanaco", + "Lajonbot/Llama-2-7b-chat-hf-instruct-pl-lora_unload", + "jondurbin/airoboros-l2-7b-gpt4-1.4.1", + "wahaha1987/llama_7b_sharegpt94k_fastchat", + "FelixChao/vicuna-7B-chemical", + "TinyPixel/llama2-7b-oa", + "chaoyi-wu/MedLLaMA_13B", + "edor/Platypus2-mini-7B", + "RoversX/llama-2-7b-hf-small-shards-Samantha-V1-SFT", + "venkycs/llama-v2-7b-32kC-Security", + "psyche/kollama2-7b", + "Fredithefish/Guanaco-7B-Uncensored", + "TheTravellingEngineer/llama2-7b-chat-hf-guanaco", + "ehartford/WizardLM-13B-Uncensored", + "PocketDoc/Dans-CreepingSenseOfDoom", + "wenge-research/yayi-7b-llama2", + "georgesung/llama2_7b_chat_uncensored", + "TinyPixel/llama2-7b-instruct", + "quantumaikr/QuantumLM-7B", + "xzuyn/MedicWizard-7B", + "wenge-research/yayi-7b-llama2", + "TinyPixel/lima-test", + "elyza/ELYZA-japanese-Llama-2-7b-instruct", + "lgaalves/llama-2-7b-hf_open-platypus", + "ziqingyang/chinese-alpaca-2-7b", + "TehVenom/Pygmalion-Vicuna-1.1-7b", + "meta-llama/Llama-2-7b-hf", + "bongchoi/test-llama2-7b", + "TaylorAI/Flash-Llama-7B", + "TheTravellingEngineer/llama2-7b-chat-hf-v2", + "TheTravellingEngineer/llama2-7b-chat-hf-v4", + "kashif/stack-llama-2", + "PeanutJar/LLaMa-2-PeanutButter_v18_A-7B", + "ToolBench/ToolLLaMA-7b-LoRA", + "Monero/WizardLM-13b-OpenAssistant-Uncensored", + "TheTravellingEngineer/llama2-7b-chat-hf-v2", + "TheTravellingEngineer/llama2-7b-chat-hf-v4", + "mrm8488/llama-2-coder-7b", + "elyza/ELYZA-japanese-Llama-2-7b-fast-instruct", + "clibrain/Llama-2-7b-ft-instruct-es", + "medalpaca/medalpaca-7b", + "TheBloke/tulu-7B-fp16", + "OpenBuddy/openbuddy-openllama-13b-v7-fp16", + "TaylorAI/FLAN-Llama-7B-2_Llama2-7B-Flash_868_full_model", + "Aspik101/vicuna-7b-v1.3-instruct-pl-lora_unload", + "jondurbin/airoboros-l2-7b-gpt4-2.0", + "dhmeltzer/llama-7b-SFT_ds_eli5_1024_r_64_alpha_16_merged", + "GOAT-AI/GOAT-7B-Community", + "AtomEchoAI/AtomGPT_56k", + "julianweng/Llama-2-7b-chat-orcah", + "TehVenom/Pygmalion-13b-Merged", + "jondurbin/airoboros-7b-gpt4-1.1", + "dhmeltzer/llama-7b-SFT_ds_wiki65k_1024_r_64_alpha_16_merged", + "bofenghuang/vigogne-7b-chat", + "lmsys/longchat-7b-v1.5-32k", + "jondurbin/airoboros-l2-7b-gpt4-m2.0", + 
"synapsoft/Llama-2-7b-chat-hf-flan2022-1.2M", + "jondurbin/airoboros-7b-gpt4-1.4", + "Charlie911/vicuna-7b-v1.5-lora-mctaco", + "yihan6324/instructmining-platypus-15k", + "meta-llama/Llama-2-7b-hf", + "TheTravellingEngineer/llama2-7b-chat-hf-v3", + "quantumaikr/KoreanLM-hf", + "openthaigpt/openthaigpt-1.0.0-alpha-7b-chat-ckpt-hf", + "TheBloke/Llama-2-7B-GPTQ", + "TheBloke/Llama-2-7B-GPTQ", + "LLMs/AlpacaGPT4-7B-elina", + "ehartford/Wizard-Vicuna-7B-Uncensored", + "TheBloke/Wizard-Vicuna-7B-Uncensored-HF", + "TheTravellingEngineer/llama2-7b-chat-hf-v3", + "golaxy/gowizardlm", + "ehartford/dolphin-llama2-7b", + "CHIH-HUNG/llama-2-7b-dolphin_10w-test", + "mncai/chatdoctor", + "psyche/kollama2-7b-v3", + "jondurbin/airoboros-7b-gpt4", + "jondurbin/airoboros-7b", + "TheBloke/airoboros-7b-gpt4-fp16", + "mosaicml/mpt-7b-8k-chat", + "elyza/ELYZA-japanese-Llama-2-7b", + "bofenghuang/vigogne-7b-instruct", + "jxhong/CAlign-alpaca-7b", + "golaxy/goims", + "jondurbin/airoboros-7b-gpt4-1.2", + "jphme/orca_mini_v2_ger_7b", + "psmathur/orca_mini_v2_7b", + "notstoic/PygmalionCoT-7b", + "golaxy/gogpt2-13b", + "golaxy/gogpt2-13b-chat", + "togethercomputer/LLaMA-2-7B-32K", + "TheBloke/wizardLM-7B-HF", + "keyfan/vicuna-chinese-replication-v1.1", + "golaxy/gogpt2-7b", + "aiplanet/effi-7b", + "arver/llama7b-qlora", + "titan087/OpenLlama13B-Guanaco", + "chavinlo/alpaca-native", + "project-baize/baize-healthcare-lora-7B", + "AlpinDale/pygmalion-instruct", + "openlm-research/open_llama_13b", + "jondurbin/airoboros-7b-gpt4-1.3", + "elyza/ELYZA-japanese-Llama-2-7b-fast", + "jondurbin/airoboros-gpt-3.5-turbo-100k-7b", + "uukuguy/speechless-codellama-orca-13b", + "bigcode/starcoderplus", + "TheBloke/guanaco-7B-HF", + "Neko-Institute-of-Science/metharme-7b", + "TigerResearch/tigerbot-7b-base", + "golaxy/gogpt-7b", + "togethercomputer/LLaMA-2-7B-32K", + "yhyhy3/open_llama_7b_v2_med_instruct", + "ajibawa-2023/carl-7b", + "stabilityai/stablelm-base-alpha-7b-v2", + "conceptofmind/LLongMA-2-7b-16k", + "TehVenom/Pygmalion_AlpacaLora-7b", + "jondurbin/airoboros-7b-gpt4-1.4.1-qlora", + "wannaphong/openthaigpt-0.1.0-beta-full-model_for_open_llm_leaderboard", + "ausboss/llama7b-wizardlm-unfiltered", + "project-baize/baize-v2-7b", + "LMFlow/Robin-v2", + "HanningZhang/Robin-v2", + "LMFlow/Robin-7b-v2", + "OptimalScale/robin-7b-v2-delta", + "uukuguy/speechless-codellama-platypus-13b", + "jerryjalapeno/nart-100k-7b", + "wenge-research/yayi-13b-llama2", + "fireballoon/baichuan-vicuna-chinese-7b", + "jlevin/guanaco-unchained-llama-2-7b", + "csitfun/llama-7b-logicot", + "DevaMalla/llama7b_alpaca_1gpu_bf16", + "WeOpenML/PandaLM-Alpaca-7B-v1", + "illuin/test-custom-llama", + "yeontaek/WizardCoder-Python-13B-LoRa", + "ashercn97/giraffe-7b", + "mosaicml/mpt-7b-chat", + "abhishek/autotrain-llama-alpaca-peft-52508123785", + "Neko-Institute-of-Science/pygmalion-7b", + "TFLai/llama-7b-4bit-alpaca", + "huggingface/llama-7b", + "TheBloke/Planner-7B-fp16", + "shibing624/chinese-llama-plus-13b-hf", + "AGI-inc/lora_moe_7b_baseline", + "DevaMalla/llama-base-7b", + "AGI-inc/lora_moe_7b", + "togethercomputer/GPT-JT-6B-v0", + "ehartford/WizardLM-7B-Uncensored", + "shibing624/chinese-alpaca-plus-7b-hf", + "beomi/llama-2-ko-7b", + "mosaicml/mpt-7b-8k-instruct", + "Enno-Ai/ennodata-7b", + "mosaicml/mpt-7b-instruct", + "facebook/opt-iml-max-30b", + "WeOpenML/Alpaca-7B-v1", + "TheBloke/Project-Baize-v2-7B-GPTQ", + "codellama/CodeLlama-13b-Instruct-hf", + "TheBloke/CodeLlama-13B-Instruct-fp16", + "facebook/galactica-30b", + 
"FreedomIntelligence/phoenix-inst-chat-7b", + "openlm-research/open_llama_7b_v2", + "GeorgiaTechResearchInstitute/galpaca-30b", + "THUDM/chatglm2-6b", + "togethercomputer/GPT-JT-6B-v1", + "TheBloke/koala-7B-HF", + "nathan0/mpt_delta_tuned_model_v3", + "nathan0/mpt_delta_tuned_model_v2", + "GeorgiaTechResearchInstitute/galpaca-30b", + "JosephusCheung/Guanaco", + "shareAI/CodeLLaMA-chat-13b-Chinese", + "TigerResearch/tigerbot-7b-sft", + "Writer/InstructPalmyra-20b", + "OpenAssistant/codellama-13b-oasst-sft-v10", + "bigscience/bloomz-7b1-mt", + "nathan0/mpt_delta_tuned_model_v3", + "VMware/open-llama-7b-open-instruct", + "baichuan-inc/Baichuan-7B", + "anas-awadalla/mpt-7b", + "mosaicml/mpt-7b", + "bigscience/bloomz-7b1", + "ziqingyang/chinese-llama-2-7b", + "OpenAssistant/codellama-13b-oasst-sft-v10", + "wenge-research/yayi-7b", + "tiiuae/falcon-7b", + "togethercomputer/RedPajama-INCITE-Instruct-7B-v0.1", + "togethercomputer/RedPajama-INCITE-7B-Instruct", + "TheBloke/landmark-attention-llama7b-fp16", + "togethercomputer/GPT-JT-Moderation-6B", + "h2oai/h2ogpt-gm-oasst1-en-1024-20b", + "dvruette/gpt-neox-20b-full-precision", + "TehVenom/Moderator-Chan_GPT-JT-6b", + "dvruette/oasst-gpt-neox-20b-1000-steps", + "AlekseyKorshuk/pygmalion-6b-vicuna-chatml", + "facebook/opt-66b", + "Salesforce/codegen-16B-nl", + "Vmware/open-llama-7b-v2-open-instruct", + "mosaicml/mpt-7b-storywriter", + "acrastt/Marx-3B-V2", + "openlm-research/open_llama_7b", + "Fredithefish/ReasonixPajama-3B-HF", + "togethercomputer/GPT-NeoXT-Chat-Base-20B", + "psmathur/orca_mini_13b", + "RWKV/rwkv-raven-14b", + "h2oai/h2ogpt-oasst1-512-20b", + "acrastt/Marx-3B", + "klosax/open_llama_13b_600bt_preview", + "synapsoft/Llama-2-7b-hf-flan2022-1.2M", + "OpenAssistant/oasst-sft-1-pythia-12b", + "golaxy/gogpt-7b-bloom", + "Writer/palmyra-large", + "psmathur/orca_mini_7b", + "dvruette/oasst-pythia-12b-6000-steps", + "NousResearch/CodeLlama-13b-hf", + "codellama/CodeLlama-13b-hf", + "h2oai/h2ogpt-gm-oasst1-multilang-1024-20b", + "VMware/open-llama-0.7T-7B-open-instruct-v1.1", + "dvruette/oasst-pythia-12b-flash-attn-5000-steps", + "dvruette/oasst-gpt-neox-20b-3000-steps", + "RobbeD/OpenLlama-Platypus-3B", + "facebook/opt-30b", + "acrastt/Puma-3B", + "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5", + "dvruette/oasst-pythia-12b-pretrained-sft", + "digitous/GPT-R", + "acrastt/Griffin-3B", + "togethercomputer/RedPajama-INCITE-Base-7B-v0.1", + "togethercomputer/RedPajama-INCITE-7B-Base", + "CobraMamba/mamba-gpt-3b-v3", + "Danielbrdz/CodeBarcenas-7b", + "l3utterfly/open-llama-3b-v2-layla", + "CobraMamba/mamba-gpt-3b-v2", + "OpenAssistant/pythia-12b-sft-v8-7k-steps", + "KoboldAI/GPT-NeoX-20B-Erebus", + "RobbeD/Orca-Platypus-3B", + "h2oai/h2ogpt-gm-oasst1-en-1024-12b", + "OpenAssistant/pythia-12b-sft-v8-2.5k-steps", + "AlekseyKorshuk/chatml-pyg-v1", + "togethercomputer/RedPajama-INCITE-Chat-7B-v0.1", + "togethercomputer/RedPajama-INCITE-7B-Chat", + "digitous/Javelin-R", + "dvruette/oasst-pythia-12b-reference", + "EleutherAI/gpt-neox-20b", + "KoboldAI/fairseq-dense-13B", + "OpenAssistant/pythia-12b-sft-v8-rlhf-2k-steps", + "codellama/CodeLlama-7b-Instruct-hf", + "digitous/Javelin-GPTJ", + "KoboldAI/GPT-NeoX-20B-Skein", + "digitous/Javalion-R", + "h2oai/h2ogpt-oasst1-512-12b", + "acrastt/Bean-3B", + "KoboldAI/GPT-J-6B-Skein", + "nomic-ai/gpt4all-j", + "databricks/dolly-v2-12b", + "TehVenom/Dolly_Shygmalion-6b-Dev_V8P2", + "databricks/dolly-v2-7b", + "Aspik101/WizardVicuna-Uncensored-3B-instruct-PL-lora_unload", + "digitous/Adventien-GPTJ", + 
"openlm-research/open_llama_3b_v2", + "RWKV/rwkv-4-14b-pile", + "Lazycuber/Janemalion-6B", + "OpenAssistant/pythia-12b-pre-v8-12.5k-steps", + "digitous/Janin-R", + "kfkas/Llama-2-ko-7b-Chat", + "heegyu/WizardVicuna-Uncensored-3B-0719", + "h2oai/h2ogpt-gm-oasst1-en-1024-open-llama-7b-preview-400bt", + "TaylorAI/Flash-Llama-3B", + "kfkas/Llama-2-ko-7b-Chat", + "digitous/Skegma-GPTJ", + "digitous/Javalion-GPTJ", + "Pirr/pythia-13b-deduped-green_devil", + "TehVenom/PPO_Shygmalion-V8p4_Dev-6b", + "dvruette/oasst-pythia-6.9b-4000-steps", + "heegyu/WizardVicuna-3B-0719", + "psmathur/orca_mini_3b", + "OpenAssistant/galactica-6.7b-finetuned", + "frank098/orca_mini_3b_juniper", + "PygmalionAI/pygmalion-6b", + "TehVenom/PPO_Pygway-V8p4_Dev-6b", + "TFLai/gpt-neox-20b-4bit-alpaca", + "Corianas/gpt-j-6B-Dolly", + "TehVenom/Dolly_Shygmalion-6b", + "digitous/Janin-GPTJ", + "TehVenom/GPT-J-Pyg_PPO-6B-Dev-V8p4", + "EleutherAI/gpt-j-6b", + "KoboldAI/GPT-J-6B-Shinen", + "TehVenom/Dolly_Malion-6b", + "TehVenom/ChanMalion", + "Salesforce/codegen-6B-nl", + "Fredithefish/RedPajama-INCITE-Chat-3B-Instruction-Tuning-with-GPT-4", + "KoboldAI/GPT-J-6B-Janeway", + "togethercomputer/RedPajama-INCITE-Chat-3B-v1", + "togethercomputer/Pythia-Chat-Base-7B", + "heegyu/RedTulu-Uncensored-3B-0719", + "KoboldAI/PPO_Pygway-6b-Mix", + "KoboldAI/OPT-13B-Erebus", + "KoboldAI/fairseq-dense-6.7B", + "EleutherAI/pythia-12b-deduped", + "pszemraj/pythia-6.9b-HC3", + "Fredithefish/Guanaco-3B-Uncensored-v2", + "facebook/opt-13b", + "TehVenom/GPT-J-Pyg_PPO-6B", + "EleutherAI/pythia-6.9b-deduped", + "Devio/test-1400", + "Fredithefish/Guanaco-3B-Uncensored", + "codellama/CodeLlama-7b-hf", + "acrastt/RedPajama-INCITE-Chat-Instruct-3B-V1", + "Fredithefish/ScarletPajama-3B-HF", + "KoboldAI/OPT-13B-Nerybus-Mix", + "YeungNLP/firefly-bloom-7b1", + "DanielSc4/RedPajama-INCITE-Chat-3B-v1-RL-LoRA-8bit-test1", + "klosax/open_llama_7b_400bt_preview", + "KoboldAI/OPT-13B-Nerys-v2", + "TehVenom/PPO_Shygmalion-6b", + "amazon/LightGPT", + "KnutJaegersberg/black_goo_recipe_c", + "NousResearch/CodeLlama-7b-hf", + "togethercomputer/RedPajama-INCITE-Instruct-3B-v1", + "heegyu/WizardVicuna-open-llama-3b-v2", + "bigscience/bloom-7b1", + "Devio/test-22B", + "RWKV/rwkv-raven-7b", + "hakurei/instruct-12b", + "CobraMamba/mamba-gpt-3b", + "KnutJaegersberg/black_goo_recipe_a", + "acrastt/OmegLLaMA-3B", + "codellama/CodeLlama-7b-Instruct-hf", + "h2oai/h2ogpt-oig-oasst1-512-6_9b", + "KoboldAI/OPT-6.7B-Erebus", + "facebook/opt-6.7b", + "KnutJaegersberg/black_goo_recipe_d", + "KnutJaegersberg/LLongMA-3b-LIMA", + "KnutJaegersberg/black_goo_recipe_b", + "KoboldAI/OPT-6.7B-Nerybus-Mix", + "health360/Healix-3B", + "EleutherAI/pythia-12b", + "Fredithefish/RedPajama-INCITE-Chat-3B-ShareGPT-11K", + "GeorgiaTechResearchInstitute/galactica-6.7b-evol-instruct-70k", + "h2oai/h2ogpt-oig-oasst1-256-6_9b", + "ikala/bloom-zh-3b-chat", + "Taekyoon/llama2-ko-7b-test", + "anhnv125/pygmalion-6b-roleplay", + "TehVenom/DiffMerge_Pygmalion_Main-onto-V8P4", + "KoboldAI/OPT-6B-nerys-v2", + "Lazycuber/pyg-instruct-wizardlm", + "Devio/testC", + "KoboldAI/OPT-30B-Erebus", + "Fredithefish/CrimsonPajama", + "togethercomputer/RedPajama-INCITE-Base-3B-v1", + "bigscience/bloomz-3b", + "conceptofmind/Open-LLongMA-3b", + "RWKV/rwkv-4-7b-pile", + "openlm-research/open_llama_3b", + "ewof/koishi-instruct-3b", + "DanielSc4/RedPajama-INCITE-Chat-3B-v1-FT-LoRA-8bit-test1", + "cerebras/Cerebras-GPT-13B", + "EleutherAI/pythia-6.7b", + "aisquared/chopt-2_7b", + "Azure99/blossom-v1-3b", + "PSanni/Deer-3b", + 
"bertin-project/bertin-gpt-j-6B-alpaca", + "OpenBuddy/openbuddy-openllama-3b-v10-bf16", + "KoboldAI/fairseq-dense-2.7B", + "ehartford/CodeLlama-34b-Instruct-hf", + "codellama/CodeLlama-34b-Instruct-hf", + "TheBloke/CodeLlama-34B-Instruct-fp16", + "h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-7b-preview-300bt-v2", + "openlm-research/open_llama_7b_700bt_preview", + "NbAiLab/nb-gpt-j-6B-alpaca", + "KoboldAI/OPT-2.7B-Erebus", + "Writer/camel-5b-hf", + "EleutherAI/pythia-2.7b", + "facebook/xglm-7.5B", + "EleutherAI/pythia-2.8b-deduped", + "klosax/open_llama_3b_350bt_preview", + "klosax/openllama-3b-350bt", + "KoboldAI/OPT-2.7B-Nerybus-Mix", + "KoboldAI/GPT-J-6B-Adventure", + "cerebras/Cerebras-GPT-6.7B", + "TFLai/pythia-2.8b-4bit-alpaca", + "facebook/opt-2.7b", + "KoboldAI/OPT-2.7B-Nerys-v2", + "bigscience/bloom-3b", + "Devio/test100", + "RWKV/rwkv-raven-3b", + "Azure99/blossom-v2-3b", + "codellama/CodeLlama-34b-Python-hf", + "bhenrym14/airoboros-33b-gpt4-1.4.1-PI-8192-fp16", + "EleutherAI/gpt-neo-2.7B", + "danielhanchen/open_llama_3b_600bt_preview", + "open-llm-leaderboard/starchat-alpha", + "pythainlp/wangchanglm-7.5B-sft-en-sharded", + "beaugogh/pythia-1.4b-deduped-sharegpt", + "HWERI/pythia-1.4b-deduped-sharegpt", + "OpenAssistant/stablelm-7b-sft-v7-epoch-3", + "codellama/CodeLlama-7b-Python-hf", + "aisquared/chopt-1_3b", + "PygmalionAI/metharme-1.3b", + "Linly-AI/Chinese-LLaMA-2-13B-hf", + "chargoddard/llama-2-34b-uncode", + "RWKV/rwkv-4-3b-pile", + "pythainlp/wangchanglm-7.5B-sft-enth", + "MBZUAI/LaMini-GPT-1.5B", + "Writer/palmyra-base", + "KoboldAI/fairseq-dense-1.3B", + "EleutherAI/pythia-1.4b-deduped", + "MBZUAI/lamini-neo-1.3b", + "h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-7b-preview-300bt", + "sartmis1/starcoder-finetune-openapi", + "MayaPH/opt-flan-iml-6.7b", + "facebook/xglm-4.5B", + "WizardLM/WizardCoder-15B-V1.0", + "facebook/opt-iml-max-1.3b", + "stabilityai/stablelm-tuned-alpha-7b", + "aisquared/dlite-v2-1_5b", + "stabilityai/stablelm-base-alpha-7b", + "sartmis1/starcoder-finetune-selfinstruct", + "lizhuang144/starcoder_mirror", + "bigcode/starcoder", + "TheBloke/CodeLlama-34B-Python-fp16", + "open-llm-leaderboard/bloomz-1b7-4bit-alpaca-auto-eval-adapter-applied", + "ehartford/CodeLlama-34b-Python-hf", + "codellama/CodeLlama-7b-Python-hf", + "GeorgiaTechResearchInstitute/starcoder-gpteacher-code-instruct", + "LoupGarou/WizardCoder-Guanaco-15B-V1.0", + "golaxy/gogpt-3b-bloom", + "EleutherAI/pythia-1.3b", + "codellama/CodeLlama-13b-Python-hf", + "hakurei/lotus-12B", + "NYTK/PULI-GPTrio", + "facebook/opt-1.3b", + "TheBloke/CodeLlama-13B-Python-fp16", + "codellama/CodeLlama-13b-Python-hf", + "RWKV/rwkv-raven-1b5", + "PygmalionAI/pygmalion-2.7b", + "bigscience/bloom-1b7", + "gpt2-xl", + "LoupGarou/WizardCoder-Guanaco-15B-V1.1", + "RWKV/rwkv-4-1b5-pile", + "codellama/CodeLlama-34b-hf", + "NousResearch/CodeLlama-34b-hf", + "rinna/bilingual-gpt-neox-4b-8k", + "lxe/Cerebras-GPT-2.7B-Alpaca-SP", + "cerebras/Cerebras-GPT-2.7B", + "jzjiao/opt-1.3b-rlhf", + "EleutherAI/gpt-neo-1.3B", + "aisquared/dlite-v1-1_5b", + "Corianas/Quokka_2.7b", + "MrNJK/gpt2-xl-sft", + "facebook/galactica-1.3b", + "aisquared/dlite-v2-774m", + "EleutherAI/pythia-1b-deduped", + "Kunhao/pile-7b-250b-tokens", + "w601sxs/b1ade-1b", + "rinna/bilingual-gpt-neox-4b", + "shaohang/SparseOPT-1.3B", + "shaohang/Sparse0.5_OPT-1.3", + "EleutherAI/polyglot-ko-12.8b", + "Salesforce/codegen-6B-multi", + "bigscience/bloom-1b1", + "TFLai/gpt-neo-1.3B-4bit-alpaca", + "FabbriSimo01/Bloom_1b_Quantized", + "MBZUAI/LaMini-GPT-774M", + 
"Locutusque/gpt2-large-conversational", + "Devio/test-3b", + "stabilityai/stablelm-tuned-alpha-3b", + "PygmalionAI/pygmalion-1.3b", + "KoboldAI/fairseq-dense-355M", + "Rachneet/gpt2-xl-alpaca", + "gpt2-large", + "Mikivis/gpt2-large-lora-sft", + "stabilityai/stablelm-base-alpha-3b", + "gpt2-medium", + "Kunhao/pile-7b", + "aisquared/dlite-v1-774m", + "aisquared/dlite-v2-355m", + "YeungNLP/firefly-bloom-2b6-v2", + "KnutJaegersberg/gpt-2-xl-EvolInstruct", + "KnutJaegersberg/galactica-orca-wizardlm-1.3b", + "cerebras/Cerebras-GPT-1.3B", + "FabbriSimo01/Cerebras_1.3b_Quantized", + "facebook/xglm-1.7B", + "EleutherAI/pythia-410m-deduped", + "TheBloke/GPlatty-30B-SuperHOT-8K-fp16", + "DataLinguistic/DataLinguistic-34B-V1.0", + "Corianas/Quokka_1.3b", + "TheTravellingEngineer/bloom-560m-RLHF-v2", + "Corianas/1.3b", + "RWKV/rwkv-4-430m-pile", + "porkorbeef/Llama-2-13b-sf", + "xhyi/PT_GPTNEO350_ATG", + "TheBloke/Wizard-Vicuna-13B-Uncensored-GPTQ", + "bigscience/bloomz-560m", + "TheBloke/medalpaca-13B-GPTQ-4bit", + "TheBloke/Vicuna-33B-1-3-SuperHOT-8K-fp16", + "aisquared/dlite-v1-355m", + "uukuguy/speechless-codellama-orca-airoboros-13b-0.10e", + "yhyhy3/med-orca-instruct-33b", + "TheBloke/Wizard-Vicuna-30B-Superhot-8K-fp16", + "TheTravellingEngineer/bloom-1b1-RLHF", + "MBZUAI/lamini-cerebras-1.3b", + "IDEA-CCNL/Ziya-LLaMA-13B-Pretrain-v1", + "TheBloke/WizardLM-7B-uncensored-GPTQ", + "TheBloke/EverythingLM-13B-16K-GPTQ", + "quantumaikr/open_llama_7b_hf", + "TheBloke/chronos-wizardlm-uc-scot-st-13B-GPTQ", + "TheBloke/WizardLM-30B-Uncensored-GPTQ", + "IDEA-CCNL/Ziya-LLaMA-13B-v1", + "Phind/Phind-CodeLlama-34B-v1", + "robowaifudev/megatron-gpt2-345m", + "MayaPH/GodziLLa-30B-instruct", + "TheBloke/CAMEL-33B-Combined-Data-SuperHOT-8K-fp16", + "uukuguy/speechless-codellama-orca-platypus-13b-0.10e", + "doas/test2", + "BreadAi/PM_modelV2", + "bigcode/santacoder", + "TheBloke/wizard-vicuna-13B-GPTQ", + "porkorbeef/Llama-2-13b", + "TehVenom/DiffMerge-DollyGPT-Pygmalion", + "PygmalionAI/pygmalion-350m", + "TheBloke/orca_mini_v3_7B-GPTQ", + "TheBloke/WizardLM-Uncensored-SuperCOT-StoryTelling-30B-GPTQ", + "TheBloke/WizardLM-30B-GPTQ", + "bigscience/bloom-560m", + "TFLai/gpt2-turkish-uncased", + "TheBloke/guanaco-33B-GPTQ", + "TheBloke/openchat_v2_openorca_preview-GPTQ", + "porkorbeef/Llama-2-13b-public", + "TheBloke/LongChat-13B-GPTQ", + "yhyhy3/med-orca-instruct-33b", + "TheBloke/airoboros-33B-gpt4-1-4-SuperHOT-8K-fp16", + "TheBloke/Chinese-Alpaca-33B-SuperHOT-8K-fp16", + "MayaPH/FinOPT-Franklin", + "TheBloke/WizardLM-33B-V1.0-Uncensored-GPTQ", + "TheBloke/Project-Baize-v2-13B-GPTQ", + "malhajar/Platypus2-70B-instruct-4bit-gptq", + "KoboldAI/OPT-350M-Erebus", + "rishiraj/bloom-560m-guanaco", + "Panchovix/WizardLM-33B-V1.0-Uncensored-SuperHOT-8k", + "doas/test5", + "vicgalle/alpaca-7b", + "beomi/KoAlpaca-Polyglot-5.8B", + "Phind/Phind-CodeLlama-34B-Python-v1", + "timdettmers/guanaco-65b-merged", + "TheBloke/wizard-mega-13B-GPTQ", + "MayaPH/GodziLLa-30B-plus", + "TheBloke/Platypus-30B-SuperHOT-8K-fp16", + "facebook/opt-350m", + "KoboldAI/OPT-350M-Nerys-v2", + "TheBloke/robin-33B-v2-GPTQ", + "jaspercatapang/Echidna-30B", + "TheBloke/llama-30b-supercot-SuperHOT-8K-fp16", + "marcchew/test1", + "Harshvir/LaMini-Neo-1.3B-Mental-Health_lora", + "golaxy/gogpt-560m", + "TheBloke/orca_mini_13B-GPTQ", + "Panchovix/airoboros-33b-gpt4-1.2-SuperHOT-8k", + "Aspik101/tulu-7b-instruct-pl-lora_unload", + "Phind/Phind-CodeLlama-34B-v2", + "BreadAi/MusePy-1-2", + "cerebras/Cerebras-GPT-590M", + "microsoft/CodeGPT-small-py", + 
"victor123/WizardLM-13B-1.0", + "OptimalScale/robin-65b-v2-delta", + "voidful/changpt-bart", + "FabbriSimo01/GPT_Large_Quantized", + "MayaPH/FinOPT-Lincoln", + "KoboldAI/fairseq-dense-125M", + "SebastianSchramm/Cerebras-GPT-111M-instruction", + "TheTravellingEngineer/bloom-560m-RLHF", + "breadlicker45/dough-instruct-base-001", + "WizardLM/WizardLM-30B-V1.0", + "WizardLM/WizardLM-30B-V1.0", + "WizardLM/WizardLM-30B-V1.0", + "TaylorAI/Flash-Llama-30M-20001", + "porkorbeef/Llama-2-13b-12_153950", + "huggingtweets/bladeecity-jerma985", + "KnutJaegersberg/megatron-GPT-2-345m-EvolInstruct", + "bhenrym14/airoboros-33b-gpt4-1.4.1-lxctx-PI-16384-fp16", + "microsoft/DialoGPT-small", + "Corianas/590m", + "facebook/xglm-564M", + "EleutherAI/gpt-neo-125m", + "EleutherAI/pythia-160m-deduped", + "klosax/pythia-160m-deduped-step92k-193bt", + "MBZUAI/lamini-neo-125m", + "bigcode/tiny_starcoder_py", + "concedo/OPT-19M-ChatSalad", + "anton-l/gpt-j-tiny-random", + "grantprice/Cerebras-GPT-590M-finetuned-DND", + "deepnight-research/zsc-text", + "WangZeJun/bloom-820m-chat", + "cerebras/Cerebras-GPT-256M", + "ai-forever/rugpt3large_based_on_gpt2", + "alibidaran/medical_transcription_generator", + "Deci/DeciCoder-1b", + "microsoft/DialoGPT-medium", + "ogimgio/gpt-neo-125m-neurallinguisticpioneers", + "open-llm-leaderboard/bloom-560m-4bit-alpaca-auto-eval-adapter-applied", + "BreadAi/gpt-YA-1-1_160M", + "microsoft/DialoGPT-large", + "facebook/opt-125m", + "huggingtweets/jerma985", + "Locutusque/gpt2-conversational-or-qa", + "concedo/Pythia-70M-ChatSalad", + "roneneldan/TinyStories-1M", + "BreadAi/DiscordPy", + "bigcode/gpt_bigcode-santacoder", + "Tincando/fiction_story_generator", + "klosax/pythia-70m-deduped-step44k-92bt", + "Quake24/easyTermsSummerizer", + "BreadAi/gpt-YA-1-1_70M", + "EleutherAI/pythia-160m", + "euclaise/gpt-neox-122m-minipile-digits", + "MBZUAI/lamini-cerebras-590m", + "nicholasKluge/Aira-124M", + "MayaPH/FinOPT-Washington", + "cyberagent/open-calm-large", + "BreadAi/StoryPy", + "EleutherAI/pythia-70m", + "BreadAi/gpt-Youtube", + "roneneldan/TinyStories-33M", + "EleutherAI/pythia-70m-deduped", + "lgaalves/gpt2_guanaco-dolly-platypus", + "Corianas/Quokka_590m", + "lgaalves/gpt2_platypus-dolly-guanaco", + "cyberagent/open-calm-7b", + "RWKV/rwkv-4-169m-pile", + "gpt2", + "roneneldan/TinyStories-28M", + "lgaalves/gpt2_open-platypus", + "gpt2", + "SaylorTwift/gpt2_test", + "roneneldan/TinyStories-3M", + "nthngdy/pythia-owt2-70m-50k", + "Corianas/256_5epoch", + "roneneldan/TinyStories-8M", + "lgaalves/gpt2-dolly", + "nthngdy/pythia-owt2-70m-100k", + "aisquared/dlite-v2-124m", + "mncai/SGPT-1.3B-insurance-epoch10", + "huggingtweets/gladosystem", + "abhiramtirumala/DialoGPT-sarcastic-medium", + "MBZUAI/lamini-cerebras-256m", + "cerebras/Cerebras-GPT-111M", + "uberkie/metharme-1.3b-finetuned", + "MBZUAI/lamini-cerebras-111m", + "psyche/kogpt", + "Corianas/Quokka_256m", + "vicgalle/gpt2-alpaca-gpt4", + "aisquared/dlite-v1-124m", + "Mikivis/xuanxuan", + "MBZUAI/LaMini-GPT-124M", + "vicgalle/gpt2-alpaca", + "huashiyiqike/testmodel", + "Corianas/111m", + "baseline", +] diff --git a/src/voting/vote_system.py b/src/voting/vote_system.py new file mode 100644 index 0000000000000000000000000000000000000000..0ab75738159cc377a480c566393f872d615da898 --- /dev/null +++ b/src/voting/vote_system.py @@ -0,0 +1,155 @@ +import json +import logging +import pathlib +import pandas as pd +import gradio as gr +import schedule +import time +from datetime import datetime, timezone +from src.display.utils import EvalQueueColumn 
+
+from src.envs import API
+
+# Set up logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+
+class VoteManager:
+    def __init__(self, votes_path, eval_requests_path, repo_id):
+        self.votes_path = votes_path
+        self.eval_requests_path = eval_requests_path
+        self.repo_id = repo_id
+        self.vote_dataset = self.read_vote_dataset()
+        self.vote_check_set = self.make_check_set(self.vote_dataset)
+        self.votes_to_upload = []
+
+    def init_vote_dataset(self):
+        self.vote_dataset = self.read_vote_dataset()
+        self.vote_check_set = self.make_check_set(self.vote_dataset)
+
+    def read_vote_dataset(self):
+        result = []
+        votes_file = pathlib.Path(self.votes_path) / "votes_data.jsonl"
+        if votes_file.exists():
+            with open(votes_file, "r") as f:
+                for line in f:
+                    data = json.loads(line.strip())
+                    result.append(data)
+        result = pd.DataFrame(result)
+        return result
+
+    def make_check_set(self, vote_dataset: pd.DataFrame):
+        result = []
+        for row in vote_dataset.itertuples(index=False, name="vote"):
+            result.append((row.model, row.revision, row.username))
+        return set(result)
+
+    def get_model_revision(self, selected_model: str) -> str:
+        """Fetch the revision for the given model from the request files."""
+        for user_folder in pathlib.Path(self.eval_requests_path).iterdir():
+            if user_folder.is_dir():
+                for file in user_folder.glob("*.json"):
+                    with open(file, "r") as f:
+                        data = json.load(f)
+                        if data.get("model") == selected_model:
+                            return data.get("revision", "main")
+        return "main"
+
+    def create_request_vote_df(self, pending_models_df: gr.Dataframe):
+        if pending_models_df.empty or "model_name" not in pending_models_df.columns:
+            return pending_models_df
+
+        self.vote_dataset = self.read_vote_dataset()
+        vote_counts = self.vote_dataset.groupby(["model", "revision"]).size().reset_index(name="vote_count")
+
+        pending_models_df_votes = pd.merge(
+            pending_models_df,
+            vote_counts,
+            left_on=["model_name", "revision"],
+            right_on=["model", "revision"],
+            how="left",
+        )
+        # Filling empty votes
+        pending_models_df_votes["vote_count"] = pending_models_df_votes["vote_count"].fillna(0)
+        pending_models_df_votes = pending_models_df_votes.sort_values(
+            by=["vote_count", "model_name"], ascending=[False, True]
+        )
+        # Removing useless columns
+        pending_models_df_votes = pending_models_df_votes.drop(["model_name", "model"], axis=1)
+        return pending_models_df_votes
+
+    # Function to be called when a user votes for a model
+    def add_vote(
+        self,
+        selected_model: str,
+        pending_models_df: gr.Dataframe | None,
+        profile: gr.OAuthProfile | None,
+    ):
+        # model_name, revision, user_id, timestamp
+        if selected_model in ["str", ""]:
+            gr.Warning("No model selected")
+            return
+
+        if profile is None:
+            gr.Warning("Hub Login required")
+            return
+
+        vote_username = profile.username
+        model_revision = self.get_model_revision(selected_model)
+
+        # Immutable tuple used to check whether this user has already voted for this model
+        check_tuple = (selected_model, model_revision, vote_username)
+        if check_tuple in self.vote_check_set:
+            gr.Warning("Already voted for this model")
+            return
+
+        current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+
+        vote_obj = {
+            "model": selected_model,
+            "revision": model_revision,
+            "username": vote_username,
+            "timestamp": current_time,
+        }
+
+        # Append the vote to the JSONL file
+        try:
+            votes_file = pathlib.Path(self.votes_path) / "votes_data.jsonl"
+            with open(votes_file, "a") as f:
+                f.write(json.dumps(vote_obj) + "\n")
+            logger.info(f"Vote added locally: {vote_obj}")
+
+            self.votes_to_upload.append(vote_obj)
+        except Exception as e:
+            logger.error(f"Failed to write vote to file: {e}")
+            gr.Warning("Failed to record vote. Please try again.")
+            return
+
+        self.vote_check_set.add(check_tuple)
+        gr.Info(f"Voted for {selected_model}")
+
+        if pending_models_df is None:
+            return
+
+        return self.create_request_vote_df(pending_models_df)
+
+    def upload_votes(self):
+        if self.votes_to_upload:
+            votes_file = pathlib.Path(self.votes_path) / "votes_data.jsonl"
+            try:
+                with open(votes_file, "rb") as f:
+                    API.upload_file(
+                        path_or_fileobj=f,
+                        path_in_repo="votes_data.jsonl",
+                        repo_id=self.repo_id,
+                        repo_type="dataset",
+                        commit_message="Updating votes_data.jsonl with new votes",
+                    )
+                logger.info("Votes uploaded to votes repository")
+                self.votes_to_upload.clear()
+            except Exception as e:
+                logger.error(f"Failed to upload votes to repository: {e}")
+
+
+def run_scheduler(vote_manager):
+    while True:
+        schedule.run_pending()
+        time.sleep(1)
diff --git a/tests/submission/test_user_submission_permission.py b/tests/submission/test_user_submission_permission.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f459f7b7b86282a922f6c22e4f6a79daec1bcde
--- /dev/null
+++ b/tests/submission/test_user_submission_permission.py
@@ -0,0 +1,98 @@
+import unittest
+from datetime import datetime, timedelta, timezone
+from unittest.mock import patch
+
+from src.envs import RATE_LIMIT_PERIOD, RATE_LIMIT_QUOTA
+from src.submission.check_validity import user_submission_permission
+
+
+class TestUserSubmissionPermission(unittest.TestCase):
+    def setUp(self):
+        self.user_name = "test_user"
+        self.rate_limit_period = RATE_LIMIT_PERIOD
+        self.rate_limit_quota = RATE_LIMIT_QUOTA
+        self.fixed_now = datetime(2023, 6, 1, 12, 0, 0, tzinfo=timezone.utc)
+        # Submission dates that simulate various test cases
+        self.users_to_submission_dates = {
+            "test_user": [
+                (self.fixed_now - timedelta(days=1)).isoformat(),
+                (self.fixed_now - timedelta(days=2)).isoformat(),
+                (self.fixed_now - timedelta(days=3)).isoformat(),
+                (self.fixed_now - timedelta(days=4)).isoformat(),
+            ]
+        }
+
+    @staticmethod
+    def fixed_datetime_now(tz=None):
+        return datetime(2023, 6, 1, 12, 0, 0, tzinfo=timezone.utc)
+
+    @patch('src.submission.check_validity.datetime')
+    def test_user_below_quota(self, mock_datetime):
+        mock_datetime.now.side_effect = self.fixed_datetime_now
+        mock_datetime.fromisoformat = datetime.fromisoformat
+        allowed, message = user_submission_permission(
+            self.user_name, self.users_to_submission_dates, self.rate_limit_period, self.rate_limit_quota
+        )
+        self.assertTrue(allowed)
+
+    @patch('src.submission.check_validity.datetime')
+    def test_user_at_quota(self, mock_datetime):
+        mock_datetime.now.side_effect = self.fixed_datetime_now
+        mock_datetime.fromisoformat = datetime.fromisoformat
+
+        # Add one more submission to reach the quota
+        self.users_to_submission_dates["test_user"].append(self.fixed_now.isoformat())
+
+        allowed, message = user_submission_permission(
+            self.user_name, self.users_to_submission_dates, self.rate_limit_period, self.rate_limit_quota
+        )
+        self.assertFalse(allowed)
+        expected_message = (
+            f"Organisation or user `{self.user_name}` already has {self.rate_limit_quota} model requests submitted "
+            f"in the last {self.rate_limit_period} days.\n"
+            "Please wait a couple of days before resubmitting, so that everybody can enjoy using the leaderboard 🤗"
+        )
+        self.assertEqual(message, expected_message)
+
+    @patch('src.submission.check_validity.datetime')
+    def test_user_above_quota(self, mock_datetime):
+        mock_datetime.now.side_effect = self.fixed_datetime_now
+        mock_datetime.fromisoformat = datetime.fromisoformat
+        # Add more than quota submissions
+        for _ in range(self.rate_limit_quota + 1):
+            self.users_to_submission_dates["test_user"].append(self.fixed_now.isoformat())
+        allowed, message = user_submission_permission(
+            self.user_name, self.users_to_submission_dates, self.rate_limit_period, self.rate_limit_quota
+        )
+        self.assertFalse(allowed)
+
+    def test_user_no_previous_submissions(self):
+        allowed, message = user_submission_permission(
+            "new_user", self.users_to_submission_dates, self.rate_limit_period, self.rate_limit_quota
+        )
+        self.assertTrue(allowed)
+
+    @patch('src.submission.check_validity.HAS_HIGHER_RATE_LIMIT', ["specific_user"])
+    @patch('src.submission.check_validity.datetime')
+    def test_user_higher_rate_limit(self, mock_datetime):
+        mock_datetime.now.side_effect = self.fixed_datetime_now
+        mock_datetime.fromisoformat = datetime.fromisoformat
+        self.users_to_submission_dates["specific_user"] = [self.fixed_now.isoformat()] * (self.rate_limit_quota + 1)
+        allowed, message = user_submission_permission(
+            "specific_user", self.users_to_submission_dates, self.rate_limit_period, self.rate_limit_quota
+        )
+        self.assertTrue(allowed)
+
+    @patch('src.submission.check_validity.datetime')
+    def test_submission_just_outside_window(self, mock_datetime):
+        mock_datetime.now.side_effect = self.fixed_datetime_now
+        mock_datetime.fromisoformat = datetime.fromisoformat
+        old_submission = (self.fixed_now - timedelta(days=self.rate_limit_period, seconds=1)).isoformat()
+        self.users_to_submission_dates["test_user"] = [old_submission]
+        allowed, message = user_submission_permission(
+            self.user_name, self.users_to_submission_dates, self.rate_limit_period, self.rate_limit_quota
+        )
+        self.assertTrue(allowed)
+
+
+if __name__ == '__main__':
+    unittest.main()
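+
+# These tests patch src.submission.check_validity.datetime so that
+# datetime.now() is pinned to a fixed instant, making the rate-limit window
+# deterministic. To run just this module (assuming the standard package
+# layout, from the repo root):
+#   python -m unittest tests.submission.test_user_submission_permission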