Maharshi Gor committed
Commit 633b045 · 1 Parent(s): a02c1af

Refactor: moved some vars from envs to app_config

Files changed (6):
  1. app.py +2 -4
  2. src/app_configs.py +52 -0
  3. src/envs.py +0 -50
  4. src/llms.py +1 -1
  5. src/submission/submit.py +45 -6
  6. src/utils.py +1 -1
app.py CHANGED
@@ -3,22 +3,20 @@ import gradio as gr
 from apscheduler.schedulers.background import BackgroundScheduler
 from huggingface_hub import snapshot_download
 
+from app_configs import AVAILABLE_MODELS, DEFAULT_SELECTIONS, THEME
 from components.quizbowl.bonus import BonusInterface
 from components.quizbowl.tossup import TossupInterface
 from display.custom_css import css_pipeline, css_tossup
 
 # Constants
-from src.envs import (
+from envs import (
     API,
-    AVAILABLE_MODELS,
-    DEFAULT_SELECTIONS,
     EVAL_REQUESTS_PATH,
     EVAL_RESULTS_PATH,
     PLAYGROUND_DATASET_NAMES,
     QUEUE_REPO,
     REPO_ID,
     RESULTS_REPO,
-    THEME,
     TOKEN,
 )
 from workflows import factory
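
With this split, app.py takes its UI configuration (THEME, AVAILABLE_MODELS, DEFAULT_SELECTIONS) from app_configs and its deployment values (repos, paths, token) from envs. Below is a minimal startup sketch of how the two sets of imports might be consumed together; the Blocks layout and the queue sync call are illustrative assumptions, not code from this commit.

# Illustrative sketch only; the real app.py wiring is not shown in this diff.
import gradio as gr
from huggingface_hub import snapshot_download

from app_configs import DEFAULT_SELECTIONS, THEME       # UI defaults and Hub theme name
from envs import EVAL_REQUESTS_PATH, QUEUE_REPO, TOKEN   # deployment/runtime values

# Mirror the pending-submissions queue locally (assumed usage, based on the imports above).
snapshot_download(repo_id=QUEUE_REPO, repo_type="dataset", local_dir=EVAL_REQUESTS_PATH, token=TOKEN)

with gr.Blocks(theme=THEME) as demo:  # THEME is a Hub theme string ("gstaff/xkcd")
    gr.Markdown(f"Default tossup model: {DEFAULT_SELECTIONS['tossup']['model']}")

demo.launch()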
src/app_configs.py ADDED
@@ -0,0 +1,52 @@
+THEME = "gstaff/xkcd"
+
+UNSELECTED_VAR_NAME = "Select Variable..."
+UNSELECTED_MODEL_NAME = "Select Model..."
+AVAILABLE_MODELS = {
+    "OpenAI/gpt-4o": {
+        "model": "gpt-4o-2024-11-20",
+    },
+    "OpenAI/gpt-4o-mini": {
+        "model": "gpt-4o-mini-2024-07-18",
+    },
+    "OpenAI/gpt-3.5-turbo": {
+        "model": "gpt-3.5-turbo-0125",
+    },
+    "Anthropic/claude-3-7-sonnet": {
+        "model": "claude-3-7-sonnet-20250219",
+    },
+    "Anthropic/claude-3-5-sonnet": {
+        "model": "claude-3-5-sonnet-20241022",
+    },
+    "Anthropic/claude-3-5-haiku": {
+        "model": "claude-3-5-haiku-20241022",
+    },
+    "Cohere/command-r": {
+        "model": "command-r-08-2024",
+    },
+    "Cohere/command-r-plus": {
+        "model": "command-r-plus-08-2024",
+    },
+    "Cohere/command-r7b": {
+        "model": "command-r7b-12-2024",
+    },
+}
+
+DEFAULT_SELECTIONS = {
+    "tossup": {
+        "simple_workflow": False,
+        "model": "OpenAI/gpt-4o-mini",
+        "temperature": 0.2,
+        "buzz_threshold": 0.85,
+        "early_stop": True,
+    },
+    "bonus": {
+        "simple_workflow": False,
+        "model": "OpenAI/gpt-4o-mini",
+        "temperature": 0.2,
+        "buzz_threshold": 0.85,
+        "early_stop": True,
+    },
+}
+
+DAILY_SUBMISSION_LIMIT_PER_USER = 5
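
app_configs now centralizes the UI constants: AVAILABLE_MODELS maps a display name of the form "Provider/alias" to a pinned model id, and DEFAULT_SELECTIONS carries the per-mode defaults. A small sketch of how a caller might resolve a dropdown selection against these tables follows; the resolve_model helper is hypothetical and not part of this commit.

# Hypothetical helper, not part of this commit; shows how the two tables fit together.
from app_configs import AVAILABLE_MODELS, DEFAULT_SELECTIONS, UNSELECTED_MODEL_NAME


def resolve_model(display_name: str, mode: str = "tossup") -> str:
    """Map a UI selection like 'OpenAI/gpt-4o-mini' to its pinned model id."""
    if display_name == UNSELECTED_MODEL_NAME:
        # Fall back to the default model configured for this mode.
        display_name = DEFAULT_SELECTIONS[mode]["model"]
    return AVAILABLE_MODELS[display_name]["model"]


print(resolve_model(UNSELECTED_MODEL_NAME))  # "gpt-4o-mini-2024-07-18"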
src/envs.py CHANGED
@@ -32,55 +32,5 @@ EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")
 EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
 EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")
 
-THEME = "gstaff/xkcd"
-UNSELECTED_VAR_NAME = "Select Variable..."
-UNSELECTED_MODEL_NAME = "Select Model..."
-AVAILABLE_MODELS = {
-    "OpenAI/gpt-4o": {
-        "model": "gpt-4o-2024-11-20",
-    },
-    "OpenAI/gpt-4o-mini": {
-        "model": "gpt-4o-mini-2024-07-18",
-    },
-    "OpenAI/gpt-3.5-turbo": {
-        "model": "gpt-3.5-turbo-0125",
-    },
-    "Anthropic/claude-3-7-sonnet": {
-        "model": "claude-3-7-sonnet-20250219",
-    },
-    "Anthropic/claude-3-5-sonnet": {
-        "model": "claude-3-5-sonnet-20241022",
-    },
-    "Anthropic/claude-3-5-haiku": {
-        "model": "claude-3-5-haiku-20241022",
-    },
-    "Cohere/command-r": {
-        "model": "command-r-08-2024",
-    },
-    "Cohere/command-r-plus": {
-        "model": "command-r-plus-08-2024",
-    },
-    "Cohere/command-r7b": {
-        "model": "command-r7b-12-2024",
-    },
-}
-
-DEFAULT_SELECTIONS = {
-    "tossup": {
-        "simple_workflow": False,
-        "model": "OpenAI/gpt-4o-mini",
-        "temperature": 0.2,
-        "buzz_threshold": 0.85,
-        "early_stop": True,
-    },
-    "bonus": {
-        "simple_workflow": False,
-        "model": "OpenAI/gpt-4o-mini",
-        "temperature": 0.2,
-        "buzz_threshold": 0.85,
-        "early_stop": True,
-    },
-}
 
-DAILY_SUBMISSION_LIMIT_PER_USER = 5
 API = HfApi(token=TOKEN)
src/llms.py CHANGED
@@ -15,7 +15,7 @@ from pydantic import BaseModel, Field
 from rich import print as rprint
 
 import utils
-from envs import AVAILABLE_MODELS
+from app_configs import AVAILABLE_MODELS
 
 
 class LLMOutput(BaseModel):
src/submission/submit.py CHANGED
@@ -1,19 +1,45 @@
 import json
+import logging
 import os
 import traceback
 from datetime import datetime, timedelta, timezone
-from typing import Optional
 
 import gradio as gr
-import yaml
 
-from src.display.formatting import styled_error, styled_message
-from src.envs import API, DAILY_SUBMISSION_LIMIT_PER_USER, EVAL_REQUESTS_PATH, QUEUE_REPO
-from src.submission.structs import CompetitionType, Submission, SubmissionStatus
+from app_configs import DAILY_SUBMISSION_LIMIT_PER_USER
+from display.formatting import styled_error, styled_message
+from envs import API, EVAL_REQUESTS_PATH, QUEUE_REPO
+from submission.structs import CompetitionType, Submission, SubmissionStatus
 from workflows.structs import Workflow
 
 
+def get_user_submissions(username: str, competition_type: str, pattern: str = None) -> list[Submission]:
+    """Get all submissions for a user."""
+    out_dir = f"{EVAL_REQUESTS_PATH}/{username}"
+    submissions = []
+    if not os.path.exists(out_dir):
+        return submissions
+    for file in os.listdir(out_dir):
+        if not file.startswith(f"{competition_type}_"):
+            continue
+        if pattern is not None and pattern not in file:
+            continue
+        with open(os.path.join(out_dir, file), "r") as f:
+            submission = Submission.from_dict(json.load(f))
+            submissions.append(submission)
+    return submissions
+
+
+def get_user_submission_names(competition_type: str, profile: gr.OAuthProfile | None) -> list[str]:
+    """Get all submission model names for a user."""
+    if profile is None:
+        return []
+    submissions = get_user_submissions(profile.username, competition_type)
+    return [s.model_name for s in submissions]
+
+
 def get_user_submissions_today(username: str, competition_type: str) -> list[Submission]:
+    """Get all submissions for a user for a given competition type."""
     today = datetime.now(timezone.utc).strftime("%Y%m%d")
     if username is None:
         raise gr.Error("Authentication required. Please log in to view your submissions.")
@@ -103,7 +129,7 @@ def submit_model(
 
     username = profile.username
 
-    if len(get_user_submissions_today(username)) >= DAILY_SUBMISSION_LIMIT_PER_USER:
+    if len(get_user_submissions_today(username, competition_type)) >= DAILY_SUBMISSION_LIMIT_PER_USER:
         time_str = get_time_until_next_submission()
         return styled_error(
             f"Daily submission limit of {DAILY_SUBMISSION_LIMIT_PER_USER} reached. Please try again in \n {time_str}."
@@ -144,6 +170,17 @@ def submit_model(
         return styled_error(f"Error submitting model: {str(e)}")
 
 
+def load_submission(model_name: str, competition_type: CompetitionType, profile: gr.OAuthProfile | None) -> Submission:
+    if profile is None:
+        logging.error("Authentication required. Please log in to view your submissions.")
+        return styled_error("Authentication required. Please log in to view your submissions.")
+    username = profile.username
+    submissions = get_user_submissions(username, competition_type, model_name)
+    if len(submissions) == 0:
+        return styled_error(f"Submission {model_name} not found.")
+    return submissions[0]
+
+
 if __name__ == "__main__":
     # Example usage
     from workflows.factory import create_quizbowl_simple_step_initial_setup
@@ -168,3 +205,5 @@ if __name__ == "__main__":
         competition_type="tossup",
     )
     print(result)
+
+# %%
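
The new submit.py helpers read submission JSON files from EVAL_REQUESTS_PATH/<username>, keep only those whose filename starts with the competition type, and rehydrate them via Submission.from_dict; load_submission returns a styled_error string instead of a Submission when the lookup fails. A rough caller-side sketch follows, with the function name and control flow assumed rather than taken from the repository.

# Assumed caller-side usage, not part of this commit.
import gradio as gr

from submission.submit import get_user_submission_names, load_submission


def describe_latest_submission(profile: gr.OAuthProfile | None) -> str:
    """Summarize the user's most recent tossup submission; profile comes from the Gradio login flow."""
    names = get_user_submission_names("tossup", profile)  # [] when profile is None
    if not names:
        return "No tossup submissions found."
    latest = load_submission(names[-1], "tossup", profile)
    # On failure, load_submission returns an HTML error string, not a Submission.
    if isinstance(latest, str):
        return latest
    return f"Most recent submission: {latest.model_name}"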
src/utils.py CHANGED
@@ -1,6 +1,6 @@
 # Description: Utility functions for the model_step component.
 
-from envs import AVAILABLE_MODELS, UNSELECTED_MODEL_NAME
+from app_configs import AVAILABLE_MODELS, UNSELECTED_MODEL_NAME
 
 
 def guess_model_provider(model_name: str):
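
utils.py now resolves providers against the same AVAILABLE_MODELS table as the rest of the app. The body of guess_model_provider is not part of this diff; the sketch below shows one plausible implementation, assuming the provider is simply the prefix of the "Provider/alias" keys.

# Sketch of the idea only; the actual guess_model_provider body is not shown in this diff.
from app_configs import AVAILABLE_MODELS


def guess_model_provider_sketch(model_name: str) -> str:
    """Guess the provider ('OpenAI', 'Anthropic', 'Cohere') for a model name or alias."""
    for display_name, cfg in AVAILABLE_MODELS.items():
        provider, _, alias = display_name.partition("/")
        if model_name in (display_name, alias, cfg["model"]):
            return provider
    raise ValueError(f"Unknown model: {model_name}")


print(guess_model_provider_sketch("gpt-4o-mini-2024-07-18"))  # "OpenAI"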