hamzabouajila committed
Commit 742dfc3 · 1 Parent(s): 28e88f2

implement model-checking scripts, add logging, update submission, and integrate evaluation

app.py CHANGED
@@ -1,3 +1,9 @@
 
 
1
  import gradio as gr
2
  from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
3
  from apscheduler.schedulers.background import BackgroundScheduler
@@ -32,7 +38,32 @@ import time
32
 
33
 
34
  def restart_space():
35
- API.restart_space(repo_id=REPO_ID)
 
 
36
 
37
  ### Space initialisation
38
  try:
@@ -109,25 +140,24 @@ def init_leaderboard(dataframe):
109
 
110
  # Add model evaluation functionality
111
  def evaluate_and_update(model_name, revision, precision, weight_type):
112
- """Evaluate a model and update the leaderboard"""
113
  try:
114
- # Run evaluation
115
- eval_result = evaluate_model(model_name, revision, precision, weight_type)
116
-
117
- # Add evaluation to queue
118
  add_new_eval(
119
  model_name=model_name,
120
  revision=revision,
121
  precision=precision,
122
  weight_type=weight_type,
123
- results=eval_result.results
124
  )
125
 
126
  # Update leaderboard
127
  LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
128
- return "Evaluation started successfully! Check the leaderboard for updates."
129
  except Exception as e:
130
- return f"Error during evaluation: {str(e)}"
 
 
131
 
132
 
133
  demo = gr.Blocks(css=custom_css)
 
1
+ import os
2
+ from dotenv import load_dotenv
3
+
4
+ # Load environment variables from .env file
5
+ load_dotenv()
6
+
7
  import gradio as gr
8
  from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
9
  from apscheduler.schedulers.background import BackgroundScheduler
 
38
 
39
 
40
  def restart_space():
41
+ try:
42
+ # Restart the space
43
+ API.restart_space(repo_id=REPO_ID)
44
+ except Exception as e:
45
+ print(f"Error restarting space: {str(e)}")
46
+ # If restart fails, try to download the datasets again
47
+ try:
48
+ print("Attempting to download datasets again...")
49
+ snapshot_download(
50
+ repo_id=QUEUE_REPO,
51
+ local_dir=EVAL_REQUESTS_PATH,
52
+ repo_type="dataset",
53
+ tqdm_class=None,
54
+ etag_timeout=30,
55
+ token=TOKEN
56
+ )
57
+ snapshot_download(
58
+ repo_id=RESULTS_REPO,
59
+ local_dir=EVAL_RESULTS_PATH,
60
+ repo_type="dataset",
61
+ tqdm_class=None,
62
+ etag_timeout=30,
63
+ token=TOKEN
64
+ )
65
+ except Exception as download_error:
66
+ print(f"Error downloading datasets: {str(download_error)}")
67
 
68
  ### Space initialisation
69
  try:
 
140
 
141
  # Add model evaluation functionality
142
  def evaluate_and_update(model_name, revision, precision, weight_type):
143
+ """Add a model evaluation request to the queue"""
144
  try:
145
+ # Add evaluation request to queue
 
 
 
146
  add_new_eval(
147
  model_name=model_name,
148
  revision=revision,
149
  precision=precision,
150
  weight_type=weight_type,
151
+ model_type="LLM", # Add appropriate model type
152
  )
153
 
154
  # Update leaderboard
155
  LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
156
+ return "Evaluation request added to queue! Check the leaderboard for updates."
157
  except Exception as e:
158
+ print(f"Error in evaluate_and_update: {str(e)}")
159
+ print(f"Full traceback: {traceback.format_exc()}")
160
+ return f"Error adding evaluation request: {str(e)}"
161
 
162
 
163
  demo = gr.Blocks(css=custom_css)
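The hunk above adds evaluate_and_update but does not show how it is wired into the Blocks UI. A minimal sketch of one possible hookup, assuming simple Textbox/Dropdown inputs and a status Textbox (component names here are illustrative, not taken from the repo):

    with demo:
        model_name_box = gr.Textbox(label="Model name")
        revision_box = gr.Textbox(label="Revision", value="main")
        precision_dd = gr.Dropdown(choices=["float16", "bfloat16"], label="Precision")
        weight_type_dd = gr.Dropdown(choices=["Original", "Adapter", "Delta"], label="Weight type")
        status_box = gr.Textbox(label="Status", interactive=False)
        submit_btn = gr.Button("Submit for evaluation")
        # Route the form through the new queue-based submission path
        submit_btn.click(
            fn=evaluate_and_update,
            inputs=[model_name_box, revision_box, precision_dd, weight_type_dd],
            outputs=status_box,
        )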
pyproject.toml CHANGED
@@ -18,6 +18,7 @@ dependencies = [
18
  "numpy>=2.3.1",
19
  "pandas>=2.3.0",
20
  "python-dateutil>=2.9.0.post0",
 
21
  "scikit-learn>=1.7.0",
22
  "sentencepiece>=0.2.0",
23
  "tokenizers>=0.15.0",
 
18
  "numpy>=2.3.1",
19
  "pandas>=2.3.0",
20
  "python-dateutil>=2.9.0.post0",
21
+ "python-dotenv>=1.1.1",
22
  "scikit-learn>=1.7.0",
23
  "sentencepiece>=0.2.0",
24
  "tokenizers>=0.15.0",
scripts/check_model.py ADDED
@@ -0,0 +1,27 @@
 
 
1
+ from transformers import AutoConfig, AutoModelForSequenceClassification
2
+ import torch
3
+
4
+ def check_model(model_name):
5
+ try:
6
+ # Try to load the model configuration
7
+ config = AutoConfig.from_pretrained(model_name)
8
+ print("\nModel Configuration:")
9
+ print(config)
10
+
11
+ # Check if model_type is present
12
+ print("\nModel Type:", config.model_type if hasattr(config, 'model_type') else 'Not specified')
13
+
14
+ # Try to load the model
15
+ print("\nAttempting to load model...")
16
+ model = AutoModelForSequenceClassification.from_pretrained(
17
+ model_name,
18
+ torch_dtype=torch.float16,
19
+ trust_remote_code=True
20
+ )
21
+ print("\nSuccessfully loaded model!")
22
+
23
+ except Exception as e:
24
+ print(f"\nError: {str(e)}")
25
+
26
+ if __name__ == "__main__":
27
+ check_model("HabibBelguith44/Llama3-Tunisian-Dialect")
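check_model.py hard-codes a single model id; an optional command-line wrapper (a sketch only, not part of this commit) would make the script reusable for other ids:

    import argparse

    if __name__ == "__main__":
        # Hypothetical CLI variant; the committed script calls check_model() with a fixed id
        parser = argparse.ArgumentParser(description="Check that a Hugging Face model id can be loaded")
        parser.add_argument("model_name", nargs="?", default="HabibBelguith44/Llama3-Tunisian-Dialect")
        args = parser.parse_args()
        check_model(args.model_name)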
scripts/explore_arabml.py ADDED
@@ -0,0 +1,24 @@
 
 
1
+ from datasets import load_dataset
2
+
3
+ def explore_arabml():
4
+ # Load the ArabML dataset
5
+ dataset = load_dataset("arbml/Tunisian_Dialect_Corpus", split="test")
6
+
7
+ # Print dataset info
8
+ print("\nDataset Info:")
9
+ print(dataset.info)
10
+
11
+ # Print first example
12
+ print("\nFirst Example:")
13
+ print(dataset[0])
14
+
15
+ # Print all column names
16
+ print("\nColumn Names:")
17
+ print(dataset.column_names)
18
+
19
+ # Print first few rows
20
+ print("\nFirst few rows:")
21
+ print(dataset[:3])
22
+
23
+ if __name__ == "__main__":
24
+ explore_arabml()
scripts/explore_dataset.py ADDED
@@ -0,0 +1,24 @@
 
 
1
+ from datasets import load_dataset
2
+
3
+ def explore_dataset():
4
+ # Load the dataset
5
+ dataset = load_dataset("arbml/Tunisian_Dialect_Corpus", split="train")
6
+
7
+ # Print dataset info
8
+ print("\nDataset Info:")
9
+ print(dataset.info)
10
+
11
+ # Print first example
12
+ print("\nFirst Example:")
13
+ print(dataset[0])
14
+
15
+ # Print all column names
16
+ print("\nColumn Names:")
17
+ print(dataset.column_names)
18
+
19
+ # Print first few rows
20
+ print("\nFirst few rows:")
21
+ print(dataset[:3])
22
+
23
+ if __name__ == "__main__":
24
+ explore_dataset()
scripts/explore_tsac.py ADDED
@@ -0,0 +1,24 @@
 
 
1
+ from datasets import load_dataset
2
+
3
+ def explore_tsac():
4
+ # Load the TSAC dataset
5
+ dataset = load_dataset("fbougares/tsac", split="train", trust_remote_code=True)
6
+
7
+ # Print dataset info
8
+ print("\nDataset Info:")
9
+ print(dataset.info)
10
+
11
+ # Print first example
12
+ print("\nFirst Example:")
13
+ print(dataset[0])
14
+
15
+ # Print all column names
16
+ print("\nColumn Names:")
17
+ print(dataset.column_names)
18
+
19
+ # Print first few rows
20
+ print("\nFirst few rows:")
21
+ print(dataset[:3])
22
+
23
+ if __name__ == "__main__":
24
+ explore_tsac()
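The three explore_*.py scripts differ only in the dataset id, split, and trust_remote_code flag; a single parameterised helper could replace them (a possible consolidation sketch, not part of this commit):

    from datasets import load_dataset

    def explore(dataset_id, split, trust_remote_code=False):
        # Same diagnostics the three scripts print, for any dataset/split
        dataset = load_dataset(dataset_id, split=split, trust_remote_code=trust_remote_code)
        print("\nDataset Info:")
        print(dataset.info)
        print("\nColumn Names:")
        print(dataset.column_names)
        print("\nFirst few rows:")
        print(dataset[:3])

    if __name__ == "__main__":
        explore("fbougares/tsac", "train", trust_remote_code=True)
        explore("arbml/Tunisian_Dialect_Corpus", "train")
        explore("arbml/Tunisian_Dialect_Corpus", "test")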
src/display/utils.py CHANGED
@@ -86,6 +86,28 @@ class WeightType(Enum):
86
  Original = ModelDetails("Original")
87
  Delta = ModelDetails("Delta")
88
 
 
 
89
  class Precision(Enum):
90
  float16 = ModelDetails("float16")
91
  bfloat16 = ModelDetails("bfloat16")
 
86
  Original = ModelDetails("Original")
87
  Delta = ModelDetails("Delta")
88
 
89
+ @staticmethod
90
+ def from_str(weight_type):
91
+ """Convert string representation to WeightType enum value.
92
+
93
+ Args:
94
+ weight_type (str): The string representation of the weight type
95
+
96
+ Returns:
97
+ WeightType: The corresponding enum value
98
+
99
+ Raises:
100
+ ValueError: If the weight type is not recognized
101
+ """
102
+ weight_type = str(weight_type).lower()
103
+ if weight_type == "adapter":
104
+ return WeightType.Adapter
105
+ elif weight_type == "original":
106
+ return WeightType.Original
107
+ elif weight_type == "delta":
108
+ return WeightType.Delta
109
+ raise ValueError(f"Unknown weight type: {weight_type}")
110
+
111
  class Precision(Enum):
112
  float16 = ModelDetails("float16")
113
  bfloat16 = ModelDetails("bfloat16")
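WeightType.from_str normalises its input to lower case before matching, so callers can pass the raw strings stored in request files. A brief usage sketch (assuming the enum is imported from src.display.utils):

    from src.display.utils import WeightType

    print(WeightType.from_str("original"))  # WeightType.Original
    print(WeightType.from_str("Adapter"))   # case-insensitive match -> WeightType.Adapter
    # WeightType.from_str("lora") raises ValueError: Unknown weight type: lora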
src/evaluator/evaluate.py CHANGED
@@ -5,7 +5,7 @@ from dataclasses import dataclass
5
  from enum import Enum
6
  from datetime import datetime
7
  import torch
8
- from transformers import AutoModelForSequenceClassification, AutoTokenizer
9
  from datasets import load_dataset
10
 
11
  from src.envs import API, OWNER, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH
@@ -29,94 +29,252 @@ class EvaluationResult:
29
  def evaluate_tsac_sentiment(model, tokenizer, device):
30
  """Evaluate model on TSAC sentiment analysis task"""
31
  try:
32
- dataset = load_dataset("fbougares/tsac", split="train")
 
 
33
 
34
  def preprocess(examples):
35
- return tokenizer(examples['text'], padding=True, truncation=True, max_length=512)
 
 
36
 
37
  dataset = dataset.map(preprocess, batched=True)
38
- dataset.set_format(type='torch', columns=['input_ids', 'attention_mask', 'label'])
 
 
39
 
40
  model.eval()
 
 
 
41
  with torch.no_grad():
42
  predictions = []
43
- labels = []
44
 
45
- for batch in dataset:
46
- inputs = {k: v.to(device) for k, v in batch.items() if k != 'label'}
47
- label = batch['label'].to(device)
 
 
48
 
49
  outputs = model(**inputs)
50
- predictions.extend(outputs.logits.argmax(dim=-1).cpu().tolist())
51
- labels.extend(label.cpu().tolist())
52
-
53
- accuracy = sum(p == l for p, l in zip(predictions, labels)) / len(predictions)
54
- return accuracy
 
 
55
  except Exception as e:
56
- print(f"Error in TSAC evaluation: {str(e)}")
57
- return 0.0
 
58
 
59
- def evaluate_tunisian_corpus_coverage(model, tokenizer):
60
  """Evaluate model's coverage on Tunisian Dialect Corpus"""
61
  try:
62
  dataset = load_dataset("arbml/Tunisian_Dialect_Corpus", split="train")
63
 
64
  def preprocess(examples):
65
- return tokenizer(examples['text'], padding=True, truncation=True, max_length=512)
 
 
66
 
67
  dataset = dataset.map(preprocess, batched=True)
68
 
69
- # Calculate coverage based on tokenization
70
  total_tokens = 0
71
  covered_tokens = 0
72
 
73
  for example in dataset:
74
- tokens = tokenizer.tokenize(example['text'])
75
  total_tokens += len(tokens)
76
  covered_tokens += len([t for t in tokens if t != tokenizer.unk_token])
77
 
78
  coverage = covered_tokens / total_tokens if total_tokens > 0 else 0
79
- return coverage
 
80
  except Exception as e:
81
  print(f"Error in Tunisian Corpus evaluation: {str(e)}")
82
- return 0.0
83
 
84
  def evaluate_model(model_name: str, revision: str, precision: str, weight_type: str) -> EvaluationResult:
85
  """Evaluate a single model on all tasks"""
86
  try:
87
- print(f"------------ evaluation model {model_name}")
88
- # Load model and tokenizer
89
- device = "cuda" if torch.cuda.is_available() else "cpu"
90
-
91
- model = AutoModelForSequenceClassification.from_pretrained(
92
- model_name,
93
- revision=revision,
94
- torch_dtype=getattr(torch, precision),
95
- trust_remote_code=True
96
- ).to(device)
97
-
98
- tokenizer = AutoTokenizer.from_pretrained(model_name, revision=revision)
99
 
100
- # Run evaluations
101
- results = {}
102
-
103
- # TSAC Sentiment
104
- tsac_result = evaluate_tsac_sentiment(model, tokenizer, device)
105
- results[Tasks.tsac_sentiment.value.benchmark] = tsac_result
106
-
107
- # Tunisian Corpus Coverage
108
- corpus_result = evaluate_tunisian_corpus_coverage(model, tokenizer)
109
- results[Tasks.tunisian_corpus.value.benchmark] = corpus_result
110
-
111
- return EvaluationResult(
112
- model=model_name,
113
- revision=revision,
114
- precision=precision,
115
- weight_type=weight_type,
116
- results=results
117
- )
118
 
 
 
119
  except Exception as e:
 
 
 
120
  return EvaluationResult(
121
  model=model_name,
122
  revision=revision,
@@ -128,99 +286,124 @@ def evaluate_model(model_name: str, revision: str, precision: str, weight_type:
128
 
129
  def process_evaluation_queue():
130
  """Process all pending evaluations in the queue"""
131
- # Get all pending evaluations (including nested directories)
132
- queue_dir = os.path.join(EVAL_REQUESTS_PATH)
 
 
133
  pending_files = []
 
 
 
134
 
135
- # Walk through the directory tree
136
- for root, dirs, files in os.walk(queue_dir):
137
- pending_files.extend([os.path.join(root, f) for f in files if f.endswith('.json')])
 
 
 
 
138
 
139
  for file_path in pending_files:
140
- with open(file_path, 'r') as f:
141
- eval_request = json.load(f)
142
 
143
- if eval_request.get('status') != EvaluationStatus.PENDING.value:
144
- continue
 
 
145
 
146
- print(f"Processing evaluation request: {file_path}")
147
-
148
- # Mark as running
149
- eval_request['status'] = EvaluationStatus.RUNNING.value
150
- with open(file_path, 'w') as f:
151
- json.dump(eval_request, f, indent=2)
152
-
153
- # Perform evaluation
154
- result = evaluate_model(
155
- model_name=eval_request['model'],
156
- revision=eval_request['revision'],
157
- precision=eval_request['precision'],
158
- weight_type=eval_request['weight_type']
159
- )
160
-
161
- # Save results
162
- if result.error:
163
- eval_request['status'] = EvaluationStatus.FAILED.value
164
- eval_request['error'] = result.error
165
- else:
166
- eval_request['status'] = EvaluationStatus.FINISHED.value
167
- eval_request['results'] = result.results
168
-
169
- with open(file_path, 'w') as f:
170
- json.dump(eval_request, f, indent=2)
171
-
172
- # Save to results dataset
173
- # Extract username from model path if it exists
174
- username = result.model.split('/')[0] if '/' in result.model else ''
175
- result_filename = f"{result.model.split('/')[-1]}_{result.precision}.json"
176
-
177
- if username:
178
- # Create user directory if it doesn't exist
179
- user_dir = os.path.join(EVAL_RESULTS_PATH, username)
180
- os.makedirs(user_dir, exist_ok=True)
181
- result_file = os.path.join(user_dir, result_filename)
182
- else:
183
- result_file = os.path.join(EVAL_RESULTS_PATH, result_filename)
184
-
185
- # First, update the request file with the results
186
- request_file = os.path.join(os.path.dirname(file_path), os.path.basename(file_path))
187
- with open(file_path, 'r') as f:
188
- request_data = json.load(f)
189
-
190
- # Update request file with results and status
191
- request_data['results'] = result.results
192
- request_data['status'] = EvaluationStatus.FINISHED.value
193
-
194
- with open(file_path, 'w') as f:
195
- json.dump(request_data, f, indent=2)
196
-
197
- # Now create the results file
198
- with open(result_file, 'w') as f:
199
- json.dump({
200
- 'model': result.model,
201
- 'revision': result.revision,
202
- 'precision': result.precision,
203
- 'weight_type': result.weight_type,
204
- 'results': result.results,
205
- 'config': {
206
- 'model_name': result.model,
207
- 'model_dtype': result.precision,
208
- 'model_type': result.weight_type,
209
- 'architecture': 'Unknown',
210
- 'license': request_data.get('license', '?'),
211
- 'likes': request_data.get('likes', 0),
212
- 'num_params': request_data.get('params', 0),
213
- 'date': request_data.get('submitted_time', datetime.now().strftime('%Y-%m-%d')),
214
- 'still_on_hub': True
215
- }
216
- }, f, indent=2)
217
 
 
 
218
  # Upload to Hugging Face
219
- API.upload_file(
220
- path_or_fileobj=result_file,
221
- path_in_repo=result_filename if not username else os.path.join(username, result_filename),
222
- repo_id=f"{OWNER}/results",
223
- repo_type="dataset",
224
- commit_message=f"Add evaluation results for {result.model}"
225
- )
 
 
226
 
 
5
  from enum import Enum
6
  from datetime import datetime
7
  import torch
8
+ from transformers import AutoModelForSequenceClassification, AutoTokenizer, AutoConfig
9
  from datasets import load_dataset
10
 
11
  from src.envs import API, OWNER, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH
 
29
  def evaluate_tsac_sentiment(model, tokenizer, device):
30
  """Evaluate model on TSAC sentiment analysis task"""
31
  try:
32
+ print("\n=== Starting TSAC sentiment evaluation ===")
33
+ print(f"Current device: {device}")
34
+
35
+ # Load and preprocess dataset
36
+ print("\nLoading and preprocessing TSAC dataset...")
37
+ dataset = load_dataset("fbougares/tsac", split="test", trust_remote_code=True)
38
+ print(f"Dataset size: {len(dataset)} examples")
39
 
40
  def preprocess(examples):
41
+ print(f"\nProcessing batch of {len(examples['sentence'])} examples")
42
+ # Use 'sentence' field as per dataset structure
43
+ return tokenizer(
44
+ examples['sentence'],
45
+ padding=True,
46
+ truncation=True,
47
+ max_length=512,
48
+ return_tensors='pt'
49
+ )
50
 
51
  dataset = dataset.map(preprocess, batched=True)
52
+ dataset.set_format(type='torch', columns=['input_ids', 'attention_mask', 'target'])
53
+
54
+ # Check first example
55
+ first_example = dataset[0]
56
+ print("\nFirst example details:")
57
+ print(f"Input IDs shape: {first_example['input_ids'].shape}")
58
+ print(f"Attention mask shape: {first_example['attention_mask'].shape}")
59
+ print(f"Target: {first_example['target']}")
60
 
61
  model.eval()
62
+ print(f"\nModel class: {model.__class__.__name__}")
63
+ print(f"Model device: {next(model.parameters()).device}")
64
+
65
  with torch.no_grad():
66
  predictions = []
67
+ targets = []
68
 
69
+ for i, batch in enumerate(dataset):
70
+ if i == 0:
71
+ print("\nProcessing first batch...")
72
+ print(f"Batch keys: {list(batch.keys())}")
73
+ print(f"Target shape: {batch['target'].shape}")
74
+
75
+ inputs = {k: v.to(device) for k, v in batch.items() if k != 'target'}
76
+ target = batch['target'].to(device)
77
 
78
  outputs = model(**inputs)
79
+ print(f"\nBatch {i} output type: {type(outputs)}")
80
+
81
+ # Handle different model output formats
82
+ if isinstance(outputs, dict):
83
+ print(f"Output keys: {list(outputs.keys())}")
84
+ if 'logits' in outputs:
85
+ logits = outputs['logits']
86
+ elif 'prediction_logits' in outputs:
87
+ logits = outputs['prediction_logits']
88
+ else:
89
+ raise ValueError(f"Unknown output format. Available keys: {list(outputs.keys())}")
90
+ elif isinstance(outputs, tuple):
91
+ print(f"Output tuple length: {len(outputs)}")
92
+ logits = outputs[0]
93
+ else:
94
+ logits = outputs
95
+
96
+ print(f"Logits shape: {logits.shape}")
97
+
98
+ # For sequence classification, we typically use the [CLS] token's prediction
99
+ if len(logits.shape) == 3: # [batch_size, sequence_length, num_classes]
100
+ logits = logits[:, 0, :] # Take the [CLS] token prediction
101
+
102
+ print(f"Final logits shape: {logits.shape}")
103
+
104
+ batch_predictions = logits.argmax(dim=-1).cpu().tolist()
105
+ batch_targets = target.cpu().tolist()
106
+
107
+ predictions.extend(batch_predictions)
108
+ targets.extend(batch_targets)
109
+
110
+ if i == 0:
111
+ print("\nFirst batch predictions:")
112
+ print(f"Predictions: {batch_predictions[:5]}")
113
+ print(f"Targets: {batch_targets[:5]}")
114
+
115
+ print(f"\nTotal predictions: {len(predictions)}")
116
+ print(f"Total targets: {len(targets)}")
117
+
118
+ # Calculate accuracy
119
+ correct = sum(p == t for p, t in zip(predictions, targets))
120
+ total = len(predictions)
121
+ accuracy = correct / total if total > 0 else 0.0
122
+
123
+ print(f"\nEvaluation results:")
124
+ print(f"Correct predictions: {correct}")
125
+ print(f"Total predictions: {total}")
126
+ print(f"Accuracy: {accuracy:.4f}")
127
+
128
+ return {"accuracy": accuracy}
129
  except Exception as e:
130
+ print(f"\n=== Error in TSAC evaluation: {str(e)} ===")
131
+ print(f"Full traceback: {traceback.format_exc()}")
132
+ raise e
133
 
134
+ def evaluate_tunisian_corpus_coverage(model, tokenizer, device):
135
  """Evaluate model's coverage on Tunisian Dialect Corpus"""
136
  try:
137
  dataset = load_dataset("arbml/Tunisian_Dialect_Corpus", split="train")
138
 
139
  def preprocess(examples):
140
+ print("Tunisian Corpus preprocess examples -------------", examples)
141
+ # Use 'Tweet' field as per dataset structure
142
+ return tokenizer(examples['Tweet'], padding=True, truncation=True, max_length=512)
143
 
144
  dataset = dataset.map(preprocess, batched=True)
145
 
146
+ # Calculate token coverage
147
  total_tokens = 0
148
  covered_tokens = 0
149
 
150
  for example in dataset:
151
+ tokens = tokenizer.tokenize(example['Tweet'])
152
  total_tokens += len(tokens)
153
  covered_tokens += len([t for t in tokens if t != tokenizer.unk_token])
154
 
155
  coverage = covered_tokens / total_tokens if total_tokens > 0 else 0
156
+ print(f"Tunisian Corpus Coverage: {coverage:.2%}")
157
+ return {"coverage": coverage}
158
  except Exception as e:
159
  print(f"Error in Tunisian Corpus evaluation: {str(e)}")
160
+ raise e # Raise the error instead of returning 0.0
161
 
162
  def evaluate_model(model_name: str, revision: str, precision: str, weight_type: str) -> EvaluationResult:
163
  """Evaluate a single model on all tasks"""
164
  try:
165
+ print(f"\nStarting evaluation for model: {model_name} (revision: {revision}, precision: {precision}, weight_type: {weight_type})")
166
+ print(f"Current working directory: {os.getcwd()}")
167
+ print(f"Evaluation requests path: {EVAL_REQUESTS_PATH}")
168
+ print(f"Evaluation results path: {EVAL_RESULTS_PATH}")
 
 
 
169
 
170
+ # Initialize device
171
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
172
+ print(f"Using device: {device}")
 
 
 
173
 
174
+ # Load model and tokenizer with enhanced error handling
175
+ try:
176
+ print(f"\nLoading model: {model_name}")
177
+ print(f"Model path exists: {os.path.exists(model_name)}")
178
+
179
+ # First try to load the config to check model type
180
+ try:
181
+ config = AutoConfig.from_pretrained(model_name, revision=revision)
182
+ print(f"Model type from config: {config.model_type}")
183
+ except Exception as config_error:
184
+ print(f"Error loading config: {str(config_error)}")
185
+
186
+ # Try loading with trust_remote_code=True first
187
+ try:
188
+ print("\nAttempting to load with trust_remote_code=True...")
189
+ model = AutoModelForSequenceClassification.from_pretrained(
190
+ model_name,
191
+ revision=revision,
192
+ torch_dtype=getattr(torch, precision),
193
+ trust_remote_code=True
194
+ ).to(device)
195
+ print(f"Successfully loaded model {model_name} with trust_remote_code=True")
196
+ print(f"Model class: {model.__class__.__name__}")
197
+ except Exception as e1:
198
+ print(f"Error loading with trust_remote_code=True: {str(e1)}")
199
+ print(f"Error type: {type(e1).__name__}")
200
+
201
+ # If it's a model type error, try with llama as model type
202
+ if "Unrecognized model" in str(e1) and "llama" in model_name.lower():
203
+ print("\nAttempting to load as llama model...")
204
+ try:
205
+ model = AutoModelForSequenceClassification.from_pretrained(
206
+ model_name,
207
+ revision=revision,
208
+ torch_dtype=getattr(torch, precision),
209
+ trust_remote_code=True,
210
+ model_type="llama"
211
+ ).to(device)
212
+ print(f"Successfully loaded model {model_name} as llama model")
213
+ print(f"Model class: {model.__class__.__name__}")
214
+ except Exception as e2:
215
+ print(f"Error loading as llama model: {str(e2)}")
216
+ print(f"Error type: {type(e2).__name__}")
217
+ raise Exception(f"Failed to load model with both methods: {str(e1)}, {str(e2)}")
218
+ else:
219
+ raise e1
220
+
221
+ print(f"\nLoading tokenizer: {model_name}")
222
+ try:
223
+ tokenizer = AutoTokenizer.from_pretrained(model_name, revision=revision)
224
+ print(f"Successfully loaded tokenizer for {model_name}")
225
+ print(f"Tokenizer class: {tokenizer.__class__.__name__}")
226
+ except Exception as e:
227
+ print(f"Error loading tokenizer: {str(e)}")
228
+ print(f"Error type: {type(e).__name__}")
229
+ raise Exception(f"Failed to load tokenizer: {str(e)}")
230
+
231
+ # Run evaluations
232
+ print("\nStarting TSAC sentiment evaluation...")
233
+ try:
234
+ tsac_results = evaluate_tsac_sentiment(model, tokenizer, device)
235
+ print(f"TSAC results: {tsac_results}")
236
+ except Exception as e:
237
+ print(f"Error in TSAC evaluation for {model_name}: {str(e)}")
238
+ print(f"Error type: {type(e).__name__}")
239
+ tsac_results = {"accuracy": None}
240
+
241
+ print("\nStarting Tunisian Corpus evaluation...")
242
+ try:
243
+ tunisian_results = evaluate_tunisian_corpus_coverage(model, tokenizer, device)
244
+ print(f"Tunisian Corpus results: {tunisian_results}")
245
+ except Exception as e:
246
+ print(f"Error in Tunisian Corpus evaluation for {model_name}: {str(e)}")
247
+ print(f"Error type: {type(e).__name__}")
248
+ tunisian_results = {"coverage": None}
249
+
250
+ print("\nEvaluation completed successfully!")
251
+ print(f"Final results: {tsac_results} | {tunisian_results}")
252
+ return EvaluationResult(
253
+ model=model_name,
254
+ revision=revision,
255
+ precision=precision,
256
+ weight_type=weight_type,
257
+ results={
258
+ **tsac_results,
259
+ **tunisian_results
260
+ }
261
+ )
262
+ except Exception as e:
263
+ print(f"\nError loading model {model_name}: {str(e)}")
264
+ print(f"Error type: {type(e).__name__}")
265
+ print(f"Full traceback: {traceback.format_exc()}")
266
+ return EvaluationResult(
267
+ model=model_name,
268
+ revision=revision,
269
+ precision=precision,
270
+ weight_type=weight_type,
271
+ results={},
272
+ error=str(e)
273
+ )
274
  except Exception as e:
275
+ print(f"\nError evaluating model {model_name}: {str(e)}")
276
+ print(f"Error type: {type(e).__name__}")
277
+ print(f"Full traceback: {traceback.format_exc()}")
278
  return EvaluationResult(
279
  model=model_name,
280
  revision=revision,
 
286
 
287
  def process_evaluation_queue():
288
  """Process all pending evaluations in the queue"""
289
+ print(f"\n=== Starting evaluation queue processing ===")
290
+ print(f"Current time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
291
+ print(f"Looking for evaluation requests in: {EVAL_REQUESTS_PATH}")
292
+
293
+ # Get all pending evaluations
294
+ if not os.path.exists(EVAL_REQUESTS_PATH):
295
+ print(f"Evaluation requests path does not exist: {EVAL_REQUESTS_PATH}")
296
+ return
297
+
298
  pending_files = []
299
+ for file in os.listdir(EVAL_REQUESTS_PATH):
300
+ if file.endswith('.json'):
301
+ pending_files.append(os.path.join(EVAL_REQUESTS_PATH, file))
302
 
303
+ print(f"Found {len(pending_files)} pending evaluation requests")
304
+ for file_path in pending_files:
305
+ print(f" - {file_path}")
306
+
307
+ if not pending_files:
308
+ print("No pending evaluation requests found")
309
+ return
310
 
311
  for file_path in pending_files:
312
+ try:
313
+ print(f"\n=== Processing evaluation request: {file_path} ===")
314
 
315
+ # Read the file atomically
316
+ try:
317
+ with open(file_path, 'r') as f:
318
+ eval_request = json.load(f)
319
+ print(f"Loaded evaluation request: {json.dumps(eval_request, indent=2)}")
320
+ except Exception as e:
321
+ print(f"Error reading evaluation request: {str(e)}")
322
+ continue
323
 
324
+ # Skip non-pending evaluations
325
+ status = eval_request.get('status', 'UNKNOWN')
326
+ if status != EvaluationStatus.PENDING.value:
327
+ print(f"Skipping non-pending evaluation (status: {status})")
328
+ continue
329
+
330
+ # Update status to RUNNING
331
+ eval_request['status'] = EvaluationStatus.RUNNING.value
332
+ print(f"Updating status to RUNNING for {eval_request['model']}")
333
+
334
+ # Write the update atomically
335
+ try:
336
+ with open(file_path, 'w') as f:
337
+ json.dump(eval_request, f, indent=2)
338
+ print("Successfully updated status to RUNNING")
339
+ except Exception as e:
340
+ print(f"Error updating status: {str(e)}")
341
+ continue
342
+
343
+ # Get model info from request
344
+ model_name = eval_request.get('model', '')
345
+ revision = eval_request.get('revision', '')
346
+ precision = eval_request.get('precision', '')
347
+ weight_type = eval_request.get('weight_type', '')
348
+
349
+ if not model_name:
350
+ print("Error: Missing model name in evaluation request")
351
+ continue
352
+
353
+ print(f"\n=== Evaluating model: {model_name} ===")
354
+ print(f"Revision: {revision}")
355
+ print(f"Precision: {precision}")
356
+ print(f"Weight type: {weight_type}")
 
 
 
357
 
358
+ result = evaluate_model(model_name, revision, precision, weight_type)
359
+
360
+ # Update status and save results
361
+ if result.error:
362
+ print(f"\n=== Evaluation failed ===")
363
+ print(f"Error: {result.error}")
364
+ eval_request['status'] = EvaluationStatus.FAILED.value
365
+ eval_request['error'] = result.error
366
+ else:
367
+ print(f"\n=== Evaluation completed successfully ===")
368
+ print(f"Results: {result.results}")
369
+ eval_request['status'] = EvaluationStatus.FINISHED.value
370
+ eval_request['results'] = result.results
371
+
372
+ # Write the final update atomically
373
+ try:
374
+ with open(file_path, 'w') as f:
375
+ json.dump(eval_request, f, indent=2)
376
+ print("Successfully saved evaluation results")
377
+ except Exception as e:
378
+ print(f"Error saving evaluation results: {str(e)}")
379
+ continue
380
+
381
+ # Move successful evaluations to results directory
382
+ if eval_request['status'] == EvaluationStatus.FINISHED.value:
383
+ try:
384
+ os.makedirs(EVAL_RESULTS_PATH, exist_ok=True)
385
+ result_file = os.path.join(EVAL_RESULTS_PATH, os.path.basename(file_path))
386
+ os.rename(file_path, result_file)
387
+ print(f"Moved evaluation results to: {result_file}")
388
+ except Exception as e:
389
+ print(f"Error moving results file: {str(e)}")
390
+
391
+ except Exception as e:
392
+ print(f"\n=== Error processing evaluation: {str(e)} ===")
393
+ print(f"Full traceback: {traceback.format_exc()}")
394
+ continue
395
+
396
  # Upload to Hugging Face
397
+ try:
398
+ if 'result_file' in locals():
399
+ API.upload_file(
400
+ path_or_fileobj=result_file,
401
+ path_in_repo=result_filename if not username else os.path.join(username, result_filename),
402
+ repo_id=f"{OWNER}/results",
403
+ repo_type="dataset",
404
+ commit_message=f"Add evaluation results for {result.model}"
405
+ )
406
+ print("Successfully uploaded results to Hugging Face")
407
+ except Exception as e:
408
+ print(f"Error uploading results to Hugging Face: {str(e)}")
409
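Note that the loop in evaluate_tsac_sentiment iterates over the mapped dataset one example at a time even though the logging refers to batches. If true mini-batching is wanted, one way to get it (a sketch only, reusing the 'sentence' and 'target' columns assumed above) is fixed-length tokenisation plus a torch DataLoader:

    from torch.utils.data import DataLoader

    def preprocess(examples):
        # Pad to a fixed length so default tensor collation works across batches
        return tokenizer(examples["sentence"], padding="max_length", truncation=True, max_length=128)

    dataset = dataset.map(preprocess, batched=True)
    dataset.set_format(type="torch", columns=["input_ids", "attention_mask", "target"])
    loader = DataLoader(dataset, batch_size=32)

    model.eval()
    correct = total = 0
    with torch.no_grad():
        for batch in loader:
            target = batch.pop("target").to(device)
            inputs = {k: v.to(device) for k, v in batch.items()}
            logits = model(**inputs).logits
            correct += (logits.argmax(dim=-1) == target).sum().item()
            total += target.size(0)
    accuracy = correct / total if total else 0.0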
 
src/leaderboard/read_evals.py CHANGED
@@ -35,11 +35,37 @@ class EvalResult:
35
  @classmethod
36
  def init_from_json_file(self, json_filepath):
37
  """Inits the result from the specific model result file"""
38
- with open(json_filepath) as fp:
39
- print(json_filepath)
40
- data = json.load(fp)
41
- print(data)
42
- config = data.get("config")
 
 
43
 
44
  # Precision
45
  precision = Precision.from_str(config.get("model_dtype"))
@@ -71,7 +97,7 @@ class EvalResult:
71
  results = {}
72
  for task in Tasks:
73
  task = task.value
74
-
75
  # We average all scores of a given metric (not all metrics are present in all files)
76
  accs = np.array([v.get(task.metric, None) for k, v in data["results"].items() if task.benchmark == k])
77
  if accs.size == 0 or any([acc is None for acc in accs]):
@@ -167,9 +193,23 @@ def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResu
167
 
168
  eval_results = {}
169
  for model_result_filepath in model_result_filepaths:
170
- # Creation of result
171
- eval_result = EvalResult.init_from_json_file(model_result_filepath)
172
- eval_result.update_with_request_file(requests_path)
 
 
173
 
174
  # Store results of same eval together
175
  eval_name = eval_result.eval_name
 
35
  @classmethod
36
  def init_from_json_file(self, json_filepath):
37
  """Inits the result from the specific model result file"""
38
+ try:
39
+ with open(json_filepath) as fp:
40
+ data = json.load(fp)
41
+
42
+ # Get model info
43
+ model_name = data.get('model')
44
+ org_and_model = model_name.split("/", 1)
45
+ org = org_and_model[0]
46
+ model = org_and_model[1]
47
+
48
+ # Get results
49
+ results = data.get('results', {})
50
+ precision = Precision.from_str(data.get('precision', 'Unknown'))
51
+
52
+ # Create EvalResult
53
+ return EvalResult(
54
+ eval_name=f"{org}_{model}_{precision.value}",
55
+ full_model=model_name,
56
+ org=org,
57
+ model=model,
58
+ revision=data.get('revision', ''),
59
+ results=results,
60
+ precision=precision,
61
+ model_type=ModelType.from_str(data.get('model_type', 'Unknown')),
62
+ weight_type=WeightType.from_str(data.get('weight_type', 'Original')),
63
+ date=data.get('submitted_at', ''),
64
+ still_on_hub=is_model_on_hub(model_name)
65
+ )
66
+ except Exception as e:
67
+ print(f"Error reading evaluation file {json_filepath}: {str(e)}")
68
+ return None
69
 
70
  # Precision
71
  precision = Precision.from_str(config.get("model_dtype"))
 
97
  results = {}
98
  for task in Tasks:
99
  task = task.value
100
+
101
  # We average all scores of a given metric (not all metrics are present in all files)
102
  accs = np.array([v.get(task.metric, None) for k, v in data["results"].items() if task.benchmark == k])
103
  if accs.size == 0 or any([acc is None for acc in accs]):
 
193
 
194
  eval_results = {}
195
  for model_result_filepath in model_result_filepaths:
196
+ try:
197
+ # Creation of result
198
+ eval_result = EvalResult.init_from_json_file(model_result_filepath)
199
+ if eval_result is None:
200
+ print(f"Skipping invalid evaluation file: {model_result_filepath}")
201
+ continue
202
+
203
+ eval_result.update_with_request_file(requests_path)
204
+
205
+ # Store results of same eval together
206
+ if eval_result.eval_name not in eval_results:
207
+ eval_results[eval_result.eval_name] = []
208
+ eval_results[eval_result.eval_name].append(eval_result)
209
+
210
+ except Exception as e:
211
+ print(f"Error processing evaluation file {model_result_filepath}: {str(e)}")
212
+ continue
213
 
214
  # Store results of same eval together
215
  eval_name = eval_result.eval_name
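Based on the keys the new init_from_json_file reads (model, revision, precision, weight_type, model_type, submitted_at, results), a result file is expected to look roughly like the following; the values here are purely illustrative:

    example_result = {
        "model": "some-org/some-model",        # illustrative model id
        "revision": "main",
        "precision": "float16",
        "weight_type": "Original",
        "model_type": "LLM",
        "submitted_at": "2025-01-01T00:00:00Z",
        "results": {"accuracy": 0.0, "coverage": 0.0},  # per-benchmark scores
    }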
src/submission/check_validity.py CHANGED
@@ -74,10 +74,10 @@ def get_model_arch(model_info: ModelInfo):
74
  """Gets the model architecture from the configuration"""
75
  return model_info.config.get("architectures", "Unknown")
76
 
77
- def already_submitted_models(requested_models_dir: str) -> set[str]:
78
- """Gather a list of already submitted models to avoid duplicates"""
79
  depth = 1
80
- file_names = []
81
  users_to_submission_dates = defaultdict(list)
82
 
83
  for root, _, files in os.walk(requested_models_dir):
@@ -86,9 +86,11 @@ def already_submitted_models(requested_models_dir: str) -> set[str]:
86
  for file in files:
87
  if not file.endswith(".json"):
88
  continue
89
- with open(os.path.join(root, file), "r") as f:
 
90
  info = json.load(f)
91
- file_names.append(f"{info['model']}_{info['revision']}_{info['precision']}")
 
92
 
93
  # Select organisation
94
  if info["model"].count("/") == 0 or "submitted_time" not in info:
@@ -96,4 +98,4 @@ def already_submitted_models(requested_models_dir: str) -> set[str]:
96
  organisation, _ = info["model"].split("/")
97
  users_to_submission_dates[organisation].append(info["submitted_time"])
98
 
99
- return set(file_names), users_to_submission_dates
 
74
  """Gets the model architecture from the configuration"""
75
  return model_info.config.get("architectures", "Unknown")
76
 
77
+ def already_submitted_models(requested_models_dir: str) -> tuple[dict, dict]:
78
+ """Gather a mapping of submitted models to their queue files to avoid duplicates"""
79
  depth = 1
80
+ requested_models = {}
81
  users_to_submission_dates = defaultdict(list)
82
 
83
  for root, _, files in os.walk(requested_models_dir):
 
86
  for file in files:
87
  if not file.endswith(".json"):
88
  continue
89
+ queue_file = os.path.join(root, file)
90
+ with open(queue_file, "r") as f:
91
  info = json.load(f)
92
+ model_key = f"{info['model']}_{info['revision']}_{info['precision']}"
93
+ requested_models[model_key] = queue_file
94
 
95
  # Select organisation
96
  if info["model"].count("/") == 0 or "submitted_time" not in info:
 
98
  organisation, _ = info["model"].split("/")
99
  users_to_submission_dates[organisation].append(info["submitted_time"])
100
 
101
+ return requested_models, users_to_submission_dates
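already_submitted_models now returns a mapping from "{model}_{revision}_{precision}" keys to the queue file that recorded them, alongside the per-organisation submission dates; this is what the new duplicate check in submit.py relies on. A brief usage sketch (EVAL_REQUESTS_PATH and json come from the surrounding module):

    requested_models, users_to_submission_dates = already_submitted_models(EVAL_REQUESTS_PATH)

    model_key = f"{model}_{revision}_{precision}"
    if model_key in requested_models:
        # Re-read the matching queue entry to inspect its status
        with open(requested_models[model_key]) as f:
            status = json.load(f).get("status")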
src/submission/submit.py CHANGED
@@ -20,6 +20,58 @@ import time
20
  REQUESTED_MODELS = None
21
  USERS_TO_SUBMISSION_DATES = None
22
 
 
23
  def add_new_eval(
24
  model: str,
25
  base_model: str,
@@ -28,144 +80,293 @@ def add_new_eval(
28
  weight_type: str,
29
  model_type: str,
30
  ):
31
- global REQUESTED_MODELS
32
- global USERS_TO_SUBMISSION_DATES
33
- if not REQUESTED_MODELS:
 
 
34
  REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)
 
 
35
 
36
- user_name = ""
37
- model_path = model
38
- if "/" in model:
39
- user_name = model.split("/")[0]
40
- model_path = model.split("/")[1]
 
 
41
 
42
- precision = precision.split(" ")[0]
43
- current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
 
 
44
 
45
- if model_type is None or model_type == "":
46
- return styled_error("Please select a model type.")
 
 
47
 
48
- # Does the model actually exist?
49
- if revision == "":
50
- revision = "main"
 
51
 
52
- # Is the model on the hub?
53
- if weight_type in ["Delta", "Adapter"]:
54
- base_model_on_hub, error, _ = is_model_on_hub(model_name=base_model, revision=revision, token=TOKEN, test_tokenizer=True)
55
- if not base_model_on_hub:
56
- return styled_error(f'Base model "{base_model}" {error}')
 
 
57
 
58
- if not weight_type == "Adapter":
59
- model_on_hub, error, _ = is_model_on_hub(model_name=model, revision=revision, token=TOKEN, test_tokenizer=True)
60
- if not model_on_hub:
61
- return styled_error(f'Model "{model}" {error}')
 
 
62
 
63
- # Is the model info correctly filled?
64
- try:
65
- model_info = API.model_info(repo_id=model, revision=revision)
66
- except Exception:
67
- return styled_error("Could not get your model information. Please fill it up properly.")
 
 
 
68
 
69
- model_size = get_model_size(model_info=model_info, precision=precision)
 
 
70
 
71
- # Were the model card and license filled?
72
- try:
73
- license = model_info.cardData["license"]
74
- except Exception:
75
- return styled_error("Please select a license for your model")
76
-
77
- modelcard_OK, error_msg = check_model_card(model)
78
- if not modelcard_OK:
79
- return styled_error(error_msg)
80
-
81
- # Seems good, creating the eval
82
- print("Adding new eval")
83
-
84
- eval_entry = {
85
- "model": model,
86
- "base_model": base_model,
87
- "revision": revision,
88
- "precision": precision,
89
- "weight_type": weight_type,
90
- "status": "PENDING",
91
- "submitted_time": current_time,
92
- "model_type": model_type,
93
- "likes": model_info.likes,
94
- "params": model_size,
95
- "license": license,
96
- "private": False,
97
- }
98
-
99
- # Check for duplicate submission
100
- if f"{model}_{revision}_{precision}" in REQUESTED_MODELS:
101
- return styled_warning("This model has been already submitted.")
102
-
103
- print("Creating eval file")
104
- OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
105
- os.makedirs(OUT_DIR, exist_ok=True)
106
- out_path = f"{OUT_DIR}/{model_path}_eval_request_False_{precision}_{weight_type}.json"
107
-
108
- with open(out_path, "w") as f:
109
- f.write(json.dumps(eval_entry))
110
-
111
- print("Uploading eval file")
112
- API.upload_file(
113
- path_or_fileobj=out_path,
114
- path_in_repo=out_path.split("eval-queue/")[1],
115
- repo_id=QUEUE_REPO,
116
- repo_type="dataset",
117
- commit_message=f"Add {model} to eval queue",
118
- )
119
-
120
- # Remove the local file
121
- os.remove(out_path)
122
-
123
- # Run evaluation immediately
124
- print(f"Evaluating model {model}...")
125
- try:
126
- # Load model and tokenizer
127
- device = "cuda" if torch.cuda.is_available() else "cpu"
128
-
129
- model_obj = AutoModelForSequenceClassification.from_pretrained(
130
- model,
131
- revision=revision,
132
- torch_dtype=getattr(torch, precision),
133
- trust_remote_code=True
134
- ).to(device)
135
-
136
- tokenizer = AutoTokenizer.from_pretrained(model, revision=revision)
137
-
138
- # Evaluate on TSAC
139
- print("Evaluating on TSAC sentiment analysis...")
140
- tsac_dataset = load_dataset("fbougares/tsac", split="test")
141
-
142
- def preprocess_tsac(examples):
143
- return tokenizer(examples['text'], padding=True, truncation=True, max_length=512)
144
 
145
- tsac_dataset = tsac_dataset.map(preprocess_tsac, batched=True)
146
- tsac_dataset.set_format(type='torch', columns=['input_ids', 'attention_mask', 'label'])
 
 
147
 
148
  model_obj.eval()
149
  with torch.no_grad():
150
  predictions = []
151
- labels = []
152
 
153
- for batch in tsac_dataset:
154
- inputs = {k: v.to(device) for k, v in batch.items() if k != 'label'}
155
- label = batch['label'].to(device)
 
 
156
 
157
  outputs = model_obj(**inputs)
158
- predictions.extend(outputs.logits.argmax(dim=-1).cpu().tolist())
159
- labels.extend(label.cpu().tolist())
 
 
160
 
161
- tsac_accuracy = sum(p == l for p, l in zip(predictions, labels)) / len(predictions)
162
 
 
 
 
 
163
  # Evaluate on ArabML
164
  print("Evaluating on ArabML Tunisian Corpus...")
165
- arabml_dataset = load_dataset("arbml/Tunisian_Dialect_Corpus", split="test")
166
 
167
  def preprocess_arabml(examples):
168
- return tokenizer(examples['text'], padding=True, truncation=True, max_length=512)
169
 
170
  arabml_dataset = arabml_dataset.map(preprocess_arabml, batched=True)
171
 
@@ -173,72 +374,10 @@ def add_new_eval(
173
  covered_tokens = 0
174
 
175
  for example in arabml_dataset:
176
- tokens = tokenizer.tokenize(example['text'])
177
  total_tokens += len(tokens)
178
  covered_tokens += len([t for t in tokens if t != tokenizer.unk_token])
179
 
180
  arabml_coverage = covered_tokens / total_tokens if total_tokens > 0 else 0
181
 
182
  # Store results
183
- eval_results = {
184
- Tasks.tsac_sentiment.value.benchmark: tsac_accuracy,
185
- Tasks.tunisian_corpus.value.benchmark: arabml_coverage
186
- }
187
-
188
- print(f"Evaluation results: {eval_results}")
189
-
190
- # Update eval_entry with results
191
- eval_entry["status"] = EvaluationStatus.FINISHED.value
192
- eval_entry["results"] = eval_results
193
-
194
- # Save to results dataset
195
- results_file = os.path.join(EVAL_RESULTS_PATH, f"{model}_{revision}_{precision}_{weight_type}.json")
196
- with open(results_file, 'w') as f:
197
- json.dump({
198
- 'model': model,
199
- 'revision': revision,
200
- 'precision': precision,
201
- 'weight_type': weight_type,
202
- 'results': eval_results
203
- }, f, indent=2)
204
-
205
- # Upload results to Hugging Face
206
- API.upload_file(
207
- path_or_fileobj=results_file,
208
- path_in_repo=os.path.basename(results_file),
209
- repo_id=RESULTS_REPO,
210
- repo_type="dataset",
211
- commit_message=f"Add evaluation results for {model}"
212
- )
213
-
214
- # Remove the original eval request file
215
- os.remove(out_path)
216
-
217
- return styled_message(
218
- f"Model evaluation completed!\n\n"
219
- f"TSAC Sentiment Accuracy: {tsac_accuracy:.2%}\n"
220
- f"ArabML Corpus Coverage: {arabml_coverage:.2%}"
221
- )
222
-
223
- except Exception as e:
224
- print(f"Error during evaluation: {str(e)}")
225
- eval_entry["status"] = EvaluationStatus.FAILED.value
226
- eval_entry["error"] = str(e)
227
-
228
- with open(out_path, "w") as f:
229
- f.write(json.dumps(eval_entry))
230
-
231
- API.upload_file(
232
- path_or_fileobj=out_path,
233
- path_in_repo=out_path.split("eval-queue/")[1],
234
- repo_id=QUEUE_REPO,
235
- repo_type="dataset",
236
- commit_message=f"Add {model} evaluation error",
237
- )
238
-
239
- os.remove(out_path)
240
-
241
- return styled_error(
242
- f"Error during evaluation: {str(e)}\n\n"
243
- "The evaluation will be retried automatically later."
244
- )
 
20
  REQUESTED_MODELS = None
21
  USERS_TO_SUBMISSION_DATES = None
22
 
23
+ def create_eval_request(
24
+ model: str,
25
+ base_model: str,
26
+ revision: str,
27
+ precision: str,
28
+ weight_type: str,
29
+ model_type: str,
30
+ ):
31
+ """Create and upload an evaluation request"""
32
+ try:
33
+ # Create evaluation request file
34
+ request_data = {
35
+ 'model': model,
36
+ 'base_model': base_model,
37
+ 'revision': revision,
38
+ 'precision': precision,
39
+ 'weight_type': weight_type,
40
+ 'model_type': model_type,
41
+ 'status': EvaluationStatus.PENDING.value,
42
+ 'submitted_time': datetime.now(timezone.utc).isoformat()
43
+ }
44
+
45
+ # Create filename
46
+ username = model.split('/')[0] if '/' in model else None
47
+ request_filename = f"{username or 'unknown'}_{model.replace('/', '_')}_eval_request_{revision}_{precision}_{weight_type}.json"
48
+ request_path = os.path.join(EVAL_REQUESTS_PATH, request_filename)
49
+
50
+ # Write request file
51
+ with open(request_path, 'w') as f:
52
+ json.dump(request_data, f, indent=2)
53
+
54
+ print(f"Created evaluation request: {request_filename}")
55
+
56
+ # Upload to Hugging Face
57
+ API.upload_file(
58
+ path_or_fileobj=request_path,
59
+ path_in_repo=request_filename if not username else os.path.join(username, request_filename),
60
+ repo_id=QUEUE_REPO,
61
+ repo_type="dataset",
62
+ commit_message=f"Add evaluation request for {model}",
63
+ token=TOKEN
64
+ )
65
+
66
+ print(f"Uploaded evaluation request to {QUEUE_REPO}")
67
+
68
+ return styled_message(
69
+ "Evaluation request created! Please wait for the evaluation to complete."
70
+ )
71
+ except Exception as e:
72
+ print(f"Error creating evaluation request: {str(e)}")
73
+ return styled_error(f"Failed to create evaluation request: {str(e)}")
74
+
75
  def add_new_eval(
76
  model: str,
77
  base_model: str,
 
80
  weight_type: str,
81
  model_type: str,
82
  ):
83
+ """Validate model and create evaluation request"""
84
+ try:
85
+ print("\n=== Starting evaluation submission ===")
86
+ print(f"Submission time: {datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M:%S')} UTC")
87
+ print(f"Model: {model}")
88
+ print(f"Base model: {base_model}")
89
+ print(f"Revision: {revision}")
90
+ print(f"Precision: {precision}")
91
+ print(f"Weight type: {weight_type}")
92
+ print(f"Model type: {model_type}")
93
+ print(f"Evaluation requests path: {EVAL_REQUESTS_PATH}")
94
+ print(f"Queue repo: {QUEUE_REPO}")
95
+
96
+ # Always refresh the cache before checking for duplicates
97
+ print("\n=== Checking for duplicate submissions ===")
98
+ global REQUESTED_MODELS
99
+ global USERS_TO_SUBMISSION_DATES
100
+ start_time = time.time()
101
  REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)
102
+ print(f"Cache refresh completed in {time.time() - start_time:.2f} seconds")
103
+ print(f"Found {len(REQUESTED_MODELS)} existing submissions")
104
 
105
+ user_name = ""
106
+ model_path = model
107
+ if "/" in model:
108
+ user_name = model.split("/")[0]
109
+ model_path = model.split("/")[1]
110
+ print(f"\nUser name: {user_name}")
111
+ print(f"Model path: {model_path}")
112
 
113
+ precision = precision.split(" ")[0]
114
+ if revision == "":
115
+ revision = "main"
116
+ print("Using default revision: main")
117
+
118
+ current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
119
+
120
+ # Check if model is already submitted
121
+ print("\n=== Checking for existing submission ===")
122
+ model_key = f"{model}_{revision}_{precision}"
123
+ if model_key in REQUESTED_MODELS:
124
+ print(f"Found existing submission with key: {model_key}")
125
+ # Get the status from the queue file
126
+ queue_file = REQUESTED_MODELS[model_key]
127
+ try:
128
+ with open(queue_file, 'r') as f:
129
+ queue_entry = json.load(f)
130
+ status = queue_entry.get('status')
131
+ print(f"Found existing submission with status: {status}")
132
+ if status is None:
133
+ print(f"Warning: No status found in queue file {queue_file}")
134
+ return styled_warning("Error checking model status. Please try again later.")
135
+
136
+ if status != EvaluationStatus.FAILED.value:
137
+ print(f"Model already submitted and in {status} status")
138
+ return styled_warning(f"This model has been already submitted and is in {status} status.")
139
+ except Exception as e:
140
+ print(f"Error reading queue file: {e}")
141
+ print(f"Full traceback: {traceback.format_exc()}")
142
+ return styled_warning("Error checking model status. Please try again later.")
143
+ except Exception as e:
144
+ print(f"Error during evaluation: {str(e)}")
145
+ raise
146
 
147
+ print("\n=== Validating model type ===")
148
+ if model_type is None or model_type == "":
149
+ print("Error: Model type is missing")
150
+ return styled_error("Please select a model type.")
151
 
152
+ print("\n=== Validating model existence ===")
153
+ if revision == "":
154
+ revision = "main"
155
+ print("Using default revision: main")
156
 
157
+ print("\n=== Validating model on Hugging Face ===")
158
+ try:
159
+ if weight_type in ["Delta", "Adapter"]:
160
+ print(f"Checking base model {base_model} on Hugging Face...")
161
+ base_model_on_hub, error, _ = is_model_on_hub(
162
+ model_name=base_model,
163
+ revision=revision,
164
+ token=TOKEN,
165
+ test_tokenizer=True
166
+ )
167
+ print(f"Base model check result: {base_model_on_hub}")
168
+ if not base_model_on_hub:
169
+ print(f"Error: Base model not found: {error}")
170
+ return styled_error(f'Base model "{base_model}" {error}')
171
 
172
+ if not weight_type == "Adapter":
173
+ print(f"Checking model {model} on Hugging Face...")
174
+ model_on_hub, error, _ = is_model_on_hub(
175
+ model_name=model,
176
+ revision=revision,
177
+ token=TOKEN,
178
+ test_tokenizer=True
179
+ )
180
+ print(f"Model check result: {model_on_hub}")
181
+ if not model_on_hub:
182
+ print(f"Error: Model not found: {error}")
183
+ return styled_error(f'Model "{model}" {error}')
184
+ except Exception as e:
185
+ print(f"Error checking model on Hugging Face: {e}")
186
+ print(f"Full traceback: {traceback.format_exc()}")
187
+ return styled_error(f"Failed to validate model on Hugging Face: {str(e)}")
188
 
189
+ print("\n=== Getting model info ===")
190
+ try:
191
+ model_info = API.model_info(repo_id=model, revision=revision)
192
+ print(f"Successfully retrieved model info for {model}")
193
+ except Exception as e:
194
+ print(f"Error getting model info: {e}")
195
+ print(f"Full traceback: {traceback.format_exc()}")
196
+ return styled_error("Could not get your model information. Please fill it up properly.")
197
 
198
+ print("\n=== Getting model size ===")
199
+ try:
200
+ model_size = get_model_size(model_info=model_info, precision=precision)
201
+ print(f"Model size: {model_size}")
202
+ except Exception as e:
203
+ print(f"Error getting model size: {e}")
204
+ print(f"Full traceback: {traceback.format_exc()}")
205
+ model_size = "?"
206
 
207
+ print("\n=== Validating model card and license ===")
208
+ try:
209
+ license = model_info.cardData["license"]
210
+ print(f"Model license: {license}")
211
+ except Exception as e:
212
+ print(f"Error getting model license: {e}")
213
+ print(f"Full traceback: {traceback.format_exc()}")
214
+ return styled_error("Please select a license for your model")
215
+
216
+ print("\n=== Checking model card ===")
217
+ try:
218
+ modelcard_OK, error_msg = check_model_card(model)
219
+ print(f"Model card check result: {modelcard_OK}")
220
+ if not modelcard_OK:
221
+ print(f"Model card error: {error_msg}")
222
+ return styled_error(error_msg)
223
+ except Exception as e:
224
+ print(f"Error checking model card: {e}")
225
+ print(f"Full traceback: {traceback.format_exc()}")
226
+ return styled_error("Failed to validate model card")
227
+
228
+ print("\n=== Creating evaluation entry ===")
229
+ eval_entry = {
230
+ "model": model,
231
+ "base_model": base_model,
232
+ "revision": revision,
233
+ "precision": precision,
234
+ "weight_type": weight_type,
235
+ "status": "PENDING",
236
+ "submitted_time": current_time,
237
+ "model_type": model_type,
238
+ "likes": model_info.likes,
239
+ "params": model_size,
240
+ "license": license,
241
+ "private": False,
242
+ }
243
+ print(f"\nEvaluation entry created: {json.dumps(eval_entry, indent=2)}")
244
+
245
+ print("\n=== Checking for duplicate submission ===")
246
+ model_key = f"{model}_{revision}_{precision}"
247
+ if model_key in REQUESTED_MODELS:
248
+ print(f"Found existing submission with key: {model_key}")
249
+ # Get the status from the queue file
250
+ queue_file = REQUESTED_MODELS[model_key]
251
+ try:
252
+ with open(queue_file, 'r') as f:
253
+ queue_entry = json.load(f)
254
+ status = queue_entry.get('status')
255
+ print(f"Found existing submission with status: {status}")
256
+ if status is None:
257
+ print(f"Warning: No status found in queue file {queue_file}")
258
+ return styled_warning("Error checking model status. Please try again later.")
259
+
260
+ if status != EvaluationStatus.FAILED.value:
261
+ print(f"Model already submitted and in {status} status")
262
+ return styled_warning(f"This model has been already submitted and is in {status} status.")
263
+ except Exception as e:
264
+ print(f"Error reading queue file: {e}")
265
+ print(f"Full traceback: {traceback.format_exc()}")
266
+ return styled_warning("Error checking model status. Please try again later.")
267
+
268
+ print("\n=== Creating evaluation file ===")
269
+ OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
270
+ print(f"Creating output directory: {OUT_DIR}")
271
+ os.makedirs(OUT_DIR, exist_ok=True)
 
 
 
272
 
273
+ out_path = f"{OUT_DIR}/{model_path}_eval_request_False_{precision}_{weight_type}.json"
274
+ print(f"Output file path: {out_path}")
275
+
276
+ # Write evaluation entry to file
277
+ try:
278
+ with open(out_path, "w") as f:
279
+ f.write(json.dumps(eval_entry))
280
+ print("\nEvaluation file created successfully")
281
+
282
+ # Upload to Hugging Face
283
+ print("\n=== Uploading evaluation file ===")
284
+ API.upload_file(
285
+ path_or_fileobj=out_path,
286
+ path_in_repo=out_path.split("eval-queue/")[1],
287
+ repo_id=QUEUE_REPO,
288
+ repo_type="dataset",
289
+ commit_message=f"Add evaluation request for {model}",
290
+ token=TOKEN
291
+ )
292
+ print(f"\nEvaluation request uploaded successfully to {QUEUE_REPO}")
293
+
294
+ # Clean up local file
295
+ os.remove(out_path)
296
+ print("\nLocal evaluation file removed")
297
+
298
+ return styled_message(
299
+ "Evaluation request created successfully! Please wait for the evaluation to complete."
300
+ )
301
+ except Exception as e:
302
+ print(f"Error during file operations: {str(e)}")
303
+ print(f"Full traceback: {traceback.format_exc()}")
304
+ return styled_error(f"Failed to create evaluation request: {str(e)}")
305
+
306
+
307
+
308
+ dataloader = DataLoader(tsac_dataset, batch_size=32, shuffle=False)
309
 
310
  model_obj.eval()
311
  with torch.no_grad():
312
  predictions = []
313
+ targets = []
314
 
315
+ for batch in dataloader:
316
+ inputs = {k: v.to(device) for k, v in batch.items() if k != 'target'}
317
+ target = batch['target'].to(device)
318
+
319
+ # Log the first batch details
320
+ if len(predictions) == 0: # Only log for the first batch
321
+ print(f"\nFirst batch example:")
322
+ print(f"Input keys: {list(inputs.keys())}")
323
+ print(f"Target shape: {target.shape}")
324
 
325
  outputs = model_obj(**inputs)
326
+ print(f"\nModel output type: {type(outputs)}")
327
+
328
+ # Try to get logits from different possible formats
329
+ if isinstance(outputs, dict):
330
+ print(f"Output keys: {list(outputs.keys())}")
331
+ # Try different common keys
332
+ if 'logits' in outputs:
333
+ logits = outputs['logits']
334
+ elif 'prediction_logits' in outputs:
335
+ logits = outputs['prediction_logits']
336
+ else:
337
+ raise ValueError(f"Unknown output format. Available keys: {list(outputs.keys())}")
338
+ elif isinstance(outputs, tuple):
339
+ print(f"Output tuple length: {len(outputs)}")
340
+ # Try different positions in the tuple
341
+ if len(outputs) > 0:
342
+ logits = outputs[0]
343
+ else:
344
+ raise ValueError("Empty output tuple")
345
+ else:
346
+ # If it's a single tensor, assume it's the logits
347
+ logits = outputs
348
+
349
+ print(f"Logits shape: {logits.shape}")
350
+ # For sequence classification, we typically use the [CLS] token's prediction
351
+ # Get the first token's prediction (CLS token)
352
+ cls_logits = logits[:, 0, :] # Shape: [batch_size, num_classes]
353
+ predictions.extend(cls_logits.argmax(dim=-1).cpu().tolist())
354
+ targets.extend(target.cpu().tolist())
355
+
356
+ accuracy = sum(p == t for p, t in zip(predictions, targets)) / len(predictions)
357
 
358
+ eval_entry['results'] = {'accuracy': accuracy}
359
 
360
+ # Update the queue file with results
361
+ with open(out_path, "w") as f:
362
+ f.write(json.dumps(eval_entry))
363
+
364
  # Evaluate on ArabML
365
  print("Evaluating on ArabML Tunisian Corpus...")
366
+ arabml_dataset = load_dataset("arbml/Tunisian_Dialect_Corpus", split="train", trust_remote_code=True)
367
 
368
  def preprocess_arabml(examples):
369
+ return tokenizer(examples['Tweet'], padding=True, truncation=True, max_length=512)
370
 
371
  arabml_dataset = arabml_dataset.map(preprocess_arabml, batched=True)
372
 
 
374
  covered_tokens = 0
375
 
376
  for example in arabml_dataset:
377
+ tokens = tokenizer.tokenize(example['Tweet'])
378
  total_tokens += len(tokens)
379
  covered_tokens += len([t for t in tokens if t != tokenizer.unk_token])
380
 
381
  arabml_coverage = covered_tokens / total_tokens if total_tokens > 0 else 0
382
 
383
  # Store results