Commit 22b1a98 by MatthiasPi (verified) · 1 Parent(s): 998e8ac

Update tasks/text.py

Files changed (1):
  1. tasks/text.py +22 -3
tasks/text.py CHANGED
@@ -9,7 +9,7 @@ from .utils.emissions import tracker, clean_emissions_data, get_space_info
 
  router = APIRouter()
 
- DESCRIPTION = "Random Baseline"
+ DESCRIPTION = "modernBERT"
  ROUTE = "/text"
 
  @router.post(ROUTE, tags=["Text Task"],
@@ -57,8 +57,27 @@ async def evaluate_text(request: TextEvaluationRequest):
      #--------------------------------------------------------------------------------------------
 
      # Make random predictions (placeholder for actual model inference)
-     true_labels = test_dataset["label"]
-     predictions = [random.randint(0, 7) for _ in range(len(true_labels))]
+     # true_labels = test_dataset["label"]
+     # predictions = [random.randint(0, 7) for _ in range(len(true_labels))]
+     path_model = 'MatthiasPi/CARDS_ModernBert_no_overfitting'
+     path_tokenizer = "answerdotai/ModernBERT-base"
+
+     model = AutoModelForSequenceClassification.from_pretrained(path_model)
+
+     def preprocess_function(df):
+         return tokenizer(df["quote"], truncation=True)
+     tokenized_test = test_dataset.map(preprocess_function, batched=True)
+
+     training_args = torch.load("../training_args.bin")
+     training_args.eval_strategy='no'
+
+     trainer = Trainer(
+         model=model,
+         args=training_args,
+         tokenizer=tokenizer
+     )
+
+     predictions = trainer.predict(tokenized_test)
 
      #--------------------------------------------------------------------------------------------
      # YOUR MODEL INFERENCE STOPS HERE
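
Two details of the new hunk are worth flagging: `tokenizer` is called in `preprocess_function` and passed to `Trainer`, but is never created from `path_tokenizer` within this diff, and `Trainer.predict` returns a `PredictionOutput` whose `.predictions` field holds logits rather than class indices. The sketch below reproduces the same inference flow in a self-contained form, assuming the standard `transformers`/`datasets` APIs; the `AutoTokenizer` line, the one-row placeholder dataset, the fresh `TrainingArguments` (instead of the serialized `../training_args.bin`), and the final argmax step are illustrative additions, not part of the commit.

import numpy as np
from datasets import Dataset
from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
                          Trainer, TrainingArguments)

path_model = "MatthiasPi/CARDS_ModernBert_no_overfitting"
path_tokenizer = "answerdotai/ModernBERT-base"

model = AutoModelForSequenceClassification.from_pretrained(path_model)
# Assumption: the commit references `tokenizer` but does not build it from
# `path_tokenizer` in this hunk, so it is instantiated here.
tokenizer = AutoTokenizer.from_pretrained(path_tokenizer)

def preprocess_function(df):
    # Tokenize the "quote" column; padding is applied later by the Trainer's collator.
    return tokenizer(df["quote"], truncation=True)

# Placeholder standing in for the `test_dataset` built from the evaluation request.
test_dataset = Dataset.from_dict({"quote": ["Example claim to classify."]})
tokenized_test = test_dataset.map(preprocess_function, batched=True)

# Inference-only arguments used in place of the serialized training_args.bin.
training_args = TrainingArguments(output_dir="tmp", per_device_eval_batch_size=16)

trainer = Trainer(model=model, args=training_args, tokenizer=tokenizer)

# Trainer.predict returns a PredictionOutput; .predictions holds the logits,
# so class indices are obtained with an argmax over the label dimension.
output = trainer.predict(tokenized_test)
predictions = np.argmax(output.predictions, axis=1)
print(predictions)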