abarbosa committed
Commit 53ffe3f · verified · 1 parent: 8cee98e

Pushing fine-tuned model to Hugging Face Hub
README.md ADDED
@@ -0,0 +1,49 @@
+
+ ---
+ language:
+ - pt
+ - en
+ tags:
+ - aes
+ datasets:
+ - kamel-usp/aes_enem_dataset
+ base_model: microsoft/phi-4
+ metrics:
+ - accuracy
+ - qwk
+ library_name: peft
+ model-index:
+ - name: phi4-balanced-C2
+   results:
+   - task:
+       type: text-classification
+       name: Automated Essay Score
+     dataset:
+       name: Automated Essay Score ENEM Dataset
+       type: kamel-usp/aes_enem_dataset
+       config: JBCS2025
+       split: test
+     metrics:
+     - name: Macro F1 (ignoring nan)
+       type: f1
+       value: 0.4230106879189448
+     - name: QWK
+       type: qwk
+       value: 0.4118587182355762
+     - name: Weighted Macro F1
+       type: f1
+       value: 0.4331935675507009
+ ---
+ # Model ID: phi4-balanced-C2
+ ## Results
+ |                              |   test_data |
+ |:-----------------------------|------------:|
+ | eval_accuracy                |   0.456522  |
+ | eval_RMSE                    |  60.911     |
+ | eval_QWK                     |   0.411859  |
+ | eval_Macro_F1                |   0.282007  |
+ | eval_Macro_F1_(ignoring_nan) |   0.423011  |
+ | eval_Weighted_F1             |   0.433194  |
+ | eval_Micro_F1                |   0.456522  |
+ | eval_HDIV                    |   0.0797101 |
+
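For completeness, a minimal inference sketch. The Hub id `kamel-usp/phi4-balanced-C2` is an assumption (substitute the repo this card actually lives in); `num_labels=6` and the bf16 dtype come from the training log below. It loads the phi-4 backbone as a sequence classifier and applies this LoRA adapter:

```python
import torch
from peft import PeftModel
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Hypothetical repo id -- replace with the actual Hub id of this adapter.
ADAPTER_ID = "kamel-usp/phi4-balanced-C2"

tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-4")
base = AutoModelForSequenceClassification.from_pretrained(
    "microsoft/phi-4", num_labels=6, torch_dtype=torch.bfloat16
)
model = PeftModel.from_pretrained(base, ADAPTER_ID).eval()

inputs = tokenizer("Texto da redação...", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.argmax(-1).item())  # class index 0..5 (grade = index * 40)
```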
adapter_config.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "microsoft/phi-4",
+   "bias": "none",
+   "corda_config": null,
+   "eva_config": null,
+   "exclude_modules": null,
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_bias": false,
+   "lora_dropout": 0.05,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": [
+     "classifier",
+     "score"
+   ],
+   "peft_type": "LORA",
+   "r": 8,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "down_proj",
+     "gate_up_proj",
+     "qkv_proj",
+     "o_proj"
+   ],
+   "task_type": "SEQ_CLS",
+   "trainable_token_indices": null,
+   "use_dora": false,
+   "use_rslora": false
+ }
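With `r=8` and these target modules, the adapter size can be cross-checked against the "27,883,520 trainable parameters" line in the training log below. A sketch using phi-4's dimensions from the logged Phi3Config (hidden 5120, intermediate 17920, 40 heads / 10 KV heads of head dim 128, 40 layers, 6 labels):

```python
# LoRA adds r * (d_in + d_out) params per adapted Linear layer.
r, hidden, inter, layers, labels = 8, 5120, 17920, 40, 6
head_dim, n_heads, n_kv = 128, 40, 10

qkv_out = (n_heads + 2 * n_kv) * head_dim          # fused qkv_proj output: 7680
per_layer = (
    r * (hidden + qkv_out)        # qkv_proj
    + r * (hidden + hidden)       # o_proj
    + r * (hidden + 2 * inter)    # fused gate_up_proj output: 35840
    + r * (inter + hidden)        # down_proj
)
# modules_to_save trains the 6x5120 "score" head in full.
total = per_layer * layers + labels * hidden
print(total)  # 27883520 -- matches the log
```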
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7e9597bfea75f0b92f4ab07e10428263630ef1a9f9e09556e8f2724b9dfe01c8
+ size 111515584
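This is a Git LFS pointer, not the weights themselves. After downloading the actual file, it can be checked against the pointer's oid and size (a small sketch):

```python
import hashlib
import os

PATH = "adapter_model.safetensors"
EXPECTED_OID = "7e9597bfea75f0b92f4ab07e10428263630ef1a9f9e09556e8f2724b9dfe01c8"
EXPECTED_SIZE = 111515584

h = hashlib.sha256()
with open(PATH, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

assert os.path.getsize(PATH) == EXPECTED_SIZE
assert h.hexdigest() == EXPECTED_OID
```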
run_experiment.log ADDED
@@ -0,0 +1,698 @@
+ [2025-03-23 16:49:40,395][__main__][INFO] - cache_dir: /media/data/tmp
+ dataset:
+   name: kamel-usp/aes_enem_dataset
+   split: JBCS2025
+ training_params:
+   seed: 42
+   num_train_epochs: 20
+   logging_steps: 100
+   metric_for_best_model: QWK
+   bf16: true
+ post_training_results:
+   model_path: /workspace/jbcs2025/outputs/2025-03-23/15-04-12
+ experiments:
+   model:
+     name: microsoft/phi-4
+     type: phi4_classification_lora
+     num_labels: 6
+     output_dir: ./results/phi4-balanced/C2
+     logging_dir: ./logs/phi4-balanced/C2
+     best_model_dir: ./results/phi4-balanced/C2/best_model
+     lora_r: 8
+     lora_dropout: 0.05
+     lora_alpha: 16
+     lora_target_modules: all-linear
+   dataset:
+     grade_index: 1
+   training_id: phi4-balanced-C2
+   training_params:
+     weight_decay: 0.01
+     warmup_ratio: 0.1
+     learning_rate: 5.0e-05
+     train_batch_size: 1
+     eval_batch_size: 16
+     gradient_accumulation_steps: 16
+     gradient_checkpointing: false
+
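These hyperparameters are consistent with the step counts logged further down; a quick back-of-envelope check (Trainer's exact rounding may differ, so treat this as a sketch):

```python
# Values taken from the config above and the training log below.
num_examples = 500                  # logged training set size
train_batch_size = 1
gradient_accumulation_steps = 16
num_train_epochs = 20
warmup_ratio = 0.1

effective_batch = train_batch_size * gradient_accumulation_steps  # 16
steps_per_epoch = num_examples // effective_batch                 # 31
total_steps = steps_per_epoch * num_train_epochs                  # 620
warmup_steps = int(warmup_ratio * total_steps)                    # 62
print(total_steps, warmup_steps)  # 620 62 -- matches the log
```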
+ [2025-03-23 16:49:40,398][__main__][INFO] - Starting the Fine Tuning training process.
+ [2025-03-23 16:49:43,783][transformers.tokenization_utils_base][INFO] - loading file vocab.json from cache at /media/data/tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/vocab.json
+ [2025-03-23 16:49:43,783][transformers.tokenization_utils_base][INFO] - loading file merges.txt from cache at /media/data/tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/merges.txt
+ [2025-03-23 16:49:43,783][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at /media/data/tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/tokenizer.json
+ [2025-03-23 16:49:43,783][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /media/data/tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/added_tokens.json
+ [2025-03-23 16:49:43,783][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /media/data/tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/special_tokens_map.json
+ [2025-03-23 16:49:43,784][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /media/data/tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/tokenizer_config.json
+ [2025-03-23 16:49:43,784][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None
+ [2025-03-23 16:49:44,000][__main__][INFO] - Tokenizer function parameters- Padding:longest; Truncation: False
+ [2025-03-23 16:49:45,344][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /media/data/tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/config.json
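The "Padding:longest; Truncation: False" line suggests a tokenization step along these lines (a sketch; the actual function is not in this commit, and the `essay_text` column name is taken from the ignored-columns messages later in the log):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-4")

def tokenize_fn(batch):
    # Mirrors the logged settings: pad to the longest sequence in the
    # batch and never truncate (phi-4 supports 16k positions).
    return tokenizer(batch["essay_text"], padding="longest", truncation=False)
```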
+ [2025-03-23 16:49:45,345][transformers.configuration_utils][INFO] - Model config Phi3Config {
+   "architectures": [
+     "Phi3ForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 100257,
+   "embd_pdrop": 0.0,
+   "eos_token_id": 100265,
+   "hidden_act": "silu",
+   "hidden_size": 5120,
+   "id2label": {
+     "0": 0,
+     "1": 40,
+     "2": 80,
+     "3": 120,
+     "4": 160,
+     "5": 200
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 17920,
+   "label2id": {
+     "0": 0,
+     "40": 1,
+     "80": 2,
+     "120": 3,
+     "160": 4,
+     "200": 5
+   },
+   "max_position_embeddings": 16384,
+   "model_type": "phi3",
+   "num_attention_heads": 40,
+   "num_hidden_layers": 40,
+   "num_key_value_heads": 10,
+   "original_max_position_embeddings": 16384,
+   "pad_token_id": 100349,
+   "partial_rotary_factor": 1.0,
+   "resid_pdrop": 0.0,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "rope_theta": 250000,
+   "sliding_window": null,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.50.0",
+   "use_cache": true,
+   "vocab_size": 100352
+ }
+
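The `id2label` map above encodes the ENEM competency scale (0, 40, 80, 120, 160, 200) as six classes, which is presumably why the RMSE values logged below (roughly 46-78) live on the 0-200 grade scale rather than on class indices. Converting a predicted class back to a grade is just a lookup:

```python
# Grade scale taken from the id2label map in the config above.
ID2GRADE = {0: 0, 1: 40, 2: 80, 3: 120, 4: 160, 5: 200}

def class_to_grade(class_index: int) -> int:
    return ID2GRADE[class_index]

assert class_to_grade(5) == 200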
+ [2025-03-23 16:49:45,370][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /media/data/tmp/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/model.safetensors.index.json
+ [2025-03-23 16:49:45,370][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.bfloat16 as defined in model's config object
+ [2025-03-23 16:49:45,370][transformers.modeling_utils][INFO] - Instantiating Phi3ForSequenceClassification model under default dtype torch.bfloat16.
+ [2025-03-23 16:50:07,592][transformers.modeling_utils][INFO] - Some weights of the model checkpoint at microsoft/phi-4 were not used when initializing Phi3ForSequenceClassification: ['lm_head.weight']
+ - This IS expected if you are initializing Phi3ForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
+ - This IS NOT expected if you are initializing Phi3ForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
+ [2025-03-23 16:50:07,592][transformers.modeling_utils][WARNING] - Some weights of Phi3ForSequenceClassification were not initialized from the model checkpoint at microsoft/phi-4 and are newly initialized: ['score.weight']
+ You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
+ [2025-03-23 16:50:09,632][__main__][INFO] - None
+ [2025-03-23 16:50:09,634][transformers.training_args][INFO] - PyTorch: setting up devices
+ [2025-03-23 16:50:09,677][__main__][INFO] - Total steps: 620. Number of warmup steps: 62
+ [2025-03-23 16:50:09,684][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching.
+ [2025-03-23 16:50:09,707][transformers.trainer][INFO] - Using auto half precision backend
+ [2025-03-23 16:50:09,708][transformers.trainer][WARNING] - No label_names provided for model class `PeftModelForSequenceClassification`. Since `PeftModel` hides base models input arguments, if label_names is not given, label_names can't be set automatically within `Trainer`. Note that empty label_names list will be used instead.
+ [2025-03-23 16:50:09,736][transformers.trainer][INFO] - The following columns in the evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: id_prompt, reference, prompt, grades, essay_year, essay_text, supporting_text, id. If id_prompt, reference, prompt, grades, essay_year, essay_text, supporting_text, id are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
+ [2025-03-23 16:50:09,749][transformers.trainer][INFO] -
+ ***** Running Evaluation *****
+ [2025-03-23 16:50:09,749][transformers.trainer][INFO] - Num examples = 132
+ [2025-03-23 16:50:09,749][transformers.trainer][INFO] - Batch size = 16
+ [2025-03-23 16:50:43,802][transformers][INFO] - {'accuracy': 0.22727272727272727, 'RMSE': 61.987290975039734, 'QWK': 0.0, 'HDIV': 0.19696969696969702, 'Macro_F1': 0.07407407407407407, 'Micro_F1': 0.22727272727272727, 'Weighted_F1': 0.08417508417508417, 'Macro_F1_(ignoring_nan)': np.float64(0.37037037037037035)}
+ [2025-03-23 16:50:43,805][tensorboardX.summary][INFO] - Summary name eval/Macro_F1_(ignoring_nan) is illegal; using eval/Macro_F1__ignoring_nan_ instead.
+ [2025-03-23 16:50:44,041][transformers.trainer][INFO] - The following columns in the training set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: id_prompt, reference, prompt, grades, essay_year, essay_text, supporting_text, id. If id_prompt, reference, prompt, grades, essay_year, essay_text, supporting_text, id are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
+ [2025-03-23 16:50:44,073][transformers.trainer][INFO] - ***** Running training *****
+ [2025-03-23 16:50:44,073][transformers.trainer][INFO] - Num examples = 500
+ [2025-03-23 16:50:44,073][transformers.trainer][INFO] - Num Epochs = 20
+ [2025-03-23 16:50:44,073][transformers.trainer][INFO] - Instantaneous batch size per device = 1
+ [2025-03-23 16:50:44,073][transformers.trainer][INFO] - Total train batch size (w. parallel, distributed & accumulation) = 16
+ [2025-03-23 16:50:44,073][transformers.trainer][INFO] - Gradient Accumulation steps = 16
+ [2025-03-23 16:50:44,073][transformers.trainer][INFO] - Total optimization steps = 620
+ [2025-03-23 16:50:44,075][transformers.trainer][INFO] - Number of trainable parameters = 27,883,520
+ [2025-03-23 16:59:37,359][transformers.trainer][INFO] - The following columns in the evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: id_prompt, reference, prompt, grades, essay_year, essay_text, supporting_text, id. If id_prompt, reference, prompt, grades, essay_year, essay_text, supporting_text, id are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
+ [2025-03-23 16:59:37,364][transformers.trainer][INFO] -
+ ***** Running Evaluation *****
+ [2025-03-23 16:59:37,364][transformers.trainer][INFO] - Num examples = 132
+ [2025-03-23 16:59:37,364][transformers.trainer][INFO] - Batch size = 16
+ [2025-03-23 17:00:11,302][transformers][INFO] - {'accuracy': 0.19696969696969696, 'RMSE': 78.39294959021854, 'QWK': 0.14859294692382052, 'HDIV': 0.15909090909090906, 'Macro_F1': 0.1369098191528098, 'Micro_F1': 0.19696969696969696, 'Weighted_F1': 0.1820793197173826, 'Macro_F1_(ignoring_nan)': np.float64(0.17113727394101225)}
+ [2025-03-23 17:00:11,304][tensorboardX.summary][INFO] - Summary name eval/Macro_F1_(ignoring_nan) is illegal; using eval/Macro_F1__ignoring_nan_ instead.
+ [2025-03-23 17:00:11,307][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-03-23/16-49-40/results/phi4-balanced/C2/checkpoint-32
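The metric dictionaries in these evaluation blocks are not defined anywhere in this commit. A sketch of how the standard ones are conventionally computed, with RMSE on the 0-200 grade scale (matching the magnitudes logged here) and quadratic weighted kappa from scikit-learn; `Macro_F1_(ignoring_nan)` and `HDIV` are project-specific and omitted:

```python
import numpy as np
from sklearn.metrics import accuracy_score, cohen_kappa_score, f1_score

GRADES = np.array([0, 40, 80, 120, 160, 200])

def compute_metrics(y_true, y_pred):
    # y_true / y_pred are integer class indices in [0, 5].
    y_true, y_pred = np.asarray(y_true), np.asarray(y_pred)
    return {
        "accuracy": accuracy_score(y_true, y_pred),
        # RMSE on the 0-200 grade scale, matching the log's magnitudes.
        "RMSE": float(np.sqrt(np.mean((GRADES[y_true] - GRADES[y_pred]) ** 2))),
        # Quadratic weighted kappa.
        "QWK": cohen_kappa_score(y_true, y_pred, weights="quadratic"),
        "Macro_F1": f1_score(y_true, y_pred, average="macro"),
        "Micro_F1": f1_score(y_true, y_pred, average="micro"),
        "Weighted_F1": f1_score(y_true, y_pred, average="weighted"),
    }
```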
+ [2025-03-23 17:00:11,698][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--microsoft--phi-4/snapshots/187ef0342fff0eb3333be9f00389385e95ef0b61/config.json
+ [2025-03-23 17:00:11,699][transformers.configuration_utils][INFO] - Model config Phi3Config {
+   "architectures": [
+     "Phi3ForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 100257,
+   "embd_pdrop": 0.0,
+   "eos_token_id": 100265,
+   "hidden_act": "silu",
+   "hidden_size": 5120,
+   "initializer_range": 0.02,
+   "intermediate_size": 17920,
+   "max_position_embeddings": 16384,
+   "model_type": "phi3",
+   "num_attention_heads": 40,
+   "num_hidden_layers": 40,
+   "num_key_value_heads": 10,
+   "original_max_position_embeddings": 16384,
+   "pad_token_id": 100349,
+   "partial_rotary_factor": 1.0,
+   "resid_pdrop": 0.0,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "rope_theta": 250000,
+   "sliding_window": null,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.50.0",
+   "use_cache": true,
+   "vocab_size": 100352
+ }
+
+ [2025-03-23 17:09:05,827][transformers.trainer][INFO] - The following columns in the evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: id_prompt, reference, prompt, grades, essay_year, essay_text, supporting_text, id. If id_prompt, reference, prompt, grades, essay_year, essay_text, supporting_text, id are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
+ [2025-03-23 17:09:05,829][transformers.trainer][INFO] -
+ ***** Running Evaluation *****
+ [2025-03-23 17:09:05,830][transformers.trainer][INFO] - Num examples = 132
+ [2025-03-23 17:09:05,830][transformers.trainer][INFO] - Batch size = 16
+ [2025-03-23 17:09:39,710][transformers][INFO] - {'accuracy': 0.38636363636363635, 'RMSE': 54.9379815626841, 'QWK': 0.04807692307692302, 'HDIV': 0.11363636363636365, 'Macro_F1': 0.1793170731707317, 'Micro_F1': 0.38636363636363635, 'Weighted_F1': 0.32978566149297855, 'Macro_F1_(ignoring_nan)': np.float64(0.4482926829268293)}
+ [2025-03-23 17:09:39,710][tensorboardX.summary][INFO] - Summary name eval/Macro_F1_(ignoring_nan) is illegal; using eval/Macro_F1__ignoring_nan_ instead.
+ [2025-03-23 17:09:39,714][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-03-23/16-49-40/results/phi4-balanced/C2/checkpoint-64
+ [2025-03-23 17:18:33,419][transformers.trainer][INFO] - The following columns in the evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: id_prompt, reference, prompt, grades, essay_year, essay_text, supporting_text, id. If id_prompt, reference, prompt, grades, essay_year, essay_text, supporting_text, id are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
+ [2025-03-23 17:18:33,422][transformers.trainer][INFO] -
+ ***** Running Evaluation *****
+ [2025-03-23 17:18:33,422][transformers.trainer][INFO] - Num examples = 132
+ [2025-03-23 17:18:33,422][transformers.trainer][INFO] - Batch size = 16
+ [2025-03-23 17:19:07,193][transformers][INFO] - {'accuracy': 0.4621212121212121, 'RMSE': 53.37119867948301, 'QWK': 0.3829076151826213, 'HDIV': 0.030303030303030276, 'Macro_F1': 0.21136288998357963, 'Micro_F1': 0.4621212121212121, 'Weighted_F1': 0.3556202418271384, 'Macro_F1_(ignoring_nan)': np.float64(0.5284072249589491)}
+ [2025-03-23 17:19:07,193][tensorboardX.summary][INFO] - Summary name eval/Macro_F1_(ignoring_nan) is illegal; using eval/Macro_F1__ignoring_nan_ instead.
+ [2025-03-23 17:19:07,197][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-03-23/16-49-40/results/phi4-balanced/C2/checkpoint-96
+ [2025-03-23 17:19:08,261][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-03-23/16-49-40/results/phi4-balanced/C2/checkpoint-32] due to args.save_total_limit
+ [2025-03-23 17:19:08,303][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-03-23/16-49-40/results/phi4-balanced/C2/checkpoint-64] due to args.save_total_limit
+ [2025-03-23 17:28:00,964][transformers.trainer][INFO] - The following columns in the evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: id_prompt, reference, prompt, grades, essay_year, essay_text, supporting_text, id. If id_prompt, reference, prompt, grades, essay_year, essay_text, supporting_text, id are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
+ [2025-03-23 17:28:00,967][transformers.trainer][INFO] -
+ ***** Running Evaluation *****
+ [2025-03-23 17:28:00,967][transformers.trainer][INFO] - Num examples = 132
+ [2025-03-23 17:28:00,967][transformers.trainer][INFO] - Batch size = 16
+ [2025-03-23 17:28:34,842][transformers][INFO] - {'accuracy': 0.42424242424242425, 'RMSE': 51.52228114656274, 'QWK': 0.29195650044087385, 'HDIV': 0.007575757575757569, 'Macro_F1': 0.20225108225108226, 'Micro_F1': 0.42424242424242425, 'Weighted_F1': 0.33055227600682147, 'Macro_F1_(ignoring_nan)': np.float64(0.3370851370851371)}
+ [2025-03-23 17:28:34,843][tensorboardX.summary][INFO] - Summary name eval/Macro_F1_(ignoring_nan) is illegal; using eval/Macro_F1__ignoring_nan_ instead.
+ [2025-03-23 17:28:34,846][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-03-23/16-49-40/results/phi4-balanced/C2/checkpoint-128
+ [2025-03-23 17:37:28,963][transformers.trainer][INFO] - The following columns in the evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: id_prompt, reference, prompt, grades, essay_year, essay_text, supporting_text, id. If id_prompt, reference, prompt, grades, essay_year, essay_text, supporting_text, id are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
+ [2025-03-23 17:37:28,965][transformers.trainer][INFO] -
+ ***** Running Evaluation *****
+ [2025-03-23 17:37:28,965][transformers.trainer][INFO] - Num examples = 132
+ [2025-03-23 17:37:28,965][transformers.trainer][INFO] - Batch size = 16
+ [2025-03-23 17:38:02,817][transformers][INFO] - {'accuracy': 0.4696969696969697, 'RMSE': 53.59782899266791, 'QWK': 0.37511984659635667, 'HDIV': 0.030303030303030276, 'Macro_F1': 0.26887134164010434, 'Micro_F1': 0.4696969696969697, 'Weighted_F1': 0.3967379898667931, 'Macro_F1_(ignoring_nan)': np.float64(0.3360891770501304)}
+ [2025-03-23 17:38:02,818][tensorboardX.summary][INFO] - Summary name eval/Macro_F1_(ignoring_nan) is illegal; using eval/Macro_F1__ignoring_nan_ instead.
+ [2025-03-23 17:38:02,820][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-03-23/16-49-40/results/phi4-balanced/C2/checkpoint-160
+ [2025-03-23 17:38:04,167][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-03-23/16-49-40/results/phi4-balanced/C2/checkpoint-128] due to args.save_total_limit
+ [2025-03-23 17:46:56,854][transformers.trainer][INFO] - The following columns in the evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: id_prompt, reference, prompt, grades, essay_year, essay_text, supporting_text, id. If id_prompt, reference, prompt, grades, essay_year, essay_text, supporting_text, id are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
+ [2025-03-23 17:46:56,856][transformers.trainer][INFO] -
+ ***** Running Evaluation *****
+ [2025-03-23 17:46:56,856][transformers.trainer][INFO] - Num examples = 132
+ [2025-03-23 17:46:56,856][transformers.trainer][INFO] - Batch size = 16
+ [2025-03-23 17:47:30,576][transformers][INFO] - {'accuracy': 0.4621212121212121, 'RMSE': 50.81159495448044, 'QWK': 0.29590303515977157, 'HDIV': 0.045454545454545414, 'Macro_F1': 0.27262584876988477, 'Micro_F1': 0.4621212121212121, 'Weighted_F1': 0.4151466188225378, 'Macro_F1_(ignoring_nan)': np.float64(0.4543764146164746)}
+ [2025-03-23 17:47:30,577][tensorboardX.summary][INFO] - Summary name eval/Macro_F1_(ignoring_nan) is illegal; using eval/Macro_F1__ignoring_nan_ instead.
+ [2025-03-23 17:47:30,580][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-03-23/16-49-40/results/phi4-balanced/C2/checkpoint-192
+ [2025-03-23 17:47:31,626][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-03-23/16-49-40/results/phi4-balanced/C2/checkpoint-160] due to args.save_total_limit
+ [2025-03-23 17:56:24,123][transformers.trainer][INFO] - The following columns in the evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: id_prompt, reference, prompt, grades, essay_year, essay_text, supporting_text, id. If id_prompt, reference, prompt, grades, essay_year, essay_text, supporting_text, id are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
+ [2025-03-23 17:56:24,125][transformers.trainer][INFO] -
+ ***** Running Evaluation *****
+ [2025-03-23 17:56:24,125][transformers.trainer][INFO] - Num examples = 132
+ [2025-03-23 17:56:24,125][transformers.trainer][INFO] - Batch size = 16
+ [2025-03-23 17:56:57,849][transformers][INFO] - {'accuracy': 0.4090909090909091, 'RMSE': 59.59458995173263, 'QWK': 0.40026051358392256, 'HDIV': 0.045454545454545414, 'Macro_F1': 0.2429620933666886, 'Micro_F1': 0.4090909090909091, 'Weighted_F1': 0.39500831974942074, 'Macro_F1_(ignoring_nan)': np.float64(0.36444314005003287)}
+ [2025-03-23 17:56:57,849][tensorboardX.summary][INFO] - Summary name eval/Macro_F1_(ignoring_nan) is illegal; using eval/Macro_F1__ignoring_nan_ instead.
+ [2025-03-23 17:56:57,853][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-03-23/16-49-40/results/phi4-balanced/C2/checkpoint-224
+ [2025-03-23 17:56:58,940][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-03-23/16-49-40/results/phi4-balanced/C2/checkpoint-96] due to args.save_total_limit
+ [2025-03-23 17:56:58,982][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-03-23/16-49-40/results/phi4-balanced/C2/checkpoint-192] due to args.save_total_limit
+ [2025-03-23 18:05:51,473][transformers.trainer][INFO] - The following columns in the evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: id_prompt, reference, prompt, grades, essay_year, essay_text, supporting_text, id. If id_prompt, reference, prompt, grades, essay_year, essay_text, supporting_text, id are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
+ [2025-03-23 18:05:51,476][transformers.trainer][INFO] -
+ ***** Running Evaluation *****
+ [2025-03-23 18:05:51,476][transformers.trainer][INFO] - Num examples = 132
+ [2025-03-23 18:05:51,476][transformers.trainer][INFO] - Batch size = 16
+ [2025-03-23 18:06:25,206][transformers][INFO] - {'accuracy': 0.42424242424242425, 'RMSE': 53.25752187591567, 'QWK': 0.2858926342072411, 'HDIV': 0.022727272727272707, 'Macro_F1': 0.19468859921782666, 'Micro_F1': 0.42424242424242425, 'Weighted_F1': 0.3598213024932513, 'Macro_F1_(ignoring_nan)': np.float64(0.29203289882674)}
+ [2025-03-23 18:06:25,206][tensorboardX.summary][INFO] - Summary name eval/Macro_F1_(ignoring_nan) is illegal; using eval/Macro_F1__ignoring_nan_ instead.
+ [2025-03-23 18:06:25,209][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-03-23/16-49-40/results/phi4-balanced/C2/checkpoint-256
+ [2025-03-23 18:15:23,835][transformers.trainer][INFO] - The following columns in the evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: id_prompt, reference, prompt, grades, essay_year, essay_text, supporting_text, id. If id_prompt, reference, prompt, grades, essay_year, essay_text, supporting_text, id are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
+ [2025-03-23 18:15:23,837][transformers.trainer][INFO] -
+ ***** Running Evaluation *****
+ [2025-03-23 18:15:23,837][transformers.trainer][INFO] - Num examples = 132
+ [2025-03-23 18:15:23,837][transformers.trainer][INFO] - Batch size = 16
+ [2025-03-23 18:15:57,602][transformers][INFO] - {'accuracy': 0.5303030303030303, 'RMSE': 48.98979485566356, 'QWK': 0.34545454545454535, 'HDIV': 0.030303030303030276, 'Macro_F1': 0.3304219312830178, 'Micro_F1': 0.5303030303030303, 'Weighted_F1': 0.49144563122828755, 'Macro_F1_(ignoring_nan)': np.float64(0.4130274141037722)}
+ [2025-03-23 18:15:57,602][tensorboardX.summary][INFO] - Summary name eval/Macro_F1_(ignoring_nan) is illegal; using eval/Macro_F1__ignoring_nan_ instead.
+ [2025-03-23 18:15:57,605][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-03-23/16-49-40/results/phi4-balanced/C2/checkpoint-288
+ [2025-03-23 18:15:58,841][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-03-23/16-49-40/results/phi4-balanced/C2/checkpoint-256] due to args.save_total_limit
+ [2025-03-23 18:24:51,529][transformers.trainer][INFO] - The following columns in the evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: id_prompt, reference, prompt, grades, essay_year, essay_text, supporting_text, id. If id_prompt, reference, prompt, grades, essay_year, essay_text, supporting_text, id are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
+ [2025-03-23 18:24:51,531][transformers.trainer][INFO] -
+ ***** Running Evaluation *****
+ [2025-03-23 18:24:51,531][transformers.trainer][INFO] - Num examples = 132
+ [2025-03-23 18:24:51,531][transformers.trainer][INFO] - Batch size = 16
+ [2025-03-23 18:25:25,235][transformers][INFO] - {'accuracy': 0.45454545454545453, 'RMSE': 51.75700801618925, 'QWK': 0.3186659192825111, 'HDIV': 0.05303030303030298, 'Macro_F1': 0.2310617893009612, 'Micro_F1': 0.45454545454545453, 'Weighted_F1': 0.4315089072701878, 'Macro_F1_(ignoring_nan)': np.float64(0.34659268395144177)}
+ [2025-03-23 18:25:25,236][tensorboardX.summary][INFO] - Summary name eval/Macro_F1_(ignoring_nan) is illegal; using eval/Macro_F1__ignoring_nan_ instead.
+ [2025-03-23 18:25:25,240][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-03-23/16-49-40/results/phi4-balanced/C2/checkpoint-320
+ [2025-03-23 18:25:27,125][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-03-23/16-49-40/results/phi4-balanced/C2/checkpoint-288] due to args.save_total_limit
+ [2025-03-23 18:34:20,013][transformers.trainer][INFO] - The following columns in the evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: id_prompt, reference, prompt, grades, essay_year, essay_text, supporting_text, id. If id_prompt, reference, prompt, grades, essay_year, essay_text, supporting_text, id are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
+ [2025-03-23 18:34:20,015][transformers.trainer][INFO] -
+ ***** Running Evaluation *****
+ [2025-03-23 18:34:20,015][transformers.trainer][INFO] - Num examples = 132
+ [2025-03-23 18:34:20,015][transformers.trainer][INFO] - Batch size = 16
+ [2025-03-23 18:34:53,725][transformers][INFO] - {'accuracy': 0.4318181818181818, 'RMSE': 59.59458995173263, 'QWK': 0.3200900077350397, 'HDIV': 0.06060606060606055, 'Macro_F1': 0.23509519464835335, 'Micro_F1': 0.4318181818181818, 'Weighted_F1': 0.42138168615966626, 'Macro_F1_(ignoring_nan)': np.float64(0.35264279197253)}
+ [2025-03-23 18:34:53,725][tensorboardX.summary][INFO] - Summary name eval/Macro_F1_(ignoring_nan) is illegal; using eval/Macro_F1__ignoring_nan_ instead.
+ [2025-03-23 18:34:53,728][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-03-23/16-49-40/results/phi4-balanced/C2/checkpoint-352
+ [2025-03-23 18:34:54,743][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-03-23/16-49-40/results/phi4-balanced/C2/checkpoint-320] due to args.save_total_limit
+ [2025-03-23 18:43:47,551][transformers.trainer][INFO] - The following columns in the evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: id_prompt, reference, prompt, grades, essay_year, essay_text, supporting_text, id. If id_prompt, reference, prompt, grades, essay_year, essay_text, supporting_text, id are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
+ [2025-03-23 18:43:47,552][transformers.trainer][INFO] -
+ ***** Running Evaluation *****
+ [2025-03-23 18:43:47,552][transformers.trainer][INFO] - Num examples = 132
+ [2025-03-23 18:43:47,552][transformers.trainer][INFO] - Batch size = 16
+ [2025-03-23 18:44:21,266][transformers][INFO] - {'accuracy': 0.5378787878787878, 'RMSE': 46.056618647183825, 'QWK': 0.355684480642642, 'HDIV': 0.045454545454545414, 'Macro_F1': 0.27301688194743967, 'Micro_F1': 0.5378787878787878, 'Weighted_F1': 0.4949473165745214, 'Macro_F1_(ignoring_nan)': np.float64(0.4095253229211595)}
+ [2025-03-23 18:44:21,267][tensorboardX.summary][INFO] - Summary name eval/Macro_F1_(ignoring_nan) is illegal; using eval/Macro_F1__ignoring_nan_ instead.
+ [2025-03-23 18:44:21,269][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-03-23/16-49-40/results/phi4-balanced/C2/checkpoint-384
+ [2025-03-23 18:44:24,564][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-03-23/16-49-40/results/phi4-balanced/C2/checkpoint-352] due to args.save_total_limit
+ [2025-03-23 18:44:24,604][transformers.trainer][INFO] -
+
+ Training completed. Do not forget to share your model on huggingface.co/models =)
+
+
+ [2025-03-23 18:44:24,604][transformers.trainer][INFO] - Loading best model from /workspace/jbcs2025/outputs/2025-03-23/16-49-40/results/phi4-balanced/C2/checkpoint-224 (score: 0.40026051358392256).
+ [2025-03-23 18:44:25,097][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-03-23/16-49-40/results/phi4-balanced/C2/checkpoint-384] due to args.save_total_limit
+ [2025-03-23 18:44:25,152][transformers.trainer][INFO] - The following columns in the evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: id_prompt, reference, prompt, grades, essay_year, essay_text, supporting_text, id. If id_prompt, reference, prompt, grades, essay_year, essay_text, supporting_text, id are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
+ [2025-03-23 18:44:25,154][transformers.trainer][INFO] -
+ ***** Running Evaluation *****
+ [2025-03-23 18:44:25,155][transformers.trainer][INFO] - Num examples = 132
+ [2025-03-23 18:44:25,155][transformers.trainer][INFO] - Batch size = 16
+ [2025-03-23 18:44:59,146][transformers][INFO] - {'accuracy': 0.4090909090909091, 'RMSE': 59.59458995173263, 'QWK': 0.40026051358392256, 'HDIV': 0.045454545454545414, 'Macro_F1': 0.2429620933666886, 'Micro_F1': 0.4090909090909091, 'Weighted_F1': 0.39500831974942074, 'Macro_F1_(ignoring_nan)': np.float64(0.36444314005003287)}
+ [2025-03-23 18:44:59,148][tensorboardX.summary][INFO] - Summary name eval/Macro_F1_(ignoring_nan) is illegal; using eval/Macro_F1__ignoring_nan_ instead.
+ [2025-03-23 18:44:59,149][__main__][INFO] - Training completed successfully.
+ [2025-03-23 18:44:59,149][__main__][INFO] - Running on Test
+ [2025-03-23 18:44:59,150][transformers.trainer][INFO] - The following columns in the evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: id_prompt, reference, prompt, grades, essay_year, essay_text, supporting_text, id. If id_prompt, reference, prompt, grades, essay_year, essay_text, supporting_text, id are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
+ [2025-03-23 18:44:59,151][transformers.trainer][INFO] -
+ ***** Running Evaluation *****
+ [2025-03-23 18:44:59,151][transformers.trainer][INFO] - Num examples = 138
+ [2025-03-23 18:44:59,151][transformers.trainer][INFO] - Batch size = 16
+ [2025-03-23 18:45:35,347][transformers][INFO] - {'accuracy': 0.45652173913043476, 'RMSE': 60.91095901015048, 'QWK': 0.4118587182355762, 'HDIV': 0.07971014492753625, 'Macro_F1': 0.28200712527929656, 'Micro_F1': 0.45652173913043476, 'Weighted_F1': 0.43319356755070093, 'Macro_F1_(ignoring_nan)': np.float64(0.4230106879189448)}
+ [2025-03-23 18:45:35,347][tensorboardX.summary][INFO] - Summary name eval/Macro_F1_(ignoring_nan) is illegal; using eval/Macro_F1__ignoring_nan_ instead.
+ [2025-03-23 18:45:35,349][transformers.trainer][INFO] - Saving model checkpoint to ./results/phi4-balanced/C2/best_model
+ [2025-03-23 18:45:36,969][__main__][INFO] - Fine Tuning Finished.
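To reproduce the final test-set numbers above, the evaluation split can be pulled straight from the dataset named in the model card. A sketch; the `grade_index: 1` in the config at the top of this log suggests the second entry of the `grades` column (competency C2) is the target, but that indexing is an assumption:

```python
from datasets import load_dataset

ds = load_dataset("kamel-usp/aes_enem_dataset", "JBCS2025", split="test")
print(len(ds))  # 138 examples, matching the final evaluation block above

example = ds[0]
label = example["grades"][1]  # grade_index: 1 -> competency C2 (assumption)
```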
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0250a1f049b5785f215035dda73aa8857046aa467f21108058a9d8c058ecdc7b
+ size 5432
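training_args.bin is a pickled `TrainingArguments` object and can be inspected directly. Note that recent PyTorch defaults to `weights_only=True`, which must be disabled for pickled dataclasses; only do this for files you trust:

```python
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.metric_for_best_model)
# Expected from the logged config: 5e-05, 20, QWK
```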