Pisethan committed on
Commit 6bb98fe
1 Parent(s): f234767

Fine-tuned model update

checkpoint-6/config.json ADDED
@@ -0,0 +1,45 @@
+ {
+   "_name_or_path": "xlm-roberta-base",
+   "architectures": [
+     "XLMRobertaForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "arithmetic",
+     "1": "multiplication",
+     "2": "division",
+     "3": "algebra",
+     "4": "geometry",
+     "5": "exponents"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "algebra": 3,
+     "arithmetic": 0,
+     "division": 2,
+     "exponents": 5,
+     "geometry": 4,
+     "multiplication": 1
+   },
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "xlm-roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "output_past": true,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.47.0",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 250002
+ }
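
The config above wires a six-way math-topic classification head (arithmetic, multiplication, division, algebra, geometry, exponents) onto xlm-roberta-base. As a minimal usage sketch, not part of this commit: the checkpoint directory can be loaded with the standard transformers API, with the tokenizer taken from the upstream base model since no tokenizer files are included here; the example question is hypothetical.

    # Minimal inference sketch. Assumes the transformers and torch packages;
    # "checkpoint-6" is the directory added in this commit, and the tokenizer
    # is loaded from the base model because the checkpoint ships none.
    import torch
    from transformers import AutoModelForSequenceClassification, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
    model = AutoModelForSequenceClassification.from_pretrained("checkpoint-6")
    model.eval()

    inputs = tokenizer("What is 12 x 7?", return_tensors="pt")  # hypothetical example
    with torch.no_grad():
        logits = model(**inputs).logits
    label = model.config.id2label[logits.argmax(dim=-1).item()]
    print(label)  # one of the six topics from id2label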
checkpoint-6/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:00622e8cdee09bc9c52000c3ea057bfbb3b38ec90338e57a081ae179be6cc3cf
+ size 1112217312
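
These three lines are a Git LFS pointer, not the weights themselves: the 1,112,217,312-byte safetensors blob lives in LFS storage and is addressed by its SHA-256. A small sketch, assuming the blob has been fetched to the path below, for checking a local copy against the pointer's oid:

    # Compare a downloaded file's SHA-256 with the LFS pointer's oid.
    import hashlib

    def sha256_of(path, chunk_size=1 << 20):
        digest = hashlib.sha256()
        with open(path, "rb") as f:
            for block in iter(lambda: f.read(chunk_size), b""):
                digest.update(block)
        return digest.hexdigest()

    expected = "00622e8cdee09bc9c52000c3ea057bfbb3b38ec90338e57a081ae179be6cc3cf"
    assert sha256_of("checkpoint-6/model.safetensors") == expected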
checkpoint-6/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:800bc7bb32cd75dca077ef971f3dcf5e5dc584fc870a87448aab2989020f5571
+ size 2224548602
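
At 2,224,548,602 bytes, the optimizer state is almost exactly twice the 1,112,217,312-byte fp32 model (2 × 1,112,217,312 = 2,224,434,624), which is what an Adam-style optimizer produces when it keeps two fp32 moment buffers per parameter; the diff itself does not record which optimizer was used.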
checkpoint-6/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:07ae460aee1d885b276b898f4f8b9dee77c8b6c7941b7009c1d7d14dd5d118c4
+ size 13990
checkpoint-6/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1a507d9cafa2419d0ab9d8bfd3d73565df3d210c7e1a2b8ba116f25efdab63f1
+ size 1064
checkpoint-6/trainer_state.json ADDED
@@ -0,0 +1,57 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 3.0,
+   "eval_steps": 500,
+   "global_step": 6,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "eval_loss": 1.7609878778457642,
+       "eval_runtime": 0.4696,
+       "eval_samples_per_second": 8.517,
+       "eval_steps_per_second": 2.129,
+       "step": 2
+     },
+     {
+       "epoch": 2.0,
+       "eval_loss": 1.7294942140579224,
+       "eval_runtime": 0.2479,
+       "eval_samples_per_second": 16.136,
+       "eval_steps_per_second": 4.034,
+       "step": 4
+     },
+     {
+       "epoch": 3.0,
+       "eval_loss": 1.7482409477233887,
+       "eval_runtime": 0.279,
+       "eval_samples_per_second": 14.335,
+       "eval_steps_per_second": 3.584,
+       "step": 6
+     }
+   ],
+   "logging_steps": 50,
+   "max_steps": 6,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 3,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 2170744164864.0,
+   "train_batch_size": 8,
+   "trial_name": null,
+   "trial_params": null
+ }
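
The state records a three-epoch run that stopped at global step 6, evaluating once per epoch with a train batch size of 8; eval loss dips at epoch 2 (1.729) and ticks back up at epoch 3 (1.748). A hedged sketch of TrainingArguments consistent with these values follows; the model and dataset objects are placeholders, and the authoritative arguments are the ones serialized in training_args.bin.

    # Sketch of a Trainer setup matching the recorded state. model, train_ds,
    # and eval_ds are placeholders; output_dir is an assumption.
    from transformers import Trainer, TrainingArguments

    args = TrainingArguments(
        output_dir="sangapac-math-model",  # placeholder path
        num_train_epochs=3,                # "num_train_epochs": 3
        per_device_train_batch_size=8,     # "train_batch_size": 8
        eval_strategy="epoch",             # log_history shows one eval per epoch
        logging_steps=50,                  # "logging_steps": 50
        save_steps=500,                    # "save_steps": 500
    )
    trainer = Trainer(model=model, args=args,
                      train_dataset=train_ds, eval_dataset=eval_ds)
    trainer.train()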
checkpoint-6/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:598e7344eec6698068461bff781155d74bd9440d68eab071e2ac9d361da8c62d
+ size 5240
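
training_args.bin is a pickled TrainingArguments object rather than readable text. If needed, it can be inspected with torch, noting that unpickling executes code, so only load files you trust:

    # Deserialize the pickled TrainingArguments for inspection.
    # weights_only=False is needed on recent torch since this is not a tensor file.
    import torch

    args = torch.load("checkpoint-6/training_args.bin", weights_only=False)
    print(args.learning_rate, args.per_device_train_batch_size)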
config.json CHANGED
@@ -1,45 +1,45 @@
- {
-   "_name_or_path": "D:/sangapac-math-model",
-   "architectures": [
-     "XLMRobertaForSequenceClassification"
-   ],
-   "attention_probs_dropout_prob": 0.1,
-   "bos_token_id": 0,
-   "classifier_dropout": null,
-   "eos_token_id": 2,
-   "hidden_act": "gelu",
-   "hidden_dropout_prob": 0.1,
-   "hidden_size": 768,
-   "id2label": {
-     "0": "arithmetic",
-     "1": "multiplication",
-     "2": "division",
-     "3": "algebra",
-     "4": "geometry",
-     "5": "exponents"
-   },
-   "initializer_range": 0.02,
-   "intermediate_size": 3072,
-   "label2id": {
-     "algebra": 3,
-     "arithmetic": 0,
-     "division": 2,
-     "exponents": 5,
-     "geometry": 4,
-     "multiplication": 1
-   },
-   "layer_norm_eps": 1e-05,
-   "max_position_embeddings": 514,
-   "model_type": "xlm-roberta",
-   "num_attention_heads": 12,
-   "num_hidden_layers": 12,
-   "output_past": true,
-   "pad_token_id": 1,
-   "position_embedding_type": "absolute",
-   "problem_type": "single_label_classification",
-   "torch_dtype": "float32",
-   "transformers_version": "4.46.2",
-   "type_vocab_size": 1,
-   "use_cache": true,
-   "vocab_size": 250002
- }
+ {
+   "_name_or_path": "xlm-roberta-base",
+   "architectures": [
+     "XLMRobertaForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "arithmetic",
+     "1": "multiplication",
+     "2": "division",
+     "3": "algebra",
+     "4": "geometry",
+     "5": "exponents"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "algebra": 3,
+     "arithmetic": 0,
+     "division": 2,
+     "exponents": 5,
+     "geometry": 4,
+     "multiplication": 1
+   },
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "xlm-roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "output_past": true,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.47.0",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 250002
+ }
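
Only metadata changes in this hunk: _name_or_path moves from the local Windows path D:/sangapac-math-model to the upstream identifier xlm-roberta-base, and transformers_version is bumped from 4.46.2 to 4.47.0. The architecture, label maps, and every other hyperparameter are unchanged.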
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:75e036f125b577ef815ac4dc06a77a1b679cefd365c599e80c6ee3e823c67bb9
+ oid sha256:00622e8cdee09bc9c52000c3ea057bfbb3b38ec90338e57a081ae179be6cc3cf
  size 1112217312
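
Only the oid changes here; the size is byte-for-byte identical, as expected when the same fp32 architecture is re-saved with updated weights.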