ringorsolya committed
Commit: 3a5a325
Parent(s): 0df45f1

Upload folder using huggingface_hub
Files changed:
- .gitattributes +3 -0
- anger_benchmark.xlsx +0 -0
- checkpoint-2772/config.json +45 -0
- checkpoint-2772/model.safetensors +3 -0
- checkpoint-2772/optimizer.pt +3 -0
- checkpoint-2772/rng_state.pth +3 -0
- checkpoint-2772/scheduler.pt +3 -0
- checkpoint-2772/trainer_state.json +99 -0
- checkpoint-2772/training_args.bin +3 -0
- checkpoint-3696/config.json +45 -0
- checkpoint-3696/model.safetensors +3 -0
- checkpoint-3696/rng_state.pth +3 -0
- checkpoint-3696/scheduler.pt +3 -0
- checkpoint-3696/trainer_state.json +118 -0
- checkpoint-3696/training_args.bin +3 -0
- cleaned_anger_train.xlsx +0 -0
- config.json +29 -0
- events.out.tfevents.1729344456.e68f58a32709.1290.2 +3 -0
- events.out.tfevents.1729349929.e68f58a32709.33818.0 +3 -0
- events.out.tfevents.1729362050.e68f58a32709.84141.0 +3 -0
- events.out.tfevents.1730058868.7ef9627f8aa1.700.0 +3 -0
- guiltbert_train.xlsx +0 -0
- model.safetensors +3 -0
- mores_parl_speech_1998_2002_xlmRoberta_predictions.xlsx +3 -0
- mores_parl_speech_2010_2014_part_0_predictions.xlsx +3 -0
- pooled_v4_xlmRoberta_training.xlsx +3 -0
- test_data.xlsx +0 -0
- training_args.bin +3 -0
- xlm_roberta_polish_train_20241017.xlsx +0 -0
.gitattributes
CHANGED
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+mores_parl_speech_1998_2002_xlmRoberta_predictions.xlsx filter=lfs diff=lfs merge=lfs -text
+mores_parl_speech_2010_2014_part_0_predictions.xlsx filter=lfs diff=lfs merge=lfs -text
+pooled_v4_xlmRoberta_training.xlsx filter=lfs diff=lfs merge=lfs -text
anger_benchmark.xlsx
ADDED
Binary file (34.2 kB).
checkpoint-2772/config.json
ADDED
@@ -0,0 +1,45 @@
{
  "_name_or_path": "xlm-roberta-base",
  "architectures": [
    "XLMRobertaForSequenceClassification"
  ],
  "attention_probs_dropout_prob": 0.1,
  "bos_token_id": 0,
  "classifier_dropout": null,
  "eos_token_id": 2,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "id2label": {
    "0": "LABEL_0",
    "1": "LABEL_1",
    "2": "LABEL_2",
    "3": "LABEL_3",
    "4": "LABEL_4",
    "5": "LABEL_5"
  },
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "label2id": {
    "LABEL_0": 0,
    "LABEL_1": 1,
    "LABEL_2": 2,
    "LABEL_3": 3,
    "LABEL_4": 4,
    "LABEL_5": 5
  },
  "layer_norm_eps": 1e-05,
  "max_position_embeddings": 514,
  "model_type": "xlm-roberta",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "output_past": true,
  "pad_token_id": 1,
  "position_embedding_type": "absolute",
  "problem_type": "multi_label_classification",
  "torch_dtype": "float32",
  "transformers_version": "4.46.0",
  "type_vocab_size": 1,
  "use_cache": true,
  "vocab_size": 250002
}
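The config above describes an xlm-roberta-base sequence classifier with six generic labels (LABEL_0 to LABEL_5) and problem_type set to multi_label_classification, so per-label scores come from an element-wise sigmoid rather than a softmax. A minimal inference sketch, assuming the checkpoint folder was downloaded locally; since no tokenizer files are part of this commit, the tokenizer is taken from the base model, and the local path and the 0.5 threshold are illustrative assumptions:

# A minimal sketch, assuming the checkpoint folder was downloaded to ./checkpoint-2772.
# The tokenizer comes from the base model; no tokenizer files are in this commit.
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
model = AutoModelForSequenceClassification.from_pretrained("./checkpoint-2772")  # hypothetical local path
model.eval()

texts = ["Example sentence to score."]
batch = tokenizer(texts, padding=True, truncation=True, return_tensors="pt")

with torch.no_grad():
    logits = model(**batch).logits        # shape: (batch, 6)

# problem_type is multi_label_classification, so apply a sigmoid per label,
# not a softmax over labels; 0.5 is an illustrative threshold.
probs = torch.sigmoid(logits)
predicted = (probs > 0.5).int()
print(probs, predicted)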
checkpoint-2772/model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:be47d0ad2ecaa55bdce468bdaa68a8045e8a27f49f5ab0ab438cd20494c31bdf
size 1112217312
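Like the other large artifacts in this commit, model.safetensors is stored as a Git LFS pointer: the repository tracks only the spec version, the sha256 object id, and the byte size, while the actual blob lives in LFS storage. A small sketch of how such a pointer can be parsed and a downloaded copy checked against it; the file paths are illustrative:

# A minimal sketch: parse a Git LFS pointer file and verify a downloaded blob against it.
# Paths are illustrative; adjust them to wherever the pointer and the real file live.
import hashlib

def parse_lfs_pointer(path):
    """Return the pointer's key/value fields (version, oid, size)."""
    fields = {}
    with open(path, "r", encoding="utf-8") as fh:
        for line in fh:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

def verify_blob(pointer_path, blob_path):
    fields = parse_lfs_pointer(pointer_path)
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])

    sha = hashlib.sha256()
    size = 0
    with open(blob_path, "rb") as fh:
        for chunk in iter(lambda: fh.read(1 << 20), b""):
            sha.update(chunk)
            size += len(chunk)
    return sha.hexdigest() == expected_oid and size == expected_size

# Example (hypothetical paths):
# print(verify_blob("pointers/model.safetensors", "downloads/model.safetensors"))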
checkpoint-2772/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b07154ce1e8e6d8386ee5c552b6ebbb8da735e9c5eca291f9c36046e75a93ad6
size 2224554234
checkpoint-2772/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fdf89f4d131941ed2d32c937dbe883ce9eae411d5e1cb32633c6f6edb39e1bb1
size 14244
checkpoint-2772/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f42f4c95afb8d7bc18fd98fc37c6b8fc78802c71e4358ad882e021e631e7b19c
size 1064
checkpoint-2772/trainer_state.json
ADDED
@@ -0,0 +1,99 @@
{
  "best_metric": 0.05848415940999985,
  "best_model_checkpoint": "/content/drive/MyDrive/checkpoint-2772",
  "epoch": 3.0,
  "eval_steps": 100,
  "global_step": 2772,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 5.851963043212891,
      "learning_rate": 4.5e-06,
      "loss": 0.261,
      "step": 924
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9312026002166848,
      "eval_f1": 0.9304556698132427,
      "eval_loss": 0.10791309922933578,
      "eval_precision": 0.9337159575536489,
      "eval_recall": 0.9312026002166848,
      "eval_runtime": 49.9811,
      "eval_samples_per_second": 36.934,
      "eval_steps_per_second": 2.321,
      "step": 924
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.24086147546768188,
      "learning_rate": 4.000000000000001e-06,
      "loss": 0.0812,
      "step": 1848
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9582881906825569,
      "eval_f1": 0.9581837801589876,
      "eval_loss": 0.06059493124485016,
      "eval_precision": 0.9585273431262236,
      "eval_recall": 0.9582881906825569,
      "eval_runtime": 49.6996,
      "eval_samples_per_second": 37.143,
      "eval_steps_per_second": 2.334,
      "step": 1848
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.1482285112142563,
      "learning_rate": 3.5e-06,
      "loss": 0.0501,
      "step": 2772
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9544962080173348,
      "eval_f1": 0.9543281540493387,
      "eval_loss": 0.05848415940999985,
      "eval_precision": 0.9552102707240017,
      "eval_recall": 0.9544962080173348,
      "eval_runtime": 49.6249,
      "eval_samples_per_second": 37.199,
      "eval_steps_per_second": 2.338,
      "step": 2772
    }
  ],
  "logging_steps": 100,
  "max_steps": 9240,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 100,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 2,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.165886956910592e+16,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}
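The trainer state above implies the broad shape of the run: a per-device batch size of 16, 10 planned epochs (max_steps 9240 at 924 steps per epoch), logging and evaluation once per epoch (entries appear only at steps 924, 1848, 2772), checkpoint selection on eval_loss, early stopping with patience 2, and an initial learning rate around 5e-6 with linear decay (4.5e-6 remaining after 1 of 10 epochs). The exact arguments live in the binary training_args.bin; the sketch below is a hedged reconstruction, with every value inferred from the state rather than read from that file:

# A hedged reconstruction of the Trainer setup implied by trainer_state.json.
# The real arguments are in training_args.bin; values here are inferred, not read from it.
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    EarlyStoppingCallback,
    Trainer,
    TrainingArguments,
)

model = AutoModelForSequenceClassification.from_pretrained(
    "xlm-roberta-base",
    num_labels=6,
    problem_type="multi_label_classification",
)
tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")

args = TrainingArguments(
    output_dir="/content/drive/MyDrive",   # matches best_model_checkpoint above
    num_train_epochs=10,                   # max_steps 9240 = 10 * 924
    per_device_train_batch_size=16,        # train_batch_size in the state
    learning_rate=5e-6,                    # consistent with 4.5e-6 left after epoch 1/10
    eval_strategy="epoch",                 # eval entries appear once per epoch
    save_strategy="epoch",
    logging_strategy="epoch",
    load_best_model_at_end=True,
    metric_for_best_model="eval_loss",     # best_metric tracks eval_loss
)

# train_dataset, eval_dataset and compute_metrics are omitted in this sketch;
# the eval records in the state include accuracy, precision, recall and F1.
trainer = Trainer(
    model=model,
    args=args,
    callbacks=[EarlyStoppingCallback(early_stopping_patience=2)],
)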
checkpoint-2772/training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d6c9b99ed9f50a85ea3cfb046c9b532d2109d14a126aa90a905841854114b554
size 5176
checkpoint-3696/config.json
ADDED
@@ -0,0 +1,45 @@
{
  "_name_or_path": "xlm-roberta-base",
  "architectures": [
    "XLMRobertaForSequenceClassification"
  ],
  "attention_probs_dropout_prob": 0.1,
  "bos_token_id": 0,
  "classifier_dropout": null,
  "eos_token_id": 2,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "id2label": {
    "0": "LABEL_0",
    "1": "LABEL_1",
    "2": "LABEL_2",
    "3": "LABEL_3",
    "4": "LABEL_4",
    "5": "LABEL_5"
  },
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "label2id": {
    "LABEL_0": 0,
    "LABEL_1": 1,
    "LABEL_2": 2,
    "LABEL_3": 3,
    "LABEL_4": 4,
    "LABEL_5": 5
  },
  "layer_norm_eps": 1e-05,
  "max_position_embeddings": 514,
  "model_type": "xlm-roberta",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "output_past": true,
  "pad_token_id": 1,
  "position_embedding_type": "absolute",
  "problem_type": "multi_label_classification",
  "torch_dtype": "float32",
  "transformers_version": "4.46.0",
  "type_vocab_size": 1,
  "use_cache": true,
  "vocab_size": 250002
}
checkpoint-3696/model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:51a09eddbb88499a94d3e8531491ab88e457b5d0ab74d8129b58c19891e2254e
size 1112217312
checkpoint-3696/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a4750d2ef6f9083f7296a7fcbb7a25d3a29a00c11481cd6fe792a3688e02f841
size 14244
checkpoint-3696/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7be148d6578622b9a23ead50beea9d3af5dfc75be4998df2c499217cef45c00e
size 1064
checkpoint-3696/trainer_state.json
ADDED
@@ -0,0 +1,118 @@
{
  "best_metric": 0.04969688504934311,
  "best_model_checkpoint": "/content/drive/MyDrive/checkpoint-3696",
  "epoch": 4.0,
  "eval_steps": 100,
  "global_step": 3696,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 5.851963043212891,
      "learning_rate": 4.5e-06,
      "loss": 0.261,
      "step": 924
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9312026002166848,
      "eval_f1": 0.9304556698132427,
      "eval_loss": 0.10791309922933578,
      "eval_precision": 0.9337159575536489,
      "eval_recall": 0.9312026002166848,
      "eval_runtime": 49.9811,
      "eval_samples_per_second": 36.934,
      "eval_steps_per_second": 2.321,
      "step": 924
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.24086147546768188,
      "learning_rate": 4.000000000000001e-06,
      "loss": 0.0812,
      "step": 1848
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9582881906825569,
      "eval_f1": 0.9581837801589876,
      "eval_loss": 0.06059493124485016,
      "eval_precision": 0.9585273431262236,
      "eval_recall": 0.9582881906825569,
      "eval_runtime": 49.6996,
      "eval_samples_per_second": 37.143,
      "eval_steps_per_second": 2.334,
      "step": 1848
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.1482285112142563,
      "learning_rate": 3.5e-06,
      "loss": 0.0501,
      "step": 2772
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9544962080173348,
      "eval_f1": 0.9543281540493387,
      "eval_loss": 0.05848415940999985,
      "eval_precision": 0.9552102707240017,
      "eval_recall": 0.9544962080173348,
      "eval_runtime": 49.6249,
      "eval_samples_per_second": 37.199,
      "eval_steps_per_second": 2.338,
      "step": 2772
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.11282943189144135,
      "learning_rate": 3e-06,
      "loss": 0.0371,
      "step": 3696
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9626218851570965,
      "eval_f1": 0.9623565485461932,
      "eval_loss": 0.04969688504934311,
      "eval_precision": 0.9626465758175986,
      "eval_recall": 0.9626218851570965,
      "eval_runtime": 49.5559,
      "eval_samples_per_second": 37.251,
      "eval_steps_per_second": 2.341,
      "step": 3696
    }
  ],
  "logging_steps": 100,
  "max_steps": 9240,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 100,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 2,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.554515942547456e+16,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}
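Compared with checkpoint-2772, this later checkpoint adds the epoch-4 entries and improves the best eval_loss from roughly 0.0585 to 0.0497 (eval F1 about 0.962). A small sketch for pulling the best checkpoint and the evaluation curve out of such a state file; the local path is illustrative:

# A minimal sketch: summarise a Hugging Face trainer_state.json.
# The path is illustrative; point it at either checkpoint in this commit.
import json

with open("checkpoint-3696/trainer_state.json", "r", encoding="utf-8") as fh:
    state = json.load(fh)

print("best checkpoint:", state["best_model_checkpoint"])
print("best metric (eval_loss):", state["best_metric"])

# Entries with eval_* keys are evaluation records; the rest are training logs.
for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(
            f"epoch {entry['epoch']:.0f}: "
            f"loss={entry['eval_loss']:.4f}  f1={entry['eval_f1']:.4f}"
        )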
checkpoint-3696/training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d6c9b99ed9f50a85ea3cfb046c9b532d2109d14a126aa90a905841854114b554
size 5176
cleaned_anger_train.xlsx
ADDED
Binary file (817 kB).
config.json
ADDED
@@ -0,0 +1,29 @@
{
  "_name_or_path": "xlm-roberta-base",
  "architectures": [
    "XLMRobertaForSequenceClassification"
  ],
  "attention_probs_dropout_prob": 0.1,
  "bos_token_id": 0,
  "classifier_dropout": null,
  "eos_token_id": 2,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "layer_norm_eps": 1e-05,
  "max_position_embeddings": 514,
  "model_type": "xlm-roberta",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "output_past": true,
  "pad_token_id": 1,
  "position_embedding_type": "absolute",
  "problem_type": "multi_label_classification",
  "torch_dtype": "float32",
  "transformers_version": "4.45.2",
  "type_vocab_size": 1,
  "use_cache": true,
  "vocab_size": 250002
}
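Unlike the checkpoint configs, this top-level config.json carries no id2label/label2id mapping, so the number and names of labels cannot be read from it directly. One hedged way to check what head the accompanying model.safetensors actually stores is to read tensor shapes from the safetensors header without loading the weights; the local path and the safetensors package are assumptions of this sketch:

# A minimal sketch: list classifier tensor shapes from a safetensors file
# without loading the weights, to see how many outputs the head has.
# The local path is an assumption; this file is an LFS blob in the repo.
from safetensors import safe_open

with safe_open("model.safetensors", framework="pt", device="cpu") as f:
    for name in f.keys():
        if "classifier" in name:
            # get_slice exposes the shape without materialising the tensor
            print(name, f.get_slice(name).get_shape())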
events.out.tfevents.1729344456.e68f58a32709.1290.2
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:625d00840be1e57493fa43f8a042729d8b949f3e93bceee9c52b440ce9010bf3
size 6367
events.out.tfevents.1729349929.e68f58a32709.33818.0
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5bcff8fcefafcd2372ff6e9ef239b65b16f12ebc8d156ba74159141ad83105c4
size 8087
events.out.tfevents.1729362050.e68f58a32709.84141.0
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6b6ac395189d44f9a141a07a8e6a37d812f6798bab7522f10eacc2282102da33
size 7404
events.out.tfevents.1730058868.7ef9627f8aa1.700.0
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:26ee7d420a28578202896069cc36931a02c66b54529cd29ce4ac1855bdf525e6
size 88
guiltbert_train.xlsx
ADDED
Binary file (269 kB).
model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a2e0fd8a7b04dc08c005a4cc83ddbeb6d29b7aca0275d6286eb32a6ceb01359f
size 1112205008
mores_parl_speech_1998_2002_xlmRoberta_predictions.xlsx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d3ad0bf12c8238c2d90305338c6b0edba91ea1b23a959160ed6cced917da12b7
size 98915510
mores_parl_speech_2010_2014_part_0_predictions.xlsx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4d382f2f650fe2346f76ae7fbbdd88d2d7fec58f7330d30db9fe20bda3c79819
size 74245658
pooled_v4_xlmRoberta_training.xlsx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5c641f391ef8fa3c68a7509cc06d3d56cb2dd58d4955f0a46b89359628d78afa
size 8796228
test_data.xlsx
ADDED
Binary file (128 kB).
training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d91465c5a6bad4d928dd773ccf8231c79bfb32b197332c29d7457d91c0bda618
size 5176
xlm_roberta_polish_train_20241017.xlsx
ADDED
Binary file (886 kB).