lunadebruyne committed
Commit 23ca94b · verified · 1 parent: 2d8e2bb

lunadebruyne/fine_tuned_model

README.md ADDED
@@ -0,0 +1,50 @@
+ ---
+ base_model: j-hartmann/emotion-english-distilroberta-base
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: test_trainer
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # test_trainer
+
+ This model is a fine-tuned version of [j-hartmann/emotion-english-distilroberta-base](https://huggingface.co/j-hartmann/emotion-english-distilroberta-base) on an unknown dataset.
+
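Since the card's descriptive sections below are still placeholders, the sketch that follows only illustrates how the committed checkpoint could be loaded for inference. It assumes the files in this commit are served from the `lunadebruyne/fine_tuned_model` repo named in the commit header; the input text and printed score are illustrative, and labels come back as the generic `LABEL_0`–`LABEL_7` names defined in `config.json`.

```python
from transformers import pipeline

# Load the fine-tuned sequence-classification checkpoint added in this commit.
# Repo id taken from the commit header; adjust if the model lives elsewhere.
classifier = pipeline("text-classification", model="lunadebruyne/fine_tuned_model")

result = classifier("I can't believe how well this turned out!")
print(result)
# e.g. [{'label': 'LABEL_3', 'score': 0.87}]  -- illustrative output only
```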
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 3.0
+
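A minimal sketch of how the hyperparameters listed above map onto `transformers.TrainingArguments`; the model, datasets, and output directory are placeholder assumptions, since the actual training script is not part of this commit.

```python
from transformers import TrainingArguments, Trainer

# Reconstruction of the reported hyperparameters; anything not listed in the
# card (datasets, model, output_dir) is a placeholder assumption.
training_args = TrainingArguments(
    output_dir="test_trainer",          # matches the model-index name above
    learning_rate=5e-5,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    seed=42,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="linear",
    num_train_epochs=3.0,
)

# trainer = Trainer(
#     model=model,                      # RobertaForSequenceClassification, 8 labels
#     args=training_args,
#     train_dataset=train_dataset,      # unknown dataset per the card
#     eval_dataset=eval_dataset,
# )
# trainer.train()
```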
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.37.2
+ - Pytorch 2.1.0+cu121
+ - Tokenizers 0.15.2
config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "_name_or_path": "j-hartmann/emotion-english-distilroberta-base",
+   "architectures": [
+     "RobertaForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2",
+     "3": "LABEL_3",
+     "4": "LABEL_4",
+     "5": "LABEL_5",
+     "6": "LABEL_6",
+     "7": "LABEL_7"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2,
+     "LABEL_3": 3,
+     "LABEL_4": 4,
+     "LABEL_5": 5,
+     "LABEL_6": 6,
+     "LABEL_7": 7
+   },
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 6,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.37.2",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 50265
+ }
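For reference, a short sketch of how this configuration can be inspected once the repo is available; the repo id is assumed from the commit header.

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("lunadebruyne/fine_tuned_model")

print(config.model_type)        # "roberta" (a 6-layer DistilRoBERTa encoder)
print(config.num_labels)        # 8
print(config.id2label)          # {0: "LABEL_0", ..., 7: "LABEL_7"}
print(config.problem_type)      # "single_label_classification"
```

Note that `id2label` still maps to the generic `LABEL_0`–`LABEL_7` names rather than emotion names, so predictions from this checkpoint will not be self-describing unless the mapping is overridden before use.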
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6c1e9b187d9539fd4e3270da88330e5ca87b5214a21cbb1e3e3fe65250419511
+ size 328510736
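The weights are stored through Git LFS, so the commit only records this pointer (SHA-256 digest and byte size). A rough sketch of checking a locally downloaded `model.safetensors` against the pointer; the local path is an assumption.

```python
import hashlib
from pathlib import Path

# Expected values copied from the Git LFS pointer above.
EXPECTED_SHA256 = "6c1e9b187d9539fd4e3270da88330e5ca87b5214a21cbb1e3e3fe65250419511"
EXPECTED_SIZE = 328510736

path = Path("model.safetensors")  # assumed local download location
data = path.read_bytes()

assert len(data) == EXPECTED_SIZE, "size mismatch with LFS pointer"
assert hashlib.sha256(data).hexdigest() == EXPECTED_SHA256, "hash mismatch with LFS pointer"
print("model.safetensors matches its Git LFS pointer")
```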
runs/Feb21_10-30-12_385d7f6a99a0/events.out.tfevents.1708511413.385d7f6a99a0.950.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0564e4bf9463372edff52d8d1ea0f8ee07393d9e20b787f7b3ff5964992418a4
+ size 4812
runs/Feb21_10-33-10_385d7f6a99a0/events.out.tfevents.1708511591.385d7f6a99a0.950.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2c6eb0b38547239c768c2db69d8722d9104430ccf377d62bbd7e648de25c52cb
+ size 4807
runs/Feb21_10-34-02_385d7f6a99a0/events.out.tfevents.1708511642.385d7f6a99a0.950.2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b730431c9000c16d12f5487dbb37dbb45b5eb3533f2265bc1b73c57a47d3bec6
+ size 4812
runs/Feb21_10-34-22_385d7f6a99a0/events.out.tfevents.1708511662.385d7f6a99a0.950.3 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4269ac68b4e7fcdd65bbb61d72202434734fcf1b245c11758c3f7de046d118ae
+ size 4815
runs/Feb21_10-38-01_385d7f6a99a0/events.out.tfevents.1708511882.385d7f6a99a0.950.4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b1b626ee40d7f5365aed8bb4b73ef69b096dc4fafe826b94de353c14ea7ba1dd
+ size 4815
runs/Feb21_10-38-17_385d7f6a99a0/events.out.tfevents.1708511899.385d7f6a99a0.950.5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:97a1643efe1f69d20b9a0f4f34152c9417e77111d7ac2052503702cb5e0a7970
+ size 4812
runs/Feb21_10-42-33_385d7f6a99a0/events.out.tfevents.1708512154.385d7f6a99a0.950.6 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0ed4361a58cd8cbcc9b99c1d39e38ccd922329a243600226c0c4bb204dd84fc9
+ size 4812
runs/Feb21_10-44-52_385d7f6a99a0/events.out.tfevents.1708512293.385d7f6a99a0.950.7 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d8c64003fe29a3538d855d08808355448d9c56a19c9e390f0ed394f0db3a1117
+ size 5160
runs/Feb21_10-45-56_385d7f6a99a0/events.out.tfevents.1708512356.385d7f6a99a0.950.8 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eba71926c41ddf049faed17b2cc4ad8b62bc3a8be30a1493600bc4fc96e706ba
+ size 5160
runs/Feb21_11-00-19_385d7f6a99a0/events.out.tfevents.1708513220.385d7f6a99a0.950.9 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:01831d6234e2796da557343de7f582e2d38898a8e5f5c7e22bfab8ea97b9b50a
+ size 5159
runs/Feb21_11-07-07_385d7f6a99a0/events.out.tfevents.1708513627.385d7f6a99a0.950.10 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fba7ae776135cf70a7a386ad1a90bdc6b22af10d5e344b663408d3899f2d250a
+ size 5159
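The `runs/` entries are TensorBoard event files, one per `Trainer` launch on host `385d7f6a99a0`. Below is a sketch of reading the logged scalars from a cloned copy of the repo; the tag names are assumptions about what the Trainer's TensorBoard callback logged.

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

# One of the run directories added in this commit (after cloning the repo locally).
run_dir = "runs/Feb21_11-07-07_385d7f6a99a0"

acc = EventAccumulator(run_dir)
acc.Reload()

print(acc.Tags()["scalars"])             # e.g. ['train/loss', 'train/learning_rate', ...]
for event in acc.Scalars("train/loss"):  # assumed tag name
    print(event.step, event.value)
```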
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:13bf981e216c86b498be2489efdab04025cf064ba4d9843f12036937b7cee8e7
+ size 4664
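`training_args.bin` is the pickled `TrainingArguments` object that the `Trainer` saves next to the model. A minimal sketch for inspecting it from a local clone (only unpickle files you trust):

```python
import torch

# On newer PyTorch releases torch.load restricts unpickling by default,
# so weights_only=False is passed explicitly; transformers must be installed
# for the TrainingArguments class to unpickle.
args = torch.load("training_args.bin", weights_only=False)

print(args.learning_rate)                 # 5e-05
print(args.per_device_train_batch_size)   # 8
print(args.num_train_epochs)              # 3.0
```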