hun3359 committed on
Commit
0af794b
·
1 Parent(s): c46081b

fine-tuning finished

Browse files
config.json ADDED
@@ -0,0 +1,150 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "bongsoo/mdistilbertV3.1",
3
+ "activation": "gelu",
4
+ "architectures": [
5
+ "DistilBertForSequenceClassification"
6
+ ],
7
+ "attention_dropout": 0.1,
8
+ "dim": 768,
9
+ "dropout": 0.1,
10
+ "hidden_dim": 3072,
11
+ "id2label":{
12
+ "0": "분노",
13
+ "1": "툴툴대는",
14
+ "2": "좌절한",
15
+ "3": "짜증내는",
16
+ "4": "방어적인",
17
+ "5": "악의적인",
18
+ "6": "안달하는",
19
+ "7": "구역질 나는",
20
+ "8": "노여워하는",
21
+ "9": "성가신",
22
+ "10": "슬픔",
23
+ "11": "실망한",
24
+ "12": "비통한",
25
+ "13": "후회되는",
26
+ "14": "우울한",
27
+ "15": "마비된",
28
+ "16": "염세적인",
29
+ "17": "눈물이 나는",
30
+ "18": "낙담한",
31
+ "19": "환멸을 느끼는",
32
+ "20": "불안",
33
+ "21": "두려운",
34
+ "22": "스트레스 받는",
35
+ "23": "취약한",
36
+ "24": "혼란스러운",
37
+ "25": "당혹스러운",
38
+ "26": "회의적인",
39
+ "27": "걱정스러운",
40
+ "28": "조심스러운",
41
+ "29": "초조한",
42
+ "30": "상처",
43
+ "31": "질투하는",
44
+ "32": "배신당한",
45
+ "33": "고립된",
46
+ "34": "충격 받은",
47
+ "35": "가난한 불우한",
48
+ "36": "희생된",
49
+ "37": "억울한",
50
+ "38": "괴로워하는",
51
+ "39": "버려진",
52
+ "40": "당황",
53
+ "41": "고립된(당황한)",
54
+ "42": "남의 시선을 의식하는",
55
+ "43": "외로운",
56
+ "44": "열등감",
57
+ "45": "죄책감의",
58
+ "46": "부끄러운",
59
+ "47": "혐오스러운",
60
+ "48": "한심한",
61
+ "49": "혼란스러운(당황한)",
62
+ "50": "기쁨",
63
+ "51": "감사하는",
64
+ "52": "신뢰하는",
65
+ "53": "편안한",
66
+ "54": "만족스러운",
67
+ "55": "흥분",
68
+ "56": "느긋",
69
+ "57": "안도",
70
+ "58": "신이 난",
71
+ "59": "자신하는"
72
+ },
73
+ "initializer_range": 0.02,
74
+ "label2id": {
75
+ "분노": 0,
76
+ "툴툴대는": 1,
77
+ "좌절한": 2,
78
+ "짜증내는": 3,
79
+ "방어적인": 4,
80
+ "악의적인": 5,
81
+ "안달하는": 6,
82
+ "구역질 나는": 7,
83
+ "노여워하는": 8,
84
+ "성가신": 9,
85
+ "슬픔": 10,
86
+ "실망한": 11,
87
+ "비통한": 12,
88
+ "후회되는": 13,
89
+ "우울한": 14,
90
+ "마비된": 15,
91
+ "염세적인": 16,
92
+ "눈물이 나는": 17,
93
+ "낙담한": 18,
94
+ "환멸을 느끼는": 19,
95
+ "불안": 20,
96
+ "두려운": 21,
97
+ "스트레스 받는": 22,
98
+ "취약한": 23,
99
+ "혼란스러운": 24,
100
+ "당혹스러운": 25,
101
+ "회의적인": 26,
102
+ "걱정스러운": 27,
103
+ "조심스러운": 28,
104
+ "초조한": 29,
105
+ "상처": 30,
106
+ "질투하는": 31,
107
+ "배신당한": 32,
108
+ "고립된": 33,
109
+ "충격 받은": 34,
110
+ "가난한 불우한": 35,
111
+ "희생된": 36,
112
+ "억울한": 37,
113
+ "괴로워하는": 38,
114
+ "버려진": 39,
115
+ "당황": 40,
116
+ "고립된(당황한)": 41,
117
+ "남의 시선을 의식하는": 42,
118
+ "외로운": 43,
119
+ "열등감": 44,
120
+ "죄책감의": 45,
121
+ "부끄러운": 46,
122
+ "혐오스러운": 47,
123
+ "한심한": 48,
124
+ "혼란스러운(당황한)": 49,
125
+ "기쁨": 50,
126
+ "감사하는": 51,
127
+ "신뢰하는": 52,
128
+ "편안한": 53,
129
+ "만족스러운": 54,
130
+ "흥분": 55,
131
+ "느긋": 56,
132
+ "안도": 57,
133
+ "신이 난": 58,
134
+ "자신하는": 59
135
+ },
136
+ "max_position_embeddings": 512,
137
+ "model_type": "distilbert",
138
+ "n_heads": 12,
139
+ "n_layers": 6,
140
+ "output_past": true,
141
+ "pad_token_id": 0,
142
+ "problem_type": "single_label_classification",
143
+ "qa_dropout": 0.1,
144
+ "seq_classif_dropout": 0.2,
145
+ "sinusoidal_pos_embds": false,
146
+ "tie_weights_": true,
147
+ "torch_dtype": "float32",
148
+ "transformers_version": "4.30.2",
149
+ "vocab_size": 159552
150
+ }
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bf12b671e861a45070e434481cf43254c40f8baa08750c050ee02b6b36b9dfe4
3
+ size 664414061
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "cls_token": "[CLS]",
3
+ "mask_token": "[MASK]",
4
+ "pad_token": "[PAD]",
5
+ "sep_token": "[SEP]",
6
+ "unk_token": "[UNK]"
7
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "clean_up_tokenization_spaces": true,
3
+ "cls_token": "[CLS]",
4
+ "do_basic_tokenize": true,
5
+ "do_lower_case": false,
6
+ "mask_token": "[MASK]",
7
+ "max_len": 128,
8
+ "model_max_length": 128,
9
+ "never_split": null,
10
+ "pad_token": "[PAD]",
11
+ "sep_token": "[SEP]",
12
+ "strip_accents": false,
13
+ "tokenize_chinese_chars": true,
14
+ "tokenizer_class": "DistilBertTokenizer",
15
+ "unk_token": "[UNK]"
16
+ }
vocab.txt ADDED
The diff for this file is too large to render. See raw diff