BounharAbdelaziz committed
Commit f7d8382
1 Parent(s): da0ba96

BounharAbdelaziz/Transliteration-Moroccan-Darija

README.md ADDED
@@ -0,0 +1,47 @@
+ ---
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: Transliteration-Moroccan-Darija
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # Transliteration-Moroccan-Darija
+
+ This model was trained from scratch on an unknown dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0005
+ - train_batch_size: 384
+ - eval_batch_size: 384
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_ratio: 0.01
+ - num_epochs: 30
+
+ ### Framework versions
+
+ - Transformers 4.39.2
+ - Pytorch 2.2.2+cpu
+ - Datasets 2.18.0
+ - Tokenizers 0.15.2
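
The generated card stops at the framework versions and gives no usage snippet. A minimal inference sketch, assuming the Hub repo id shown in this commit (`BounharAbdelaziz/Transliteration-Moroccan-Darija`) and an arbitrary Arabizi input string, neither of which comes from the card itself:

```python
# Sketch: load the encoder-decoder checkpoint and transliterate one string.
# The repo id comes from this commit; the sample input is illustrative only.
from transformers import AutoTokenizer, EncoderDecoderModel

repo_id = "BounharAbdelaziz/Transliteration-Moroccan-Darija"
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = EncoderDecoderModel.from_pretrained(repo_id)

text = "wach nta bikhir"  # hypothetical Arabizi input
inputs = tokenizer(text, return_tensors="pt")
output_ids = model.generate(
    input_ids=inputs.input_ids,
    attention_mask=inputs.attention_mask,
    max_length=128,
)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```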
added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "[CLS]": 86,
+   "[MASK]": 87,
+   "[SEP]": 85
+ }
config.json ADDED
@@ -0,0 +1,168 @@
+ {
+   "_name_or_path": "./BounharAbdelaziz/Transliteration-Moroccan-Darija/checkpoint-5000/",
+   "architectures": [
+     "EncoderDecoderModel"
+   ],
+   "decoder": {
+     "_name_or_path": "",
+     "add_cross_attention": true,
+     "architectures": null,
+     "attention_probs_dropout_prob": 0.1,
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bos_token_id": null,
+     "chunk_size_feed_forward": 0,
+     "classifier_dropout": null,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": null,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "hidden_act": "gelu",
+     "hidden_dropout_prob": 0.1,
+     "hidden_size": 512,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "initializer_range": 0.02,
+     "intermediate_size": 3072,
+     "is_decoder": true,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layer_norm_eps": 1e-12,
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "max_position_embeddings": 512,
+     "min_length": 0,
+     "model_type": "bert",
+     "no_repeat_ngram_size": 0,
+     "num_attention_heads": 8,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_hidden_layers": 4,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": 0,
+     "position_embedding_type": "absolute",
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "suppress_tokens": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": null,
+     "torchscript": false,
+     "type_vocab_size": 2,
+     "typical_p": 1.0,
+     "use_bfloat16": false,
+     "use_cache": true,
+     "vocab_size": 30522
+   },
+   "decoder_start_token_id": 15,
+   "encoder": {
+     "_name_or_path": "",
+     "add_cross_attention": false,
+     "architectures": null,
+     "attention_probs_dropout_prob": 0.1,
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bos_token_id": null,
+     "chunk_size_feed_forward": 0,
+     "classifier_dropout": null,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": null,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "hidden_act": "gelu",
+     "hidden_dropout_prob": 0.1,
+     "hidden_size": 512,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "initializer_range": 0.02,
+     "intermediate_size": 3072,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layer_norm_eps": 1e-12,
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "max_position_embeddings": 512,
+     "min_length": 0,
+     "model_type": "bert",
+     "no_repeat_ngram_size": 0,
+     "num_attention_heads": 8,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_hidden_layers": 4,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": 0,
+     "position_embedding_type": "absolute",
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "suppress_tokens": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": null,
+     "torchscript": false,
+     "type_vocab_size": 2,
+     "typical_p": 1.0,
+     "use_bfloat16": false,
+     "use_cache": true,
+     "vocab_size": 30522
+   },
+   "is_encoder_decoder": true,
+   "model_type": "encoder-decoder",
+   "pad_token_id": 14,
+   "torch_dtype": "float32",
+   "transformers_version": "4.39.2"
+ }
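
Both the `encoder` and `decoder` blocks above describe a small BERT stack (4 hidden layers, 8 attention heads, hidden size 512, intermediate size 3072). A sketch of assembling an equivalent, randomly initialized encoder-decoder with standard `transformers` classes; how the author actually instantiated the model is not recorded in this commit, so the construction path below is an assumption:

```python
# Sketch: build an encoder-decoder with the dimensions listed in config.json.
# This mirrors the stored config; it is not necessarily how the checkpoint was created.
from transformers import BertConfig, EncoderDecoderConfig, EncoderDecoderModel

bert_kwargs = dict(
    hidden_size=512,
    num_hidden_layers=4,
    num_attention_heads=8,
    intermediate_size=3072,
    max_position_embeddings=512,
    vocab_size=30522,
)
encoder_cfg = BertConfig(**bert_kwargs)
decoder_cfg = BertConfig(**bert_kwargs, is_decoder=True, add_cross_attention=True)

config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_cfg, decoder_cfg)
config.decoder_start_token_id = 15  # <sos> in this repo's vocab
config.pad_token_id = 14            # <pad>

model = EncoderDecoderModel(config=config)  # randomly initialized weights
print(model.num_parameters())
```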
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "decoder_start_token_id": 15,
+   "pad_token_id": 14,
+   "transformers_version": "4.39.2"
+ }
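
The generation config pins `decoder_start_token_id` to 15 (`<sos>`) and `pad_token_id` to 14 (`<pad>`), which `generate()` picks up by default. A short sketch of reading it explicitly, assuming the repo id from this commit:

```python
# Sketch: inspect the defaults that generate() will use for this checkpoint.
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("BounharAbdelaziz/Transliteration-Moroccan-Darija")
print(gen_cfg.decoder_start_token_id, gen_cfg.pad_token_id)  # 15 14
```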
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8ececda637307182678f8138549fe6e6a9a1bc86299ef7c211595b3807811187
+ size 280671840
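
The three lines above are a Git LFS pointer, not the weights themselves; the actual ~280 MB `model.safetensors` blob lives in the LFS store. A sketch of fetching and inspecting it with `huggingface_hub` and `safetensors`, assuming the repo id from this commit:

```python
# Sketch: resolve the LFS pointer to the real safetensors file and list its tensors.
from huggingface_hub import hf_hub_download
from safetensors import safe_open

path = hf_hub_download(
    repo_id="BounharAbdelaziz/Transliteration-Moroccan-Darija",
    filename="model.safetensors",
)
with safe_open(path, framework="pt") as f:
    print(len(f.keys()), "tensors stored")
```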
special_tokens_map.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "bos_token": "<sos>",
+   "cls_token": "[CLS]",
+   "eos_token": "<eos>",
+   "mask_token": "[MASK]",
+   "pad_token": "<pad>",
+   "sep_token": "[SEP]",
+   "unk_token": "<unk>"
+ }
tokenizer.json ADDED
@@ -0,0 +1,267 @@
+ {
+   "version": "1.0",
+   "truncation": {
+     "direction": "Right",
+     "max_length": 128,
+     "strategy": "LongestFirst",
+     "stride": 0
+   },
+   "padding": {
+     "strategy": {
+       "Fixed": 128
+     },
+     "direction": "Right",
+     "pad_to_multiple_of": null,
+     "pad_id": 14,
+     "pad_type_id": 0,
+     "pad_token": "<pad>"
+   },
+   "added_tokens": [
+     {
+       "id": 13,
+       "content": "<eos>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 14,
+       "content": "<pad>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 15,
+       "content": "<sos>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 16,
+       "content": "<unk>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 85,
+       "content": "[SEP]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 86,
+       "content": "[CLS]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 87,
+       "content": "[MASK]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     }
+   ],
+   "normalizer": {
+     "type": "BertNormalizer",
+     "clean_text": true,
+     "handle_chinese_chars": true,
+     "strip_accents": null,
+     "lowercase": true
+   },
+   "pre_tokenizer": {
+     "type": "BertPreTokenizer"
+   },
+   "post_processor": {
+     "type": "TemplateProcessing",
+     "single": [
+       {
+         "SpecialToken": {
+           "id": "[CLS]",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 0
+         }
+       },
+       {
+         "SpecialToken": {
+           "id": "[SEP]",
+           "type_id": 0
+         }
+       }
+     ],
+     "pair": [
+       {
+         "SpecialToken": {
+           "id": "[CLS]",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 0
+         }
+       },
+       {
+         "SpecialToken": {
+           "id": "[SEP]",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "B",
+           "type_id": 1
+         }
+       },
+       {
+         "SpecialToken": {
+           "id": "[SEP]",
+           "type_id": 1
+         }
+       }
+     ],
+     "special_tokens": {
+       "[CLS]": {
+         "id": "[CLS]",
+         "ids": [
+           16
+         ],
+         "tokens": [
+           "[CLS]"
+         ]
+       },
+       "[SEP]": {
+         "id": "[SEP]",
+         "ids": [
+           16
+         ],
+         "tokens": [
+           "[SEP]"
+         ]
+       }
+     }
+   },
+   "decoder": {
+     "type": "WordPiece",
+     "prefix": "##",
+     "cleanup": true
+   },
+   "model": {
+     "type": "WordPiece",
+     "unk_token": "<unk>",
+     "continuing_subword_prefix": "##",
+     "max_input_chars_per_word": 100,
+     "vocab": {
+       " ": 4,
+       "!": 5,
+       "+": 6,
+       "2": 7,
+       "3": 8,
+       "5": 9,
+       "7": 10,
+       "8": 11,
+       "9": 12,
+       "<eos>": 13,
+       "<pad>": 14,
+       "<sos>": 15,
+       "<unk>": 16,
+       "a": 17,
+       "b": 18,
+       "c": 19,
+       "d": 20,
+       "e": 21,
+       "f": 22,
+       "g": 23,
+       "h": 24,
+       "i": 25,
+       "j": 26,
+       "k": 27,
+       "l": 28,
+       "m": 29,
+       "n": 30,
+       "o": 31,
+       "p": 32,
+       "q": 33,
+       "r": 34,
+       "s": 35,
+       "t": 36,
+       "u": 37,
+       "v": 38,
+       "w": 39,
+       "x": 40,
+       "y": 41,
+       "z": 42,
+       "ç": 43,
+       "è": 44,
+       "é": 45,
+       "û": 46,
+       "ء": 47,
+       "آ": 48,
+       "أ": 49,
+       "ؤ": 50,
+       "إ": 51,
+       "ئ": 52,
+       "ا": 53,
+       "ب": 54,
+       "ة": 55,
+       "ت": 56,
+       "ث": 57,
+       "ج": 58,
+       "ح": 59,
+       "خ": 60,
+       "د": 61,
+       "ذ": 62,
+       "ر": 63,
+       "ز": 64,
+       "س": 65,
+       "ش": 66,
+       "ص": 67,
+       "ض": 68,
+       "ط": 69,
+       "ظ": 70,
+       "ع": 71,
+       "غ": 72,
+       "ف": 73,
+       "ق": 74,
+       "ك": 75,
+       "ل": 76,
+       "م": 77,
+       "ن": 78,
+       "ه": 79,
+       "و": 80,
+       "ى": 81,
+       "ي": 82,
+       "ُ": 83,
+       "ّ": 84,
+       "پ": 85,
+       "ڤ": 86,
+       "ڭ": 87,
+       "گ": 88
+     }
+   }
+ }
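
The WordPiece vocabulary above is essentially character-level: single Latin and Arabic characters plus a few special tokens, with truncation and fixed padding to length 128. A sketch of loading this file with the standalone `tokenizers` library; the input string is illustrative only:

```python
# Sketch: load tokenizer.json directly and inspect an encoding
# (the post-processor adds [CLS]/[SEP]; padding is fixed at 128, see above).
from tokenizers import Tokenizer

tok = Tokenizer.from_file("tokenizer.json")
enc = tok.encode("s a l a m")  # illustrative input; single characters are in the vocab
print(enc.tokens[:8])          # [CLS], the character tokens, [SEP], then <pad> up to 128
print(len(enc.ids))            # 128
```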
tokenizer_config.json ADDED
@@ -0,0 +1,83 @@
+ {
+   "added_tokens_decoder": {
+     "13": {
+       "content": "<eos>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "14": {
+       "content": "<pad>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "15": {
+       "content": "<sos>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "16": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "85": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "86": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "87": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<sos>",
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_basic_tokenize": true,
+   "do_lower_case": true,
+   "eos_token": "<eos>",
+   "mask_token": "[MASK]",
+   "max_length": 128,
+   "model_max_length": 1000000000000000019884624838656,
+   "never_split": null,
+   "pad_to_multiple_of": null,
+   "pad_token": "<pad>",
+   "pad_token_type_id": 0,
+   "padding_side": "right",
+   "sep_token": "[SEP]",
+   "stride": 0,
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "trainable": true,
+   "truncation_side": "right",
+   "truncation_strategy": "longest_first",
+   "unk_token": "<unk>"
+ }
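
The config above registers the files as a `BertTokenizer` with custom `<sos>`/`<eos>`/`<pad>`/`<unk>` tokens. A sketch of checking those settings after loading from a local checkout of the repo (the local path is an assumption):

```python
# Sketch: confirm the declared special tokens and pad id once the tokenizer is loaded.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")  # path to a local clone of this repo (assumed)
print(tok.bos_token, tok.eos_token, tok.pad_token)  # <sos> <eos> <pad>
print(tok.pad_token_id)                             # 14
print(tok.model_max_length)                         # the unbounded sentinel shown above
```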
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:451ec24aae7e96054ac1348fb7c1ce2acc36f6242c9a05fe6fb4a94c07f2d26f
+ size 5048
vocab.txt ADDED
@@ -0,0 +1,85 @@
+
+ !
+ +
+ 2
+ 3
+ 5
+ 7
+ 8
+ 9
+ <eos>
+ <pad>
+ <sos>
+ <unk>
+ a
+ b
+ c
+ d
+ e
+ f
+ g
+ h
+ i
+ j
+ k
+ l
+ m
+ n
+ o
+ p
+ q
+ r
+ s
+ t
+ u
+ v
+ w
+ x
+ y
+ z
+ ç
+ è
+ é
+ û
+ ء
+ آ
+ أ
+ ؤ
+ إ
+ ئ
+ ا
+ ب
+ ة
+ ت
+ ث
+ ج
+ ح
+ خ
+ د
+ ذ
+ ر
+ ز
+ س
+ ش
+ ص
+ ض
+ ط
+ ظ
+ ع
+ غ
+ ف
+ ق
+ ك
+ ل
+ م
+ ن
+ ه
+ و
+ ى
+ ي
+ ُ
+ ّ
+ پ
+ ڤ
+ ڭ
+ گ