smohammadi committed
Commit d1a1925
1 Parent(s): 4ce1493

Model save

Files changed (3)
  1. README.md +4 -6
  2. special_tokens_map.json +0 -7
  3. tokenizer_config.json +2 -4
README.md CHANGED
@@ -19,8 +19,8 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [TinyLlama/TinyLlama_v1.1](https://huggingface.co/TinyLlama/TinyLlama_v1.1) on an unknown dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.7411
-- Accuracy: 0.5943
+- Loss: 0.6514
+- Accuracy: 0.625
 
 ## Model description
 
@@ -45,15 +45,13 @@ The following hyperparameters were used during training:
 - seed: 42
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
-- num_epochs: 2.0
+- num_epochs: 1.0
 
 ### Training results
 
 | Training Loss | Epoch  | Step | Validation Loss | Accuracy |
 |:-------------:|:------:|:----:|:---------------:|:--------:|
-| 0.524         | 0.6410 | 100  | 0.6454          | 0.6373   |
-| 0.3259        | 1.2821 | 200  | 0.6920          | 0.6066   |
-| 0.237         | 1.9231 | 300  | 0.7411          | 0.5943   |
+| 0.6033        | 0.6410 | 100  | 0.6514          | 0.625    |
 
 
 ### Framework versions
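
For context, the hyperparameters in the hunk above map directly onto `transformers` training options. A minimal sketch, assuming the standard `Trainer`/`TrainingArguments` API was used (the commit does not include the training script, and `output_dir` is a hypothetical name):

```python
from transformers import TrainingArguments

# Sketch only: reproduces the hyperparameters shown in the README hunk.
# Values not present in the diff (e.g. output_dir) are assumptions.
args = TrainingArguments(
    output_dir="tinyllama-finetune",  # hypothetical
    seed=42,
    num_train_epochs=1.0,             # was 2.0 before this commit
    lr_scheduler_type="linear",
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
)
```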
special_tokens_map.json CHANGED
@@ -13,13 +13,6 @@
     "rstrip": false,
     "single_word": false
   },
-  "pad_token": {
-    "content": "</s>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
   "unk_token": {
     "content": "<unk>",
     "lstrip": false,
tokenizer_config.json CHANGED
@@ -1,7 +1,7 @@
 {
   "add_bos_token": true,
   "add_eos_token": false,
-  "add_prefix_space": true,
+  "add_prefix_space": null,
   "added_tokens_decoder": {
     "0": {
       "content": "<unk>",
@@ -29,15 +29,13 @@
     }
   },
   "bos_token": "<s>",
-  "chat_template": "{% for message in messages %}{{message['content']}}{% endfor %}{{eos_token}}",
   "clean_up_tokenization_spaces": false,
   "eos_token": "</s>",
   "legacy": false,
   "model_max_length": 1000000000000000019884624838656,
-  "pad_token": "</s>",
+  "pad_token": null,
   "padding_side": "right",
   "sp_model_kwargs": {},
-  "spaces_between_special_tokens": false,
   "tokenizer_class": "LlamaTokenizer",
   "unk_token": "<unk>",
   "use_default_system_prompt": false