PaulD committed (verified)
Commit 183d7f4 · 1 Parent(s): a26a8f7

End of training
README.md CHANGED
```diff
@@ -18,12 +18,12 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [meta-llama/Meta-Llama-3-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct) on the None dataset.
 It achieves the following results on the evaluation set:
-- Loss: 1.6916
-- Eval/rewards/chosen: -0.4209
-- Eval/logps/chosen: -211.7889
-- Eval/rewards/rejected: -0.4483
-- Eval/logps/rejected: -226.0602
-- Eval/rewards/margins: 0.0274
+- Loss: 0.5931
+- Eval/rewards/chosen: 0.0
+- Eval/logps/chosen: -192.8253
+- Eval/rewards/rejected: 0.0
+- Eval/logps/rejected: -229.8692
+- Eval/rewards/margins: 0.0
 - Eval/kl: 0.0
 
 ## Model description
@@ -43,27 +43,22 @@ More information needed
 ### Training hyperparameters
 
 The following hyperparameters were used during training:
-- learning_rate: 1e-05
+- learning_rate: 0.0
 - train_batch_size: 1
 - eval_batch_size: 2
-- seed: 9012
+- seed: 1234
 - gradient_accumulation_steps: 8
 - total_train_batch_size: 8
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: cosine
 - lr_scheduler_warmup_ratio: 0.1
-- num_epochs: 6.0
+- num_epochs: 1.0
 
 ### Training results
 
-| Training Loss | Epoch  | Step | Validation Loss |        |
-|:-------------:|:------:|:----:|:---------------:|:------:|
-| 1.074         | 0.9412 | 12   | 1.9606          | 0.0013 |
-| 1.0223        | 1.9608 | 25   | 1.8546          | 0.0    |
-| 0.9832        | 2.9804 | 38   | 1.7737          | 0.0    |
-| 0.9095        | 4.0    | 51   | 1.6998          | 0.0    |
-| 0.9155        | 4.9412 | 63   | 1.6833          | 0.0    |
-| 0.5901        | 5.6471 | 72   | 1.6916          | 0.0    |
+| Training Loss | Epoch | Step | Validation Loss |     |
+|:-------------:|:-----:|:----:|:---------------:|:---:|
+| 0.8125        | 0.96  | 12   | 0.5931          | 0.0 |
 
 
 ### Framework versions
```
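The hyperparameter list in the updated README maps directly onto `transformers.TrainingArguments`. Below is a minimal sketch of that mapping, not the author's actual script: the output directory is a placeholder, and the trainer in use is not named by the README (the `Eval/rewards/*` and `Eval/kl` fields resemble TRL KTO-style logging, but that is an inference).

```python
# Minimal sketch of the hyperparameters in the updated README, expressed
# as transformers.TrainingArguments. output_dir is hypothetical; the
# other values are copied from the list above.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="kto_output",        # hypothetical; not in the README
    learning_rate=0.0,              # as recorded (1e-05 in the old run)
    per_device_train_batch_size=1,  # train_batch_size: 1
    per_device_eval_batch_size=2,   # eval_batch_size: 2
    gradient_accumulation_steps=8,  # total_train_batch_size = 1 * 8 = 8
    seed=1234,                      # 9012 in the old run
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    num_train_epochs=1.0,
    # Adam with betas=(0.9, 0.999) and epsilon=1e-08 is the stock
    # transformers default, so no explicit optimizer settings are needed.
)
```

Note that with `learning_rate=0.0` every optimizer step is a zero update regardless of the cosine schedule, which would be consistent with the all-zero rewards, margins, and KL in the new evaluation block.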
adapter_config.json CHANGED
```diff
@@ -20,10 +20,10 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
+    "v_proj",
     "o_proj",
     "q_proj",
-    "k_proj",
-    "v_proj"
+    "k_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
```
adapter_model.safetensors CHANGED
```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0d4f4801933fb10ed34a020af01f57536309084c7d678224b7c23af10b7bcf2a
+oid sha256:42d136e0b0e73d38dc7c67bedb6732e527d052b8c2640d6e3f1b100b2c46a2a8
 size 27297544
```
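The safetensors file is stored as a Git LFS pointer, so only the `oid` changes when the ~27 MB adapter is retrained. Loading it on top of the base model is the standard two-step PEFT pattern; the repo id below is a placeholder, since the commit view does not show it:

```python
# Sketch of loading the updated adapter weights.
# "PaulD/<this-repo>" is a placeholder for the actual Hub repo id.
import torch
from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Meta-Llama-3-8B-Instruct",
    torch_dtype=torch.bfloat16,
)
model = PeftModel.from_pretrained(base, "PaulD/<this-repo>")
```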
metrics.jsonl CHANGED
```diff
@@ -3,3 +3,4 @@
 {"epoch": 4.0, "precision": 0.45999999908, "recall": 0.9583333293402777, "fold": 0}
 {"epoch": 4.9411764705882355, "precision": 0.47058823437139563, "recall": 0.9999999958333333, "fold": 0}
 {"epoch": 5.647058823529412, "precision": 0.4038461530695266, "recall": 0.8749999963541666, "fold": 0}
+{"epoch": 0.96, "precision": 0.41463414533016063, "recall": 0.9444444391975308, "fold": 0}
```
metrics_epoch_0.96_fold_0_lr_0.0_seed_1234_weight_2.0.json ADDED
```diff
@@ -0,0 +1 @@
+{"epoch": 0.96, "precision": 0.41463414533016063, "recall": 0.9444444391975308, "fold": 0}
```
results_epoch_0.96_fold_0_lr_0.0_seed_1234_weight_2.0.json ADDED
The diff for this file is too large to render. See raw diff
 
training_args.bin CHANGED
```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:12c8fd0a4b73198cfb183f8b9c6d47cfa697f16811760ffce0155941f1d89d93
+oid sha256:244803aebbac509d36d6c35fbb1f4fd5d59da2c39dd3fac5cfd69ab09a7bfe4d
 size 5688
```
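`training_args.bin` is another LFS pointer; the underlying 5.7 kB file is the pickled `TrainingArguments` object that `transformers` saves alongside checkpoints, so it can be inspected directly. Since unpickling executes code, only do this for checkpoints you trust:

```python
# Sketch: inspect the pickled TrainingArguments. weights_only=False is
# required on recent PyTorch, and unpickling runs arbitrary code, so
# only load files from sources you trust.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.seed, args.num_train_epochs)
```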