shibajustfor committed (verified)
Commit 1852c50 · 1 parent: cc5b600

Training in progress, step 100, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1f2e24d7f9e7b8fa39718c8a0b447f45ceb462e74a815912dbb2b3520897457c
+oid sha256:e647199bf95ee61d9fe9858f2dcda1f1245d807ddf6538274bf7cb73fe842cd4
 size 80792096
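
The file name adapter_model.safetensors follows the PEFT convention for LoRA-style adapters, so the checkpoint should load on top of its base model. A minimal sketch, assuming the checkpoint directory has been pulled locally as last-checkpoint/ and that PEFT is installed; the base-model identifier below is a placeholder, since this commit does not record which base model was used:

from transformers import AutoModelForCausalLM
from peft import PeftModel

# Placeholder name: the base model is not recorded in this commit.
base = AutoModelForCausalLM.from_pretrained("BASE_MODEL_ID")

# Attach the LoRA adapter weights stored in adapter_model.safetensors.
model = PeftModel.from_pretrained(base, "last-checkpoint")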
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:178adc6b783290c57d0820c548006474b2bc4aed85bee44ae8d119262c589681
+oid sha256:863546bd77606ffcfbb0b238fee97fa9dc32bdcfedcc7825995f8faf77537f39
 size 41459700
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d19cc634d572060c7c54ed88334b3af6634a5781c94789f45d6e1b43c72a5b75
+oid sha256:40bfb7a2bb9e09979ca0850fe7db152528103f342b31a7d7161ea872baf3db7f
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9b80fcc7599efca0c6313d990c467c2eb3001742b23ddaadc22e3499c12cea79
+oid sha256:81007ec48272bbdc4f9622c046f9c026bf8120ed11d1398fd97bb5168a6f3dda
 size 1064
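
All four blobs above are tracked with Git LFS, so the repository stores only small pointer files: the oid line is the SHA-256 of the real payload and the size line is its length in bytes. A quick verification sketch, assuming the actual payloads have already been fetched (for example with git lfs pull) into last-checkpoint/:

import hashlib
from pathlib import Path

def verify_lfs_pointer(pointer_text: str, payload_path: Path) -> bool:
    # Parse the "oid sha256:<hex>" and "size <bytes>" fields of the pointer file.
    fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
    expected_oid = fields["oid"].split(":", 1)[1]
    expected_size = int(fields["size"])
    data = payload_path.read_bytes()
    return len(data) == expected_size and hashlib.sha256(data).hexdigest() == expected_oid

# Example: check the new optimizer state against the pointer committed here.
pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:863546bd77606ffcfbb0b238fee97fa9dc32bdcfedcc7825995f8faf77537f39\n"
    "size 41459700\n"
)
print(verify_lfs_pointer(pointer, Path("last-checkpoint/optimizer.pt")))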
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.0032361935891005,
+  "epoch": 0.006472387178201,
   "eval_steps": 50,
-  "global_step": 50,
+  "global_step": 100,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -58,6 +58,49 @@
       "eval_samples_per_second": 33.399,
       "eval_steps_per_second": 16.699,
       "step": 50
-    }
+    },
+    {
+      "epoch": 0.0038834323069206,
+      "grad_norm": 0.9267818927764893,
+      "learning_rate": 0.0002,
+      "loss": 1.1523,
+      "step": 60
+    },
+    {
+      "epoch": 0.0045306710247407,
+      "grad_norm": 1.0530893802642822,
+      "learning_rate": 0.0002,
+      "loss": 1.0864,
+      "step": 70
+    },
+    {
+      "epoch": 0.0051779097425608,
+      "grad_norm": 0.9379938244819641,
+      "learning_rate": 0.0002,
+      "loss": 1.0639,
+      "step": 80
+    },
+    {
+      "epoch": 0.0058251484603809,
+      "grad_norm": 0.6152510643005371,
+      "learning_rate": 0.0002,
+      "loss": 0.9727,
+      "step": 90
+    },
+    {
+      "epoch": 0.006472387178201,
+      "grad_norm": 1.0862324237823486,
+      "learning_rate": 0.0002,
+      "loss": 1.1407,
+      "step": 100
+    },
+    {
+      "epoch": 0.006472387178201,
+      "eval_loss": 1.0274646282196045,
+      "eval_runtime": 194.761,
+      "eval_samples_per_second": 33.405,
+      "eval_steps_per_second": 16.703,
+      "step": 100
+    }
   ],
   "logging_steps": 10,
@@ -77,7 +120,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 9323102846582784.0,
+  "total_flos": 1.873333749547008e+16,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null