adammandic87 committed (verified)
Commit b710e1f · 1 Parent(s): 32b48fd

Training in progress, step 50, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ea04fd6a2fd55239b0bef278eb068d31836116d771414f7a4e67652162826e1f
+oid sha256:762d99ff7c507130ec23054b5cc9ba4a511e4c78d77ad9d2e79aaf30a15b66b0
 size 125040
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d204636774089f12e4cbfb400db02597e2a03776cadbab728abf0b2b3c51e799
+oid sha256:031f102c81ee38b8f81643ede81cbfee79e06d047ac4c2bedd7bec7c96c1bbe6
 size 162868
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1f0301aae68db5017871275fbfdb6928fd6f7537f55caa23bacf0eda6ec47376
+oid sha256:1ae59499d6fa89d93656a32994352236f76ecdb3c0d5d7d01bbf5e497aa8ee6c
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:37841e69eda911caeb33edeefa0b2f140e72dcce247aeb757b2fe89c00d7887b
+oid sha256:b1df0528620c07325b8faa7567e59b0c1e86a1f1ee6af1245a69c6c0463fe4e2
 size 1064
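
Note: the four files above are Git LFS pointer files, so only the sha256 oid and byte size change in the commit; the binary contents live in LFS storage. As a minimal sketch (the local path and the expected digest, copied from the updated adapter_model.safetensors pointer, are assumptions about a local checkout, not part of the commit), a downloaded file can be checked against its pointer like this:

# Minimal sketch: verify a materialized LFS object against the oid in its pointer file.
import hashlib

def sha256_of(path):
    # Stream the file in 1 MiB chunks so large checkpoints do not load into memory.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "762d99ff7c507130ec23054b5cc9ba4a511e4c78d77ad9d2e79aaf30a15b66b0"
actual = sha256_of("last-checkpoint/adapter_model.safetensors")  # assumed local path
print("match" if actual == expected else "mismatch")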
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.019392131269811673,
+  "epoch": 0.037292560134253215,
   "eval_steps": 13,
-  "global_step": 26,
+  "global_step": 50,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -45,6 +45,35 @@
       "eval_samples_per_second": 309.106,
       "eval_steps_per_second": 154.827,
       "step": 26
+    },
+    {
+      "epoch": 0.02237553608055193,
+      "grad_norm": 1.2946125268936157,
+      "learning_rate": 5e-05,
+      "loss": 44.3946,
+      "step": 30
+    },
+    {
+      "epoch": 0.02908819690471751,
+      "eval_loss": 11.084484100341797,
+      "eval_runtime": 1.8157,
+      "eval_samples_per_second": 311.172,
+      "eval_steps_per_second": 155.861,
+      "step": 39
+    },
+    {
+      "epoch": 0.029834048107402575,
+      "grad_norm": 1.4587301015853882,
+      "learning_rate": 1.4644660940672627e-05,
+      "loss": 44.3598,
+      "step": 40
+    },
+    {
+      "epoch": 0.037292560134253215,
+      "grad_norm": 1.4952447414398193,
+      "learning_rate": 0.0,
+      "loss": 44.3477,
+      "step": 50
     }
   ],
   "logging_steps": 10,
@@ -59,12 +88,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 501529706496.0,
+  "total_flos": 954835402752.0,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null