eddysang committed
Commit 3a93cb9 · verified · 1 Parent(s): c85b555

Training in progress, step 187, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:83fc03decf24c551f881ea371d2fc633c54f2d89682e39a733be683677fdd1e4
+oid sha256:3c679e7e83d3958ed05823d72368a34816d291f6a1537ef5d0f1943c104a8d40
 size 97728
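
Only the adapter weights' LFS pointer (oid and size) changes between checkpoints. As a minimal sketch, assuming the actual 97,728-byte blob has been fetched (for example with "git lfs pull") rather than just the pointer file, the adapter tensors can be listed with the safetensors library:

# Minimal sketch: list the adapter tensors saved in this checkpoint.
# Assumes the real blob is present locally, not just the LFS pointer.
from safetensors import safe_open

with safe_open("last-checkpoint/adapter_model.safetensors", framework="pt", device="cpu") as f:
    for name in f.keys():
        tensor = f.get_tensor(name)
        print(name, tuple(tensor.shape), tensor.dtype)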
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c6fa2042ba45af7848b2d2638ee0ba70d95e13987922ca7c2234efa8226baeaa
+oid sha256:5f1b8af2e89d67205fa4b8e8c1c47189d2d9273a15a47547871725a62e9adb28
 size 212298
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:732562efa57057e9106da0ada8229f2e13dca05c703e75c7e3d2c742b55e2e3b
+oid sha256:3de5ca30ff633d309a9b80db8e8d57d7760528e3bc1867a6faea71254e3b8c07
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c9a138d58efee4722b14f409e14731dcd0935c0eaa8a6078a4918744b9529bab
+oid sha256:9f9ab26294f369ba5d99cf6e2514a5d05f3df1a90b36a045191f6b23077ba365
 size 1064
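
Each of the four files above is a Git LFS pointer in the spec-v1 format: a version line, an oid sha256 line, and a size line, where the oid is the SHA-256 of the file contents and size is its byte length. A minimal sketch of checking a locally pulled file against its pointer (the helper function is hypothetical, not part of git-lfs; the path and values are taken from the rng_state.pth entry above):

# Minimal sketch: verify a pulled LFS object against the oid/size in its pointer.
import hashlib
import os

def matches_pointer(path, oid, size):  # hypothetical helper for illustration
    digest = hashlib.sha256(open(path, "rb").read()).hexdigest()
    return digest == oid and os.path.getsize(path) == size

print(matches_pointer(
    "last-checkpoint/rng_state.pth",
    "3de5ca30ff633d309a9b80db8e8d57d7760528e3bc1867a6faea71254e3b8c07",
    14244,
))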
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.5759661196400212,
+  "epoch": 0.6335627316040233,
   "eval_steps": 50,
-  "global_step": 170,
+  "global_step": 187,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1229,6 +1229,125 @@
       "learning_rate": 1.0048094716167095e-05,
       "loss": 10.344,
       "step": 170
+    },
+    {
+      "epoch": 0.5793541556379036,
+      "grad_norm": 0.02616795524954796,
+      "learning_rate": 9.40352196454532e-06,
+      "loss": 10.3444,
+      "step": 171
+    },
+    {
+      "epoch": 0.5827421916357861,
+      "grad_norm": 0.01909734308719635,
+      "learning_rate": 8.778930535580474e-06,
+      "loss": 10.3453,
+      "step": 172
+    },
+    {
+      "epoch": 0.5861302276336686,
+      "grad_norm": 0.0253478791564703,
+      "learning_rate": 8.174510685872415e-06,
+      "loss": 10.3448,
+      "step": 173
+    },
+    {
+      "epoch": 0.5895182636315511,
+      "grad_norm": 0.020116323605179787,
+      "learning_rate": 7.5904465275624884e-06,
+      "loss": 10.3443,
+      "step": 174
+    },
+    {
+      "epoch": 0.5929062996294335,
+      "grad_norm": 0.0158366896212101,
+      "learning_rate": 7.026915972251254e-06,
+      "loss": 10.3452,
+      "step": 175
+    },
+    {
+      "epoch": 0.596294335627316,
+      "grad_norm": 0.020508933812379837,
+      "learning_rate": 6.484090676804926e-06,
+      "loss": 10.3454,
+      "step": 176
+    },
+    {
+      "epoch": 0.5996823716251986,
+      "grad_norm": 0.020298492163419724,
+      "learning_rate": 5.962135991066971e-06,
+      "loss": 10.344,
+      "step": 177
+    },
+    {
+      "epoch": 0.603070407623081,
+      "grad_norm": 0.01768598146736622,
+      "learning_rate": 5.461210907490951e-06,
+      "loss": 10.3442,
+      "step": 178
+    },
+    {
+      "epoch": 0.6064584436209635,
+      "grad_norm": 0.02038932777941227,
+      "learning_rate": 4.981468012709877e-06,
+      "loss": 10.3456,
+      "step": 179
+    },
+    {
+      "epoch": 0.609846479618846,
+      "grad_norm": 0.01764788292348385,
+      "learning_rate": 4.523053441056876e-06,
+      "loss": 10.3448,
+      "step": 180
+    },
+    {
+      "epoch": 0.6132345156167285,
+      "grad_norm": 0.023541666567325592,
+      "learning_rate": 4.086106830051236e-06,
+      "loss": 10.3449,
+      "step": 181
+    },
+    {
+      "epoch": 0.6166225516146109,
+      "grad_norm": 0.06664617359638214,
+      "learning_rate": 3.670761277863485e-06,
+      "loss": 10.3473,
+      "step": 182
+    },
+    {
+      "epoch": 0.6200105876124934,
+      "grad_norm": 0.022008830681443214,
+      "learning_rate": 3.277143302772342e-06,
+      "loss": 10.345,
+      "step": 183
+    },
+    {
+      "epoch": 0.6233986236103759,
+      "grad_norm": 0.020388372242450714,
+      "learning_rate": 2.9053728046260825e-06,
+      "loss": 10.3434,
+      "step": 184
+    },
+    {
+      "epoch": 0.6267866596082583,
+      "grad_norm": 0.02203419804573059,
+      "learning_rate": 2.555563028319885e-06,
+      "loss": 10.3441,
+      "step": 185
+    },
+    {
+      "epoch": 0.6301746956061408,
+      "grad_norm": 0.020321670919656754,
+      "learning_rate": 2.227820529300264e-06,
+      "loss": 10.3448,
+      "step": 186
+    },
+    {
+      "epoch": 0.6335627316040233,
+      "grad_norm": 0.0204358771443367,
+      "learning_rate": 1.9222451411073645e-06,
+      "loss": 10.3445,
+      "step": 187
     }
   ],
   "logging_steps": 1,
@@ -1248,7 +1367,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 72825986482176.0,
+  "total_flos": 80109921632256.0,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null