joelniklaus committed on
Commit 2ef6f25 (1 parent: 61d597a)

Training in progress, step 900000

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9ced13f9fe1c6abb065f1454d1dfe3a3851a905572b70da9e3a5f2e82f447517
+oid sha256:7be3c63a7db355df0c16f44b3a84ae1827088c374b677acc161d3c118059df7f
 size 885325017
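
Every file in this commit is stored through Git LFS, so each diff only swaps the three-line pointer (spec version, sha256 oid, byte size); the binary payload itself lives in LFS storage. As a minimal sketch (the helper below is my own illustration, not part of this repo), a downloaded checkpoint file can be checked against its pointer like this:

```python
# Hypothetical helper: verify that a local file matches the oid/size
# recorded in its Git LFS pointer.
import hashlib
from pathlib import Path

def verify_lfs_pointer(file_path: str, expected_oid: str, expected_size: int) -> bool:
    """Return True if the file's byte size and sha256 digest match the pointer."""
    path = Path(file_path)
    if path.stat().st_size != expected_size:
        return False
    sha = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            sha.update(chunk)
    return sha.hexdigest() == expected_oid

# Example with the new optimizer.pt pointer from this commit:
print(verify_lfs_pointer(
    "last-checkpoint/optimizer.pt",
    "7be3c63a7db355df0c16f44b3a84ae1827088c374b677acc161d3c118059df7f",
    885325017,
))
```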
last-checkpoint/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c06640eca11b1304ee4e8f12cf7b6eaef37faaf4e6e12f436d38316a0500c5fe
+oid sha256:094ffaaef6861196b82664bedfc2e8503866ca513e71d6cb3ec80b0579b21580
 size 442675755
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:51584d101168558d08938c91c664aa455798605c22e29a39ce52d5cec5946e57
+oid sha256:77efe9cf27bf75190b7c4227ba2b681a2bb5fd8a8adef3d6ef4ecedcdb622b56
 size 13611
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cfa3a3ff1bbe510ea64c418f0f4b7ae4ff637b2f675aa7327308814422c14671
+oid sha256:b90e0a0db0e7b32645a0ce271a721bd086ca63e926689a7c41527f798e0a2045
 size 13611
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cfa3a3ff1bbe510ea64c418f0f4b7ae4ff637b2f675aa7327308814422c14671
+oid sha256:b90e0a0db0e7b32645a0ce271a721bd086ca63e926689a7c41527f798e0a2045
 size 13611
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cfa3a3ff1bbe510ea64c418f0f4b7ae4ff637b2f675aa7327308814422c14671
+oid sha256:b90e0a0db0e7b32645a0ce271a721bd086ca63e926689a7c41527f798e0a2045
 size 13611
last-checkpoint/rng_state_4.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:51584d101168558d08938c91c664aa455798605c22e29a39ce52d5cec5946e57
+oid sha256:77efe9cf27bf75190b7c4227ba2b681a2bb5fd8a8adef3d6ef4ecedcdb622b56
 size 13611
last-checkpoint/rng_state_5.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:51584d101168558d08938c91c664aa455798605c22e29a39ce52d5cec5946e57
+oid sha256:77efe9cf27bf75190b7c4227ba2b681a2bb5fd8a8adef3d6ef4ecedcdb622b56
 size 13611
last-checkpoint/rng_state_6.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cfa3a3ff1bbe510ea64c418f0f4b7ae4ff637b2f675aa7327308814422c14671
+oid sha256:b90e0a0db0e7b32645a0ce271a721bd086ca63e926689a7c41527f798e0a2045
 size 13611
last-checkpoint/rng_state_7.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cfa3a3ff1bbe510ea64c418f0f4b7ae4ff637b2f675aa7327308814422c14671
+oid sha256:b90e0a0db0e7b32645a0ce271a721bd086ca63e926689a7c41527f798e0a2045
 size 13611
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:adedebe0cc7e07de957a9e2967d6e9c3934a9fdca3245f46a29d125e5e36192e
+oid sha256:98fbf159ce1bb90afdab5d6ac994b4ab633fc21d8eb6c04c41c7f3a26253e5b5
 size 623
last-checkpoint/trainer_state.json CHANGED
@@ -1,8 +1,8 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 11.044322,
-  "global_step": 850000,
+  "epoch": 12.019497,
+  "global_step": 900000,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -5242,11 +5242,319 @@
       "eval_samples_per_second": 71.459,
       "eval_steps_per_second": 0.572,
       "step": 850000
+    },
+    {
+      "epoch": 11.05,
+      "learning_rate": 5.947856562792925e-06,
+      "loss": 0.5579,
+      "step": 851000
+    },
+    {
+      "epoch": 11.05,
+      "learning_rate": 5.869882433093155e-06,
+      "loss": 0.5342,
+      "step": 852000
+    },
+    {
+      "epoch": 11.05,
+      "learning_rate": 5.79239090328883e-06,
+      "loss": 0.4915,
+      "step": 853000
+    },
+    {
+      "epoch": 11.05,
+      "learning_rate": 5.715382820814885e-06,
+      "loss": 0.5185,
+      "step": 854000
+    },
+    {
+      "epoch": 11.05,
+      "learning_rate": 5.6388590278194096e-06,
+      "loss": 0.5439,
+      "step": 855000
+    },
+    {
+      "epoch": 11.05,
+      "learning_rate": 5.562820361154314e-06,
+      "loss": 0.5144,
+      "step": 856000
+    },
+    {
+      "epoch": 11.05,
+      "learning_rate": 5.48726765236629e-06,
+      "loss": 0.4734,
+      "step": 857000
+    },
+    {
+      "epoch": 11.05,
+      "learning_rate": 5.412201727687644e-06,
+      "loss": 0.491,
+      "step": 858000
+    },
+    {
+      "epoch": 11.05,
+      "learning_rate": 5.337623408027293e-06,
+      "loss": 0.5233,
+      "step": 859000
+    },
+    {
+      "epoch": 11.05,
+      "learning_rate": 5.263533508961827e-06,
+      "loss": 0.4936,
+      "step": 860000
+    },
+    {
+      "epoch": 11.06,
+      "learning_rate": 5.1899328407264855e-06,
+      "loss": 0.4585,
+      "step": 861000
+    },
+    {
+      "epoch": 11.06,
+      "learning_rate": 5.116822208206396e-06,
+      "loss": 0.5104,
+      "step": 862000
+    },
+    {
+      "epoch": 11.06,
+      "learning_rate": 5.044202410927706e-06,
+      "loss": 0.5342,
+      "step": 863000
+    },
+    {
+      "epoch": 11.06,
+      "learning_rate": 4.972074243048897e-06,
+      "loss": 0.4968,
+      "step": 864000
+    },
+    {
+      "epoch": 11.06,
+      "learning_rate": 4.900438493352055e-06,
+      "loss": 0.4527,
+      "step": 865000
+    },
+    {
+      "epoch": 11.06,
+      "learning_rate": 4.829295945234258e-06,
+      "loss": 0.5301,
+      "step": 866000
+    },
+    {
+      "epoch": 11.06,
+      "learning_rate": 4.758647376699032e-06,
+      "loss": 0.5213,
+      "step": 867000
+    },
+    {
+      "epoch": 11.06,
+      "learning_rate": 4.688493560347773e-06,
+      "loss": 0.5033,
+      "step": 868000
+    },
+    {
+      "epoch": 11.06,
+      "learning_rate": 4.618835263371396e-06,
+      "loss": 0.4709,
+      "step": 869000
+    },
+    {
+      "epoch": 11.06,
+      "learning_rate": 4.549673247541875e-06,
+      "loss": 0.4998,
+      "step": 870000
+    },
+    {
+      "epoch": 11.07,
+      "learning_rate": 4.48100826920394e-06,
+      "loss": 0.5318,
+      "step": 871000
+    },
+    {
+      "epoch": 11.07,
+      "learning_rate": 4.412841079266777e-06,
+      "loss": 0.4929,
+      "step": 872000
+    },
+    {
+      "epoch": 11.07,
+      "learning_rate": 4.3451724231958644e-06,
+      "loss": 0.4505,
+      "step": 873000
+    },
+    {
+      "epoch": 11.07,
+      "learning_rate": 4.27800304100478e-06,
+      "loss": 0.4804,
+      "step": 874000
+    },
+    {
+      "epoch": 11.07,
+      "learning_rate": 4.2113336672471245e-06,
+      "loss": 0.5057,
+      "step": 875000
+    },
+    {
+      "epoch": 11.07,
+      "learning_rate": 4.145165031008508e-06,
+      "loss": 0.4685,
+      "step": 876000
+    },
+    {
+      "epoch": 11.07,
+      "learning_rate": 4.079497855898501e-06,
+      "loss": 0.4303,
+      "step": 877000
+    },
+    {
+      "epoch": 11.07,
+      "learning_rate": 4.01433286004283e-06,
+      "loss": 0.46,
+      "step": 878000
+    },
+    {
+      "epoch": 11.07,
+      "learning_rate": 3.949670756075447e-06,
+      "loss": 0.5197,
+      "step": 879000
+    },
+    {
+      "epoch": 11.07,
+      "learning_rate": 3.885512251130763e-06,
+      "loss": 0.4886,
+      "step": 880000
+    },
+    {
+      "epoch": 12.0,
+      "learning_rate": 3.821858046835913e-06,
+      "loss": 0.4545,
+      "step": 881000
+    },
+    {
+      "epoch": 12.0,
+      "learning_rate": 3.75870883930306e-06,
+      "loss": 0.4543,
+      "step": 882000
+    },
+    {
+      "epoch": 12.0,
+      "learning_rate": 3.696065319121833e-06,
+      "loss": 0.4902,
+      "step": 883000
+    },
+    {
+      "epoch": 12.0,
+      "learning_rate": 3.6339281713517303e-06,
+      "loss": 0.5127,
+      "step": 884000
+    },
+    {
+      "epoch": 12.0,
+      "learning_rate": 3.5722980755146517e-06,
+      "loss": 0.4717,
+      "step": 885000
+    },
+    {
+      "epoch": 12.01,
+      "learning_rate": 3.511175705587433e-06,
+      "loss": 0.4767,
+      "step": 886000
+    },
+    {
+      "epoch": 12.01,
+      "learning_rate": 3.4505617299945336e-06,
+      "loss": 0.5242,
+      "step": 887000
+    },
+    {
+      "epoch": 12.01,
+      "learning_rate": 3.390456811600673e-06,
+      "loss": 0.5297,
+      "step": 888000
+    },
+    {
+      "epoch": 12.01,
+      "learning_rate": 3.3308616077036115e-06,
+      "loss": 0.4846,
+      "step": 889000
+    },
+    {
+      "epoch": 12.01,
+      "learning_rate": 3.271776770026963e-06,
+      "loss": 0.4763,
+      "step": 890000
+    },
+    {
+      "epoch": 12.01,
+      "learning_rate": 3.213202944713023e-06,
+      "loss": 0.5236,
+      "step": 891000
+    },
+    {
+      "epoch": 12.01,
+      "learning_rate": 3.155140772315773e-06,
+      "loss": 0.537,
+      "step": 892000
+    },
+    {
+      "epoch": 12.01,
+      "learning_rate": 3.0975908877938277e-06,
+      "loss": 0.4858,
+      "step": 893000
+    },
+    {
+      "epoch": 12.01,
+      "learning_rate": 3.040553920503503e-06,
+      "loss": 0.4746,
+      "step": 894000
+    },
+    {
+      "epoch": 12.01,
+      "learning_rate": 2.9840304941919415e-06,
+      "loss": 0.523,
+      "step": 895000
+    },
+    {
+      "epoch": 12.02,
+      "learning_rate": 2.928021226990263e-06,
+      "loss": 0.5302,
+      "step": 896000
+    },
+    {
+      "epoch": 12.02,
+      "learning_rate": 2.8725267314068495e-06,
+      "loss": 0.4774,
+      "step": 897000
+    },
+    {
+      "epoch": 12.02,
+      "learning_rate": 2.817547614320615e-06,
+      "loss": 0.4829,
+      "step": 898000
+    },
+    {
+      "epoch": 12.02,
+      "learning_rate": 2.7630844769743757e-06,
+      "loss": 0.506,
+      "step": 899000
+    },
+    {
+      "epoch": 12.02,
+      "learning_rate": 2.7091379149682685e-06,
+      "loss": 0.4766,
+      "step": 900000
+    },
+    {
+      "epoch": 12.02,
+      "eval_loss": 0.4155268967151642,
+      "eval_runtime": 11.4713,
+      "eval_samples_per_second": 435.87,
+      "eval_steps_per_second": 3.487,
+      "step": 900000
     }
   ],
   "max_steps": 1000000,
   "num_train_epochs": 9223372036854775807,
-  "total_flos": 1.431867725329308e+19,
+  "total_flos": 1.5160955606283583e+19,
   "trial_name": null,
   "trial_params": null
 }
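
The trainer_state.json diff is where the training progress itself shows up: global_step advances from 850000 to 900000, the epoch counter moves from 11.044322 to 12.019497, and fifty new log_history entries (training loss and learning rate every 1000 steps, plus an eval at step 900000 with eval_loss 0.4155) are appended. As a minimal sketch (the file path matches this checkpoint; everything else is my own illustration), the saved state can be inspected like this:

```python
# Hypothetical snippet: inspect the checkpoint's trainer_state.json
# to see how far training has progressed.
import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print(state["global_step"], state["epoch"])         # 900000 and 12.019497 for this commit
latest = state["log_history"][-1]                    # most recent entry: the eval at step 900000
print(latest.get("eval_loss"), latest.get("step"))   # 0.4155268967151642, 900000
```

If the run is restarted with the standard Hugging Face Trainer, training can resume from this directory via trainer.train(resume_from_checkpoint="last-checkpoint"), which also restores the optimizer, scheduler, and per-device RNG states updated above.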
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c06640eca11b1304ee4e8f12cf7b6eaef37faaf4e6e12f436d38316a0500c5fe
+oid sha256:094ffaaef6861196b82664bedfc2e8503866ca513e71d6cb3ec80b0579b21580
 size 442675755
runs/Apr11_08-45-40_t1v-n-fb892c44-w-0/events.out.tfevents.1681202915.t1v-n-fb892c44-w-0.3296147.0 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4994b45b170ab1f359b7e107842f42459f2607cb47cb4dc1faa2792f0581f1e3
-size 119713
+oid sha256:7beb2be86fc830bafff189c1a9c28e039245ef535f924cd8c862e116fb2ab071
+size 127989
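
Finally, the TensorBoard event file grows from 119713 to 127989 bytes as scalar summaries for steps 850000 to 900000 are written. The logs can be browsed with tensorboard --logdir runs, or read programmatically; a minimal sketch (run directory taken from the path above, scalar tag names assumed):

```python
# Hypothetical snippet: read the scalars recorded in this run's
# TensorBoard event file.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

ea = EventAccumulator("runs/Apr11_08-45-40_t1v-n-fb892c44-w-0")
ea.Reload()
print(ea.Tags()["scalars"])             # lists available tags, e.g. train/loss (names may differ)
for event in ea.Scalars("train/loss"):  # pick one of the listed tags
    print(event.step, event.value)
```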