leixa committed
Commit 9d758ea
1 Parent(s): dc5a155

Training in progress, step 420, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:204a349fb5f2880ebc9716e2c999f98555ac7db9085761fb01ccb8be60805c18
+oid sha256:6030cdfec156be32c0ce5ce5c733e922acd059040b45ccb4972e725954306c40
 size 671149168
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f474ba23721d4d9a1c85d01088a42d6297ce6fce23f3e15e68800d582b9d412b
+oid sha256:6abef001eb5046117ec6eee464e1792aa5acbda3c0d305591284db66b9017c9d
 size 341314644
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1b43a77608e3595ef8ce792c3fd5462a71b9b7958002088c96e8d041e0e2ab5b
+oid sha256:601d3cd30158a5ec1e80bcf2fe124c9ff7d378ee828c7f46d1e57e335b5624be
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6ac207b57c6cefba3838e335ba7ebf320ffdaee8162f1c0afc72ea9ad9f0725f
+oid sha256:b9507fdefdfac1d5dbc7a23a3aeb675b6dd3cc22a3762b7e85ff02a1c9c43105
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 1.231270358306189,
+  "epoch": 1.3680781758957654,
   "eval_steps": 42,
-  "global_step": 378,
+  "global_step": 420,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -969,6 +969,112 @@
       "eval_samples_per_second": 23.31,
       "eval_steps_per_second": 5.861,
       "step": 378
+    },
+    {
+      "epoch": 1.241042345276873,
+      "grad_norm": 2.3673043251037598,
+      "learning_rate": 1.3860256808630428e-05,
+      "loss": 0.8592,
+      "step": 381
+    },
+    {
+      "epoch": 1.2508143322475571,
+      "grad_norm": 2.451141357421875,
+      "learning_rate": 1.3202379370768252e-05,
+      "loss": 0.7932,
+      "step": 384
+    },
+    {
+      "epoch": 1.2605863192182412,
+      "grad_norm": 2.4431982040405273,
+      "learning_rate": 1.2558115014363592e-05,
+      "loss": 0.7265,
+      "step": 387
+    },
+    {
+      "epoch": 1.2703583061889252,
+      "grad_norm": 2.317182779312134,
+      "learning_rate": 1.1927702081543279e-05,
+      "loss": 0.7116,
+      "step": 390
+    },
+    {
+      "epoch": 1.2801302931596092,
+      "grad_norm": 2.2101731300354004,
+      "learning_rate": 1.1311373790174657e-05,
+      "loss": 0.7642,
+      "step": 393
+    },
+    {
+      "epoch": 1.2899022801302933,
+      "grad_norm": 2.8971657752990723,
+      "learning_rate": 1.0709358147587884e-05,
+      "loss": 0.7537,
+      "step": 396
+    },
+    {
+      "epoch": 1.2996742671009773,
+      "grad_norm": 2.7442216873168945,
+      "learning_rate": 1.0121877866225781e-05,
+      "loss": 0.765,
+      "step": 399
+    },
+    {
+      "epoch": 1.3094462540716614,
+      "grad_norm": 2.479196310043335,
+      "learning_rate": 9.549150281252633e-06,
+      "loss": 0.8282,
+      "step": 402
+    },
+    {
+      "epoch": 1.3192182410423452,
+      "grad_norm": 2.463113307952881,
+      "learning_rate": 8.991387270152201e-06,
+      "loss": 0.8097,
+      "step": 405
+    },
+    {
+      "epoch": 1.3289902280130292,
+      "grad_norm": 2.4927496910095215,
+      "learning_rate": 8.448795174344804e-06,
+      "loss": 0.8298,
+      "step": 408
+    },
+    {
+      "epoch": 1.3387622149837133,
+      "grad_norm": 2.711042881011963,
+      "learning_rate": 7.921574722852343e-06,
+      "loss": 0.7205,
+      "step": 411
+    },
+    {
+      "epoch": 1.3485342019543973,
+      "grad_norm": 2.803149938583374,
+      "learning_rate": 7.409920958039795e-06,
+      "loss": 0.7575,
+      "step": 414
+    },
+    {
+      "epoch": 1.3583061889250814,
+      "grad_norm": 2.474118232727051,
+      "learning_rate": 6.9140231634602485e-06,
+      "loss": 0.7231,
+      "step": 417
+    },
+    {
+      "epoch": 1.3680781758957654,
+      "grad_norm": 2.5949273109436035,
+      "learning_rate": 6.43406479383053e-06,
+      "loss": 0.8594,
+      "step": 420
+    },
+    {
+      "epoch": 1.3680781758957654,
+      "eval_loss": 1.3078192472457886,
+      "eval_runtime": 22.1879,
+      "eval_samples_per_second": 23.301,
+      "eval_steps_per_second": 5.859,
+      "step": 420
     }
   ],
   "logging_steps": 3,
@@ -988,7 +1094,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 2.8506192533363098e+17,
+  "total_flos": 3.1674071048729395e+17,
   "train_batch_size": 4,
   "trial_name": null,
   "trial_params": null