leixa committed
Commit: d512708
Parent: 3d89765

Training in progress, step 462, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6030cdfec156be32c0ce5ce5c733e922acd059040b45ccb4972e725954306c40
+oid sha256:e3d71f135aaf51849845b589a0599d3e3c87aa7bb1cbd6352df3fb636193a03b
 size 671149168
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6abef001eb5046117ec6eee464e1792aa5acbda3c0d305591284db66b9017c9d
+oid sha256:dfe1fa92a7323db508573f5ba92831f8d0c60c3665cbbe91b3e18368afe5ddfe
 size 341314644
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:601d3cd30158a5ec1e80bcf2fe124c9ff7d378ee828c7f46d1e57e335b5624be
+oid sha256:35955a38f305f08a71cf5a6aeede99e60dbfc9c66fbb6b89f8f788d949eaddfc
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b9507fdefdfac1d5dbc7a23a3aeb675b6dd3cc22a3762b7e85ff02a1c9c43105
+oid sha256:7b58b44a2d5024ddc12e64ead45d5d25c7fc985d9aaeb44c7bc3de9b8cf56f23
 size 1064
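
The four files above are Git LFS pointers rather than the binaries themselves: each pointer records only the object's SHA-256 (the oid) and its size in bytes, so the pointer text changes whenever the underlying checkpoint file is rewritten. A minimal sketch of checking a downloaded file against its pointer follows; the paths and helper names are assumptions for illustration, not part of this repository.

# Sketch only: confirm a downloaded checkpoint file matches its Git LFS pointer.
# The pointer format is the one shown above: "version ...", "oid sha256:<hex>", "size <bytes>".
import hashlib
from pathlib import Path

def parse_pointer(pointer_text: str) -> tuple[str, int]:
    """Return (sha256 hex digest, size in bytes) from an LFS pointer file's text."""
    fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
    return fields["oid"].removeprefix("sha256:"), int(fields["size"])

def verify(pointer_path: str, blob_path: str) -> bool:
    """True if the blob's SHA-256 and byte size both match the pointer."""
    expected_oid, expected_size = parse_pointer(Path(pointer_path).read_text())
    blob = Path(blob_path).read_bytes()
    return hashlib.sha256(blob).hexdigest() == expected_oid and len(blob) == expected_size

# Hypothetical usage:
# verify("adapter_model.safetensors.pointer", "last-checkpoint/adapter_model.safetensors")
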
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 1.3680781758957654,
+  "epoch": 1.504885993485342,
   "eval_steps": 42,
-  "global_step": 420,
+  "global_step": 462,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1075,6 +1075,112 @@
       "eval_samples_per_second": 23.301,
       "eval_steps_per_second": 5.859,
       "step": 420
+    },
+    {
+      "epoch": 1.3778501628664495,
+      "grad_norm": 2.6252212524414062,
+      "learning_rate": 5.9702234071631e-06,
+      "loss": 0.694,
+      "step": 423
+    },
+    {
+      "epoch": 1.3876221498371335,
+      "grad_norm": 2.314903497695923,
+      "learning_rate": 5.5226705990794155e-06,
+      "loss": 0.7619,
+      "step": 426
+    },
+    {
+      "epoch": 1.3973941368078175,
+      "grad_norm": 2.5821728706359863,
+      "learning_rate": 5.091571939329048e-06,
+      "loss": 0.8219,
+      "step": 429
+    },
+    {
+      "epoch": 1.4071661237785016,
+      "grad_norm": 2.4021191596984863,
+      "learning_rate": 4.677086910538092e-06,
+      "loss": 0.6944,
+      "step": 432
+    },
+    {
+      "epoch": 1.4169381107491856,
+      "grad_norm": 2.5493106842041016,
+      "learning_rate": 4.279368849209381e-06,
+      "loss": 0.7945,
+      "step": 435
+    },
+    {
+      "epoch": 1.4267100977198697,
+      "grad_norm": 2.4301791191101074,
+      "learning_rate": 3.898564888996476e-06,
+      "loss": 0.6487,
+      "step": 438
+    },
+    {
+      "epoch": 1.4364820846905537,
+      "grad_norm": 2.63675856590271,
+      "learning_rate": 3.534815906272404e-06,
+      "loss": 0.8646,
+      "step": 441
+    },
+    {
+      "epoch": 1.4462540716612378,
+      "grad_norm": 2.5258288383483887,
+      "learning_rate": 3.18825646801314e-06,
+      "loss": 0.7682,
+      "step": 444
+    },
+    {
+      "epoch": 1.4560260586319218,
+      "grad_norm": 2.3839597702026367,
+      "learning_rate": 2.8590147820153513e-06,
+      "loss": 0.8041,
+      "step": 447
+    },
+    {
+      "epoch": 1.4657980456026058,
+      "grad_norm": 2.62052583694458,
+      "learning_rate": 2.547212649466568e-06,
+      "loss": 0.806,
+      "step": 450
+    },
+    {
+      "epoch": 1.47557003257329,
+      "grad_norm": 2.2883756160736084,
+      "learning_rate": 2.2529654198854835e-06,
+      "loss": 0.7404,
+      "step": 453
+    },
+    {
+      "epoch": 1.485342019543974,
+      "grad_norm": 2.6609508991241455,
+      "learning_rate": 1.9763819484490355e-06,
+      "loss": 0.7702,
+      "step": 456
+    },
+    {
+      "epoch": 1.495114006514658,
+      "grad_norm": 2.525723695755005,
+      "learning_rate": 1.7175645557220566e-06,
+      "loss": 0.769,
+      "step": 459
+    },
+    {
+      "epoch": 1.504885993485342,
+      "grad_norm": 2.766369581222534,
+      "learning_rate": 1.4766089898042678e-06,
+      "loss": 0.7819,
+      "step": 462
+    },
+    {
+      "epoch": 1.504885993485342,
+      "eval_loss": 1.304705262184143,
+      "eval_runtime": 22.2456,
+      "eval_samples_per_second": 23.241,
+      "eval_steps_per_second": 5.844,
+      "step": 462
     }
   ],
   "logging_steps": 3,
@@ -1094,7 +1200,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 3.1674071048729395e+17,
+  "total_flos": 3.484194956409569e+17,
   "train_batch_size": 4,
   "trial_name": null,
   "trial_params": null