Shresthadev403 committed
Commit 0dcb964 (parent: 63224a1)

End of training

README.md CHANGED
@@ -1,6 +1,4 @@
 ---
-license: mit
-base_model: gpt2
 tags:
 - generated_from_trainer
 model-index:
@@ -13,14 +11,14 @@ should probably proofread and complete it, then remove this comment. -->
 
 # controlled-food-recipe-generation
 
-This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on an unknown dataset.
+This model was trained from scratch on an unknown dataset.
 It achieves the following results on the evaluation set:
-- eval_loss: 6.9974
-- eval_runtime: 0.0211
-- eval_samples_per_second: 47.457
-- eval_steps_per_second: 47.457
-- epoch: 15.0
-- step: 15
+- eval_loss: 4.0246
+- eval_runtime: 0.0201
+- eval_samples_per_second: 49.72
+- eval_steps_per_second: 49.72
+- epoch: 17.0
+- step: 17
 
 ## Model description
 
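For context, a minimal usage sketch of the checkpoint this card describes. The repository id below is an assumption inferred from the commit author and model name, and it presumes the tokenizer was pushed alongside the weights:

```python
# Minimal sketch, not part of the commit: load the checkpoint and sample a recipe.
# The repo id is an assumption based on the commit author and model name.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "Shresthadev403/controlled-food-recipe-generation"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id)

prompt = "Ingredients: tomato, basil, garlic\nRecipe:"
inputs = tokenizer(prompt, return_tensors="pt")
output = model.generate(**inputs, max_new_tokens=80, do_sample=True, top_p=0.95)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```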
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "gpt2",
+  "_name_or_path": "/kaggle/working/controlled-food-recipe-generation",
   "activation_function": "gelu_new",
   "architectures": [
     "GPT2LMHeadModel"
logs/events.out.tfevents.1707026005.d77ab3a64c5c.34.0 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8ba3d6e581aab74637b693922b066f79ffe151620bbe2904e337fe6d48bb02b4
-size 10933
+oid sha256:da95de961c5fb3d0a3f1731fe1c2359a0c7bf25b33787bc95384f23fe090c360
+size 11353
logs/events.out.tfevents.1707026588.d77ab3a64c5c.34.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:85d7d859600c3d05c0805bb0af742fd93e2d4b0b623fcfe47c886bb73a46fc60
+size 5126
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0f0f5d17dafec3300164ff2ba0d1d63fbe9d86331d49f4e267f4f76e9f82cadf
+oid sha256:5c5c324b6aa17b080b6e2d404e011302113fe31203e74eff5f0ef92d145c737b
 size 497918592
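The three files above are stored through Git LFS, so the diff only touches the pointer files (spec version, sha256 oid, and byte size), not the binary payloads themselves. A small illustrative check of a pointer against a downloaded blob; the local file names here are hypothetical:

```python
# Illustrative only: parse a Git LFS pointer and verify a downloaded blob against it.
import hashlib

def parse_lfs_pointer(text: str) -> dict:
    """Split the 'key value' lines of an LFS pointer file into a dict."""
    return dict(line.split(" ", 1) for line in text.strip().splitlines())

with open("model.safetensors") as f:               # the pointer file from the repo
    pointer = parse_lfs_pointer(f.read())

expected_oid = pointer["oid"].split(":", 1)[1]     # strip the "sha256:" prefix
expected_size = int(pointer["size"])

with open("model.safetensors.blob", "rb") as f:    # hypothetical downloaded payload
    blob = f.read()

assert len(blob) == expected_size
assert hashlib.sha256(blob).hexdigest() == expected_oid
```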
trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 9.170014381408691,
-  "best_model_checkpoint": "controlled-food-recipe-generation/checkpoint-14",
-  "epoch": 15.0,
+  "best_metric": 6.997350215911865,
+  "best_model_checkpoint": "controlled-food-recipe-generation/checkpoint-15",
+  "epoch": 17.0,
   "eval_steps": 1,
-  "global_step": 15,
+  "global_step": 17,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -217,6 +217,34 @@
       "eval_samples_per_second": 47.457,
       "eval_steps_per_second": 47.457,
       "step": 15
+    },
+    {
+      "epoch": 16.0,
+      "learning_rate": 4.600000000000001e-05,
+      "loss": 5.6621,
+      "step": 16
+    },
+    {
+      "epoch": 16.0,
+      "eval_loss": 4.715316295623779,
+      "eval_runtime": 0.0208,
+      "eval_samples_per_second": 48.084,
+      "eval_steps_per_second": 48.084,
+      "step": 16
+    },
+    {
+      "epoch": 17.0,
+      "learning_rate": 5e-05,
+      "loss": 5.8038,
+      "step": 17
+    },
+    {
+      "epoch": 17.0,
+      "eval_loss": 4.024607181549072,
+      "eval_runtime": 0.0201,
+      "eval_samples_per_second": 49.72,
+      "eval_steps_per_second": 49.72,
+      "step": 17
     }
   ],
   "logging_steps": 1,
@@ -224,7 +252,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 100,
   "save_steps": 1,
-  "total_flos": 8818606080000.0,
+  "total_flos": 9994420224000.0,
   "train_batch_size": 32,
   "trial_name": null,
   "trial_params": null