diff --git a/sp-1b-100m/checkpoint-100/config.json b/sp-1b-100m/checkpoint-100/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3903adf7927af053d4ae6418b1469fba5e14eacd
--- /dev/null
+++ b/sp-1b-100m/checkpoint-100/config.json
@@ -0,0 +1,33 @@
+{
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "initializer_range": 0.02,
+  "intermediate_size": 12288,
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_embd": 3072,
+  "n_head": 64,
+  "n_inner": null,
+  "n_layer": 12,
+  "n_positions": 1024,
+  "num_layers": 12,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.25.1",
+  "use_cache": false,
+  "vocab_size": 50257
+}
diff --git a/sp-1b-100m/checkpoint-100/optimizer.pt b/sp-1b-100m/checkpoint-100/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..28447236f52206e1cc68180495db971492341234
--- /dev/null
+++ b/sp-1b-100m/checkpoint-100/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d33cc8ed49fde04377a4aa3b5c21aa69213a091f7f5eee135712782dea5c282c
+size 12135889757
diff --git a/sp-1b-100m/checkpoint-100/pytorch_model.bin b/sp-1b-100m/checkpoint-100/pytorch_model.bin
new file mode 100644
index 0000000000000000000000000000000000000000..09a2fd531db79ee9fb5889e0871b898c1335889f
--- /dev/null
+++ b/sp-1b-100m/checkpoint-100/pytorch_model.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0dbc01cfdc8748bf8b0a9fa8dce3ff0340d7e03b03df4b3c8864b1cb491dc0d5
+size 6080539805
diff --git a/sp-1b-100m/checkpoint-100/rng_state.pth b/sp-1b-100m/checkpoint-100/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..7e2e4c76919138c6b9b6410e5c9f43050161863c
--- /dev/null
+++ b/sp-1b-100m/checkpoint-100/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ba78ebce0e6b5b3f4e93d2b800e3ebc1f9c36976d4963bf270e6783fb8c0606e
+size 14575
diff --git a/sp-1b-100m/checkpoint-100/scaler.pt b/sp-1b-100m/checkpoint-100/scaler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1e00f588d3f0176a99d362447a49f57ff6e1b1ad
--- /dev/null
+++ b/sp-1b-100m/checkpoint-100/scaler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfa44e8523f62833816d29aa6c576eaa7783e3bbdb3e132e248b1d8aaee6132b
+size 557
diff --git a/sp-1b-100m/checkpoint-100/scheduler.pt b/sp-1b-100m/checkpoint-100/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..0249f4e09966b681aaba44adb4be90280089c708
--- /dev/null
+++ b/sp-1b-100m/checkpoint-100/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7ea57e999fee966b1265f956366ee033f13ca6a2b608b4432ce2605ac0d88ed
+size 627
diff --git a/sp-1b-100m/checkpoint-100/trainer_state.json b/sp-1b-100m/checkpoint-100/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..7117fac9a6b67c827edf28e59c8750faa5d40a87
--- /dev/null
+++ b/sp-1b-100m/checkpoint-100/trainer_state.json
@@ -0,0 +1,15 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 0.26309839468870116,
+  "global_step": 100,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [],
+  "max_steps": 380,
+  "num_train_epochs": 1,
+  "total_flos": 2.138214034833408e+17,
+  "trial_name": null,
+  "trial_params": null
+}
diff --git a/sp-1b-100m/checkpoint-100/training_args.bin b/sp-1b-100m/checkpoint-100/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..e3839c34fb8e804b52ba989ad75c2786dd40ce54
--- /dev/null
+++ b/sp-1b-100m/checkpoint-100/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:feee864b9593fe1171ce64e635566351bc726e347de5c1946387ce337455125d
+size 3387
diff --git a/sp-1b-100m/checkpoint-200/config.json b/sp-1b-100m/checkpoint-200/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3903adf7927af053d4ae6418b1469fba5e14eacd
--- /dev/null
+++ b/sp-1b-100m/checkpoint-200/config.json
@@ -0,0 +1,33 @@
+{
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "initializer_range": 0.02,
+  "intermediate_size": 12288,
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_embd": 3072,
+  "n_head": 64,
+  "n_inner": null,
+  "n_layer": 12,
+  "n_positions": 1024,
+  "num_layers": 12,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.25.1",
+  "use_cache": false,
+  "vocab_size": 50257
+}
diff --git a/sp-1b-100m/checkpoint-200/optimizer.pt b/sp-1b-100m/checkpoint-200/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..4be44dc4ec6d556aeafab2a4652f16b8c08ce024
--- /dev/null
+++ b/sp-1b-100m/checkpoint-200/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e916bfdb2400f3353cd7e42f749d2a3d9394cbf6a1b13445fc17d9ef4f86052
+size 12135889757
diff --git a/sp-1b-100m/checkpoint-200/pytorch_model.bin b/sp-1b-100m/checkpoint-200/pytorch_model.bin
new file mode 100644
index 0000000000000000000000000000000000000000..7029e925eb394187deea0c7eff345131a5a23514
--- /dev/null
+++ b/sp-1b-100m/checkpoint-200/pytorch_model.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1a7e467ee0f58c2493d2e8f8afd547c0ca9ce47cbdd3a3328f22dbdea1f68f3a
+size 6080539805
diff --git a/sp-1b-100m/checkpoint-200/rng_state.pth b/sp-1b-100m/checkpoint-200/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..a460b1ce216580a0dedd230586a2bf8b6cad6eae
--- /dev/null
+++ b/sp-1b-100m/checkpoint-200/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e3b95dbfc53cdce2495f06e1c586eed0d4a66d02153a8ae6886b56fc37002ed2
+size 14575
diff --git a/sp-1b-100m/checkpoint-200/scaler.pt b/sp-1b-100m/checkpoint-200/scaler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..87710d0ddc627c070366fcb3112b07dc60d97295
--- /dev/null
+++ b/sp-1b-100m/checkpoint-200/scaler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4fccf0f9be1bb8f24861e4393745b3e09cc2687125a69e3757955fb0f0925ea5
+size 557
diff --git a/sp-1b-100m/checkpoint-200/scheduler.pt b/sp-1b-100m/checkpoint-200/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..815adebc7ffc5e25ee14c8fef012b899cf34ad54
--- /dev/null
+++ b/sp-1b-100m/checkpoint-200/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c162896f6df3ba4475b56099d964ded55f9fe1ecbaa5d3b3cdb9215d9d04af81
+size 627
diff --git a/sp-1b-100m/checkpoint-200/trainer_state.json b/sp-1b-100m/checkpoint-200/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..2fa684345b7e6661b483dd13f0ba4b9eca00b2b7
--- /dev/null
+++ b/sp-1b-100m/checkpoint-200/trainer_state.json
@@ -0,0 +1,15 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 0.5261967893774023,
+  "global_step": 200,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [],
+  "max_steps": 380,
+  "num_train_epochs": 1,
+  "total_flos": 4.276428069666816e+17,
+  "trial_name": null,
+  "trial_params": null
+}
diff --git a/sp-1b-100m/checkpoint-200/training_args.bin b/sp-1b-100m/checkpoint-200/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..e3839c34fb8e804b52ba989ad75c2786dd40ce54
--- /dev/null
+++ b/sp-1b-100m/checkpoint-200/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:feee864b9593fe1171ce64e635566351bc726e347de5c1946387ce337455125d
+size 3387
diff --git a/sp-1b-100m/checkpoint-300/config.json b/sp-1b-100m/checkpoint-300/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3903adf7927af053d4ae6418b1469fba5e14eacd
--- /dev/null
+++ b/sp-1b-100m/checkpoint-300/config.json
@@ -0,0 +1,33 @@
+{
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "initializer_range": 0.02,
+  "intermediate_size": 12288,
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_embd": 3072,
+  "n_head": 64,
+  "n_inner": null,
+  "n_layer": 12,
+  "n_positions": 1024,
+  "num_layers": 12,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.25.1",
+  "use_cache": false,
+  "vocab_size": 50257
+}
diff --git a/sp-1b-100m/checkpoint-300/optimizer.pt b/sp-1b-100m/checkpoint-300/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ca37ad39d1acf8c197325f776aaa0c1408a5ab94
--- /dev/null
+++ b/sp-1b-100m/checkpoint-300/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:263c995c30ba2be19018d3b20db9aee7b61dcb9de7ff86807cee7d72ed0304f7
+size 12135889885
diff --git a/sp-1b-100m/checkpoint-300/pytorch_model.bin b/sp-1b-100m/checkpoint-300/pytorch_model.bin
new file mode 100644
index 0000000000000000000000000000000000000000..d55e9689717cf9a9a72de75eede01ddaa8979c5b
--- /dev/null
+++ b/sp-1b-100m/checkpoint-300/pytorch_model.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:32075e5f905116b3d1be94622c51f8fe46cc06b58ff08cafaa0a1b3b57f85af1
+size 6080539805
diff --git a/sp-1b-100m/checkpoint-300/rng_state.pth b/sp-1b-100m/checkpoint-300/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..83f2f996709aac281ed93ffa88224ba57562193e
--- /dev/null
+++ b/sp-1b-100m/checkpoint-300/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2188bfd57d858ee450d8255287416c27d03305b15e592c12098f02f2ed02d870
+size 14575
diff --git a/sp-1b-100m/checkpoint-300/scaler.pt b/sp-1b-100m/checkpoint-300/scaler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..96d1b8ea2cbad321ffe9c10840a99bb1bcef18f5
--- /dev/null
+++ b/sp-1b-100m/checkpoint-300/scaler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:efafd90182e3d39d1b7c4a686f86e5913f5abc094dc3e2f827a6d479c6cef247
+size 557
diff --git a/sp-1b-100m/checkpoint-300/scheduler.pt b/sp-1b-100m/checkpoint-300/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..89a3685511ff0baf0caa02c2ab8f23fa6e214317
--- /dev/null
+++ b/sp-1b-100m/checkpoint-300/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4546d9532f4bee54bea33da91e5c41d94c56696c6723d0717ee8575a4c90c582
+size 627
diff --git a/sp-1b-100m/checkpoint-300/trainer_state.json b/sp-1b-100m/checkpoint-300/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..534e45083cda659567bdaf5613bc0d9ad29db6e6
--- /dev/null
+++ b/sp-1b-100m/checkpoint-300/trainer_state.json
@@ -0,0 +1,15 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 0.7892951840661034,
+  "global_step": 300,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [],
+  "max_steps": 380,
+  "num_train_epochs": 1,
+  "total_flos": 6.414642104500224e+17,
+  "trial_name": null,
+  "trial_params": null
+}
diff --git a/sp-1b-100m/checkpoint-300/training_args.bin b/sp-1b-100m/checkpoint-300/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..e3839c34fb8e804b52ba989ad75c2786dd40ce54
--- /dev/null
+++ b/sp-1b-100m/checkpoint-300/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:feee864b9593fe1171ce64e635566351bc726e347de5c1946387ce337455125d
+size 3387
diff --git a/sp-1b-100m/config.json b/sp-1b-100m/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3903adf7927af053d4ae6418b1469fba5e14eacd
--- /dev/null
+++ b/sp-1b-100m/config.json
@@ -0,0 +1,33 @@
+{
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "initializer_range": 0.02,
+  "intermediate_size": 12288,
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_embd": 3072,
+  "n_head": 64,
+  "n_inner": null,
+  "n_layer": 12,
+  "n_positions": 1024,
+  "num_layers": 12,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.25.1",
+  "use_cache": false,
+  "vocab_size": 50257
+}
diff --git a/sp-1b-100m/pytorch_model.bin b/sp-1b-100m/pytorch_model.bin
new file mode 100644
index 0000000000000000000000000000000000000000..75c99577f2cff015322a342dc1e9749e1462b6da
--- /dev/null
+++ b/sp-1b-100m/pytorch_model.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5304288c5cc05479cbe6d40c5f0bce44426cd1c6f53fb430d6cb290d31448fdc
+size 6080539805
diff --git a/sp-1b-100m/training_args.bin b/sp-1b-100m/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..e3839c34fb8e804b52ba989ad75c2786dd40ce54
--- /dev/null
+++ b/sp-1b-100m/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:feee864b9593fe1171ce64e635566351bc726e347de5c1946387ce337455125d
+size 3387
diff --git a/sp-200m-100m/checkpoint-100/config.json b/sp-200m-100m/checkpoint-100/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..0ae7950ec5903f52f21738da8c3242510e5242f3
--- /dev/null
+++ b/sp-200m-100m/checkpoint-100/config.json
@@ -0,0 +1,33 @@
+{
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_embd": 1024,
+  "n_head": 32,
+  "n_inner": null,
+  "n_layer": 12,
+  "n_positions": 1024,
+  "num_layers": 12,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.25.1",
+  "use_cache": false,
+  "vocab_size": 50257
+}
diff --git a/sp-200m-100m/checkpoint-100/optimizer.pt b/sp-200m-100m/checkpoint-100/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ee5d7115046624344c03e7cb8ad3af5efb140376
--- /dev/null
+++ b/sp-200m-100m/checkpoint-100/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7dca74745ea576c9ba7c6580733d0b9544223a6ac9dab358dd8838b71b6d1a50
+size 1629434309
diff --git a/sp-200m-100m/checkpoint-100/pytorch_model.bin b/sp-200m-100m/checkpoint-100/pytorch_model.bin
new file mode 100644
index 0000000000000000000000000000000000000000..67fdcea8dff79f82189bcc5163a16ec81dcabdd6
--- /dev/null
+++ b/sp-200m-100m/checkpoint-100/pytorch_model.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b9de15ae087dea9bd556536a910cf273304f0d48353fca437be4a4f7df6061ad
+size 827312701
diff --git a/sp-200m-100m/checkpoint-100/rng_state.pth b/sp-200m-100m/checkpoint-100/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..ae9cceb6d5e49d67070bb8eff38f56cba6ff07b0
--- /dev/null
+++ b/sp-200m-100m/checkpoint-100/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:66ebba15e85e086af7736fdcf05e9cd682b1a2e05cbcf39415aaa86a3b656a0f
+size 14575
diff --git a/sp-200m-100m/checkpoint-100/scaler.pt b/sp-200m-100m/checkpoint-100/scaler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1e00f588d3f0176a99d362447a49f57ff6e1b1ad
--- /dev/null
+++ b/sp-200m-100m/checkpoint-100/scaler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfa44e8523f62833816d29aa6c576eaa7783e3bbdb3e132e248b1d8aaee6132b
+size 557
diff --git a/sp-200m-100m/checkpoint-100/scheduler.pt b/sp-200m-100m/checkpoint-100/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..0249f4e09966b681aaba44adb4be90280089c708
--- /dev/null
+++ b/sp-200m-100m/checkpoint-100/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7ea57e999fee966b1265f956366ee033f13ca6a2b608b4432ce2605ac0d88ed
+size 627
diff --git a/sp-200m-100m/checkpoint-100/trainer_state.json b/sp-200m-100m/checkpoint-100/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..9c22b0b9f18f4e717c47e63e67c4d76fd52a03a9
--- /dev/null
+++ b/sp-200m-100m/checkpoint-100/trainer_state.json
@@ -0,0 +1,15 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 0.2630929869275672,
+  "global_step": 100,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [],
+  "max_steps": 380,
+  "num_train_epochs": 1,
+  "total_flos": 2.37748988411904e+16,
+  "trial_name": null,
+  "trial_params": null
+}
diff --git a/sp-200m-100m/checkpoint-100/training_args.bin b/sp-200m-100m/checkpoint-100/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..832edbba610e9038b7d3e5be85dda15445c5fe03
--- /dev/null
+++ b/sp-200m-100m/checkpoint-100/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:334dd1c91498da0d2485dba103030ffcef7471025d2adba3a950d4a720fba08b
+size 3387
diff --git a/sp-200m-100m/checkpoint-200/config.json b/sp-200m-100m/checkpoint-200/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..0ae7950ec5903f52f21738da8c3242510e5242f3
--- /dev/null
+++ b/sp-200m-100m/checkpoint-200/config.json
@@ -0,0 +1,33 @@
+{
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_embd": 1024,
+  "n_head": 32,
+  "n_inner": null,
+  "n_layer": 12,
+  "n_positions": 1024,
+  "num_layers": 12,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.25.1",
+  "use_cache": false,
+  "vocab_size": 50257
+}
diff --git a/sp-200m-100m/checkpoint-200/optimizer.pt b/sp-200m-100m/checkpoint-200/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..37cb1e9d32e6fd99dc25e0b7d203a576d317c556
--- /dev/null
+++ b/sp-200m-100m/checkpoint-200/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e13a3e586d7bfbd082efe67bd40654dda1133a17b52c58f540d9d4c038ef0237
+size 1629434309
diff --git a/sp-200m-100m/checkpoint-200/pytorch_model.bin b/sp-200m-100m/checkpoint-200/pytorch_model.bin
new file mode 100644
index 0000000000000000000000000000000000000000..3c91b7b1cdd24e675932b61ee5f89695de7b0628
--- /dev/null
+++ b/sp-200m-100m/checkpoint-200/pytorch_model.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c77811a3465bf64a82617298848e9a3d6b369e7af6cdb5fa0ca9a287315eaead
+size 827312701
diff --git a/sp-200m-100m/checkpoint-200/rng_state.pth b/sp-200m-100m/checkpoint-200/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..25163a3f615e123afe6acbead1893960ac68f2c9
--- /dev/null
+++ b/sp-200m-100m/checkpoint-200/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4851f3b302b7c4fee9d9420148364c058cf57804100694ae01ab586d33879732
+size 14575
diff --git a/sp-200m-100m/checkpoint-200/scaler.pt b/sp-200m-100m/checkpoint-200/scaler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..87710d0ddc627c070366fcb3112b07dc60d97295
--- /dev/null
+++ b/sp-200m-100m/checkpoint-200/scaler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4fccf0f9be1bb8f24861e4393745b3e09cc2687125a69e3757955fb0f0925ea5
+size 557
diff --git a/sp-200m-100m/checkpoint-200/scheduler.pt b/sp-200m-100m/checkpoint-200/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..815adebc7ffc5e25ee14c8fef012b899cf34ad54
--- /dev/null
+++ b/sp-200m-100m/checkpoint-200/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c162896f6df3ba4475b56099d964ded55f9fe1ecbaa5d3b3cdb9215d9d04af81
+size 627
diff --git a/sp-200m-100m/checkpoint-200/trainer_state.json b/sp-200m-100m/checkpoint-200/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..9e20aebfbd8a9a01122eba77d0dafd14c6188a4c
--- /dev/null
+++ b/sp-200m-100m/checkpoint-200/trainer_state.json
@@ -0,0 +1,15 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 0.5261859738551344,
+  "global_step": 200,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [],
+  "max_steps": 380,
+  "num_train_epochs": 1,
+  "total_flos": 4.75497976823808e+16,
+  "trial_name": null,
+  "trial_params": null
+}
diff --git a/sp-200m-100m/checkpoint-200/training_args.bin b/sp-200m-100m/checkpoint-200/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..832edbba610e9038b7d3e5be85dda15445c5fe03
--- /dev/null
+++ b/sp-200m-100m/checkpoint-200/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:334dd1c91498da0d2485dba103030ffcef7471025d2adba3a950d4a720fba08b
+size 3387
diff --git a/sp-200m-100m/checkpoint-300/config.json b/sp-200m-100m/checkpoint-300/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..0ae7950ec5903f52f21738da8c3242510e5242f3
--- /dev/null
+++ b/sp-200m-100m/checkpoint-300/config.json
@@ -0,0 +1,33 @@
+{
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_embd": 1024,
+  "n_head": 32,
+  "n_inner": null,
+  "n_layer": 12,
+  "n_positions": 1024,
+  "num_layers": 12,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.25.1",
+  "use_cache": false,
+  "vocab_size": 50257
+}
diff --git a/sp-200m-100m/checkpoint-300/optimizer.pt b/sp-200m-100m/checkpoint-300/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..fb3938f3dacdd789bedfbebfe5791c32390ee75e
--- /dev/null
+++ b/sp-200m-100m/checkpoint-300/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9f6a6179d4f91596677cc4638dfb4fb9cc993e03f9098e4c09312c54c3427cd0
+size 1629434437
diff --git a/sp-200m-100m/checkpoint-300/pytorch_model.bin b/sp-200m-100m/checkpoint-300/pytorch_model.bin
new file mode 100644
index 0000000000000000000000000000000000000000..11667a2360a1339eebb85706b4ba0bdd92d56102
--- /dev/null
+++ b/sp-200m-100m/checkpoint-300/pytorch_model.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:60b9a2117ba0c4fa31230300d872c83eee01d63f134deb9a34d6edbb8833bb41
+size 827312701
diff --git a/sp-200m-100m/checkpoint-300/rng_state.pth b/sp-200m-100m/checkpoint-300/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..faaebb5af425ee88845db69079f330c73f50b52a
--- /dev/null
+++ b/sp-200m-100m/checkpoint-300/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:08950dc609e5df5b063c350f814f906ffba4fb0870677e17789bafc766cec4b7
+size 14575
diff --git a/sp-200m-100m/checkpoint-300/scaler.pt b/sp-200m-100m/checkpoint-300/scaler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..96d1b8ea2cbad321ffe9c10840a99bb1bcef18f5
--- /dev/null
+++ b/sp-200m-100m/checkpoint-300/scaler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:efafd90182e3d39d1b7c4a686f86e5913f5abc094dc3e2f827a6d479c6cef247
+size 557
diff --git a/sp-200m-100m/checkpoint-300/scheduler.pt b/sp-200m-100m/checkpoint-300/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..89a3685511ff0baf0caa02c2ab8f23fa6e214317
--- /dev/null
+++ b/sp-200m-100m/checkpoint-300/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4546d9532f4bee54bea33da91e5c41d94c56696c6723d0717ee8575a4c90c582
+size 627
diff --git a/sp-200m-100m/checkpoint-300/trainer_state.json b/sp-200m-100m/checkpoint-300/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..cf06e326ddb04069ed7461ad76a7a302a01f5cbb
--- /dev/null
+++ b/sp-200m-100m/checkpoint-300/trainer_state.json
@@ -0,0 +1,15 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 0.7892789607827017,
+  "global_step": 300,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [],
+  "max_steps": 380,
+  "num_train_epochs": 1,
+  "total_flos": 7.13246965235712e+16,
+  "trial_name": null,
+  "trial_params": null
+}
diff --git a/sp-200m-100m/checkpoint-300/training_args.bin b/sp-200m-100m/checkpoint-300/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..832edbba610e9038b7d3e5be85dda15445c5fe03
--- /dev/null
+++ b/sp-200m-100m/checkpoint-300/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:334dd1c91498da0d2485dba103030ffcef7471025d2adba3a950d4a720fba08b
+size 3387
diff --git a/sp-200m-100m/config.json b/sp-200m-100m/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..0ae7950ec5903f52f21738da8c3242510e5242f3
--- /dev/null
+++ b/sp-200m-100m/config.json
@@ -0,0 +1,33 @@
+{
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_embd": 1024,
+  "n_head": 32,
+  "n_inner": null,
+  "n_layer": 12,
+  "n_positions": 1024,
+  "num_layers": 12,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.25.1",
+  "use_cache": false,
+  "vocab_size": 50257
+}
diff --git a/sp-200m-100m/pytorch_model.bin b/sp-200m-100m/pytorch_model.bin
new file mode 100644
index 0000000000000000000000000000000000000000..048db107379a648dc45478be07c39697588fcccb
--- /dev/null
+++ b/sp-200m-100m/pytorch_model.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b3939f73c47cb76a56949657d3f5a226a742644ec1022b6db200867d6ceb4675
+size 827312701
diff --git a/sp-200m-100m/training_args.bin b/sp-200m-100m/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..832edbba610e9038b7d3e5be85dda15445c5fe03
--- /dev/null
+++ b/sp-200m-100m/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:334dd1c91498da0d2485dba103030ffcef7471025d2adba3a950d4a720fba08b
+size 3387
diff --git a/sp-800m-100m/checkpoint-100/config.json b/sp-800m-100m/checkpoint-100/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..9b078bbb190177977b8627544f824b8002ab80a7
--- /dev/null
+++ b/sp-800m-100m/checkpoint-100/config.json
@@ -0,0 +1,33 @@
+{
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "initializer_range": 0.02,
+  "intermediate_size": 8192,
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_embd": 2048,
+  "n_head": 64,
+  "n_inner": null,
+  "n_layer": 12,
+  "n_positions": 1024,
+  "num_layers": 12,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.25.1",
+  "use_cache": false,
+  "vocab_size": 50257
+}
diff --git a/sp-800m-100m/checkpoint-100/optimizer.pt b/sp-800m-100m/checkpoint-100/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c902e0547ea0939cd8b733f23a3d05785f249a4c
--- /dev/null
+++ b/sp-800m-100m/checkpoint-100/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f280b69603aa08f2a39475468e4e981c16d0f6ace342254285391539c7b1e63d
+size 5674702281
diff --git a/sp-800m-100m/checkpoint-100/pytorch_model.bin b/sp-800m-100m/checkpoint-100/pytorch_model.bin
new file mode 100644
index 0000000000000000000000000000000000000000..2f290c8d4073715922c98f24a8c3546fad748f53
--- /dev/null
+++ b/sp-800m-100m/checkpoint-100/pytorch_model.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3148f5bfc09bba2efdb0b75d1bd0a9133712f747f51752af1efe91964415f8bd
+size 2849946173
diff --git a/sp-800m-100m/checkpoint-100/rng_state.pth b/sp-800m-100m/checkpoint-100/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..25163a3f615e123afe6acbead1893960ac68f2c9
--- /dev/null
+++ b/sp-800m-100m/checkpoint-100/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4851f3b302b7c4fee9d9420148364c058cf57804100694ae01ab586d33879732
+size 14575
diff --git a/sp-800m-100m/checkpoint-100/scaler.pt b/sp-800m-100m/checkpoint-100/scaler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1e00f588d3f0176a99d362447a49f57ff6e1b1ad
--- /dev/null
+++ b/sp-800m-100m/checkpoint-100/scaler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfa44e8523f62833816d29aa6c576eaa7783e3bbdb3e132e248b1d8aaee6132b
+size 557
diff --git a/sp-800m-100m/checkpoint-100/scheduler.pt b/sp-800m-100m/checkpoint-100/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..0249f4e09966b681aaba44adb4be90280089c708
--- /dev/null
+++ b/sp-800m-100m/checkpoint-100/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7ea57e999fee966b1265f956366ee033f13ca6a2b608b4432ce2605ac0d88ed
+size 627
diff --git a/sp-800m-100m/checkpoint-100/trainer_state.json b/sp-800m-100m/checkpoint-100/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..dc938ec2337ab173bb50e591be5d2d92182cbd10
--- /dev/null
+++ b/sp-800m-100m/checkpoint-100/trainer_state.json
@@ -0,0 +1,15 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 0.26309839468870116,
+  "global_step": 100,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [],
+  "max_steps": 380,
+  "num_train_epochs": 1,
+  "total_flos": 9.5048700002304e+16,
+  "trial_name": null,
+  "trial_params": null
+}
diff --git a/sp-800m-100m/checkpoint-100/training_args.bin b/sp-800m-100m/checkpoint-100/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..56c4451f7e80f0f4a852df216de1dc8b49a2bf92
--- /dev/null
+++ b/sp-800m-100m/checkpoint-100/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fd15838c67b2dc703bd7aace54466ca728aaf02e4991f7c3a737f9abbaabb74e
+size 3387
diff --git a/sp-800m-100m/checkpoint-200/config.json b/sp-800m-100m/checkpoint-200/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..9b078bbb190177977b8627544f824b8002ab80a7
--- /dev/null
+++ b/sp-800m-100m/checkpoint-200/config.json
@@ -0,0 +1,33 @@
+{
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "initializer_range": 0.02,
+  "intermediate_size": 8192,
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_embd": 2048,
+  "n_head": 64,
+  "n_inner": null,
+  "n_layer": 12,
+  "n_positions": 1024,
+  "num_layers": 12,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.25.1",
+  "use_cache": false,
+  "vocab_size": 50257
+}
diff --git a/sp-800m-100m/checkpoint-200/optimizer.pt b/sp-800m-100m/checkpoint-200/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..bc980daac4b73a323df5ff082e2363acca32bae1
--- /dev/null
+++ b/sp-800m-100m/checkpoint-200/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c5f41b172ec0af558e367c9ed7eca31b11d314a06c145369a7ca636d7ade0dc7
+size 5674702281
diff --git a/sp-800m-100m/checkpoint-200/pytorch_model.bin b/sp-800m-100m/checkpoint-200/pytorch_model.bin
new file mode 100644
index 0000000000000000000000000000000000000000..f7ba68710a8d65e30f71a25b116ed426dd241c4d
--- /dev/null
+++ b/sp-800m-100m/checkpoint-200/pytorch_model.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4b017f3e3c6e0e690a7a1a8093486a5bef8af80bde708d367c2692503126281c
+size 2849946173
diff --git a/sp-800m-100m/checkpoint-200/rng_state.pth b/sp-800m-100m/checkpoint-200/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..caecf4284bfd694aaba3fff8c317c3fca9f5411a
--- /dev/null
+++ b/sp-800m-100m/checkpoint-200/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a02e73f7c8974df633ab86c99ffd01ab988a956ee7165e74c535f0f9be5364f0
+size 14575
diff --git a/sp-800m-100m/checkpoint-200/scaler.pt b/sp-800m-100m/checkpoint-200/scaler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..87710d0ddc627c070366fcb3112b07dc60d97295
--- /dev/null
+++ b/sp-800m-100m/checkpoint-200/scaler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4fccf0f9be1bb8f24861e4393745b3e09cc2687125a69e3757955fb0f0925ea5
+size 557
diff --git a/sp-800m-100m/checkpoint-200/scheduler.pt b/sp-800m-100m/checkpoint-200/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..815adebc7ffc5e25ee14c8fef012b899cf34ad54
--- /dev/null
+++ b/sp-800m-100m/checkpoint-200/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c162896f6df3ba4475b56099d964ded55f9fe1ecbaa5d3b3cdb9215d9d04af81
+size 627
diff --git a/sp-800m-100m/checkpoint-200/trainer_state.json b/sp-800m-100m/checkpoint-200/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..875429697fde116b63adbe0e3d8bb945bd35cf2a
--- /dev/null
+++ b/sp-800m-100m/checkpoint-200/trainer_state.json
@@ -0,0 +1,15 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 0.5261967893774023,
+  "global_step": 200,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [],
+  "max_steps": 380,
+  "num_train_epochs": 1,
+  "total_flos": 1.90097400004608e+17,
+  "trial_name": null,
+  "trial_params": null
+}
diff --git a/sp-800m-100m/checkpoint-200/training_args.bin b/sp-800m-100m/checkpoint-200/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..56c4451f7e80f0f4a852df216de1dc8b49a2bf92
--- /dev/null
+++ b/sp-800m-100m/checkpoint-200/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fd15838c67b2dc703bd7aace54466ca728aaf02e4991f7c3a737f9abbaabb74e
+size 3387
diff --git a/sp-800m-100m/checkpoint-300/config.json b/sp-800m-100m/checkpoint-300/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..9b078bbb190177977b8627544f824b8002ab80a7
--- /dev/null
+++ b/sp-800m-100m/checkpoint-300/config.json
@@ -0,0 +1,33 @@
+{
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "initializer_range": 0.02,
+  "intermediate_size": 8192,
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_embd": 2048,
+  "n_head": 64,
+  "n_inner": null,
+  "n_layer": 12,
+  "n_positions": 1024,
+  "num_layers": 12,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.25.1",
+  "use_cache": false,
+  "vocab_size": 50257
+}
diff --git a/sp-800m-100m/checkpoint-300/optimizer.pt b/sp-800m-100m/checkpoint-300/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c95b63d921b79b88343c05dea4377a1ea758a5f7
--- /dev/null
+++ b/sp-800m-100m/checkpoint-300/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fb4d070a0ed71674ce2efa1f9bcf453c5d389d5e4522281f2d016c53639a2db6
+size 5674702409
diff --git a/sp-800m-100m/checkpoint-300/pytorch_model.bin b/sp-800m-100m/checkpoint-300/pytorch_model.bin
new file mode 100644
index 0000000000000000000000000000000000000000..1342e990bfc598cb1e9c827c01dabc3949dc373f
--- /dev/null
+++ b/sp-800m-100m/checkpoint-300/pytorch_model.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:22e129d96b8d624a336e04c457518427f05b4dc35e818789ae71e70088c26310
+size 2849946173
diff --git a/sp-800m-100m/checkpoint-300/rng_state.pth b/sp-800m-100m/checkpoint-300/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..3f0c712fdc22e8cbdc4da49611638946644e77d2
--- /dev/null
+++ b/sp-800m-100m/checkpoint-300/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d9b81be87d3a4a0333c3e9463067829ade4eff5466bc50d43f413021e46b0083
+size 14575
diff --git a/sp-800m-100m/checkpoint-300/scaler.pt b/sp-800m-100m/checkpoint-300/scaler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..96d1b8ea2cbad321ffe9c10840a99bb1bcef18f5
--- /dev/null
+++ b/sp-800m-100m/checkpoint-300/scaler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:efafd90182e3d39d1b7c4a686f86e5913f5abc094dc3e2f827a6d479c6cef247
+size 557
diff --git a/sp-800m-100m/checkpoint-300/scheduler.pt b/sp-800m-100m/checkpoint-300/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..89a3685511ff0baf0caa02c2ab8f23fa6e214317
--- /dev/null
+++ b/sp-800m-100m/checkpoint-300/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4546d9532f4bee54bea33da91e5c41d94c56696c6723d0717ee8575a4c90c582
+size 627
diff --git a/sp-800m-100m/checkpoint-300/trainer_state.json b/sp-800m-100m/checkpoint-300/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..e6edae1f95c6847bbde2c12b83055be42671eb67
--- /dev/null
+++ b/sp-800m-100m/checkpoint-300/trainer_state.json
@@ -0,0 +1,15 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 0.7892951840661034,
+  "global_step": 300,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [],
+  "max_steps": 380,
+  "num_train_epochs": 1,
+  "total_flos": 2.85146100006912e+17,
+  "trial_name": null,
+  "trial_params": null
+}
diff --git a/sp-800m-100m/checkpoint-300/training_args.bin b/sp-800m-100m/checkpoint-300/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..56c4451f7e80f0f4a852df216de1dc8b49a2bf92
--- /dev/null
+++ b/sp-800m-100m/checkpoint-300/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fd15838c67b2dc703bd7aace54466ca728aaf02e4991f7c3a737f9abbaabb74e
+size 3387
diff --git a/sp-800m-100m/config.json b/sp-800m-100m/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..9b078bbb190177977b8627544f824b8002ab80a7
--- /dev/null
+++ b/sp-800m-100m/config.json
@@ -0,0 +1,33 @@
+{
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "initializer_range": 0.02,
+  "intermediate_size": 8192,
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_embd": 2048,
+  "n_head": 64,
+  "n_inner": null,
+  "n_layer": 12,
+  "n_positions": 1024,
+  "num_layers": 12,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.25.1",
+  "use_cache": false,
+  "vocab_size": 50257
+}
diff --git a/sp-800m-100m/pytorch_model.bin b/sp-800m-100m/pytorch_model.bin
new file mode 100644
index 0000000000000000000000000000000000000000..f160a3e5c4885fa678e0089d897d9104db540515
--- /dev/null
+++ b/sp-800m-100m/pytorch_model.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5ca872620c24d6316642c303da291ea6b38a67a7fc275146bbdb6e0cf9256bc1
+size 2849946173
diff --git a/sp-800m-100m/training_args.bin b/sp-800m-100m/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..56c4451f7e80f0f4a852df216de1dc8b49a2bf92
--- /dev/null
+++ b/sp-800m-100m/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fd15838c67b2dc703bd7aace54466ca728aaf02e4991f7c3a737f9abbaabb74e
+size 3387