Training in progress, step 900, checkpoint
last-checkpoint/adapter_model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:a73ccdef1570147d126299fecf0da458f1ba19bb57d6fdc7db1abe9d83d52c25
 size 645975704
last-checkpoint/optimizer.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:f941851548332d5c59fd66e5e5ff0c39cf73f6812af458137852d4a81a14d635
 size 172499150
last-checkpoint/rng_state.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:705272c5ad69f3447ec8c3ef520427cc9e3a750c4d06cc9a86c3411b8c96c72f
 size 14244
last-checkpoint/scheduler.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:f40671505ac1f1b5a829b35d9dc2110b449a6843e5a694b695ca6aced79d181b
 size 2080
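The four entries above are Git LFS pointer files: each records only the `oid sha256:` and `size` of the real artifact, which is stored out of band. A minimal sketch (not part of this commit; paths are hypothetical placeholders) of checking a downloaded object against its pointer by recomputing the SHA-256 and byte length:

```python
# Minimal sketch, not part of this commit: verify a Git LFS pointer against
# the object it points to. Paths are hypothetical placeholders.
import hashlib
from pathlib import Path

def parse_lfs_pointer(text: str) -> dict:
    # A pointer is a few "key value" lines, e.g. "oid sha256:<hex>" and "size <bytes>".
    return dict(line.split(" ", 1) for line in text.splitlines() if line.strip())

def verify_lfs_object(pointer_path: Path, object_path: Path) -> bool:
    fields = parse_lfs_pointer(pointer_path.read_text())
    expected_oid = fields["oid"].split(":", 1)[1]  # strip the "sha256:" prefix
    data = object_path.read_bytes()
    return (hashlib.sha256(data).hexdigest() == expected_oid
            and len(data) == int(fields["size"]))

# Example (hypothetical paths):
# verify_lfs_object(Path("pointer.txt"),
#                   Path("last-checkpoint/adapter_model.safetensors"))
```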
last-checkpoint/trainer_state.json
CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.22727738320827484,
   "best_model_checkpoint": "miner_id_24/checkpoint-500",
-  "epoch": 0.
+  "epoch": 0.013280408446339771,
   "eval_steps": 100,
-  "global_step":
+  "global_step": 900,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -5679,6 +5679,714 @@
       "eval_samples_per_second": 4.772,
       "eval_steps_per_second": 4.772,
       "step": 800
+    },
+    {
+      "epoch": 0.011819563517242396,
+      "grad_norm": 1.8588392734527588,
+      "learning_rate": 0.0001999928936129436,
+      "loss": 0.3129,
+      "step": 801
+    },
+    {
+      "epoch": 0.011834319526627219,
+      "grad_norm": 2.285543441772461,
+      "learning_rate": 0.00019999287906102836,
+      "loss": 0.3976,
+      "step": 802
+    },
+    {
+      "epoch": 0.011849075536012041,
+      "grad_norm": 0.8554681539535522,
+      "learning_rate": 0.0001999928499571979,
+      "loss": 0.2226,
+      "step": 803
+    },
+    {
+      "epoch": 0.011863831545396863,
+      "grad_norm": 1.9635239839553833,
+      "learning_rate": 0.00019999283540528268,
+      "loss": 0.2287,
+      "step": 804
+    },
+    {
+      "epoch": 0.011878587554781685,
+      "grad_norm": 2.571794033050537,
+      "learning_rate": 0.00019999282085336745,
+      "loss": 0.2562,
+      "step": 805
+    },
+    {
+      "epoch": 0.011893343564166508,
+      "grad_norm": 0.862459659576416,
+      "learning_rate": 0.00019999280630145222,
+      "loss": 0.2025,
+      "step": 806
+    },
+    {
+      "epoch": 0.011908099573551328,
+      "grad_norm": 0.749636709690094,
+      "learning_rate": 0.00019999277719762176,
+      "loss": 0.2318,
+      "step": 807
+    },
+    {
+      "epoch": 0.01192285558293615,
+      "grad_norm": 0.8192420601844788,
+      "learning_rate": 0.00019999277719762176,
+      "loss": 0.2551,
+      "step": 808
+    },
+    {
+      "epoch": 0.011937611592320973,
+      "grad_norm": 0.7866696715354919,
+      "learning_rate": 0.0001999927480937913,
+      "loss": 0.1355,
+      "step": 809
+    },
+    {
+      "epoch": 0.011952367601705795,
+      "grad_norm": 1.7909586429595947,
+      "learning_rate": 0.00019999273354187608,
+      "loss": 0.2249,
+      "step": 810
+    },
+    {
+      "epoch": 0.011967123611090617,
+      "grad_norm": 2.0296058654785156,
+      "learning_rate": 0.00019999270443804562,
+      "loss": 0.5412,
+      "step": 811
+    },
+    {
+      "epoch": 0.01198187962047544,
+      "grad_norm": 1.0121299028396606,
+      "learning_rate": 0.0001999926898861304,
+      "loss": 0.1499,
+      "step": 812
+    },
+    {
+      "epoch": 0.011996635629860261,
+      "grad_norm": 2.553496837615967,
+      "learning_rate": 0.00019999267533421516,
+      "loss": 0.3079,
+      "step": 813
+    },
+    {
+      "epoch": 0.012011391639245082,
+      "grad_norm": 1.5361422300338745,
+      "learning_rate": 0.0001999926462303847,
+      "loss": 0.3867,
+      "step": 814
+    },
+    {
+      "epoch": 0.012026147648629904,
+      "grad_norm": 1.4031721353530884,
+      "learning_rate": 0.0001999926462303847,
+      "loss": 0.1639,
+      "step": 815
+    },
+    {
+      "epoch": 0.012040903658014726,
+      "grad_norm": 3.4092421531677246,
+      "learning_rate": 0.00019999261712655425,
+      "loss": 0.3241,
+      "step": 816
+    },
+    {
+      "epoch": 0.012055659667399549,
+      "grad_norm": 1.655253529548645,
+      "learning_rate": 0.00019999260257463902,
+      "loss": 0.2299,
+      "step": 817
+    },
+    {
+      "epoch": 0.012070415676784371,
+      "grad_norm": 1.2588114738464355,
+      "learning_rate": 0.00019999257347080857,
+      "loss": 0.1894,
+      "step": 818
+    },
+    {
+      "epoch": 0.012085171686169193,
+      "grad_norm": 5.756618976593018,
+      "learning_rate": 0.00019999255891889334,
+      "loss": 0.2173,
+      "step": 819
+    },
+    {
+      "epoch": 0.012099927695554014,
+      "grad_norm": 3.3106698989868164,
+      "learning_rate": 0.0001999925443669781,
+      "loss": 0.5165,
+      "step": 820
+    },
+    {
+      "epoch": 0.012114683704938836,
+      "grad_norm": 2.1290841102600098,
+      "learning_rate": 0.00019999251526314765,
+      "loss": 0.2764,
+      "step": 821
+    },
+    {
+      "epoch": 0.012129439714323658,
+      "grad_norm": 3.6686553955078125,
+      "learning_rate": 0.00019999251526314765,
+      "loss": 0.3461,
+      "step": 822
+    },
+    {
+      "epoch": 0.01214419572370848,
+      "grad_norm": 1.1680198907852173,
+      "learning_rate": 0.0001999924861593172,
+      "loss": 0.1711,
+      "step": 823
+    },
+    {
+      "epoch": 0.012158951733093303,
+      "grad_norm": 0.8747446537017822,
+      "learning_rate": 0.00019999245705548674,
+      "loss": 0.1633,
+      "step": 824
+    },
+    {
+      "epoch": 0.012173707742478125,
+      "grad_norm": 1.3591049909591675,
+      "learning_rate": 0.0001999924425035715,
+      "loss": 0.1543,
+      "step": 825
+    },
+    {
+      "epoch": 0.012188463751862947,
+      "grad_norm": 0.7809590697288513,
+      "learning_rate": 0.00019999242795165628,
+      "loss": 0.2076,
+      "step": 826
+    },
+    {
+      "epoch": 0.012203219761247767,
+      "grad_norm": 1.4864180088043213,
+      "learning_rate": 0.00019999241339974105,
+      "loss": 0.2414,
+      "step": 827
+    },
+    {
+      "epoch": 0.01221797577063259,
+      "grad_norm": 2.2250685691833496,
+      "learning_rate": 0.0001999923842959106,
+      "loss": 0.2012,
+      "step": 828
+    },
+    {
+      "epoch": 0.012232731780017412,
+      "grad_norm": 1.8341047763824463,
+      "learning_rate": 0.00019999236974399537,
+      "loss": 0.2294,
+      "step": 829
+    },
+    {
+      "epoch": 0.012247487789402234,
+      "grad_norm": 2.336993932723999,
+      "learning_rate": 0.00019999235519208014,
+      "loss": 0.2665,
+      "step": 830
+    },
+    {
+      "epoch": 0.012262243798787056,
+      "grad_norm": 1.2305421829223633,
+      "learning_rate": 0.00019999232608824968,
+      "loss": 0.2372,
+      "step": 831
+    },
+    {
+      "epoch": 0.012276999808171879,
+      "grad_norm": 1.3784295320510864,
+      "learning_rate": 0.00019999231153633446,
+      "loss": 0.3309,
+      "step": 832
+    },
+    {
+      "epoch": 0.012291755817556699,
+      "grad_norm": 0.9471179842948914,
+      "learning_rate": 0.00019999229698441923,
+      "loss": 0.2573,
+      "step": 833
+    },
+    {
+      "epoch": 0.012306511826941521,
+      "grad_norm": 0.8644984364509583,
+      "learning_rate": 0.00019999226788058877,
+      "loss": 0.1812,
+      "step": 834
+    },
+    {
+      "epoch": 0.012321267836326344,
+      "grad_norm": 1.345884084701538,
+      "learning_rate": 0.00019999225332867354,
+      "loss": 0.2861,
+      "step": 835
+    },
+    {
+      "epoch": 0.012336023845711166,
+      "grad_norm": 4.202690124511719,
+      "learning_rate": 0.0001999922387767583,
+      "loss": 0.3206,
+      "step": 836
+    },
+    {
+      "epoch": 0.012350779855095988,
+      "grad_norm": 0.8438819050788879,
+      "learning_rate": 0.00019999222422484308,
+      "loss": 0.1899,
+      "step": 837
+    },
+    {
+      "epoch": 0.01236553586448081,
+      "grad_norm": 1.8235636949539185,
+      "learning_rate": 0.00019999219512101263,
+      "loss": 0.3505,
+      "step": 838
+    },
+    {
+      "epoch": 0.012380291873865632,
+      "grad_norm": 0.7939683198928833,
+      "learning_rate": 0.0001999921805690974,
+      "loss": 0.2743,
+      "step": 839
+    },
+    {
+      "epoch": 0.012395047883250453,
+      "grad_norm": 1.7612792253494263,
+      "learning_rate": 0.00019999216601718217,
+      "loss": 0.3142,
+      "step": 840
+    },
+    {
+      "epoch": 0.012409803892635275,
+      "grad_norm": 1.8200362920761108,
+      "learning_rate": 0.00019999213691335171,
+      "loss": 0.3512,
+      "step": 841
+    },
+    {
+      "epoch": 0.012424559902020097,
+      "grad_norm": 0.8324838280677795,
+      "learning_rate": 0.00019999212236143649,
+      "loss": 0.1305,
+      "step": 842
+    },
+    {
+      "epoch": 0.01243931591140492,
+      "grad_norm": 1.1254831552505493,
+      "learning_rate": 0.00019999210780952126,
+      "loss": 0.2469,
+      "step": 843
+    },
+    {
+      "epoch": 0.012454071920789742,
+      "grad_norm": 0.7088738679885864,
+      "learning_rate": 0.0001999920787056908,
+      "loss": 0.2798,
+      "step": 844
+    },
+    {
+      "epoch": 0.012468827930174564,
+      "grad_norm": 0.7736618518829346,
+      "learning_rate": 0.00019999206415377557,
+      "loss": 0.1676,
+      "step": 845
+    },
+    {
+      "epoch": 0.012483583939559386,
+      "grad_norm": 12.703507423400879,
+      "learning_rate": 0.00019999204960186034,
+      "loss": 0.3818,
+      "step": 846
+    },
+    {
+      "epoch": 0.012498339948944207,
+      "grad_norm": 0.668497622013092,
+      "learning_rate": 0.0001999920204980299,
+      "loss": 0.2096,
+      "step": 847
+    },
+    {
+      "epoch": 0.012513095958329029,
+      "grad_norm": 0.7278184294700623,
+      "learning_rate": 0.00019999200594611466,
+      "loss": 0.2179,
+      "step": 848
+    },
+    {
+      "epoch": 0.012527851967713851,
+      "grad_norm": 0.7589313983917236,
+      "learning_rate": 0.0001999919768422842,
+      "loss": 0.1714,
+      "step": 849
+    },
+    {
+      "epoch": 0.012542607977098674,
+      "grad_norm": 0.637834906578064,
+      "learning_rate": 0.00019999196229036897,
+      "loss": 0.1514,
+      "step": 850
+    },
+    {
+      "epoch": 0.012557363986483496,
+      "grad_norm": 2.347073793411255,
+      "learning_rate": 0.00019999194773845375,
+      "loss": 0.5262,
+      "step": 851
+    },
+    {
+      "epoch": 0.012572119995868318,
+      "grad_norm": 0.46010205149650574,
+      "learning_rate": 0.0001999919186346233,
+      "loss": 0.1787,
+      "step": 852
+    },
+    {
+      "epoch": 0.012586876005253139,
+      "grad_norm": 1.3159286975860596,
+      "learning_rate": 0.00019999190408270806,
+      "loss": 0.3263,
+      "step": 853
+    },
+    {
+      "epoch": 0.01260163201463796,
+      "grad_norm": 0.6969799995422363,
+      "learning_rate": 0.00019999188953079283,
+      "loss": 0.1956,
+      "step": 854
+    },
+    {
+      "epoch": 0.012616388024022783,
+      "grad_norm": 0.7132418751716614,
+      "learning_rate": 0.00019999186042696238,
+      "loss": 0.0918,
+      "step": 855
+    },
+    {
+      "epoch": 0.012631144033407605,
+      "grad_norm": 1.258521556854248,
+      "learning_rate": 0.00019999184587504715,
+      "loss": 0.2805,
+      "step": 856
+    },
+    {
+      "epoch": 0.012645900042792427,
+      "grad_norm": 2.1078953742980957,
+      "learning_rate": 0.00019999183132313192,
+      "loss": 0.3306,
+      "step": 857
+    },
+    {
+      "epoch": 0.01266065605217725,
+      "grad_norm": 1.695388674736023,
+      "learning_rate": 0.00019999180221930146,
+      "loss": 0.2416,
+      "step": 858
+    },
+    {
+      "epoch": 0.012675412061562072,
+      "grad_norm": 0.9387673735618591,
+      "learning_rate": 0.00019999178766738623,
+      "loss": 0.3334,
+      "step": 859
+    },
+    {
+      "epoch": 0.012690168070946892,
+      "grad_norm": 1.193365216255188,
+      "learning_rate": 0.000199991773115471,
+      "loss": 0.1776,
+      "step": 860
+    },
+    {
+      "epoch": 0.012704924080331715,
+      "grad_norm": 1.4889655113220215,
+      "learning_rate": 0.00019999174401164055,
+      "loss": 0.1784,
+      "step": 861
+    },
+    {
+      "epoch": 0.012719680089716537,
+      "grad_norm": 1.3445992469787598,
+      "learning_rate": 0.00019999172945972532,
+      "loss": 0.2482,
+      "step": 862
+    },
+    {
+      "epoch": 0.012734436099101359,
+      "grad_norm": 1.2509477138519287,
+      "learning_rate": 0.0001999917149078101,
+      "loss": 0.1961,
+      "step": 863
+    },
+    {
+      "epoch": 0.012749192108486181,
+      "grad_norm": 0.9405835270881653,
+      "learning_rate": 0.00019999168580397964,
+      "loss": 0.1089,
+      "step": 864
+    },
+    {
+      "epoch": 0.012763948117871004,
+      "grad_norm": 0.4583745300769806,
+      "learning_rate": 0.0001999916712520644,
+      "loss": 0.0451,
+      "step": 865
+    },
+    {
+      "epoch": 0.012778704127255826,
+      "grad_norm": 0.8631611466407776,
+      "learning_rate": 0.00019999165670014918,
+      "loss": 0.2,
+      "step": 866
+    },
+    {
+      "epoch": 0.012793460136640646,
+      "grad_norm": 2.4206740856170654,
+      "learning_rate": 0.00019999162759631872,
+      "loss": 0.3839,
+      "step": 867
+    },
+    {
+      "epoch": 0.012808216146025468,
+      "grad_norm": 4.0995659828186035,
+      "learning_rate": 0.00019999159849248827,
+      "loss": 0.3994,
+      "step": 868
+    },
+    {
+      "epoch": 0.01282297215541029,
+      "grad_norm": 0.8621058464050293,
+      "learning_rate": 0.00019999158394057304,
+      "loss": 0.1875,
+      "step": 869
+    },
+    {
+      "epoch": 0.012837728164795113,
+      "grad_norm": 1.5550447702407837,
+      "learning_rate": 0.0001999915693886578,
+      "loss": 0.2768,
+      "step": 870
+    },
+    {
+      "epoch": 0.012852484174179935,
+      "grad_norm": 2.0353689193725586,
+      "learning_rate": 0.00019999154028482735,
+      "loss": 0.3118,
+      "step": 871
+    },
+    {
+      "epoch": 0.012867240183564757,
+      "grad_norm": 1.4061765670776367,
+      "learning_rate": 0.00019999152573291212,
+      "loss": 0.3058,
+      "step": 872
+    },
+    {
+      "epoch": 0.012881996192949578,
+      "grad_norm": 1.2627537250518799,
+      "learning_rate": 0.0001999915111809969,
+      "loss": 0.3512,
+      "step": 873
+    },
+    {
+      "epoch": 0.0128967522023344,
+      "grad_norm": 2.3084962368011475,
+      "learning_rate": 0.00019999148207716644,
+      "loss": 0.3348,
+      "step": 874
+    },
+    {
+      "epoch": 0.012911508211719222,
+      "grad_norm": 0.8072152137756348,
+      "learning_rate": 0.0001999914675252512,
+      "loss": 0.2395,
+      "step": 875
+    },
+    {
+      "epoch": 0.012926264221104045,
+      "grad_norm": 1.2461738586425781,
+      "learning_rate": 0.00019999145297333598,
+      "loss": 0.4091,
+      "step": 876
+    },
+    {
+      "epoch": 0.012941020230488867,
+      "grad_norm": 0.8416983485221863,
+      "learning_rate": 0.00019999142386950552,
+      "loss": 0.2494,
+      "step": 877
+    },
+    {
+      "epoch": 0.012955776239873689,
+      "grad_norm": 0.7885262370109558,
+      "learning_rate": 0.0001999914093175903,
+      "loss": 0.2535,
+      "step": 878
+    },
+    {
+      "epoch": 0.012970532249258511,
+      "grad_norm": 0.6679831743240356,
+      "learning_rate": 0.00019999139476567507,
+      "loss": 0.2249,
+      "step": 879
+    },
+    {
+      "epoch": 0.012985288258643332,
+      "grad_norm": 1.0578850507736206,
+      "learning_rate": 0.0001999913656618446,
+      "loss": 0.2963,
+      "step": 880
+    },
+    {
+      "epoch": 0.013000044268028154,
+      "grad_norm": 0.6506802439689636,
+      "learning_rate": 0.00019999135110992938,
+      "loss": 0.2141,
+      "step": 881
+    },
+    {
+      "epoch": 0.013014800277412976,
+      "grad_norm": 0.920437753200531,
+      "learning_rate": 0.00019999132200609893,
+      "loss": 0.2699,
+      "step": 882
+    },
+    {
+      "epoch": 0.013029556286797798,
+      "grad_norm": 1.2478429079055786,
+      "learning_rate": 0.0001999913074541837,
+      "loss": 0.2938,
+      "step": 883
+    },
+    {
+      "epoch": 0.01304431229618262,
+      "grad_norm": 0.9770641922950745,
+      "learning_rate": 0.00019999127835035324,
+      "loss": 0.2247,
+      "step": 884
+    },
+    {
+      "epoch": 0.013059068305567443,
+      "grad_norm": 0.7141462564468384,
+      "learning_rate": 0.000199991263798438,
+      "loss": 0.1756,
+      "step": 885
+    },
+    {
+      "epoch": 0.013073824314952265,
+      "grad_norm": 0.6457951664924622,
+      "learning_rate": 0.00019999124924652278,
+      "loss": 0.2273,
+      "step": 886
+    },
+    {
+      "epoch": 0.013088580324337086,
+      "grad_norm": 1.0006299018859863,
+      "learning_rate": 0.00019999122014269233,
+      "loss": 0.1971,
+      "step": 887
+    },
+    {
+      "epoch": 0.013103336333721908,
+      "grad_norm": 1.2667323350906372,
+      "learning_rate": 0.0001999912055907771,
+      "loss": 0.1411,
+      "step": 888
+    },
+    {
+      "epoch": 0.01311809234310673,
+      "grad_norm": 1.0697591304779053,
+      "learning_rate": 0.00019999119103886187,
+      "loss": 0.2687,
+      "step": 889
+    },
+    {
+      "epoch": 0.013132848352491552,
+      "grad_norm": 0.8275360465049744,
+      "learning_rate": 0.00019999116193503141,
+      "loss": 0.1895,
+      "step": 890
+    },
+    {
+      "epoch": 0.013147604361876375,
+      "grad_norm": 1.5881019830703735,
+      "learning_rate": 0.00019999113283120096,
+      "loss": 0.1936,
+      "step": 891
+    },
+    {
+      "epoch": 0.013162360371261197,
+      "grad_norm": 11.928915023803711,
+      "learning_rate": 0.00019999113283120096,
+      "loss": 0.6702,
+      "step": 892
+    },
+    {
+      "epoch": 0.013177116380646017,
+      "grad_norm": 0.9647379517555237,
+      "learning_rate": 0.0001999911037273705,
+      "loss": 0.2089,
+      "step": 893
+    },
+    {
+      "epoch": 0.01319187239003084,
+      "grad_norm": 0.9707943797111511,
+      "learning_rate": 0.00019999107462354004,
+      "loss": 0.2171,
+      "step": 894
+    },
+    {
+      "epoch": 0.013206628399415662,
+      "grad_norm": 0.6461355686187744,
+      "learning_rate": 0.00019999106007162482,
+      "loss": 0.1915,
+      "step": 895
+    },
+    {
+      "epoch": 0.013221384408800484,
+      "grad_norm": 1.8797247409820557,
+      "learning_rate": 0.0001999910455197096,
+      "loss": 0.4694,
+      "step": 896
+    },
+    {
+      "epoch": 0.013236140418185306,
+      "grad_norm": 2.4890024662017822,
+      "learning_rate": 0.00019999101641587913,
+      "loss": 0.2185,
+      "step": 897
+    },
+    {
+      "epoch": 0.013250896427570128,
+      "grad_norm": 1.5101224184036255,
+      "learning_rate": 0.0001999910018639639,
+      "loss": 0.3527,
+      "step": 898
+    },
+    {
+      "epoch": 0.01326565243695495,
+      "grad_norm": 1.7768837213516235,
+      "learning_rate": 0.00019999097276013345,
+      "loss": 0.3919,
+      "step": 899
+    },
+    {
+      "epoch": 0.013280408446339771,
+      "grad_norm": 1.1590951681137085,
+      "learning_rate": 0.00019999095820821822,
+      "loss": 0.2561,
+      "step": 900
+    },
+    {
+      "epoch": 0.013280408446339771,
+      "eval_loss": 0.30002906918525696,
+      "eval_runtime": 28.4979,
+      "eval_samples_per_second": 4.772,
+      "eval_steps_per_second": 4.772,
+      "step": 900
     }
   ],
   "logging_steps": 1,
@@ -5693,7 +6401,7 @@
         "early_stopping_threshold": 0.0
       },
       "attributes": {
-        "early_stopping_patience_counter":
+        "early_stopping_patience_counter": 4
       }
     },
     "TrainerControl": {
@@ -5702,12 +6410,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop":
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos":
+  "total_flos": 7.533788331245568e+16,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null
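Since trainer_state.json is plain JSON, the fields changed in this commit (epoch, global_step, the appended log_history entries, and the early-stopping state) can be inspected directly once the checkpoint is downloaded. A minimal sketch, assuming a hypothetical local copy at the path below:

```python
# Minimal sketch, not part of this commit: inspect the updated trainer state.
# The local path is a hypothetical placeholder.
import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print(state["global_step"])   # 900
print(state["epoch"])         # 0.013280408446339771
print(state["best_metric"])   # 0.22727738320827484 (from checkpoint-500)

# log_history holds one dict per logged step; evaluation entries carry "eval_loss".
evals = [e for e in state["log_history"] if "eval_loss" in e]
print(evals[-1]["step"], evals[-1]["eval_loss"])   # 900 0.30002906918525696
```

Note that the diff also flips "should_training_stop" to true with the early-stopping patience counter at 4: eval loss has not improved on the best metric from checkpoint-500 for four evaluations, so step 900 is this run's final checkpoint.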