diff --git a/.gitattributes b/.gitattributes
index b60dc9bd424e1102398dd7e9935e33eb4cb4dd39..439f2627b57d8dd049b697062a192a202f19ddc1 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -11961,3 +11961,42 @@ train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/
 train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds filter=lfs diff=lfs merge=lfs -text
 train/cc_en_head/cc_en_head_0262-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds filter=lfs diff=lfs merge=lfs -text
 train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds filter=lfs diff=lfs merge=lfs -text
+train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds filter=lfs diff=lfs merge=lfs -text
+train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds filter=lfs diff=lfs merge=lfs -text
+train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds filter=lfs diff=lfs merge=lfs -text
+train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds filter=lfs diff=lfs merge=lfs -text
+train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00055.mds filter=lfs diff=lfs merge=lfs -text
+train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds filter=lfs diff=lfs merge=lfs -text
+train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00039.mds filter=lfs diff=lfs merge=lfs -text
+train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00044.mds filter=lfs diff=lfs merge=lfs -text
+train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds filter=lfs diff=lfs merge=lfs -text
+train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds filter=lfs diff=lfs merge=lfs -text
+train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds filter=lfs diff=lfs merge=lfs -text
+train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00047.mds filter=lfs diff=lfs merge=lfs -text
+train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00037.mds filter=lfs diff=lfs merge=lfs -text
+train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00028.mds filter=lfs diff=lfs merge=lfs -text
+train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00050.mds filter=lfs diff=lfs merge=lfs -text
+train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00045.mds filter=lfs diff=lfs merge=lfs -text
+train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00038.mds filter=lfs diff=lfs merge=lfs -text
+train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00042.mds filter=lfs diff=lfs merge=lfs -text
+train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00049.mds filter=lfs diff=lfs merge=lfs -text
+train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00046.mds filter=lfs diff=lfs merge=lfs -text
+train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00043.mds filter=lfs diff=lfs merge=lfs -text
+train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds filter=lfs diff=lfs merge=lfs -text
+train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds filter=lfs diff=lfs merge=lfs -text
+train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00035.mds filter=lfs diff=lfs merge=lfs -text
+train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds filter=lfs diff=lfs merge=lfs -text
+train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00040.mds filter=lfs diff=lfs merge=lfs -text
+train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00041.mds filter=lfs diff=lfs merge=lfs -text
+train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds filter=lfs diff=lfs merge=lfs -text
+train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00051.mds filter=lfs diff=lfs merge=lfs -text
+train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00048.mds filter=lfs diff=lfs merge=lfs -text
+train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds filter=lfs diff=lfs merge=lfs -text
+train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds filter=lfs diff=lfs merge=lfs -text
+train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds filter=lfs diff=lfs merge=lfs -text
+train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00052.mds filter=lfs diff=lfs merge=lfs -text
+train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds filter=lfs diff=lfs merge=lfs -text
+train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds filter=lfs diff=lfs merge=lfs -text
+train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00053.mds filter=lfs diff=lfs merge=lfs -text
diff --git a/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds
new file mode 100644
index 0000000000000000000000000000000000000000..65424bd48daa4a069458a0363d82a767a6ef4bb3
--- /dev/null
+++ b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:52312a90599e554bd8158ea753922f2981682d8d9b96d9789bd4a85b2ca1daf8
+size 67107130
diff --git a/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds
new file mode 100644
index 0000000000000000000000000000000000000000..7d367e0f40dcb865ccb4e7f97a2b6cdbb0d3fd28
--- /dev/null
+++ b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:001bd4b8608fe8126b644cc2c51951551cbe79ec81f4eed9558856f0a4e79e36
+size
67108461 diff --git a/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds new file mode 100644 index 0000000000000000000000000000000000000000..2ec17015634ab660171085cb745fb083cad59117 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b8268691831c0300ae25a2cfaf9c7c870dec4f97c28604b11fa5c55083e3bdd1 +size 67108818 diff --git a/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds new file mode 100644 index 0000000000000000000000000000000000000000..c824b356e70098734a8114a82ce326b263579f5b --- /dev/null +++ b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:173a8662b73068e23601297bad6adbc2c45eaaf8687e379a73e30e191ba26120 +size 67108608 diff --git a/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds new file mode 100644 index 0000000000000000000000000000000000000000..689880dd7f37624ed0fa84aaeeec4692e91f42b6 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d2a068ae1ad8a642af4a1db56356894393037ae3432c2d82c431c9be2f6c595 +size 67108200 diff --git a/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds new file mode 100644 index 0000000000000000000000000000000000000000..06b3496e402492055aacaf76ec87a6f20e82afc3 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:68510429c87ba9d09a3c556e675f795f0604f3b72de1e3e3db90cc0063de84ec +size 67107669 diff --git a/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds new file mode 100644 index 0000000000000000000000000000000000000000..b96949ff49484467713bb586fac42acaf826ffde --- /dev/null +++ b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c911fb0dfad1f4e3c80e692b5ddd24cd2ed927fa4bd3758335f6f8fb47989ad +size 67108814 diff --git a/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds new file mode 100644 index 0000000000000000000000000000000000000000..0762aeee62d227c0d2179038d5ff73fc61263208 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:69446d27a71443ae33a73421f4867054f4f38b3f07d67a04118eada6cc138eb0 +size 67107970 diff --git 
a/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds new file mode 100644 index 0000000000000000000000000000000000000000..41fcbb495e9726b2a150985c4f12c050c82be6fa --- /dev/null +++ b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:00ed74db6a79b791ab1a4f4357f6d446abcd1dd316d0ffc1609a1548cd80aaac +size 67107816 diff --git a/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds new file mode 100644 index 0000000000000000000000000000000000000000..53bc6e9f64b249829de8ff0be08a570c74ca02e1 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c657c4b526b8721a6d952b6b71ba55b3d1f10dd5a969b1a6a01fc3ddac85055 +size 67107755 diff --git a/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds new file mode 100644 index 0000000000000000000000000000000000000000..6b77f8a7bbf464daee96169401e3b48a2f9a1f8e --- /dev/null +++ b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f1fb81c5e8cbdb66a31672f8718cc6d00c1fc7972ec55d70172a622fe801e6b0 +size 67108588 diff --git a/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds new file mode 100644 index 0000000000000000000000000000000000000000..4f89833e46c40fa3763cdc16f9110f0ab0d3bf08 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9cd6d0fc0975914571a2385cc963f1e4effec3b94381b6513078917af9ebdce8 +size 67107577 diff --git a/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds new file mode 100644 index 0000000000000000000000000000000000000000..aae7c5c1e5a13cde001ae7bf013216e02b039267 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e9c4488137f0848242b87f40478a91d0bb62337c53b9cab064e6bb0ada1c38d +size 67108597 diff --git a/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds new file mode 100644 index 0000000000000000000000000000000000000000..f185c18a33aac60b22ce2604152118d7921d13f5 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:574d9b451cdfb6dec75cbfe0f38c0a833a4481962a9dfe83a1f8d3246c76a90e +size 67107265 diff --git 
a/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds new file mode 100644 index 0000000000000000000000000000000000000000..6455ae2fbb66eed062cf9a766d01d9a41c601c87 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b60fb739e2e4275976e30633510fd5cbff023a6377f6a1204d643b0708d816e4 +size 67107805 diff --git a/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds new file mode 100644 index 0000000000000000000000000000000000000000..8e6236e7da4564bb6ee2e65abb6eef9d448075d9 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b3eccf83d7c7f578d4bd02a4334662e8252d315645b72bfbf8955bd9ee94ba27 +size 67107846 diff --git a/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds new file mode 100644 index 0000000000000000000000000000000000000000..7766fc6ed1a009d9110eb20b353607b7cc3bb50d --- /dev/null +++ b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:42dc31702b88381098182a467fd3fa87dfb522df31375028d72ca6b0d43aa5ed +size 67108806 diff --git a/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00028.mds b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00028.mds new file mode 100644 index 0000000000000000000000000000000000000000..e3591a79aea933424abaa6662e842ca9c714fbf3 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00028.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d2bcaa3583afa9aef719f98a53aa01de6da4ee031d6c0d26f3508bbbe66e889 +size 67108583 diff --git a/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds new file mode 100644 index 0000000000000000000000000000000000000000..c37927eae5f79479794751e8cc7dfd349927d601 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a79f126638741147de483bbcca01554328d398673e2c6ae53177dc7422a9567a +size 67108496 diff --git a/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds new file mode 100644 index 0000000000000000000000000000000000000000..8e6798d7950902b153ce9f0e41cdaacdcfad9ac8 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:799bf64701d012402ea4f5b91f61e502c87086cc7f63693430ce0a01104d7f1d +size 67107333 diff --git 
a/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00035.mds b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00035.mds new file mode 100644 index 0000000000000000000000000000000000000000..73a8e2d514d2b9973e17f2e6a3e8da528f02f906 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00035.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:258b08140219516058e72c37e12810cc2eed4c790916c1fdd2467585029ac5b3 +size 67108486 diff --git a/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00037.mds b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00037.mds new file mode 100644 index 0000000000000000000000000000000000000000..01c536e18c4a8e6a2edf278a43327b583fb177fa --- /dev/null +++ b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00037.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3f6a90909c35560fc29b61b3964b511bc1237b8331cb4629562c41705a83478f +size 67106802 diff --git a/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00038.mds b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00038.mds new file mode 100644 index 0000000000000000000000000000000000000000..760f0554780679010b35d5db4c649b58f39af83f --- /dev/null +++ b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00038.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c2c76616bfed3da486fd77f2e8321745adb3b6ec5340f7c5bb8d321e4800be3 +size 67108266 diff --git a/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00039.mds b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00039.mds new file mode 100644 index 0000000000000000000000000000000000000000..f79595fb4c86f62cf15a1ab3d9496ee478fa2cd7 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00039.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:264a03e469dadfdccce488643aec165422e14d538ce3b3a3e8f683801548d50a +size 67107659 diff --git a/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00040.mds b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00040.mds new file mode 100644 index 0000000000000000000000000000000000000000..61bbfb954f0cf6d4012cece84f17471dca85fb7d --- /dev/null +++ b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00040.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1a63f58f9d73ba10f3ee1d65b550d403491ec3c98efd3fd4b35b6ed7a8b1ab23 +size 67107096 diff --git a/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00041.mds b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00041.mds new file mode 100644 index 0000000000000000000000000000000000000000..603464046c1101fd227099c65659d2f5bd5ccb1d --- /dev/null +++ b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00041.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:59c048a2e333bfabf62359144ce09624ba35a9132ab6a12ae9e356c3e6530819 +size 67108287 diff --git 
a/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00042.mds b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00042.mds new file mode 100644 index 0000000000000000000000000000000000000000..716027a8e5467f0dee4c78855348198487b9754f --- /dev/null +++ b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00042.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:80a5a0c0a65f441726727bd6beef64409b25b2201ef49cbf41d817a252d8cb9e +size 67108511 diff --git a/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00043.mds b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00043.mds new file mode 100644 index 0000000000000000000000000000000000000000..b8aca63c96b7633f37eb08b99aff1a6eec33322b --- /dev/null +++ b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00043.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:030b2d4e47d797fbd7473fac4b1dcd6163365c8803cee7fd8836c5b6e1e9069d +size 67108383 diff --git a/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00044.mds b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00044.mds new file mode 100644 index 0000000000000000000000000000000000000000..6ab29b058dfa2ee6cf889e0b7fbc3bf97b763cc1 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00044.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:18d36a3fae7460ea6ec19c586544915cf900bebead8065646ec41f9313a14f70 +size 67107381 diff --git a/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00045.mds b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00045.mds new file mode 100644 index 0000000000000000000000000000000000000000..b839301783527c53e492eaa7795e74a598fa6709 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00045.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1f3a0d533721e7ef4839fae9443688186b0e81269099bd95715fb414dddc26c7 +size 67108680 diff --git a/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00046.mds b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00046.mds new file mode 100644 index 0000000000000000000000000000000000000000..ec9e5e6aaf553821518eb374fcaa9458623fefb7 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00046.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:52be7862c2e0c7633a7c04d3d1d30ac9c87e1d8192e0c0bbcef3a4a75c8b1f5f +size 67107942 diff --git a/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00047.mds b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00047.mds new file mode 100644 index 0000000000000000000000000000000000000000..98f77ca76f8acbd30d5a3edff5fa7431778504dd --- /dev/null +++ b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00047.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:448fe2c62ef6118ee97592f8a5237135aa6e0c4dbec98631f0d7302ef3ec977a +size 67107868 diff --git 
a/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00048.mds b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00048.mds new file mode 100644 index 0000000000000000000000000000000000000000..ba855b3affadd93411af946f512cdd7c6867780a --- /dev/null +++ b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00048.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f857518a2c5cdd447aabe7704ae4b2e6d2065af2e83d6c0e6257115c0e4c6850 +size 67107456 diff --git a/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00049.mds b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00049.mds new file mode 100644 index 0000000000000000000000000000000000000000..ddc75736fbef07fad6598e81f21ce6de58c41806 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00049.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e184507e1a9d84aa61ad7985a04cf0f9326f840c3dd1e7e4113550c9c2b7a396 +size 67108440 diff --git a/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00050.mds b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00050.mds new file mode 100644 index 0000000000000000000000000000000000000000..050212d1b2b66fff642e349c99291a5e6406d6f1 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00050.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b1a7fe4a7ba3a635b6d5a16d6877fa6cd8bc65a3426fd71bbd9a20a6bf3979e +size 67107339 diff --git a/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00051.mds b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00051.mds new file mode 100644 index 0000000000000000000000000000000000000000..8c593463cdad20232810d0619cf932bdb91f825b --- /dev/null +++ b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00051.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d25c1fc8b61c94ff328f897bb676582afbefc100dc02708287023e1e8b21a98a +size 67108448 diff --git a/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00052.mds b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00052.mds new file mode 100644 index 0000000000000000000000000000000000000000..109eaf5f2da93eb92d1ffc965a1cd7c3ad8d7d3d --- /dev/null +++ b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00052.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7bad06df1d8b5220e14faf131bcd3134fcac6aed5febe195549f606d62089ad3 +size 67107684 diff --git a/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00053.mds b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00053.mds new file mode 100644 index 0000000000000000000000000000000000000000..4f344eb82f50a341996eed6d5a97f4924129ffbd --- /dev/null +++ b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00053.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:541292d71bca3c4cd3450cff1ba342b7cf2ccfc2ab58d7bd9a3832bec227cb77 +size 67107906 diff --git 
a/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00055.mds b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00055.mds new file mode 100644 index 0000000000000000000000000000000000000000..230438e15338bb5675c3be618284333f99562413 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0221-tokenized-chunked-1024-512-128-backfill-nodups/shard.00055.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ba2ec619dda3ea00df3c1344d32317a78bce3b7d33efafe5b00fb1e9104b6487 +size 67108073 diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_10273-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_10273-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..c82ceb8585c2fbae14055611ddc96fb7591e13fd --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_10273-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108362, "hashes": {}}, "samples": 44360, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48029713, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9934098, "hashes": {}}, "samples": 6535, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7101406, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_10273-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_10273-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..bac7c7abad71ab813a697ec2e49ce5dc86af7c46 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_10273-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 36946104, + "num_truncated_tokens": 36917246 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_11784-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_11784-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..5c6e206a67d20012c64025a646c9a4a76c440706 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_11784-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108020, "hashes": {}}, "samples": 44358, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47813275, "hashes": {}}}, {"column_encodings": 
["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9894275, "hashes": {}}, "samples": 6652, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7067877, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_11784-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_11784-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..cae279fb629ff6f7c2c06138f45cd452dd568840 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_11784-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 36922613, + "num_truncated_tokens": 36894383 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_17394-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_17394-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..35ab00b3f6ef4f2ece31fcc23a42b32ce8133ecb --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_17394-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108544, "hashes": {}}, "samples": 42935, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47568744, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 20995875, "hashes": {}}, "samples": 13050, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14834740, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_17394-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_17394-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..058be8f936ccdc4d2dcfb29bf159ecee8948a2b4 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_17394-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 42319173, + "num_truncated_tokens": 42282898 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18830-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18830-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..8fd5199188e0c5ea7da161c860692cdff9f0280e --- /dev/null +++ 
b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18830-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108272, "hashes": {}}, "samples": 43849, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47711065, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 13467144, "hashes": {}}, "samples": 8773, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9558987, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18830-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18830-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..12330398b0831c381c17e14e59472341259a4f46 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18830-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38659106, + "num_truncated_tokens": 38627701 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21249-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21249-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..08867af48fd02168dc7dc60f28996fe59dc9bc71 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21249-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107545, "hashes": {}}, "samples": 43905, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47424699, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11348853, "hashes": {}}, "samples": 7442, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8041791, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21249-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21249-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..cf6a12171e7c21849b03413906736aae2694199f --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21249-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + 
"num_tokens": 37638908, + "num_truncated_tokens": 37610704 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21661-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21661-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..47b71ac85c1ee583635f7e780835c2693b74f701 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21661-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108223, "hashes": {}}, "samples": 43096, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47985402, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18412108, "hashes": {}}, "samples": 11697, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13131563, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21661-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21661-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..9d520bc15e8510bd8e51c147df305c1114ea3e1b --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21661-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 41064035, + "num_truncated_tokens": 41028953 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_23115-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_23115-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..bca9480c1f918e6f69aa13866e750925fd637089 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_23115-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108717, "hashes": {}}, "samples": 42785, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47682571, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 21011233, "hashes": {}}, "samples": 13266, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14920123, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git 
a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_23115-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_23115-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..57617a9a4b32f9ddda7d9f9c9f0f6c0d2a7de1bc --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_23115-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 42324945, + "num_truncated_tokens": 42287980 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25602-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25602-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..70cc8186bf17cd5a99332b77263d5d00a98e994c --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25602-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108411, "hashes": {}}, "samples": 44096, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47804086, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 13878057, "hashes": {}}, "samples": 8933, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9802043, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25602-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25602-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..06e4caa06223a3df3fb026c0aaeafa64eb674f05 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25602-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38852104, + "num_truncated_tokens": 38820071 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_26520-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_26520-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..44721373dac599631d27852824e0a7f7b75ed6ba --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_26520-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107904, "hashes": {}}, "samples": 44123, "size_limit": 67108864, "version": 2, "zip_data": {"basename": 
"shard.00000.mds.zstd", "bytes": 47508023, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10331586, "hashes": {}}, "samples": 6896, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7310989, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_26520-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_26520-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..63bab0952f6780b995a4005050a8e184f7f00a3f --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_26520-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37140767, + "num_truncated_tokens": 37112844 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_2760-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_2760-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..22976e337cfcc7574ebc7bb56d49b181aa216ae2 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_2760-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108149, "hashes": {}}, "samples": 44051, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47527836, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11117802, "hashes": {}}, "samples": 7256, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7870179, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28250-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28250-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..4486a7c0b60cda3d36dada4ee5031c646186c808 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28250-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107407, "hashes": {}}, "samples": 42287, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47926749, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, 
null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 22709515, "hashes": {}}, "samples": 14567, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 16259708, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28250-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28250-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..655eda22e5f68f21784f4ee771e418614d200dbf --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28250-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 43148507, + "num_truncated_tokens": 43109977 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_30010-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_30010-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..72ac0ec5fc2f37366446fe7d5908d8b9836431ca --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_30010-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108105, "hashes": {}}, "samples": 43958, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47799616, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12590337, "hashes": {}}, "samples": 8151, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8963245, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_30010-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_30010-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..215c0f77f89a4d804b5705f50d9b21ff74a69be7 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_30010-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38236468, + "num_truncated_tokens": 38206735 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_30182-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_30182-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..3b305e80e5b73d145f9dcfb7963c088daddca1e3 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_30182-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": 
[{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106764, "hashes": {}}, "samples": 43880, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47539711, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11129998, "hashes": {}}, "samples": 7347, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7864348, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_30182-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_30182-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..50bcf78b6490e6716e1baffba975c6f709788cc7 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_30182-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37532818, + "num_truncated_tokens": 37504525 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_3098-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_3098-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..0ef115d1d13615ab71e28a846983d84abb98e4ee --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_3098-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108681, "hashes": {}}, "samples": 43727, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47631322, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15393017, "hashes": {}}, "samples": 10069, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10937580, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_3098-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_3098-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..9c101fe800f741c7e9527dfbedc760449f89bdb1 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_3098-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 39586011, + "num_truncated_tokens": 39553142 +} \ No newline at end of file diff --git 
a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_31234-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_31234-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..3c1dcdc9854743f0337a50b79a27b070dee178e0 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_31234-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108025, "hashes": {}}, "samples": 42661, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47758812, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 21167636, "hashes": {}}, "samples": 13433, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14989898, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_31234-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_31234-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..e311e6411ca93bd074775e00f4208807262fd6ef --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_31234-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 42401407, + "num_truncated_tokens": 42364504 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_36225-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_36225-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..69cb4842cd171769cbf066cdb20c05cbfd65adc4 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_36225-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108449, "hashes": {}}, "samples": 44562, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47826831, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 8225912, "hashes": {}}, "samples": 5522, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 5898923, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_36225-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json 
b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_36225-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..bdbb8ea403fb3207575abd3efdda5f1d74290c83 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_36225-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 36117197, + "num_truncated_tokens": 36090457 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_37309-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_37309-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..cd2b9beb457320568683282b5737eddf8f28ad3d --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_37309-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108374, "hashes": {}}, "samples": 43425, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47943725, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 14437453, "hashes": {}}, "samples": 9668, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10329675, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_37309-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_37309-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..5d1f6976c68097fe4e9d8f7ccb4acffa742b4473 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_37309-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 39129657, + "num_truncated_tokens": 39097525 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_38897-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_38897-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..f4e9d1e0336c95c81eb6f020dde316444c66932c --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_38897-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108632, "hashes": {}}, "samples": 43378, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47438230, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], 
"column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 14435397, "hashes": {}}, "samples": 9269, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10207296, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_38897-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_38897-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..dd12271856dbc9afd3c07ae80f487b6593f72e07 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_38897-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 39142314, + "num_truncated_tokens": 39111681 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_39057-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_39057-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..e780bb50da9135a4477add821a68a65235317beb --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_39057-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107535, "hashes": {}}, "samples": 44568, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47920627, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9121019, "hashes": {}}, "samples": 6069, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6527316, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_39057-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_39057-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..de2502479604964b02fdb2259f6960ea59d28870 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_39057-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 36547254, + "num_truncated_tokens": 36519303 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_42843-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_42843-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..a2a6e535916e30fd427af10f170e61f3fc128357 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_42843-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ 
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108830, "hashes": {}}, "samples": 43024, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47648487, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 19290009, "hashes": {}}, "samples": 12152, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13689239, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_42843-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_42843-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..c530cce9d08c1676ee6553857db4dc5e848a3933 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_42843-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 41491459, + "num_truncated_tokens": 41456177 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_43226-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_43226-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..4e3feadfa7a7b018fed632887a1ef95d0c56d4ac --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_43226-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107043, "hashes": {}}, "samples": 44684, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48076201, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 8583463, "hashes": {}}, "samples": 5652, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6145022, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_43226-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_43226-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..94cd93dbbcca1e6588b3122426c6b16bc12ca594 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_43226-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 36287496, + "num_truncated_tokens": 36259387 +} \ No newline at end of file diff --git 
a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_44052-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_44052-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..4810769aacd3afea20d8225822e7896dd74abb49 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_44052-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108040, "hashes": {}}, "samples": 43158, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47522162, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16198405, "hashes": {}}, "samples": 10811, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11492544, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_44052-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_44052-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..226712d1c694a51d9ec3fac4f887b784f72f3254 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_44052-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 39982859, + "num_truncated_tokens": 39949546 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_53334-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_53334-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..1ff780dd818a8ba8211943a9f669a4ee03165b97 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_53334-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108287, "hashes": {}}, "samples": 44612, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47915084, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9050617, "hashes": {}}, "samples": 6013, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6478546, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_53334-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json 
b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_53334-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..b6a5cf6d6ad6019b273c5d3d2f1d0294a555589e --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_53334-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 36512871, + "num_truncated_tokens": 36484426 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_5370-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_5370-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..6695d972b9260af4d262e54a93cb34ef0e6e6d0b --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_5370-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107347, "hashes": {}}, "samples": 43320, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48020782, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17454239, "hashes": {}}, "samples": 11050, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12341617, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_5370-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_5370-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..5759b1270b01b7b8c58d8af2cf65a77a6870128a --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_5370-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 40597817, + "num_truncated_tokens": 40563445 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_5518-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_5518-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..05799f40d42085f1b3180821d1f15d082ca8102f --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_5518-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107263, "hashes": {}}, "samples": 43620, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48047870, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], 
"column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 14410049, "hashes": {}}, "samples": 9388, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10335578, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_5518-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_5518-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..b5d44393fbf7e7edef1c1368acc89efbf887fa0e --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_5518-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 39117980, + "num_truncated_tokens": 39085510 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_63858-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_63858-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..e61876aa29167c30d9efef65ae5382950f185980 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_63858-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108013, "hashes": {}}, "samples": 45023, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47973151, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 6701711, "hashes": {}}, "samples": 4554, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 4749214, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_63858-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_63858-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..15d0223f44df0b7462b40f4fdbf186487be5b916 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_63858-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 35370735, + "num_truncated_tokens": 35344227 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_66657-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_66657-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..8bcab6d90e4513bd76a9a0b4cc156bd54fe92776 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_66657-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json 
@@ -0,0 +1,4 @@ +{ + "num_tokens": 37608004, + "num_truncated_tokens": 37579029 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_68857-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_68857-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..14754861778eb2a6aa29ceff36f03a2781f8ce58 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_68857-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108389, "hashes": {}}, "samples": 43912, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48005894, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12876161, "hashes": {}}, "samples": 8403, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9224984, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_68857-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_68857-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..7c830bb1f4930f1ff1c5f11cbe25254ad5ccb784 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_68857-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38373060, + "num_truncated_tokens": 38342026 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_7359-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_7359-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..d76c0fb4aaf0c4140249d4e5c38b43320e419671 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_7359-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107483, "hashes": {}}, "samples": 43733, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47736218, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12579438, "hashes": {}}, "samples": 8437, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9007251, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git 
a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_7359-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_7359-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..b1d0cb028e64cd0bf13a48182c3f44681e015d85 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_7359-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38228870, + "num_truncated_tokens": 38198772 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_75992-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_75992-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..d4018fe74fadb51014546128195b49d13a5583c1 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_75992-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108109, "hashes": {}}, "samples": 44742, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47892277, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 6455537, "hashes": {}}, "samples": 4355, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 4617559, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_75992-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_75992-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..086f6da7c529ab5f6eff9e8a6e8a163932449e43 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_75992-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 35262399, + "num_truncated_tokens": 35237118 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_77254-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_77254-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..f2369861996bc3bc8cfbc5da62428fcfa8776980 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_77254-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108031, "hashes": {}}, "samples": 43701, "size_limit": 67108864, "version": 2, "zip_data": {"basename": 
"shard.00000.mds.zstd", "bytes": 47842519, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15839185, "hashes": {}}, "samples": 10059, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11170361, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_77254-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_77254-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..19726761f2f495d0e8342a6857bec251a510def5 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_77254-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 39809671, + "num_truncated_tokens": 39776485 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_80590-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_80590-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..4cd7d642ae49734a24c4514a30962d4228c31a67 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_80590-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107758, "hashes": {}}, "samples": 42299, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47561569, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 25043967, "hashes": {}}, "samples": 15732, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 17861022, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_80590-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_80590-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..c724dfaa97a7b9bed3946d7324878b66314c2a65 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_80590-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 44279519, + "num_truncated_tokens": 44239034 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_81561-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_81561-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..089dff546874750cfed7fadfdbeeacc575d714b8 --- 
/dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_81561-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108832, "hashes": {}}, "samples": 45156, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47956592, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 6832408, "hashes": {}}, "samples": 4454, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 4833954, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_81561-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_81561-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..1cfeec483cccc958e15f2c6b2726e3b74efa07f4 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_81561-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 35435495, + "num_truncated_tokens": 35409078 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_81981-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_81981-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..bc6cf161676253a3982a8772422632d23a70c2e0 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_81981-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108004, "hashes": {}}, "samples": 43001, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47929042, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17656030, "hashes": {}}, "samples": 11453, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12662670, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_82518-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_82518-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..ce18a57186cfc3c14399bbfaecbb259f6e69d9fe --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_82518-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ 
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108818, "hashes": {}}, "samples": 43851, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47613136, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15436772, "hashes": {}}, "samples": 9794, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10920384, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_82518-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_82518-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..d9bfd53e354b61d75f563a0888bfe1da7469f9b0 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_82518-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 39612492, + "num_truncated_tokens": 39579613 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_8300-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_8300-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..aa925bbc2231c84e2101a49329576dfb55af26b5 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_8300-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108174, "hashes": {}}, "samples": 43533, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47708521, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16115070, "hashes": {}}, "samples": 10306, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11463554, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_8300-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_8300-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..6d828bafd2a0fe8430516dfc450814281ac5915a --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_8300-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 39945249, + "num_truncated_tokens": 39912108 +} \ No newline at end of file diff --git 
a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_89513-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_89513-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..3f03b0c37f3bb759e27ce32bf99ee089603d0ec3 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_89513-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108813, "hashes": {}}, "samples": 42838, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47407161, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 20475466, "hashes": {}}, "samples": 12980, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14459931, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_89513-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_89513-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..d5b4ddebd2f9c4b1a31095c919a08eaf00d6682a --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_89513-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 42064334, + "num_truncated_tokens": 42028442 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_90636-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_90636-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..a956653ae9e96a12652c661eb000e0098c6c78c3 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_90636-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108673, "hashes": {}}, "samples": 43545, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48067081, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15099591, "hashes": {}}, "samples": 9714, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10796151, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_90636-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json 
b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_90636-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..650a6b8c9dc3edfb6176856cd99d41dbf06af4ab --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_90636-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 39455580, + "num_truncated_tokens": 39423519 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_94939-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_94939-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..20035b76ad6244d62361568b20896954e17c6e99 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_94939-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108713, "hashes": {}}, "samples": 43113, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48032025, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17063895, "hashes": {}}, "samples": 11011, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12090760, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_94939-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_94939-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..04e410f3e2febdf6bc86ed1d58d6d181ea480d9a --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_94939-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 40410844, + "num_truncated_tokens": 40376792 +} \ No newline at end of file
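The index.json files added above follow MosaicML Streaming's MDS shard-index layout: column_names/column_encodings describe each sample's fields ("id" as str, "input_ids" as ndarray:uint16), raw_data and zip_data give the uncompressed and zstd-compressed shard files with their byte sizes, and samples counts rows per shard; the neighboring num_tokens.json records token totals for the split. As a minimal illustrative sketch only (the helper name, the aggregation, and the example path are assumptions, not tooling that ships with this dataset), the following reads one split's metadata and tallies shards, samples, bytes, and tokens:

import json
from pathlib import Path


def summarize_split(split_dir: str) -> dict:
    """Summarize one tokenized split from its MDS index.json and num_tokens.json.

    Assumes the directory layout shown in this diff:
      <split_dir>/index.json       - MDS shard index (version 2)
      <split_dir>/num_tokens.json  - token counts written alongside the shards
    """
    split = Path(split_dir)
    index = json.loads((split / "index.json").read_text())

    # Each entry in index["shards"] describes one shard.XXXXX.mds file.
    total_samples = sum(shard["samples"] for shard in index["shards"])
    raw_bytes = sum(shard["raw_data"]["bytes"] for shard in index["shards"])
    zip_bytes = sum(shard["zip_data"]["bytes"] for shard in index["shards"])

    counts = json.loads((split / "num_tokens.json").read_text())

    return {
        "shards": len(index["shards"]),
        "samples": total_samples,
        "raw_bytes": raw_bytes,
        "zip_bytes": zip_bytes,
        "num_tokens": counts["num_tokens"],
        "num_truncated_tokens": counts["num_truncated_tokens"],
    }


# Hypothetical usage with one of the split directories from this diff:
# print(summarize_split(
#     "train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/"
#     "split_94939-tokenized-chunked-1024-512-128-backfill-nodups"
# ))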