diff --git a/.gitattributes b/.gitattributes index b0637f5060436a934ef3480bd2c56a5a5bbe0c40..6a665e23aa298053e071f81b6778d9bdc95777e7 100644 --- a/.gitattributes +++ b/.gitattributes @@ -10608,3 +10608,37 @@ train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/ train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds filter=lfs diff=lfs merge=lfs -text train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds filter=lfs diff=lfs merge=lfs -text train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00032.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00040.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00033.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00037.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00034.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00035.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0171-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00039.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds filter=lfs diff=lfs merge=lfs -text 
+train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00028.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00033.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00041.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds filter=lfs diff=lfs merge=lfs -text diff --git a/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds b/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds new file mode 100644 index 0000000000000000000000000000000000000000..aa745f764293a23a1f0d9606edeb97cec84ef54f --- /dev/null +++ b/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a552223ad2cad6bed80631d470f641e3ba7577b6732d28a02b241b05860f4a7 +size 67106749 diff --git a/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds b/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds new file mode 100644 index 0000000000000000000000000000000000000000..7aa7c2ff6a1051a72f7aad9365eaef049a2acda2 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c9154a7448bd24012336248606e53064c9e5d5b9c1ca95c1298793fa1c95ad5d +size 67107940 diff --git a/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds b/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds new file mode 100644 index 0000000000000000000000000000000000000000..2ed4f0a2fddaecf8140fb113bbae00651dde2cea --- /dev/null +++ b/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9aa98c068e626ba189c714126c1bd39b1dc07797b4ecfb523f79ca02572060a2 +size 67108569 diff --git 
a/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds b/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds new file mode 100644 index 0000000000000000000000000000000000000000..f12d53b4c6979413a43c3f7001d0db24f2fda583 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f392b18b0ce267a9c82f92b3eef07da7c2f5ca07b854558be1d402b3827d02f +size 67108815 diff --git a/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds b/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds new file mode 100644 index 0000000000000000000000000000000000000000..0661506331611c57265c701bdf9ee36266f8fe10 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:614b50e07bcf6ff1384066d5213a362396ba45b64e03ae3862cd31459c24554a +size 67108775 diff --git a/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds b/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds new file mode 100644 index 0000000000000000000000000000000000000000..69b68ee66c6eb3910a07f48382c431597eb7d3f0 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3d6ffe8ede547c2e5a45e83538a4407f7ffa9b49350419a9a17bc42659bad1ce +size 67107180 diff --git a/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds b/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds new file mode 100644 index 0000000000000000000000000000000000000000..b610bbf83177c09d8c0cbe13a1a7cf3ca9586239 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:62af524af3a227f40173128c8645c58edde0717efc7585deff954b8ecde9a114 +size 67108814 diff --git a/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds b/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds new file mode 100644 index 0000000000000000000000000000000000000000..588d6c21953883d3d7263a3732c1d02ce0492733 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d9f4062c991a7d3f03244d56276d9d7edef1534e921635f89ab4d9d131a6b917 +size 67108119 diff --git a/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds b/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds new file mode 100644 index 0000000000000000000000000000000000000000..1f161cc459c7085730fa486a415e192fdc5394a0 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fd4b146fb52f5c98e1ac1a99552293052e58175c0209b271405ce51ae6364a90 +size 67107911 diff --git 
a/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds b/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds new file mode 100644 index 0000000000000000000000000000000000000000..c4bf6e45ef74402cf7260e494267e593032fd6f6 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8f28390ba5a99939b72c8a857187fb533d147deb4e9623357f719274c1fc2946 +size 67108365 diff --git a/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds b/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds new file mode 100644 index 0000000000000000000000000000000000000000..1ac6087fd79dadaaa6ad67f26bda541d5f9619ec --- /dev/null +++ b/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a8a51aaf49e522345cefc4b8dd2932b88a4826e34e098decba90181cfe821e54 +size 67108281 diff --git a/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds b/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds new file mode 100644 index 0000000000000000000000000000000000000000..8f27f9efa7e6be94963ef59151c0ed291feac7b1 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5be737b0184201984fe1f9a86110723d9b30340d080c3c5ea7e55f0b26b743cd +size 67107692 diff --git a/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00032.mds b/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00032.mds new file mode 100644 index 0000000000000000000000000000000000000000..ccd73e7a78a268f0aa18604c438061e73610139d --- /dev/null +++ b/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00032.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d270501cfaa2072607a8ab40e5a2a6aff08d768deedd6c46013389c8b05bb859 +size 67108481 diff --git a/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00033.mds b/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00033.mds new file mode 100644 index 0000000000000000000000000000000000000000..b1ea5b04e6b39f4656da963d9da56e02fac14afc --- /dev/null +++ b/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00033.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:53d186760dfb807fa6f937965aec426b37e11bc3e0031eec92ef4b84154df872 +size 67108385 diff --git a/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00034.mds b/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00034.mds new file mode 100644 index 0000000000000000000000000000000000000000..b3854a8c8bd0c7ef77e2caafb992283f7e135698 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00034.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:71d1242324afe9de0f5fba26458535858ee2f119228c99e5decb56c301163ce3 +size 67108621 diff --git 
a/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00035.mds b/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00035.mds new file mode 100644 index 0000000000000000000000000000000000000000..18561bbb6477950fcaa37950cf293bfddb7f8576 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00035.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:799c5e21b43b55c806e62c34b78964f6c2f9d5a432a7a40fe7708d14ec03b8ce +size 67107668 diff --git a/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00037.mds b/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00037.mds new file mode 100644 index 0000000000000000000000000000000000000000..4c57db801b747b1481097bd8d9a57475012a0bb1 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00037.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:00bcc709b1c7ece0fc2db9b280a293cd8ccd632ac7016bf1a15a3c68f905fef6 +size 67108247 diff --git a/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00039.mds b/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00039.mds new file mode 100644 index 0000000000000000000000000000000000000000..aa4357b9826bf4b6bb8d2ce5fb588f0b8341ad5e --- /dev/null +++ b/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00039.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:66e2508498c3469d22d04cf069da55a1ba97e8d3531499ec8024dae8cb74798f +size 67108265 diff --git a/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00040.mds b/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00040.mds new file mode 100644 index 0000000000000000000000000000000000000000..66345119e47a61c19a1b234c8ba2820d0d6f2725 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00040.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f9664b7d53b8a06a423a1ef3b369c0edb2833610a7b3d58801cc1cb2b309badd +size 20094696 diff --git a/train/cc_en_head/cc_en_head_0171-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds b/train/cc_en_head/cc_en_head_0171-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds new file mode 100644 index 0000000000000000000000000000000000000000..42899c9b918fba4a265261b68db3255a047e3b9b --- /dev/null +++ b/train/cc_en_head/cc_en_head_0171-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:52e7f67507cbd3b4b9e13a72250ab89f32af60aee7c82d2d818a7b052688fdf8 +size 67106857 diff --git a/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds b/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds new file mode 100644 index 0000000000000000000000000000000000000000..62cc4f4a210fa5cfb21104f62eedbdea65027f34 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c514ae7fc55b7814a26cd7570e31bbd3b96d54e16bd0f5abf0263aea71f5ee0b +size 67107782 diff --git 
a/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds b/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds new file mode 100644 index 0000000000000000000000000000000000000000..57e086d2726293bd4d6bc3ab4474af693419aa35 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:299a1319ba2409caee701671ead3b505ade402627b4868bcc14b99f327f54616 +size 67108697 diff --git a/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds b/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds new file mode 100644 index 0000000000000000000000000000000000000000..1ab47eafd0ab4ea8aef96cf2e3315b3604ab14ea --- /dev/null +++ b/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7c278af0099ea584185d3a22db8360a5cf8dd99ea48275064be972b6476f2fb1 +size 67108503 diff --git a/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds b/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds new file mode 100644 index 0000000000000000000000000000000000000000..f86a2a09c3a04fd297aa8424342f693c89e1410e --- /dev/null +++ b/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e2d48fb921d8fe75c37837674ad11d88cbb954bd5e47a16f678c176222b0d2d +size 67107578 diff --git a/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds b/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds new file mode 100644 index 0000000000000000000000000000000000000000..094641dd9683249cc6f248fd32276e6da5b644f4 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:80c4598cdbfd9f08bb921253df34d95852815ff077f683e4839cd8ac273f855e +size 67107996 diff --git a/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds b/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds new file mode 100644 index 0000000000000000000000000000000000000000..206ad0e8321d2f14b58d1719f66474b4c0ae0cba --- /dev/null +++ b/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e13198213647552e09f1de6b89d43e2c1acb3aabd03cdd21d7ade98d7f0a05df +size 67107995 diff --git a/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds b/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds new file mode 100644 index 0000000000000000000000000000000000000000..4805dcf34067fa799048fce0adffd5e41d41420f --- /dev/null +++ b/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:95d6e568a0c391176d51c88d4aa2dcd37701221c55c21089e45e0308e4e2fb49 +size 67108047 diff --git 
a/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds b/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds new file mode 100644 index 0000000000000000000000000000000000000000..d2d4ba79fb621c4642aa085478732d344a72116d --- /dev/null +++ b/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:07c88316d7d01f4987ae231ff0dde52366b5f8023cd59d8a1ab8a198e531a445 +size 67108765 diff --git a/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds b/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds new file mode 100644 index 0000000000000000000000000000000000000000..045fa6c2be0ca8547ad1dda4b24dc25ef823e7ab --- /dev/null +++ b/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:47fb28245088eb6471d85b12a3264acccb7488c7c443c0e02c3ed333300d0cae +size 67108354 diff --git a/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00028.mds b/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00028.mds new file mode 100644 index 0000000000000000000000000000000000000000..3bd1b0ed4ea322ddba3ce5a987c273d690e48abf --- /dev/null +++ b/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00028.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad36d31aa0b554215f2fe681ec2d1fccbece12945cd2665b5cd3e267efa15a8c +size 67108690 diff --git a/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds b/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds new file mode 100644 index 0000000000000000000000000000000000000000..0600481c34b25e7889d3a3b5ca408d4a9ce7ac14 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de99be7a436859c46911e86b6eba692b006a8a8a0c6d463733bf26597f287a43 +size 67107291 diff --git a/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds b/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds new file mode 100644 index 0000000000000000000000000000000000000000..f3b0ef1883586b85a7fd1512c763da6b81a26a7f --- /dev/null +++ b/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e44a52828af71717680bff7578076a50f8e5e1d5f5cbf86cc1a75f6cc50f8a4f +size 67107549 diff --git a/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00033.mds b/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00033.mds new file mode 100644 index 0000000000000000000000000000000000000000..aeddf4dd64a39868b60cf02e97762585b6957f75 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00033.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9a74e422f7212fbdc356688cc65e35bbcf4e26629ea969e3238ef0c61b151664 +size 67108443 diff --git 
a/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00041.mds b/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00041.mds new file mode 100644 index 0000000000000000000000000000000000000000..3e4e6478cd61a51f00f1b51203cc8d42360ec4dc --- /dev/null +++ b/train/cc_en_head/cc_en_head_0231-tokenized-chunked-1024-512-128-backfill-nodups/shard.00041.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98138d1f2d40f5328700c5e70697d2b0fc128a82cf80cb0c329ab39c882a6719 +size 67107845 diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_10461-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_10461-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..c48e845a90f057645eb12334d3cd78e2861f72fa --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_10461-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106962, "hashes": {}}, "samples": 43181, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47840600, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18927133, "hashes": {}}, "samples": 12021, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13413513, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_10461-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_10461-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..388a884fad0f284f6bb7f19361d99193bf935066 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_10461-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 41308354, + "num_truncated_tokens": 41273162 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_13886-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_13886-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..6dfb99e972f3720e00d7a9d822bf265b8431c520 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_13886-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108595, "hashes": {}}, "samples": 44179, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47759846, "hashes": {}}}, 
{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12011551, "hashes": {}}, "samples": 7814, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8506109, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_13886-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_13886-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..1362798e25bfec31b504f261af22d78a0630cfe2 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_13886-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37951060, + "num_truncated_tokens": 37920796 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_14930-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_14930-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..dc36896b8947655190fc1524d41e247930a5f032 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_14930-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107663, "hashes": {}}, "samples": 44001, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47620159, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11996775, "hashes": {}}, "samples": 7770, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8476410, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_14930-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_14930-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..ba5a1708570df074d1ed94d974be49cc590c7d75 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_14930-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37949921, + "num_truncated_tokens": 37920236 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_15037-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_15037-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..6c07abe4662ae83ec7cb402a7c9a450bf52a93c4 --- /dev/null +++ 
b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_15037-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108377, "hashes": {}}, "samples": 43311, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48086662, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15608653, "hashes": {}}, "samples": 10168, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11200727, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_15037-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_15037-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..03362c87a07cb78feaa7c9263952133db9aea77a --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_15037-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 39703170, + "num_truncated_tokens": 39670615 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_16679-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_16679-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..e840171e4a4a08514dd7bdafb5d86a84f15a1645 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_16679-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108158, "hashes": {}}, "samples": 42750, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47775715, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 20359542, "hashes": {}}, "samples": 12966, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14397547, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_16679-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_16679-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..60fa73bb9022d23dd8532bfd7588e486edfbc8a4 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_16679-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ 
+ "num_tokens": 42009152, + "num_truncated_tokens": 41973062 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_17303-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_17303-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..0ec9d41685ec7d2dce09bfb627428b75850da4bb --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_17303-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107771, "hashes": {}}, "samples": 43422, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47582475, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17120477, "hashes": {}}, "samples": 11057, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12174329, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_17303-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_17303-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..c2c5b544703be3c04dffee4435d87c73db2dd5bf --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_17303-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 40428080, + "num_truncated_tokens": 40394203 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18534-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18534-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..a855e0d99f0a7e4ea1fa1fdfcdac9e15e89a49ae --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18534-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106760, "hashes": {}}, "samples": 43384, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47788939, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 14822426, "hashes": {}}, "samples": 9874, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10609619, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git 
a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18534-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18534-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..5cfa794cdfb7b7b6f28e1018bf62280196d66476 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18534-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 39316303, + "num_truncated_tokens": 39283757 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18627-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18627-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..f2219eb633694b8d9ac4b585d8f039295b9cef54 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18627-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108479, "hashes": {}}, "samples": 44719, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47739305, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 7835688, "hashes": {}}, "samples": 5229, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 5565327, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18627-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18627-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..be4fdc6ec74966582f7b1c707a43e87cd5d2627a --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18627-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 35926335, + "num_truncated_tokens": 35899965 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_20462-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_20462-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..572db8c024d9df22f95a75ac35891aac25c1aaf8 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_20462-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108156, "hashes": {}}, "samples": 42902, "size_limit": 67108864, "version": 2, "zip_data": {"basename": 
"shard.00000.mds.zstd", "bytes": 47564025, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18975834, "hashes": {}}, "samples": 12161, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13447070, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_20462-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_20462-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..f74351e212977e8bb5cc68bf5727f7d37e8f974c --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_20462-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 41337562, + "num_truncated_tokens": 41302760 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_20656-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_20656-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..57c8634afdd46cac3215a6146652f81cde4dbe03 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_20656-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107708, "hashes": {}}, "samples": 42676, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47752660, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 20560674, "hashes": {}}, "samples": 13228, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14635792, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_20656-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_20656-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..4acf3f1949e837d599a000180e68d5839d46fde1 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_20656-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 42103663, + "num_truncated_tokens": 42067281 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21894-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21894-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..86fd0b61aa504b99fe0e2340ec383c5b659529ee --- 
/dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21894-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108768, "hashes": {}}, "samples": 44707, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47891878, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9210306, "hashes": {}}, "samples": 6008, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6574809, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21894-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21894-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..19c307d5ed92bc9c962112735dbf31234cfd2427 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21894-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 36590106, + "num_truncated_tokens": 36561633 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_27060-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_27060-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..f31207fcc2f1c30c173d0dd5e20497895c54f556 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_27060-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107448, "hashes": {}}, "samples": 42732, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47417186, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 20235179, "hashes": {}}, "samples": 12925, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14446643, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_27060-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_27060-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..b7d6e1524d5f1fd6bc22fa7ecec0b78f55391d6b --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_27060-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 
+1,4 @@ +{ + "num_tokens": 41948446, + "num_truncated_tokens": 41912466 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_29894-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_29894-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..7d19eaed99ad1cf5968c46748368c66233fbb180 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_29894-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108629, "hashes": {}}, "samples": 44194, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47732681, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10523428, "hashes": {}}, "samples": 6876, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7540093, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_29894-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_29894-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..48418bdc59de176fb09d351c5f2c2581246ae7b6 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_29894-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37235437, + "num_truncated_tokens": 37206691 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_35826-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_35826-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..ed0156e23ccf00e32a4b54416a9bb322f119ab8e --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_35826-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107108, "hashes": {}}, "samples": 44165, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47809804, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11557434, "hashes": {}}, "samples": 7549, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8273823, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git 
a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_35826-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_35826-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..030d34f9c9967e828e97e51a3efdcb5b96222b4c --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_35826-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37731815, + "num_truncated_tokens": 37701945 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_3715-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_3715-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..a76a949ae9b58c3ea38cba7d616468b77ce0f22d --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_3715-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108590, "hashes": {}}, "samples": 44046, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47785019, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11191588, "hashes": {}}, "samples": 7524, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7974230, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_3715-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_3715-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..3d4178c24a4d6c23744546abf301c4c4751bf143 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_3715-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37554101, + "num_truncated_tokens": 37525238 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_41154-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_41154-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..e82811faaef2433b85d9043c23e05f64cda487f3 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_41154-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108134, "hashes": {}}, "samples": 42568, "size_limit": 67108864, "version": 2, "zip_data": {"basename": 
"shard.00000.mds.zstd", "bytes": 47512758, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 22599266, "hashes": {}}, "samples": 14299, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 16013508, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_41154-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_41154-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..14f55feae033dd6d7ca5820aa382e62ead2e8712 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_41154-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 43093420, + "num_truncated_tokens": 43055411 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_41587-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_41587-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..60ea0d54060c227ec62d2472ce4e7d668d7647d0 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_41587-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106900, "hashes": {}}, "samples": 43329, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47484774, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 14494942, "hashes": {}}, "samples": 9546, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10267944, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_41587-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_41587-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..a3372b6170215f61bf9cb6cdb66d755ba5be8872 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_41587-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 39164291, + "num_truncated_tokens": 39133485 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_4383-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_4383-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..a4efdf891a97ed60db24daff4807264f8f307568 --- /dev/null 
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_4383-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106984, "hashes": {}}, "samples": 43526, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48016034, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15434514, "hashes": {}}, "samples": 9942, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11000335, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_4383-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_4383-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..1c30b16d17a77b293860f44b8a9b00cf0e458dec --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_4383-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 39615775, + "num_truncated_tokens": 39582675 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45650-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45650-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..be51f6ce855c5fa5d2e556eb413c6a140340926d --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45650-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107583, "hashes": {}}, "samples": 44897, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48037655, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 7283155, "hashes": {}}, "samples": 4946, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 5222202, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45650-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45650-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..c99198402858f1ebe31c4bf127dcb96e6a2e7f4c --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45650-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + 
"num_tokens": 35653032, + "num_truncated_tokens": 35626113 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_46467-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_46467-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..3dc9b24be884089594000bfedff9a8738514b0d1 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_46467-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107300, "hashes": {}}, "samples": 44140, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47846720, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12192319, "hashes": {}}, "samples": 7979, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8689881, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_46467-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_46467-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..dd0c53015dee5ccec53245f167b14fd046daab84 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_46467-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38036912, + "num_truncated_tokens": 38006483 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_51658-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_51658-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..aee0f93837f9f4fdaf8f621c38de85af9b314b4d --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_51658-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107443, "hashes": {}}, "samples": 42964, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47970954, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18125811, "hashes": {}}, "samples": 11707, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12973890, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git 
a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_51658-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_51658-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..9a13d81a314d595df884ca903b5fd2bf5d9f2f80 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_51658-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 40924230, + "num_truncated_tokens": 40889756 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_60025-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_60025-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..63e18342946a1ff0b056883ae69355ad43d5adc5 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_60025-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108618, "hashes": {}}, "samples": 43170, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47702563, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16982158, "hashes": {}}, "samples": 11064, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12046654, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_60025-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_60025-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..82243e16ebb03d48254d659db8e6fc1a6005af0b --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_60025-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 40366704, + "num_truncated_tokens": 40333383 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_60635-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_60635-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..bdf63886bb90bf775c7d7f6a9a67d56890969a23 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_60635-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108630, "hashes": {}}, "samples": 43691, "size_limit": 67108864, "version": 2, "zip_data": {"basename": 
"shard.00000.mds.zstd", "bytes": 47511036, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11599070, "hashes": {}}, "samples": 7809, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8302072, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_60635-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_60635-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..f431bd2916801993b3f81d2109314a892475088f --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_60635-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37759878, + "num_truncated_tokens": 37731118 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_61678-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_61678-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..22d7e60f9cf2a50022f59934b10cf586d7b8115a --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_61678-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108201, "hashes": {}}, "samples": 44680, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47914478, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 8523024, "hashes": {}}, "samples": 5658, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6105184, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_61678-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_61678-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..06772c71bb1f43183fee0964931d1bf317624da3 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_61678-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 36257851, + "num_truncated_tokens": 36230131 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_62061-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_62061-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..ade6ead3acafb3b2b8f3870178501475998fe198 --- /dev/null +++ 
b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_62061-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107702, "hashes": {}}, "samples": 43658, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47793676, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 14106271, "hashes": {}}, "samples": 9266, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10053014, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_62061-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_62061-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..d9cf4e4e2592822d9662d79dd9620cafb41b7add --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_62061-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38969023, + "num_truncated_tokens": 38937009 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_63-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_63-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..d7b3b7922014fff2a602db884b906a12e86ac91f --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_63-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108669, "hashes": {}}, "samples": 43778, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47595240, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15050818, "hashes": {}}, "samples": 9719, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10720668, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_63-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_63-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..1fe8e5df85d04b1fe992b112949ba66b0c49364e --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_63-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 
39424116, + "num_truncated_tokens": 39391700 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_67077-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_67077-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..7c814d8b002c7ce655d5659bdbe2d908a739584e --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_67077-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107039, "hashes": {}}, "samples": 44187, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47715277, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10074827, "hashes": {}}, "samples": 6666, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7180560, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_67077-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_67077-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..fdd35977585e1fbca32192a236a5f9e9e3ae3418 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_67077-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37017116, + "num_truncated_tokens": 36988821 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_6775-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_6775-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..27c30257622e5cd875916c345474f088e6a89a8d --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_6775-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108018, "hashes": {}}, "samples": 42676, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47577780, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 21766325, "hashes": {}}, "samples": 14039, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 15427599, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git 
a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_6775-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_6775-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..1936065323c8091f3e7ea9d9e5a4f24e9879cef3 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_6775-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 42681819, + "num_truncated_tokens": 42644073 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_70401-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_70401-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..1d83341a7548c86945b5084d20fddd793525c2b9 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_70401-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108579, "hashes": {}}, "samples": 44572, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47978992, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9049479, "hashes": {}}, "samples": 6084, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6501015, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_70401-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_70401-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..eb15bd67393397e8238c7db6f0f0c282b04f036a --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_70401-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 36511392, + "num_truncated_tokens": 36483107 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_71090-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_71090-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..8724f77b08e24c3b42ce4217c6d062d80ee22459 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_71090-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108011, "hashes": {}}, "samples": 43881, "size_limit": 67108864, "version": 2, "zip_data": {"basename": 
"shard.00000.mds.zstd", "bytes": 47687655, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11363201, "hashes": {}}, "samples": 7556, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8092281, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_71090-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_71090-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..4843f09fae47ab1b165fae230733a2d224f76c65 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_71090-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37643636, + "num_truncated_tokens": 37614372 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_73268-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_73268-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..413e444c1872b76dcdd6763975361a7786d60356 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_73268-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107358, "hashes": {}}, "samples": 43540, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47924174, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 14975061, "hashes": {}}, "samples": 9811, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10719792, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_73268-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_73268-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..ac18e1be3153666eeaae421da3f2ce168e7c0531 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_73268-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 39389927, + "num_truncated_tokens": 39357211 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_78165-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_78165-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..5b60d62a171760ced1d2032ea18ebc02f95bde3f --- /dev/null 
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_78165-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106926, "hashes": {}}, "samples": 42841, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47484473, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 19626194, "hashes": {}}, "samples": 12551, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13938081, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_78165-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_78165-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..c3c21f9da96cc9880f90f6f6594019afbf92a497 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_78165-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 41651897, + "num_truncated_tokens": 41616747 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_80797-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_80797-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..50901ed458cc9dc40cec561e1825523111cf5b16 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_80797-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 41642873, + "num_truncated_tokens": 41607022 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_81027-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_81027-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..1845c25e3a018075a419f02e00440c2228a33d8c --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_81027-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108497, "hashes": {}}, "samples": 42788, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47560199, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18664512, "hashes": {}}, "samples": 12011, "size_limit": 67108864, "version": 2, "zip_data": {"basename": 
"shard.00001.mds.zstd", "bytes": 13339429, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_81027-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_81027-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..19a74923b646dba50a7554017ddb1c9f19706f46 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_81027-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 41190202, + "num_truncated_tokens": 41155703 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_82304-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_82304-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..be25679b6f230037913292aac1cd9b2da49ee24c --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_82304-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108580, "hashes": {}}, "samples": 42735, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47577946, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 20211343, "hashes": {}}, "samples": 12957, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14317812, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_82304-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_82304-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..e94bf7b5f7e714b6772edded10d89b43c4e588e4 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_82304-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 41936048, + "num_truncated_tokens": 41900282 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_86270-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_86270-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..59dbbd9c03382a93dfeea70fa8ad88f04af1e79f --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_86270-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 
67108512, "hashes": {}}, "samples": 43593, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48038722, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 14532266, "hashes": {}}, "samples": 9448, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10365815, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_86270-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_86270-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..e36e9bd5727e2e8a47ea5d5389dd89d79c22d496 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_86270-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 39178646, + "num_truncated_tokens": 39146328 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_88952-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_88952-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..5d99ed3fd0e1cc0a31fa794cb69f179bffa87be4 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_88952-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107111, "hashes": {}}, "samples": 43047, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47531835, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17763069, "hashes": {}}, "samples": 11388, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12565610, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_88952-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_88952-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..85570360b25fb5986aa324a8f91488760cc9e295 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_88952-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 40750120, + "num_truncated_tokens": 40716008 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_94826-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_94826-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 
100644 index 0000000000000000000000000000000000000000..dd6a4adf418261d0391e428da4184a66957ccc4f --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_94826-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107829, "hashes": {}}, "samples": 43339, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47998456, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16801654, "hashes": {}}, "samples": 10726, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12023924, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_94826-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_94826-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..7479febe93ccb4c86963c671132c0b91eed696d2 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_94826-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 40281308, + "num_truncated_tokens": 40247105 +} \ No newline at end of file
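
Note (not part of the diff): the index.json files added above describe zstd-compressed MDS shards with columns "id" (str) and "input_ids" (uint16 token arrays), and each num_tokens.json records per-split "num_tokens" and "num_truncated_tokens". A minimal sketch for aggregating those counts, assuming a local checkout with the directory layout shown in these paths; the script, variable names, and glob pattern are illustrative assumptions, only the file and key names are taken verbatim from the diff:

# Sum the token counts recorded in the num_tokens.json files added above.
# Assumes the repository has been cloned locally with the same layout.
import json
from pathlib import Path

root = Path("train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2")  # assumed local path

n_splits = 0
total_tokens = 0
total_truncated = 0
for meta in sorted(root.glob("split_*-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json")):
    counts = json.loads(meta.read_text())
    n_splits += 1
    total_tokens += counts["num_tokens"]
    total_truncated += counts["num_truncated_tokens"]

print(f"splits: {n_splits}")
print(f"num_tokens: {total_tokens:,}  num_truncated_tokens: {total_truncated:,}")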