diff --git a/.gitattributes b/.gitattributes
index 164c05d33a64c94bc43b3653832540db8f64a905..dfa659c203f8bc407e742591dc859037d5f8b599 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -27747,3 +27747,41 @@ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_59696-tokenized-chun
 train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_73112-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
 train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_73112-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
 train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_19737-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_59696-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_28090-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_19737-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_5838-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_36117-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_76048-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_3445-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_36117-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_33485-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_5838-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_49059-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_9288-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_33485-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_3610-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_76048-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_26447-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_49059-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_19247-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_26447-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_97565-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_40900-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_40900-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_97565-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_9288-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_10388-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds filter=lfs diff=lfs merge=lfs -text
diff --git a/train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds b/train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds new
file mode 100644 index 0000000000000000000000000000000000000000..d79dbc560b5af014b817fb443b69f926f9855147 --- /dev/null +++ b/train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:873d8752588d9f1e1650572372cc233e25209e0e5a37751987f327a6db12eb84 +size 67107018 diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_11145-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_11145-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..072188bfe3f8cf6470486d129782b7ef48c8ff0e --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_11145-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107860, "hashes": {}}, "samples": 42400, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47690667, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 22237986, "hashes": {}}, "samples": 14300, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 15710645, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_11145-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_11145-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..4b7f2795b8b05082e6a282277a04853e7a3d7075 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_11145-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 42917756, + "num_truncated_tokens": 42879673 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_11372-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_11372-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..fe8a96d23883e8040cf9a445055b23db12ce3840 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_11372-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108172, "hashes": {}}, "samples": 42703, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47795428, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": 
"shard.00001.mds", "bytes": 21166826, "hashes": {}}, "samples": 13380, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 15066412, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_11372-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_11372-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..810f595fb77fe2a323d87e8ed4f135bfd54b27b8 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_11372-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 42401409, + "num_truncated_tokens": 42364409 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_15637-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_15637-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..350fccfd9d7d8e649ba96e726af3026f60b44100 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_15637-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108676, "hashes": {}}, "samples": 42571, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47597136, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 22212728, "hashes": {}}, "samples": 13973, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 15755339, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_15637-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_15637-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..93dc72ae9187ce49a5733425793beaaebd8d7fb4 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_15637-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 42910301, + "num_truncated_tokens": 42873146 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_19325-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_19325-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..2b7d58e91ae3800817052b9300c76ffa4e1ad8d9 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_19325-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], 
"column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108695, "hashes": {}}, "samples": 42956, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47506419, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18283365, "hashes": {}}, "samples": 11758, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13039420, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_19325-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_19325-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..448920edfd169d779e077b7074af8c594ce84f98 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_19325-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 41002335, + "num_truncated_tokens": 40968064 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20996-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20996-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..46eea3fce5dd08655d5f27ced6ae0eb14048d585 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20996-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108140, "hashes": {}}, "samples": 43761, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47619746, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15534555, "hashes": {}}, "samples": 10038, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10996632, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20996-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20996-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..2c9bc85838d52c0f94f02693a143ba6014bd761d --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20996-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 39656376, + "num_truncated_tokens": 39623467 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_21443-tokenized-chunked-1024-512-128-backfill-nodups/index.json 
b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_21443-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..3aca0de6eb94c32879de17aef6e1b6cd164be323 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_21443-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107808, "hashes": {}}, "samples": 43944, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47805257, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 13370162, "hashes": {}}, "samples": 8605, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9515819, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_21443-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_21443-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..edd4da62e35890f755150bde43fd49be8f328f11 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_21443-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38612612, + "num_truncated_tokens": 38581910 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_27233-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_27233-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..e4303ed1b7ba2ed0db03cfe7899df9fac2c02535 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_27233-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108330, "hashes": {}}, "samples": 43199, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47974502, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18514478, "hashes": {}}, "samples": 11712, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13134390, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_27233-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_27233-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 
index 0000000000000000000000000000000000000000..2fc9dfc2c802598d1f70f798790cf2345e91cead --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_27233-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 41111653, + "num_truncated_tokens": 41076402 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_28888-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_28888-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..be88e4d69253546d06b4ad9349903c24119b6d8b --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_28888-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108834, "hashes": {}}, "samples": 44651, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47969138, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9835486, "hashes": {}}, "samples": 6416, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6963435, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_28888-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_28888-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..8feb0f02785d8481a825e29b8c8fb78962ec4eb1 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_28888-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 36891847, + "num_truncated_tokens": 36862961 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_29627-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_29627-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..a78043d46607ed289ee2e0c8aeb1ebb7287d035a --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_29627-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108507, "hashes": {}}, "samples": 43033, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47513753, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 19349616, "hashes": 
{}}, "samples": 12205, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13577767, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_29627-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_29627-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..9f34e1a3e21d51b0693b781cc575f5b54393108b --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_29627-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 41519154, + "num_truncated_tokens": 41484240 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_29981-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_29981-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..5e1884eef17c0e7a2cc5f3e43e1ff7513c78482f --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_29981-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108844, "hashes": {}}, "samples": 43359, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47992616, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16317070, "hashes": {}}, "samples": 10544, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11642694, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_29981-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_29981-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..3b16af54b749c2a09a8e24aa3ec2dca344abf17c --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_29981-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 40044523, + "num_truncated_tokens": 40010818 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30492-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30492-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..75a2cacf0dc7111dedbd749d872cf09991214ba1 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30492-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", 
"format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108709, "hashes": {}}, "samples": 43640, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47999225, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 13721316, "hashes": {}}, "samples": 8963, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9795150, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30492-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30492-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..b40d0ef6c913252143185a0c997174e7bff15349 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30492-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38786771, + "num_truncated_tokens": 38755710 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_34339-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_34339-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..046518c0db5ee5b7c6dc8b70cc848b72b468e4e0 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_34339-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108091, "hashes": {}}, "samples": 43772, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47591333, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12003418, "hashes": {}}, "samples": 7948, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8557589, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_34339-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_34339-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..a6ea83485f2d2ed56c49f5c6b24dd31686b9fc70 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_34339-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37955035, + "num_truncated_tokens": 37925160 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_35790-tokenized-chunked-1024-512-128-backfill-nodups/index.json 
b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_35790-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..e56b8411aaa7c869a12ce17a74f880ade8b33e81 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_35790-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106767, "hashes": {}}, "samples": 42997, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47830501, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17951860, "hashes": {}}, "samples": 11606, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12860390, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_35790-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_35790-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..5c58cd8ad6c25ed05e4ef95ebdb3884bdf0ca5bc --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_35790-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 40839092, + "num_truncated_tokens": 40804522 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_37123-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_37123-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..fa193bc4cb85d1da88f36724d44014b49311ea4b --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_37123-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106824, "hashes": {}}, "samples": 44201, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47655967, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10454781, "hashes": {}}, "samples": 6804, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7425362, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_37123-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_37123-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 
index 0000000000000000000000000000000000000000..b4ae520f98a9ea1d24a9295d284e7c407a69fa5a --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_37123-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37202233, + "num_truncated_tokens": 37173663 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_3717-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_3717-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..0397a841fd1d1f6ea616df898717d9dcdd43fcf5 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_3717-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108178, "hashes": {}}, "samples": 42665, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47604437, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 20141392, "hashes": {}}, "samples": 13010, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14336627, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_3717-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_3717-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..820bda429fd0977ac552446638c1648817d8f372 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_3717-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 41901363, + "num_truncated_tokens": 41865700 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_3756-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_3756-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..6ff53161b7499b9e8d2a805aa0fc7bf3051a4a30 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_3756-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107605, "hashes": {}}, "samples": 44058, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47610260, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10440743, "hashes": {}}, 
"samples": 6902, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7399690, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_3756-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_3756-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..d02cd42e18602abf00531e868c1c4b4fbb101188 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_3756-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37196951, + "num_truncated_tokens": 37168717 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_37627-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_37627-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..85190cdcfbf249349de5e84865c330300a3f3f0c --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_37627-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108565, "hashes": {}}, "samples": 43575, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47736105, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15952010, "hashes": {}}, "samples": 10276, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11280621, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_37627-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_37627-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..ea385ed09332beeb19f84450a48a1a18cf6d7970 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_37627-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 39863644, + "num_truncated_tokens": 39830837 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_38636-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_38636-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..b37645e80561e96d0f7cbab28af02ec7e118203e --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_38636-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": 
"mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107996, "hashes": {}}, "samples": 43761, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47479171, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12330082, "hashes": {}}, "samples": 8055, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8742182, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_38636-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_38636-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..dab23abe092654e4d906f8734de6fa70d1609a7d --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_38636-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38115224, + "num_truncated_tokens": 38086126 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40637-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40637-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..d3b6a26bde00577f0da9ea501e8acc047621d543 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40637-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108666, "hashes": {}}, "samples": 44271, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48064125, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9788703, "hashes": {}}, "samples": 6525, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6995261, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40637-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40637-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..8be4d93e62a5ed657450b9a420c3f431cde8ddec --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40637-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 36876585, + "num_truncated_tokens": 36848037 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_442-tokenized-chunked-1024-512-128-backfill-nodups/index.json 
b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_442-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..fc984cba26124036e83c70523a31255a813eada9 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_442-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107081, "hashes": {}}, "samples": 44187, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47646185, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10493390, "hashes": {}}, "samples": 6832, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7425445, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_442-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_442-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..f5dec553066ce2615171beaa3ea839bd28566ef3 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_442-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37221237, + "num_truncated_tokens": 37192855 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4761-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4761-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..a90b8afbb86db620c24838471bc6b4932e04bd27 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4761-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106970, "hashes": {}}, "samples": 43355, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48002277, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15254187, "hashes": {}}, "samples": 9908, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10945165, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4761-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4761-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 
0000000000000000000000000000000000000000..a12b283e87d969ca599c61e69db26b0b84f93f30 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4761-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 39531904, + "num_truncated_tokens": 39499258 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_48181-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_48181-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..81d0ecb73d2429845ebfd3f673eb52c456542a5a --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_48181-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108393, "hashes": {}}, "samples": 43745, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47947664, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 13949561, "hashes": {}}, "samples": 9084, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9938808, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_48181-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_48181-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..c8cd8b9d27a0c6be0f64fc18dab8fee59043e05a --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_48181-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38893827, + "num_truncated_tokens": 38861901 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_51612-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_51612-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..0cc38ea9dd5244b48a4a35e7d9c85d1145464fb0 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_51612-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108437, "hashes": {}}, "samples": 42714, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47671322, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 20420130, "hashes": {}}, 
"samples": 13018, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14388025, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_51612-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_51612-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..64095ea9394987a4186192cab1200c196c70f196 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_51612-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 42039072, + "num_truncated_tokens": 42002972 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_56065-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_56065-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..82867795503208b65861ce26b78c2b37d5f61bcf --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_56065-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108367, "hashes": {}}, "samples": 43204, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47469847, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18360635, "hashes": {}}, "samples": 11824, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12998192, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_56065-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_56065-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..926ceef62891d0d13f44e1feec50722ad9638fa2 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_56065-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 41031417, + "num_truncated_tokens": 40996598 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_57188-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_57188-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..c5afa7a2b8ba6ea4825e5dfe14d77b2e70c3ff06 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_57188-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", 
"format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108026, "hashes": {}}, "samples": 43596, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47743287, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15807539, "hashes": {}}, "samples": 10183, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11228924, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_57188-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_57188-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..7736b6e23acce2c1cd88f3e8e07c57b5dce63e09 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_57188-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 39793336, + "num_truncated_tokens": 39760336 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_58671-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_58671-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..50b0a76d1bcec01abe130713f48734072772d1d6 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_58671-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107150, "hashes": {}}, "samples": 43330, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47975842, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17297689, "hashes": {}}, "samples": 10890, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12321233, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_58671-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_58671-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..555c983f46791630e6e4a46786837296e023ffe7 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_58671-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 40524044, + "num_truncated_tokens": 40489836 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63526-tokenized-chunked-1024-512-128-backfill-nodups/index.json 
b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63526-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..bc5ab7b687c5a2181b9e51b3a7872e7682dc1891 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63526-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107321, "hashes": {}}, "samples": 43723, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47994415, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 13989515, "hashes": {}}, "samples": 9115, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9972149, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63526-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63526-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..d0201c844bab401fb081b2753e3dd1785da97ff7 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63526-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38912975, + "num_truncated_tokens": 38881206 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63912-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63912-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..03d0134ccbef597490087106f15c1f375ff721ad --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63912-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107750, "hashes": {}}, "samples": 44175, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47802206, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11768082, "hashes": {}}, "samples": 7784, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8409658, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63912-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63912-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 
0000000000000000000000000000000000000000..edb99e4eb719f4e7b11449360f6d838ee90365b8 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63912-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37829952, + "num_truncated_tokens": 37800161 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_68541-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_68541-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..18a39373008b6204cf6acf2ed42f04ff8682ab7e --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_68541-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 41006837, + "num_truncated_tokens": 40972441 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_69010-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_69010-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..cbfdc82f517c9ed0535366c968048ac2ba79d077 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_69010-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107361, "hashes": {}}, "samples": 43678, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47620873, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12236801, "hashes": {}}, "samples": 8312, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8751938, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_69010-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_69010-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..cdfadac214d7c99547b0502dadd32fa120daa8a4 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_69010-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38063064, + "num_truncated_tokens": 38033317 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_70666-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_70666-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..5e9b3951e63c757c2e38ed706dcc01bf61422ac9 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_70666-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 
@@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108732, "hashes": {}}, "samples": 44242, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47746394, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9710579, "hashes": {}}, "samples": 6316, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6912358, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_70666-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_70666-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..c86f1e1a3d7dd2511e878a38d9a81899c273dfe5 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_70666-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 36844856, + "num_truncated_tokens": 36817298 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_71726-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_71726-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..dc32427fda08f3d3d148ff9214ebaa53627904b7 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_71726-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108692, "hashes": {}}, "samples": 42983, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47944688, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17271899, "hashes": {}}, "samples": 11205, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12371420, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_72243-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_72243-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..438160f3f03dcd7b5198e7e1dc91cc4ff343c7d7 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_72243-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": 
"mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107926, "hashes": {}}, "samples": 44096, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47859923, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11232903, "hashes": {}}, "samples": 7453, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8061629, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_72243-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_72243-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..e603a29c27e37e001a54516ff9a1f68c5985338d --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_72243-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37575086, + "num_truncated_tokens": 37544887 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_78796-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_78796-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..602da625be5a5556056ec02fe35b7d0ae0b51e1a --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_78796-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107520, "hashes": {}}, "samples": 44313, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47711290, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9526354, "hashes": {}}, "samples": 6299, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6756981, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_78796-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_78796-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..bbd78cf75c46fed94d8e4e101dba3391c4ba803a --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_78796-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 36750547, + "num_truncated_tokens": 36722945 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_80645-tokenized-chunked-1024-512-128-backfill-nodups/index.json 
b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_80645-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..1a386fe36e92901e76bb8a4ec378358ebb87fd66 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_80645-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107327, "hashes": {}}, "samples": 42918, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47430049, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 19093911, "hashes": {}}, "samples": 12451, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13594252, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_80645-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_80645-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..cd4d4f39d8ecc6c95f1d02b86530914b3131d762 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_80645-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 41386917, + "num_truncated_tokens": 41351556 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_82377-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_82377-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..6f3767306a3a46dba9faf9eccf218484fb763526 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_82377-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108684, "hashes": {}}, "samples": 44514, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47971881, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10104360, "hashes": {}}, "samples": 6538, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7199038, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_89417-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_89417-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 
0000000000000000000000000000000000000000..facf385231ac33be4f368329a3e2d38bd410ef7d --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_89417-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107002, "hashes": {}}, "samples": 43819, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47874425, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 14289454, "hashes": {}}, "samples": 9236, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10259802, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_89417-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_89417-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..4fd726220fc1c65b310bce9637642bc60d9bb3f6 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_89417-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 39056168, + "num_truncated_tokens": 39023507 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_90385-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_90385-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..e17509ec6de9d2174f6a31f74d48a394e1c93c95 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_90385-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108861, "hashes": {}}, "samples": 43079, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47997156, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17798562, "hashes": {}}, "samples": 11354, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12748987, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_90385-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_90385-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..be07562a06a7b1b6040e39482155ee180260e253 --- /dev/null +++ 
b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_90385-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 40768727, + "num_truncated_tokens": 40734309 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_94206-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_94206-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..62bf9965bf71d4ae4ef4ff49761d20a3b29e0d64 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_94206-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108312, "hashes": {}}, "samples": 43237, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48028344, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16623359, "hashes": {}}, "samples": 10806, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11884698, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_94206-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_94206-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..337dac26a4d109dc6f03d3af4c879d5f354dd555 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_94206-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 40193033, + "num_truncated_tokens": 40159110 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_10388-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_10388-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds new file mode 100644 index 0000000000000000000000000000000000000000..b03bbc98797c2942df754d027d847f1dbb712e40 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_10388-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a3fb09ec8f3601067ac32b2e2731809d400956ab966324f1ecde83930bfea0ec +size 15203110 diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_19247-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_19247-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds new file mode 100644 index 0000000000000000000000000000000000000000..0fccc46f80c4cfcee056bdaefebac5ee2aa2a027 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_19247-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:02e3f5ec71749f6cec61ad952215b05c0fa8df05e71af0277bbe91c288e27bd1 +size 8791847 diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_19737-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_19737-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds new file mode 100644 index 0000000000000000000000000000000000000000..acbba4782cb9ae7e25d017721dc7d64168f90315 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_19737-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a3f832cc6803b57ef13efbee487d884c962871eb0b73c7a43170f1d3df1921f2 +size 67107368 diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_26447-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_26447-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds new file mode 100644 index 0000000000000000000000000000000000000000..272073b9f0a4a26cec6fac447deaebfd4b12c742 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_26447-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c06b03407a4732f6f36d708fd9ec4b0ca7b849f60acabeba9fd75c00e80d0e29 +size 67108237 diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_26447-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_26447-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds new file mode 100644 index 0000000000000000000000000000000000000000..7b8a36589ddb2430cea7003c869e6fc2d326048e --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_26447-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eccc17f06cb4edd524025ccf447bdc8e3c423e0ba3f91a2e06e4762db87e15da +size 9306731 diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_28090-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_28090-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds new file mode 100644 index 0000000000000000000000000000000000000000..962550e0f18dca56b292b21434a2e256ab673f27 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_28090-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8817fac3a129901ba303027f3e79fa96c4da185a2921a59a296087f896be2ec9 +size 17559099 diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_33485-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_33485-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds new file mode 100644 index 0000000000000000000000000000000000000000..76ba0b378d88589c63382cf24e2fa6b69411e038 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_33485-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bd58430b4350582b4e63d1ee68e02ecc0b4bfb94670c3ca1366db1753997e731 +size 67107474 diff --git 
a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_33485-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_33485-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds new file mode 100644 index 0000000000000000000000000000000000000000..b51dd5f71e07944ab36804bc9575a198f516193c --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_33485-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:78988b4f17000fc64ca5fb00fd718494b29f42c9089c8880d698b4277f9dd589 +size 12252942 diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_3445-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_3445-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds new file mode 100644 index 0000000000000000000000000000000000000000..3e7fbaf109ccd13d6de937fd401c96a610a9c38a --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_3445-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:26c74cc6a8a2766651944f561e4c0c9091c3f47dc8e30b62a80d80759eb93b8f +size 67108750 diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_3610-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_3610-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds new file mode 100644 index 0000000000000000000000000000000000000000..e16ab0b0732bc386f9578a0b6aaabe9db91f7d75 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_3610-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:47569bfd441276eb5e252b47f0a6653e3aa2e3db89d032605a9f926e78fc7f18 +size 7759362 diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_36117-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_36117-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds new file mode 100644 index 0000000000000000000000000000000000000000..53d29ea9a7e36771e7564fd2bd05a332b0889715 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_36117-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0dbe4327170dd62b16b7ac853e8ee4e26429ee2bccac9c6454b6f4e1cd9b846 +size 67107415 diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_36117-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_36117-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds new file mode 100644 index 0000000000000000000000000000000000000000..6098b1cb634aa805691ae9d87743668dbf7b30f9 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_36117-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d11632a2a70b8ac8355c59c00841487fb2791bd5c185bec27a7ea67840127eee +size 19919489 diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_40900-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds 
b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_40900-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds new file mode 100644 index 0000000000000000000000000000000000000000..8a0752735a960bad58ed3dfc0a0fb8ee7fddb326 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_40900-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:162c7543a90dac9ee7f6627d8f14543c55dffd663f6c8e1649d5e3c43ae4c499 +size 67108151 diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_40900-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_40900-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds new file mode 100644 index 0000000000000000000000000000000000000000..58770202a1fd7fe260e62658d33b7dc10688199a --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_40900-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a1c3af5b7ab7383606f6e57137ad68c00c09956f5e2610c1a7e6e3495259cb9f +size 7984787 diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_49059-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_49059-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds new file mode 100644 index 0000000000000000000000000000000000000000..864e23f395d87348aa76f383686190002cc1ccc6 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_49059-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d5d86ef57dbea1b14181790692c0075e6199f799c31a1aacf0968eda9849de67 +size 67106870 diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_49059-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_49059-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds new file mode 100644 index 0000000000000000000000000000000000000000..64ee726fe984c004c7282307b038fa28c7e67e73 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_49059-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:71f8bdbd157eb843653c2af1d58d85cf92afd9ff81c4bf106afe9475a776b111 +size 12909537 diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_5838-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_5838-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds new file mode 100644 index 0000000000000000000000000000000000000000..7581751914d124914b8f17b8f0d979d7fe2df44c --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_5838-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a67f27d12a679b66e2cb15b2ff2cc610e657d525ee736c5b0740c1135deb7d54 +size 67106827 diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_5838-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_5838-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds new file 
mode 100644 index 0000000000000000000000000000000000000000..16227dbae8c84fecaf7927a8071c4fddd15a59b1 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_5838-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:02757cf15c7151b11e04369e777f67e1bc756f557c7a456c84f2af7d401110ac +size 21845852 diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_59696-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_59696-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds new file mode 100644 index 0000000000000000000000000000000000000000..034e34f48d5b898944d4cad827a0bf09c15adeaf --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_59696-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b03eb74d61eebaa77a67098f2663db079994cd7b93bb3c79358e81783b3edefc +size 18243102 diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_76048-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_76048-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds new file mode 100644 index 0000000000000000000000000000000000000000..9c6db0d9c19230697672045cf82b15cc48abf441 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_76048-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:97f9de5e68e200f4ef81e463ea32d139cf843691f8ba3ea749527273c4fa6ac9 +size 67108605 diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_76048-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_76048-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds new file mode 100644 index 0000000000000000000000000000000000000000..128e1453eac277a825ce6aa77c5323b45ad7c155 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_76048-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f05647a27611cc0d149348740e9daefdf94fec966c199aed927b1f2c48b8f278 +size 15080140 diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_9288-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_9288-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds new file mode 100644 index 0000000000000000000000000000000000000000..35deafe91d1553b30fe1c3d5d86bb4b2a92b2705 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_9288-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:69bd8bda78ba34aa60e2b11f5ba1b95ab8ef6d3b1f66344472a624880bdc615b +size 67107669 diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_9288-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_9288-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds new file mode 100644 index 0000000000000000000000000000000000000000..a29ae55f97c1295bf472ce57906f3e9bc2e71b54 --- /dev/null +++ 
b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_9288-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:358f8cf918d60a713e8563bdc1e00a7126137f1762657da9f9da7945de7c098b +size 10234606 diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_97565-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_97565-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds new file mode 100644 index 0000000000000000000000000000000000000000..627a14fffcd53493ee5cc08322807af0cca20260 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_97565-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fad746c96e3065457af3549a917f9729f6a6035050fb21d60a59205009473e64 +size 67108336 diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_97565-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_97565-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds new file mode 100644 index 0000000000000000000000000000000000000000..94a0ef542da7fc16a37b5d29b9c943e16bd101df --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_97565-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:33a3b3caf11cc0ddfcfcd7aa487d9bb430f0e67d968c28ecd865bc3aad8c2591 +size 21570482 diff --git a/train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds b/train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds new file mode 100644 index 0000000000000000000000000000000000000000..15a10c6dff60f3e514492c911f38a0fa11478eaf --- /dev/null +++ b/train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7686d8c69864f7e297f26aa4ad5a14d158a041edf62f116a21160010a36a7671 +size 67108534 diff --git a/train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds b/train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds new file mode 100644 index 0000000000000000000000000000000000000000..7ea3baad8ab86b283907b9b824c954930afc1c94 --- /dev/null +++ b/train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:84c4275a39afc2526724db7be34b111ad68e43dbf6c80b2ef573add50f3047cc +size 67107142 diff --git a/train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds b/train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds new file mode 100644 index 0000000000000000000000000000000000000000..69b9a174fbfff29f4db6e8547ffdd3e6b339aad3 --- /dev/null +++ b/train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8135c023e49a425ec7969e0d4446381757c0750f3ab62e9a2521b27b3de9fea5 +size 67108747 diff --git 
a/train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds b/train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds new file mode 100644 index 0000000000000000000000000000000000000000..899d4ddbba6fb72438d6865a764c1fbd7e98a624 --- /dev/null +++ b/train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3352f5bded4cf787bd19511c19e41eabb202c2d6ab40bf4042ae43f63f37f6a5 +size 67107169 diff --git a/train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds b/train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds new file mode 100644 index 0000000000000000000000000000000000000000..30308793bb853105f08721389348c7f70051030d --- /dev/null +++ b/train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c6eb666396278d19aeca07aef79cd4bf47d222ef3f1abe8d9102b8921ac0f6b6 +size 67108233 diff --git a/train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds b/train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds new file mode 100644 index 0000000000000000000000000000000000000000..3899058fff1e86a53bc2c218aa96b61b20ab9a3a --- /dev/null +++ b/train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b20f6d1f72422892a615bd09f3489ce6a5077f96365dc45571843b9ef1b58ca3 +size 67107058 diff --git a/train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds b/train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds new file mode 100644 index 0000000000000000000000000000000000000000..d4429ecdfe1b0dcce2ad47acd99500c9b7f1f86d --- /dev/null +++ b/train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f3ee3d6772bf458f60fa146868b81a22d8ee9068cfc1d322707b1271373dd8f +size 67108845 diff --git a/train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds b/train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds new file mode 100644 index 0000000000000000000000000000000000000000..e62d7225981d54788adae59584ba6d45ef207728 --- /dev/null +++ b/train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0538be4fb229ab57de95bd4017caa7667b2b79257fcf9f6a338f5280852359ac +size 67108192 diff --git a/train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds b/train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds new file mode 100644 index 0000000000000000000000000000000000000000..b4f6dc506fe2db71ebb9c5239578015f34fcd05b --- /dev/null +++ b/train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:c29af5055245843f4b2554f3ffd7c404a1b42a21404928753849321fee5fbd46 +size 67107842 diff --git a/train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds b/train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds new file mode 100644 index 0000000000000000000000000000000000000000..704adf6756d3847bd9c35974d5aa2546f06c4a7c --- /dev/null +++ b/train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4fbaeae43842fd37e775930e540a69c290d5a3596022d7767561b0f56785a94f +size 67108803 diff --git a/train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds b/train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds new file mode 100644 index 0000000000000000000000000000000000000000..9efe2276eb165893ee79243fae9faec09fb055bf --- /dev/null +++ b/train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:be0ad8c8d041cfcdfe01be5bc1853e39411189db4c13edf2bc23d69e9cbd29a2 +size 67108033 diff --git a/train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds b/train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds new file mode 100644 index 0000000000000000000000000000000000000000..bfca68588b513535c8f0827a29e6432e3c750a07 --- /dev/null +++ b/train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c8f3713c81eb05e9dff194f2e9d55ffc7bb88b673c8dde824340c753fdd3907 +size 67108254