orionweller committed
Commit 218e845
Parent: e1822a2

Add files using upload-large-folder tool
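The commit message points at the `upload-large-folder` workflow in `huggingface_hub`, which splits a large local folder into many small commits like this one. Below is a minimal sketch of that workflow; it assumes a recent `huggingface_hub` release that exposes `HfApi.upload_large_folder`, and the repository id is a placeholder rather than anything taken from this page.

```python
# Minimal sketch of the upload-large-folder workflow (assumption: a recent
# huggingface_hub release with HfApi.upload_large_folder). repo_id is a placeholder.
from huggingface_hub import HfApi

api = HfApi()
api.upload_large_folder(
    repo_id="orionweller/<dataset-repo>",  # hypothetical target repository
    repo_type="dataset",
    folder_path="./train",                 # local folder holding the .mds shards
)
```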

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.
Files changed (50)
  1. .gitattributes +30 -0
  2. train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  3. train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds +3 -0
  4. train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds +3 -0
  5. train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds +3 -0
  6. train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds +3 -0
  7. train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds +3 -0
  8. train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds +3 -0
  9. train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds +3 -0
  10. train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds +3 -0
  11. train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds +3 -0
  12. train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds +3 -0
  13. train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds +3 -0
  14. train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds +3 -0
  15. train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds +3 -0
  16. train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds +3 -0
  17. train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds +3 -0
  18. train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00032.mds +3 -0
  19. train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00035.mds +3 -0
  20. train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00037.mds +3 -0
  21. train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00040.mds +3 -0
  22. train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00044.mds +3 -0
  23. train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00045.mds +3 -0
  24. train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00046.mds +3 -0
  25. train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00047.mds +3 -0
  26. train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00048.mds +3 -0
  27. train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00049.mds +3 -0
  28. train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00051.mds +3 -0
  29. train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00053.mds +3 -0
  30. train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00054.mds +3 -0
  31. train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00055.mds +3 -0
  32. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_1766-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  33. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_1766-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  34. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_20304-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  35. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_20304-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  36. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22300-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  37. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22300-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  38. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22392-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  39. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22392-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  40. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22619-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  41. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22619-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  42. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_243-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  43. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_243-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  44. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25195-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  45. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25195-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  46. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25231-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  47. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25231-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  48. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28863-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  49. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28863-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  50. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_30741-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
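The listing covers two groups of files: LFS-tracked `.mds` shards under `train/cc_en_head/...` and small JSON metadata files (`index.json`, `num_tokens.json`) under `train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/...`. To pull only one of these splits rather than the whole dataset, a filtered snapshot download is usually enough; the sketch below assumes the `huggingface_hub` client and uses a placeholder repository id.

```python
# Sketch: download a single tokenized split via path filtering.
# repo_id is a placeholder; allow_patterns limits which files are fetched.
from huggingface_hub import snapshot_download

local_dir = snapshot_download(
    repo_id="orionweller/<dataset-repo>",  # hypothetical
    repo_type="dataset",
    allow_patterns=[
        "train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/*",
    ],
)
print(local_dir)
```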
.gitattributes CHANGED
@@ -12116,3 +12116,33 @@ train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/
train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00041.mds filter=lfs diff=lfs merge=lfs -text
train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds filter=lfs diff=lfs merge=lfs -text
train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00050.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00049.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00044.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00037.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00055.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00045.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00053.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00054.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00046.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00047.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00051.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00048.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00032.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00040.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00035.mds filter=lfs diff=lfs merge=lfs -text
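Each added line tells Git to run the matching shard through the LFS filter, so only a small pointer file is committed while the shard bytes go to LFS storage. Below is a sketch of how such entries could be appended for new shards; the shard names listed are an illustrative subset.

```python
# Sketch: append .gitattributes entries so new .mds shards are tracked via Git LFS.
prefix = "train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups"
new_shards = ["shard.00049.mds", "shard.00014.mds"]  # illustrative subset

with open(".gitattributes", "a") as attrs:
    for name in new_shards:
        attrs.write(f"{prefix}/{name} filter=lfs diff=lfs merge=lfs -text\n")
```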
train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:18a9ba49e4d8b7231a9e300b03c1a6a6d0cad86b4314f830fbe2719d601fbe27
+ size 67108097
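As the diff above shows, each committed `.mds` file is only a Git LFS pointer: a spec version, a `sha256` object id, and the byte size of the real shard (about 64 MiB here). The sketch below parses such a pointer; the oid in the example is a zero-filled placeholder, not the real hash from this commit.

```python
# Sketch: parse a Git LFS pointer file like shard.00000.mds above.
def parse_lfs_pointer(text: str) -> dict:
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "version": fields["version"],
        "oid": fields["oid"].split(":", 1)[1],  # strip the "sha256:" prefix
        "size": int(fields["size"]),
    }

pointer_text = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:" + "0" * 64 + "\n"  # placeholder oid; the real one appears above
    "size 67108097\n"
)
print(parse_lfs_pointer(pointer_text))
```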
train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1853497ed691072d23e31d1a3f53ec686a76a2814a8d2b887e8c4b24459f1027
+ size 67108300
train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a2432a299d49b7138abeb70ea36d92b259d367924b418bf243a07ca249c49cdb
+ size 67107337
train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6d0544c050a3ce71bd23214f8e9c803117e105d06782bbcf7efed334b2f2fd54
+ size 67107000
train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3c0f148ece2d9f0a193b0ae7a54451ef1b9b0b67a566ac1432753825cb51fc59
+ size 67107392
train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5995fd906166cf2a586108a0659112f7c34963f8791fac7b53d314850297dd20
+ size 67108850
train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:43e66ed5335747f6a9e2ea17001cdfeac0f1d3c056b7c1ceb855f983d2b60622
+ size 67108564
train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:66b92b674c6c4afd6c2d635d540adf5ba04d00b0582b1cca4cf55293ee41d683
+ size 67108112
train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1fea15c8a9547df6ac8be6f53f3cdb3ba602abc688d6c17556319adadad08636
+ size 67108346
train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:389453fcc07368561daa194895ff4ab1623f69727cb7eea7b0d950cc7fee0a3c
+ size 67108116
train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5a9f2dd9ba883f459bea16cd974749d1bf1d76b7de2ef1c1cb74477d0bbbf5f3
+ size 67108396
train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f739248f4f71c9c4256cc1f6bcce8bf80ae86e38a12afc482365ee23833cd618
+ size 67106941
train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d03498c1f555e1baf78ae420eb7a8c71e3e569dcf0c66e8e4c10830b259d5185
+ size 67107977
train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7cbda72d018b35b81b02a5439e5be6dae70ea043e32ff9cef0b2a7af46564627
+ size 67108811
train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7a0a30cb89bd8fe18e92cc1496ee282c19a4ac6c602e606c8ab1bc38e729c375
+ size 67108824
train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:06261dbcafa149c02988dde1dce3bfc9de63311b441032c078f12bdf6e892d40
+ size 67108604
train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00032.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ab383d30316bef53c8ab4c2b39b07e5458ce9be1b8a27fd0ffcb80328f56cc9f
+ size 67108834
train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00035.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a7c3803e096b58d2b7377546921e57afb8e17f3e1aca99f11f7590bfaa797b04
+ size 67108061
train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00037.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6b3bd9f3e28964651ead2d11f8fa2a220687804b30f1834752d5fc0591ce27fe
+ size 67107553
train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00040.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b9244aa02d215f4d7fdb430d2745bc4aa4a382b1b398fa6b201136a9614a7eaa
+ size 67108761
train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00044.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:597b4144283f315552c06b471ed5666d1d3c196997c30ea7fa686523bafcd685
+ size 67108607
train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00045.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:187bac453bdc33ecb0899cd4859c5341c2a8251951c1b78db960da0193cdc813
+ size 67108068
train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00046.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:58169cd0c29b1a0919d2b197ca857f49e49833ed8b3b3b9de9c5c723a0f52a14
+ size 67107512
train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00047.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:84b1e9ca5d8f2d2bd5f7d4c65e89c35d1e9720d1a103b24b9aff5d2cbe4b7c10
+ size 67108090
train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00048.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5f50c85269735c783326fdacd05ae2fc118e32da827a971bd7ad8ace4aaa8814
+ size 67106739
train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00049.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a44bb14dfd891dab9d40f1e27ef99310e221c5b289909ae90ae03cd8f9afca0f
+ size 67108628
train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00051.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2ee94a0263314d04c2ca604ef713d226b53020303d5f4759b4c4ec81443babfa
+ size 67108855
train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00053.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4f9d22defc072f7daed25f0f4f25b17d203ceb7e7caab30e548f99769496579b
+ size 67108383
train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00054.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b16e35271a273799a6d46e8434c4440bbed6e74fadca352f857794ff2f5fc998
+ size 67107856
train/cc_en_head/cc_en_head_0174-tokenized-chunked-1024-512-128-backfill-nodups/shard.00055.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c7b5715a8c0334c6d36fa495023671742a3e7aca8ade27332632c63032eb81a0
+ size 67108285
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_1766-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107705, "hashes": {}}, "samples": 43008, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47856347, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 19203577, "hashes": {}}, "samples": 12223, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13615897, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_1766-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 41446045,
+ "num_truncated_tokens": 41410955
+ }
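Each `num_tokens.json` records `num_tokens` and `num_truncated_tokens` for its split. Summing them gives the total size of the tokenized data uploaded so far; a minimal sketch, assuming the repo is checked out locally under `train/`:

```python
# Sketch: total num_tokens across all splits by reading each num_tokens.json.
import json
from pathlib import Path

total = sum(
    json.loads(p.read_text())["num_tokens"]
    for p in Path("train").rglob("num_tokens.json")
)
print(f"{total:,} tokens across the tokenized splits")
```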
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_20304-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108507, "hashes": {}}, "samples": 42867, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47633576, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 21279051, "hashes": {}}, "samples": 13380, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 15066459, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_20304-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 42452718,
+ "num_truncated_tokens": 42415426
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22300-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108039, "hashes": {}}, "samples": 44251, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47774540, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10838829, "hashes": {}}, "samples": 7175, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7739152, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22300-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37381906,
+ "num_truncated_tokens": 37353140
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22392-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107920, "hashes": {}}, "samples": 43222, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48012344, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16045564, "hashes": {}}, "samples": 10525, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11509408, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22392-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 39913125,
+ "num_truncated_tokens": 39879529
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22619-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108324, "hashes": {}}, "samples": 43588, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47684454, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15329828, "hashes": {}}, "samples": 9957, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10899968, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22619-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 39561851,
+ "num_truncated_tokens": 39529937
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_243-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107795, "hashes": {}}, "samples": 44398, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47785802, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9619645, "hashes": {}}, "samples": 6370, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6838661, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_243-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 36792544,
+ "num_truncated_tokens": 36764771
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25195-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108553, "hashes": {}}, "samples": 43009, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47549422, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17967871, "hashes": {}}, "samples": 11511, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12695814, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25195-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 40850569,
+ "num_truncated_tokens": 40816867
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25231-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108414, "hashes": {}}, "samples": 42693, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47542911, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 20529154, "hashes": {}}, "samples": 13107, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14610610, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25231-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 42091555,
+ "num_truncated_tokens": 42055158
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28863-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108475, "hashes": {}}, "samples": 43327, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48017890, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16563635, "hashes": {}}, "samples": 10601, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11848552, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28863-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 40166752,
+ "num_truncated_tokens": 40133365
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_30741-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108271, "hashes": {}}, "samples": 44356, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47855158, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10715660, "hashes": {}}, "samples": 7025, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7643041, "hashes": {}}}], "version": 2}