Update README.md
README.md
CHANGED
@@ -167,10 +167,65 @@ process_files(file_list, output_dir)
```

#### **Sharding script**:

The standalone version of the script is also available [here](https://huggingface.co/datasets/Tonic/WellReddit/blob/main/shard.py).
```python
import json
import os


def read_dataset(file_path):
    """Read a JSON Lines file into a list of records."""
    try:
        with open(file_path, 'r', encoding='utf-8') as file:
            data = [json.loads(line) for line in file]
        print(f"Dataset loaded successfully from {file_path}.")
        return data
    except Exception as e:
        print(f"Error reading dataset from {file_path}: {e}")
        return []


def shard_dataset(dataset, num_shards):
    """Split the dataset into at most num_shards roughly equal parts."""
    # max(1, ...) guards against a zero slice step when the dataset
    # has fewer records than num_shards.
    shard_size = max(1, len(dataset) // num_shards)
    shards = [dataset[i:i + shard_size] for i in range(0, len(dataset), shard_size)]
    # Integer division can leave more than num_shards slices; fold the
    # extras into the last shard until exactly num_shards remain.
    while len(shards) > num_shards:
        shards[num_shards - 1].extend(shards.pop())
    print(f"Dataset sharded into {len(shards)} parts.")
    return shards


def write_shards(shards, output_dir):
    """Write each shard to its own shard_<n>.jsonl file in output_dir."""
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
        print(f"Created output directory at {output_dir}.")

    for i, shard in enumerate(shards):
        shard_file = os.path.join(output_dir, f'shard_{i+1}.jsonl')
        with open(shard_file, 'w', encoding='utf-8') as file:
            for item in shard:
                json.dump(item, file)
                file.write('\n')
        print(f"Shard {i+1} written to {shard_file}.")


def main():
    input_file = 'path_to_processed_dataset.jsonl'  # Update with your processed dataset file path
    output_dir = 'sharded_dataset'  # Update with your output directory for shards
    num_shards = 33

    dataset = read_dataset(input_file)
    if dataset:
        shards = shard_dataset(dataset, num_shards)
        write_shards(shards, output_dir)
        print("All shards have been successfully written.")
    else:
        print("No dataset to process.")


if __name__ == "__main__":
    main()
```
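As a quick sanity check, the shards can be globbed straight back into a single split. A minimal sketch, assuming the Hugging Face `datasets` library is installed and the default `sharded_dataset` directory from the script above:

```python
from datasets import load_dataset

# Glob every shard back into one split to verify nothing was lost.
reloaded = load_dataset(
    "json",
    data_files="sharded_dataset/shard_*.jsonl",
    split="train",
)
print(reloaded)  # the row count should match the original .jsonl
```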
Disclaimer:

Re-format this dataset before use; one possible pass is sketched below.
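A minimal sketch of one such re-formatting pass over a single shard; the `question` and `answer` field names and the prompt/completion output layout are assumptions, not the dataset's confirmed schema, so substitute the real column names:

```python
import json

def reformat_shard(in_path, out_path):
    """Rewrite each record into a prompt/completion pair.

    'question' and 'answer' are assumed field names; replace them
    with the dataset's actual keys before running.
    """
    with open(in_path, 'r', encoding='utf-8') as src, \
         open(out_path, 'w', encoding='utf-8') as dst:
        for line in src:
            record = json.loads(line)
            json.dump({
                "prompt": record["question"].strip(),
                "completion": record["answer"].strip(),
            }, dst)
            dst.write('\n')

reformat_shard('sharded_dataset/shard_1.jsonl', 'shard_1_formatted.jsonl')
```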
Probably there's a **big problem with the token count** on these long answers; one way to check is sketched below.
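A minimal sketch for quantifying that before training, assuming the `transformers` library; the `gpt2` tokenizer and the `answer` field are placeholders for whatever model and column you actually use:

```python
import json
from transformers import AutoTokenizer

# Placeholder tokenizer; use the one for the model you plan to fine-tune.
tokenizer = AutoTokenizer.from_pretrained("gpt2")

lengths = []
with open('sharded_dataset/shard_1.jsonl', 'r', encoding='utf-8') as f:
    for line in f:
        record = json.loads(line)
        # 'answer' is an assumed field name, as in the sketch above.
        lengths.append(len(tokenizer.encode(record["answer"])))

print(f"max: {max(lengths)}  mean: {sum(lengths) / len(lengths):.0f}")
print(f"answers over 2048 tokens: {sum(n > 2048 for n in lengths)}")
```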
**Good Luck!**