---
license: mit
language:
- en
tags:
- not-for-all-audiences
- chemistry
- biology
- finance
- legal
- music
- art
- code
- climate
- medical
pretty_name: Easy Reddit
size_categories:
- 10M<n<100M
---

# Easy Reddit

### Sharding script

The processed dataset was split into 33 JSONL shards with the script below:

```python
import json
import os

def read_dataset(input_file):
    """Load a JSONL dataset into a list of records."""
    try:
        with open(input_file, 'r') as file:
            dataset = [json.loads(line) for line in file]
        print(f"Loaded {len(dataset)} records from {input_file}.")
        return dataset
    except FileNotFoundError:
        print(f"Input file {input_file} not found.")
        return None

def shard_dataset(dataset, num_shards):
    """Split the dataset into num_shards roughly equal parts."""
    # Guard against a zero step when the dataset is smaller than num_shards.
    shard_size = max(1, len(dataset) // num_shards)
    shards = [dataset[i:i + shard_size] for i in range(0, len(dataset), shard_size)]
    # Fold any leftover chunk into the last shard.
    if len(shards) > num_shards:
        shards[num_shards - 1].extend(shards.pop())
    print(f"Dataset sharded into {num_shards} parts.")
    return shards

def write_shards(shards, output_dir):
    """Write each shard to its own JSONL file in output_dir."""
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
        print(f"Created output directory at {output_dir}.")
    for i, shard in enumerate(shards):
        shard_file = os.path.join(output_dir, f'shard_{i+1}.jsonl')
        with open(shard_file, 'w') as file:
            for item in shard:
                json.dump(item, file)
                file.write('\n')
        print(f"Shard {i+1} written to {shard_file}.")

def main():
    input_file = 'path_to_processed_dataset.jsonl'  # Update with your processed dataset file path
    output_dir = 'sharded_dataset'  # Update with your output directory for shards
    num_shards = 33
    dataset = read_dataset(input_file)
    if dataset:
        shards = shard_dataset(dataset, num_shards)
        write_shards(shards, output_dir)
        print("All shards have been successfully written.")
    else:
        print("No dataset to process.")

if __name__ == "__main__":
    main()
```

### Disclaimer :

🌟 Re-format this dataset before use.

🌟 There is probably a **big problem with the token count** on these long answers 😉

🌟 **Good Luck !** 🧑🏻‍🚀🚀
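Since the card asks for a re-formatting pass before use, here is a minimal sketch of loading the shards with the 🤗 `datasets` library as a starting point. The `sharded_dataset/shard_*.jsonl` path comes from the sharding script above, but the cleanup step itself is a hypothetical placeholder: inspect the actual record fields and replace it with whatever re-formatting your use case needs.

```python
# Minimal sketch, assuming the shards produced by the script above sit in
# ./sharded_dataset. The cleanup step is hypothetical -- adapt it to the
# real record structure (and to the token-count issue noted in the disclaimer).
from datasets import load_dataset

dataset = load_dataset("json", data_files="sharded_dataset/shard_*.jsonl", split="train")

def reformat(example):
    # Hypothetical cleanup: strip stray whitespace from every string field.
    return {k: v.strip() if isinstance(v, str) else v for k, v in example.items()}

dataset = dataset.map(reformat)
print(dataset)
```

Loading with a glob (`shard_*.jsonl`) keeps the pass independent of the exact shard count, so it still works if you re-shard with a different `num_shards`.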