# EasyReddit / shard.py
# Author: Tonic — initial commit (e99f3c6)
import json
import os
def read_and_process_dataset(file_path, num_shards, output_dir):
    """Split a JSONL dataset into `num_shards` roughly equal shards.

    Reads `file_path` line by line, parses each line as JSON, and writes
    the records out as `shard_1.jsonl` ... `shard_N.jsonl` under
    `output_dir` via `write_shard`. Errors are reported to stdout rather
    than raised, matching the original best-effort behavior.

    Args:
        file_path: Path to the input .jsonl file (one JSON object per line).
        num_shards: Number of shards to produce; must be >= 1.
        output_dir: Directory that receives the shard files.
    """
    try:
        if num_shards < 1:
            raise ValueError(f"num_shards must be >= 1, got {num_shards}")
        with open(file_path, 'r', encoding='utf-8') as file:
            total_lines = sum(1 for _ in file)
            # At least one record per shard: with total_lines < num_shards the
            # original computed shard_size == 0 and emitted an empty shard 0.
            shard_size = max(1, total_lines // num_shards)
            file.seek(0)  # Reset file pointer to the beginning
            current_shard = 0
            shard = []
            for current_line, line in enumerate(file):
                if not line.strip():
                    continue  # tolerate blank/trailing lines instead of aborting
                # Close out the current shard at each boundary, but let the
                # LAST shard absorb the remainder so exactly num_shards files
                # are written (the original could write num_shards + 1).
                if (current_shard < num_shards - 1
                        and current_line >= shard_size * (current_shard + 1)):
                    write_shard(shard, current_shard, output_dir)
                    shard = []
                    current_shard += 1
                shard.append(json.loads(line))
            # Write the last shard
            if shard:
                write_shard(shard, current_shard, output_dir)
        print(f"Dataset processed and sharded successfully into {num_shards} parts.")
    except Exception as e:
        # Deliberate catch-all: this is a CLI-style best-effort tool that
        # reports and continues rather than crashing the caller.
        print(f"Error processing dataset from {file_path}: {e}")
def write_shard(shard, shard_index, output_dir):
    """Write one shard of records to `output_dir/shard_{shard_index+1}.jsonl`.

    Args:
        shard: List of JSON-serializable records (one per output line).
        shard_index: Zero-based shard number; the file name is one-based.
        output_dir: Directory for the shard file; created if missing.
    """
    # exist_ok avoids the check-then-create race of the original
    # `if not os.path.exists(...)` guard.
    os.makedirs(output_dir, exist_ok=True)
    shard_file = os.path.join(output_dir, f'shard_{shard_index+1}.jsonl')
    with open(shard_file, 'w', encoding='utf-8') as file:
        for item in shard:
            json.dump(item, file)
            file.write('\n')
    print(f"Shard {shard_index+1} written to {shard_file}.")
def main():
    """Entry point: shard the synthesized Reddit dataset into 33 pieces."""
    # Adjust these two paths for your own environment before running.
    source_path = r"C:\Users\MeMyself\reddit_question_best_answers\processed\synthesized_dataset.jsonl"
    target_dir = r"C:\Users\MeMyself\reddit_question_best_answers\processed"
    read_and_process_dataset(source_path, 33, target_dir)


if __name__ == "__main__":
    main()