Tonic committed
Commit e99f3c6
1 Parent(s): a844e29

initial commit

Files changed (1)
  1. shard.py +53 -0
shard.py ADDED
@@ -0,0 +1,53 @@
import json
import math
import os

def read_and_process_dataset(file_path, num_shards, output_dir):
    try:
        with open(file_path, 'r') as file:
            # Use ceiling division so any remainder lines fold into the
            # final shard instead of spilling into an extra shard.
            shard_size = math.ceil(sum(1 for _ in file) / num_shards)
            file.seek(0)  # Reset file pointer to the beginning

            current_shard = 0
            current_line = 0
            shard = []

            for line in file:
                if current_line < shard_size * (current_shard + 1):
                    shard.append(json.loads(line))
                else:
                    write_shard(shard, current_shard, output_dir)
                    shard = [json.loads(line)]
                    current_shard += 1
                current_line += 1

            # Write the last shard
            if shard:
                write_shard(shard, current_shard, output_dir)

        print(f"Dataset processed and sharded successfully into {num_shards} parts.")
    except Exception as e:
        print(f"Error processing dataset from {file_path}: {e}")

def write_shard(shard, shard_index, output_dir):
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    shard_file = os.path.join(output_dir, f'shard_{shard_index + 1}.jsonl')
    with open(shard_file, 'w') as file:
        for item in shard:
            json.dump(item, file)
            file.write('\n')
    print(f"Shard {shard_index + 1} written to {shard_file}.")

def main():
    input_file = r"C:\Users\MeMyself\reddit_question_best_answers\processed\synthesized_dataset.jsonl"  # Update with your processed dataset file path
    output_dir = r"C:\Users\MeMyself\reddit_question_best_answers\processed"  # Update with your output directory for shards
    num_shards = 33

    read_and_process_dataset(input_file, num_shards, output_dir)

if __name__ == "__main__":
    main()
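
As a quick sanity check after running shard.py, a minimal sketch along these lines (assuming the shards land in the output_dir used in main()) counts the shards produced and the lines in each:

import glob
import os

output_dir = r"C:\Users\MeMyself\reddit_question_best_answers\processed"  # same directory main() writes to

# Collect the shard files the script produced and tally their line counts.
shards = sorted(glob.glob(os.path.join(output_dir, 'shard_*.jsonl')))
total = 0
for path in shards:
    with open(path, 'r') as f:
        count = sum(1 for _ in f)
    total += count
    print(f"{os.path.basename(path)}: {count} lines")
print(f"{len(shards)} shards, {total} lines in total")

The shard count should equal num_shards, and the total should match the line count of the input file.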