File size: 1,670 Bytes
a25b795
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
import json
import os

def read_dataset(file_path):
    """Load a JSONL dataset: one JSON object per line.

    Args:
        file_path: Path to the .jsonl input file.

    Returns:
        List of parsed records, or [] if the file cannot be read or a
        line cannot be parsed.
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as file:
            # Skip blank lines (e.g. a trailing newline) instead of
            # letting json.loads raise and discarding the whole dataset.
            data = [json.loads(line) for line in file if line.strip()]
    except (OSError, json.JSONDecodeError) as e:
        # Narrowed from `except Exception`: only I/O and parse errors
        # are expected here; anything else should propagate.
        print(f"Error reading dataset from {file_path}: {e}")
        return []
    print(f"Dataset loaded successfully from {file_path}.")
    return data

def shard_dataset(dataset, num_shards):
    """Split *dataset* into exactly *num_shards* near-equal shards.

    Shard sizes differ by at most one; earlier shards receive the
    extras. Fixes two bugs in the previous version: a zero slice step
    (ValueError) when len(dataset) < num_shards, and the leftover chunk
    being merged only once, which could return more than num_shards
    shards (e.g. 7 items into 4 shards yielded 6).

    Args:
        dataset: Sequence of records to split.
        num_shards: Number of shards to produce (must be >= 1).

    Returns:
        List of num_shards lists whose concatenation equals dataset.

    Raises:
        ValueError: If num_shards is less than 1.
    """
    if num_shards < 1:
        raise ValueError("num_shards must be >= 1")
    base, extra = divmod(len(dataset), num_shards)
    shards = []
    start = 0
    for i in range(num_shards):
        # The first `extra` shards each take one additional item.
        end = start + base + (1 if i < extra else 0)
        shards.append(dataset[start:end])
        start = end
    print(f"Dataset sharded into {num_shards} parts.")
    return shards

def write_shards(shards, output_dir):
    """Write each shard to *output_dir* as shard_<i>.jsonl (1-based).

    Args:
        shards: Iterable of lists of JSON-serializable records.
        output_dir: Directory to create (if missing) and write into.
    """
    # exist_ok avoids the check-then-create race of the previous
    # os.path.exists() guard.
    os.makedirs(output_dir, exist_ok=True)

    for i, shard in enumerate(shards, start=1):
        shard_file = os.path.join(output_dir, f'shard_{i}.jsonl')
        with open(shard_file, 'w', encoding='utf-8') as file:
            # One JSON object per line (JSONL convention).
            for item in shard:
                file.write(json.dumps(item))
                file.write('\n')
        print(f"Shard {i} written to {shard_file}.")

def main():
    """Read a processed JSONL dataset, split it, and write the shards."""
    input_file = 'path_to_processed_dataset.jsonl'  # Update with your processed dataset file path
    output_dir = 'sharded_dataset'  # Update with your output directory for shards
    num_shards = 33

    dataset = read_dataset(input_file)
    # Guard clause: nothing to do when the dataset is empty or unreadable.
    if not dataset:
        print("No dataset to process.")
        return
    write_shards(shard_dataset(dataset, num_shards), output_dir)
    print("All shards have been successfully written.")

# Script entry point: only run when executed directly, not when imported.
if __name__ == "__main__":
    main()