# Map role names used by the source datasets (ShareGPT-style 'human'/'gpt',
# 'AI', etc.) onto the standard chat roles.
roles_map = {
    'system': 'system',
    'user': 'user',
    'human': 'user',
    'assistant': 'assistant',
    'gpt': 'assistant',
    'AI': 'assistant',
}


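# Each entry below describes one source dataset:
#   - 'path', 'data_dir', 'data_files' and 'split' identify the data
#     (Hugging Face Hub dataset IDs and split slices),
#   - 'field' names the column that holds the conversation,
#   - 'transform' maps a raw row (or the selected field) to a list of
#     {'role': ..., 'content': ...} messages.
# The leading comments give the approximate download size and row count.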
pretrain_reasoning_datasets = [
    #
    # basic reasoning
    #
    # 10.8 MB, 15,770
    {'kind': 'instruct', 'path': 'AtlasUnified/Atlas-Reasoning', 'data_files': 'reasoning.csv', 'transform': lambda r: [
        {'role': 'user', 'content': r['Prompt']},
        {'role': 'assistant', 'content': r['Step-by-step reasoning'] + '\n' + r['Solution']},
    ]},
    # 1.23 GB, 859,594
    *[
        {'kind': 'instruct', 'path': 'AI-MO/NuminaMath-CoT', 'split': f'train[{i}%:{i + 10}%]', 'field': 'messages'}
        for i in range(0, 100, 10)
    ],
    # 148 MB, 72,540
    *[
        {'kind': 'instruct', 'path': 'AI-MO/NuminaMath-TIR', 'split': f'train[{i}%:{i + 10}%]', 'field': 'messages'}
        for i in range(0, 100, 10)
    ],

    #
    # math reasoning
    #
    # 1.79 MB, 3,963
    {'kind': 'instruct', 'path': 'AlgorithmicResearchGroup/math_reasoning_autoformalization_track', 'transform': lambda r: [
        {'role': 'user', 'content': r['informal_statement']},
        {'role': 'assistant', 'content': r['informal_proof'] + '\n' + r['formal_proof']},
    ]},
    # 307 MB, 19,944
    {'kind': 'instruct', 'path': 'KingNish/reasoning-base-20k', 'transform': lambda r: [
        {'role': 'user', 'content': r['user']},
        {'role': 'assistant', 'content': r['reasoning'] + '\n' + r['assistant']},
    ]},
    # 9.45 MB, 10,000
    {'kind': 'instruct', 'path': 'Aarushhh/math-reasoning-10k', 'transform': lambda r: [
        {'role': 'user', 'content': r['problem']},
        {'role': 'assistant', 'content': r['plan'] + '\n' + r['solution']},
    ]},

    #
    # cot reasoning
    #
    # 11.7 GB, 1,850,809
    *[
        {'kind': 'instruct', 'path': 'ServiceNow-AI/R1-Distill-SFT', 'data_dir': 'v0', 'split': f'train[{i}%:{i + 10}%]', 'transform': lambda r: [
            {'role': 'user', 'content': r['problem']},
            {'role': 'assistant', 'content': r['reannotated_assistant_content']},
        ]}
        for i in range(0, 100, 10)
    ],
    *[
        {'kind': 'instruct', 'path': 'ServiceNow-AI/R1-Distill-SFT', 'data_dir': 'v1', 'split': f'train[{i}%:{i + 10}%]', 'transform': lambda r: r['reannotated_messages']}
        for i in range(0, 100, 10)
    ],
    # 3.85 GB, 300k (3.98 GB, 814,334)
    *[
        {'kind': 'instruct', 'path': 'cognitivecomputations/dolphin-r1', 'data_files': 'dolphin-r1-reasoning-deepseek.jsonl', 'split': f'train[{i}%:{i + 10}%]', 'transform': lambda r: [
            *r['messages'],
            # {'role': 'assistant', 'content': (('<think>\n' + r['reasoning'] + '\n</think>\n') if r.get('reasoning') else '') + r['answer']},
            {'role': 'assistant', 'content': (r.get('reasoning') or '') + (r.get('answer') or '')},
        ]}
        for i in range(0, 100, 10)
    ],
    # 3.49 GB, 300k (3.98 GB, 814,334)
    *[
        {'kind': 'instruct', 'path': 'cognitivecomputations/dolphin-r1', 'data_files': 'dolphin-r1-reasoning-flash.jsonl', 'split': f'train[{i}%:{i + 10}%]', 'transform': lambda r: [
            *r['messages'],
            # {'role': 'assistant', 'content': (('<think>\n' + r['reasoning'] + '\n</think>\n') if r.get('reasoning') else '') + r['answer']},
            {'role': 'assistant', 'content': (r.get('reasoning') or '') + (r.get('answer') or '')},
        ]}
        for i in range(0, 100, 10)
    ],
    # 1.08 GB, 113,957
    {'kind': 'instruct', 'path': 'open-thoughts/OpenThoughts-114k', 'split': 'train', 'field': 'conversations', 'transform': lambda msgs: [
        {'role': roles_map[m['from']], 'content': m['value']}
        for m in msgs
    ]},
    # 384 MB, 77,685
    {'kind': 'instruct', 'path': 'O1-OPEN/OpenO1-SFT', 'split': 'train', 'transform': lambda r: [
        {'role': 'user', 'content': r['instruction']},
        {'role': 'assistant', 'content': r['output']},
    ]},
    # 6.88 MB, 1,000
    {'kind': 'instruct', 'path': 'simplescaling/s1K', 'split': 'train', 'transform': lambda r: [
        {'role': 'user', 'content': r['question']},
        {'role': 'assistant', 'content': '<think>\n' + '\n'.join(r['thinking_trajectories']) + '\n</think>\n' + r['solution']},
    ]},
]
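

# Minimal usage sketch (illustrative only; the real loader that consumes these
# specs lives elsewhere and may differ). It assumes the Hugging Face `datasets`
# library, and `iter_chat_messages` is a hypothetical helper name.
def iter_chat_messages(spec):
    # Imported locally so this config stays importable without `datasets` installed.
    from datasets import load_dataset

    dataset = load_dataset(
        spec['path'],
        data_dir=spec.get('data_dir'),
        data_files=spec.get('data_files'),
        split=spec.get('split', 'train'),  # assume plain 'train' when no split is given
    )

    for row in dataset:
        if 'transform' in spec:
            # When both 'field' and 'transform' are present (e.g. OpenThoughts-114k),
            # the transform receives the selected column; otherwise the whole row.
            yield spec['transform'](row[spec['field']] if 'field' in spec else row)
        else:
            # Without a transform, 'field' already holds chat-format messages.
            yield row[spec['field']]


# Example:
#   for messages in iter_chat_messages(pretrain_reasoning_datasets[0]):
#       print(messages)
#       break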