# Normalize heterogeneous role labels (e.g. ShareGPT-style 'human'/'gpt') to
# the standard chat roles used downstream.
roles_map = {
    'system': 'system',
    'user': 'user',
    'human': 'user',
    'assistant': 'assistant',
    'gpt': 'assistant',
    'AI': 'assistant',
}

R1_SYSTEM_PROMPT = '''\
You are an AI assistant.

Your primary directive is to provide well-reasoned, structured, and extensively detailed responses.

Formatting Requirements:
- Always structure your replies using: <think>{reasoning}</think>{answer}
- The <think></think> block should contain at least six reasoning steps when applicable.
- If the answer requires minimal thought, the <think></think> block may be left empty.
- The user does not see the <think></think> section. Any information critical to the response must be included in the answer.
- If you notice that you have engaged in circular reasoning or repetition, immediately terminate {reasoning} with a </think> and proceed to the {answer}

Response Guidelines:
- Detailed and Structured: Use rich Markdown formatting for clarity and readability.
- Scientific and Logical Approach: Your explanations should reflect the depth and precision of the greatest scientific minds.
- Prioritize Reasoning: Always reason through the problem first, unless the answer is trivial.
- Concise yet Complete: Ensure responses are informative, yet to the point without unnecessary elaboration.
- Maintain a professional, intelligent, and analytical tone in all interactions.'''
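
# Illustration only (not part of the original file): the prompt above asks the
# model to reply as '<think>{reasoning}</think>{answer}', so a completion can
# be split back into its two parts roughly like this:
def split_think_answer(text: str) -> tuple[str, str]:
    """Return (reasoning, answer); reasoning is '' when no <think> block is present."""
    if '<think>' in text and '</think>' in text:
        head, _, answer = text.partition('</think>')
        reasoning = head.split('<think>', 1)[1].strip()
        return reasoning, answer.strip()
    return '', text.strip()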

core_instruct_datasets = [
    # 65.7 MB, 11,578 rows in the full dataset; ~1.89k rows in func-calling-singleturn.json.
    # ShareGPT-style turns ({'from': ..., 'value': ...}) are remapped to chat
    # messages ({'role': ..., 'content': ...}) via roles_map.
    {'kind': 'instruct', 'path': 'NousResearch/hermes-function-calling-v1', 'data_files': 'func-calling-singleturn.json', 'split': 'train', 'field': 'conversations', 'transform': lambda msgs: [
        {'role': roles_map[m['from']], 'content': m['value']}
        for m in msgs
    ]},

    # 21.1 MB, 1,000 rows.
    # Each row becomes a system/user/assistant triple, with the DeepSeek
    # reasoning trace wrapped in <think> tags ahead of the solution.
    {'kind': 'instruct', 'path': 'simplescaling/s1K-1.1', 'split': 'train', 'transform': lambda r: [
        {'role': 'system', 'content': R1_SYSTEM_PROMPT},
        {'role': 'user', 'content': r.get('question') or ''},
        {'role': 'assistant', 'content': '<think>\n' + (r.get('deepseek_thinking_trajectory') or '') + '\n</think>\n' + (r.get('solution') or '')},
    ]},
]
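
# --- Usage sketch (assumption: not part of this file) ---
# Elsewhere in the project these configs are presumably consumed along these
# lines: load each dataset with `datasets.load_dataset` and apply its
# `transform` per row to produce chat-format messages. `iter_messages` is a
# hypothetical helper named here only for illustration.
from datasets import load_dataset

def iter_messages(cfg):
    ds = load_dataset(
        cfg['path'],
        data_files=cfg.get('data_files'),
        split=cfg['split'],
    )
    field = cfg.get('field')
    transform = cfg['transform']

    for row in ds:
        # When 'field' is set, the transform receives that column (e.g. the
        # 'conversations' list); otherwise it receives the whole row dict.
        yield transform(row[field] if field else row)

# Example:
# for messages in iter_messages(core_instruct_datasets[0]):
#     ...  # messages is a list of {'role': ..., 'content': ...} dicts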