# scripts/prepare_contrain_1_conversation_dataset.py
from typing import Callable, Optional, Union
from functools import partial
import numpy as np
from datasets import load_dataset
from litdata import optimize, TokensLoader
from litgpt.tokenizer import Tokenizer


def batch_dict_iterator(path: str,
                        name: Optional[str]=None,
                        data_dir: Optional[str]=None,
                        data_files: Optional[str]=None,
                        keep_in_memory: bool=False,
                        revision: Optional[str]=None,
                        split: str='train',
                        num_proc: Optional[int]=None,
                        field: Optional[str]=None,
                        transform: Optional[Callable]=None,
                        format: Optional[Union[str, Callable]]=None):
    assert format is None or isinstance(format, str) or callable(format)

    dataset = load_dataset(path=path,
                           name=name,
                           data_dir=data_dir,
                           data_files=data_files,
                           keep_in_memory=keep_in_memory,
                           revision=revision,
                           split=split,
                           trust_remote_code=True,
                           num_proc=num_proc)

    for row in dataset:
        if format is not None:
            # Plain-text datasets: `format` is either a callable over the row
            # or a str.format template filled with the row's columns.
            text = format(row) if callable(format) else format.format(**row)
            yield text
            continue

        # Conversation datasets: `field` selects the column holding the messages,
        # and `transform` (if given) normalizes them into
        # [{'role': ..., 'content': ...}, ...] form (see `roles_map` below).
        value = row[field] if field is not None else row
        messages = transform(value) if callable(transform) else value

        # NOTE: this ChatML-style rendering is an assumption; the original script
        # does not specify how chat messages are flattened into plain text
        # before tokenization.
        text = ''.join(
            f"<|im_start|>{m['role']}\n{m['content']}<|im_end|>\n"
            for m in messages
        )
        yield text


def batch_iterator(dataset_config: Union[list, dict]):
    # A single config dict describes one dataset; a list groups several datasets
    # into one work item (see the nested entries in `datasets_configs` below).
    if isinstance(dataset_config, dict):
        for text in batch_dict_iterator(**dataset_config):
            yield text
    elif isinstance(dataset_config, list):
        for dc in dataset_config:
            for text in batch_dict_iterator(**dc):
                yield text
    else:
        raise ValueError(f'Unsupported dataset_config type: {type(dataset_config)!r}')


def tokenize_fn(dataset_config: Union[dict, list], tokenizer: Optional[Tokenizer]=None):
    assert isinstance(dataset_config, (dict, list))

    for text in batch_iterator(dataset_config):
        # eos=True appends the end-of-sequence token after each document.
        text_ids = tokenizer.encode(text, bos=False, eos=True)
        yield text_ids

roles_map = {
'system': 'system',
'user': 'user',
'human': 'user',
'assistant': 'assistant',
'gpt': 'assistant',
'AI': 'assistant',
}
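
# Example of the normalization applied by the ShareGPT-style `transform` lambdas
# below, using `roles_map` (hypothetical row, for illustration only):
#
#   [{'from': 'human', 'value': 'Hi'}, {'from': 'gpt', 'value': 'Hello!'}]
#   -> [{'role': 'user', 'content': 'Hi'}, {'role': 'assistant', 'content': 'Hello!'}]
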
datasets_configs = [
#
# cognition
#
# https://huggingface.co/datasets/Tongjilibo/self_cognition
#
# general instructs
#
# arcee-ai/The-Tome - 4.58 GB, 1,752,473
# - arcee-ai/infini-instruct-top-500k (BAAI/Infinity-Instruct)
# - TIGER-Lab/WebInstructSub (top-500k) - IGNORE
# - jondurbin/airoboros-3.2
# - gardner/glaive-function-calling-v2-sharegpt
# - arcee-ai/reasoning-sharegpt (SkunkworksAI/reasoning-0.01)
# - arcee-ai/self-instruct-sharegpt (bigcode/self-oss-instruct-sc2-exec-filter-50k)
# - cognitivecomputations/ultrainteract_trajectories_sharegpt
# - cognitivecomputations/SystemChat-2.0
# - arcee-ai/qwen2-72b-magpie-en
{'path': 'arcee-ai/The-Tome', 'field': 'conversations', 'transform': lambda msgs: [{'role': roles_map[m['from']], 'content': m['value']} for m in msgs]},
# teknium/OpenHermes-2.5 - 1.94 GB, 1,001,551
# - jondurbin/airoboros-2.2 - IGNORE
# - https://huggingface.co/camel-ai - CamelAI Domain Expert Datasets (Physics, Math, Chemistry & Biology)
# - lmsys/lmsys-chat-1m - IGNORE
# - CollectiveCognition/chats-data-2023-09-22
# - CoT Alpaca GPT4
# - Evol Instruct 70K && 140K
# - glaiveai/glaive-code-assistant
# - teknium/GPT4-LLM-Cleaned
# - https://github.com/teknium1/GPTeacher
# - https://github.com/CogStack/OpenGPT
# - meta-math/MetaMathQA
# - Open-Orca/SlimOrca
# - garage-bAInd/Open-Platypus
# - anon8231489123/ShareGPT_Vicuna_unfiltered - IGNORE
# - https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM
{'path': 'teknium/OpenHermes-2.5', 'field': 'conversations', 'transform': lambda msgs: [{'role': roles_map[m['from']], 'content': m['value']} for m in msgs]},
#
# math
#
# 6.07 GB, 11,402,286
{'path': 'ai2-adapt-dev/openmath-2-math', 'field': 'messages'},
#
# tool/function calling
#
# 65.7 MB, 11,578
{'path': 'NousResearch/hermes-function-calling-v1', 'field': 'conversations', 'transform': lambda msgs: [{'role': roles_map[m['from']], 'content': m['value']} for m in msgs]},
#
# agent
#
# 1.51 GB, 485,874
{'path': 'arcee-ai/agent-data', 'field': 'conversations', 'transform': lambda msgs: [{'role': roles_map[m['from']], 'content': m['value']} for m in msgs]},
#
# conversation, role-play
#
[
{'path': 'AtlasUnified/atlas-converse', 'field': 'conversations', 'transform': lambda msgs: [{'role': roles_map[m['from']], 'content': m['value']} for m in msgs]}, # 3.26 MB + 4.82 MB + 5.31 MB, <10k
{'path': 'PJMixers/hieunguyenminh_roleplay-deduped-ShareGPT', 'field': 'conversations'}, # 3.24 MB, 1,054
{'path': 'TokenBender/roleplay_alpaca', 'transform': lambda r: [{'role': 'user', 'content': r['instruction']}, {'role': 'assistant', 'content': r['output']}]}, # 10.2 MB, 30,530
],
#
# reflection
#
[
{'path': 'dvilasuero/reflection-v1-gpt-4o-judge', 'transform': lambda r: [{'role': 'system', 'content': r['system']}, {'role': 'user', 'content': r['prompt']}, {'role': 'assistant', 'content': r['response']}]}, # 4.17 MB, 1,000
{'path': 'dvilasuero/reflection-v1-openai-o-mini-judge', 'transform': lambda r: [{'role': 'system', 'content': r['system']}, {'role': 'user', 'content': r['prompt']}, {'role': 'assistant', 'content': r['response']}]}, # 12.4 MB, 3,000
        {'path': 'dvilasuero/reflection-v1-final-dedup', 'transform': lambda r: [{'role': 'system', 'content': r['system']}, {'role': 'user', 'content': r['prompt']}, {'role': 'assistant', 'content': r['response']}]}, # 70.8 MB, 36,549
{'path': 'flozi00/reflection-qwen2.5-72b-260924', 'transform': lambda r: [r['system'][0], {'role': 'user', 'content': r['input']}, {'role': 'assistant', 'content': r['reflection'] + '\n' + r['output']}]}, # 30.6 MB, 25,391
{'path': 'gretelai/synthetic-gsm8k-reflection-405b', 'split': 'train+test', 'transform': lambda r: [{'role': 'user', 'content': r['question']}, {'role': 'assistant', 'content': r['answer_with_tags']}]}, # 26.8 MB, 23,164
],
]
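
# Each top-level entry of `datasets_configs` (a single dict or a list of dicts) is
# handed to `tokenize_fn` as one work item by optimize(); `batch_iterator` then
# flattens list entries, so grouped datasets (e.g. the role-play and reflection
# groups above) are processed together as one input.
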
outputs = optimize(
fn=partial(tokenize_fn, tokenizer=Tokenizer('..')),
inputs=datasets_configs,
output_dir='../contrain-data/',
    # Number of tokens to store per chunk; roughly 64 MB of tokens per chunk.
# chunk_size=(2049 * 8012),
chunk_size=(8192 * 2003),
num_workers=32,
# compression='zstd',
)
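
# Optional read-back check (a sketch, not part of the original pipeline): stream the
# optimized chunks with TokensLoader, which is imported above but otherwise unused here.
# The block_size below is an assumption (context length + 1 tokens per block); adjust it
# to the model's actual context length.
#
# from litdata import StreamingDataset
#
# dataset = StreamingDataset(
#     input_dir='../contrain-data/',
#     item_loader=TokensLoader(block_size=(8192 + 1)),
# )
#
# print(f'number of blocks: {len(dataset)}')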