Datasets:
File size: 1,009 Bytes
31d7612 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 |
from datasets import Dataset, load_dataset
from transformers import AutoTokenizer
# Context-window budget for the fine-tuning prompts (in tokens).
max_seq = 2048
# Tokenizer matching the target model, used only to measure prompt length.
tokenizer = AutoTokenizer.from_pretrained('models/RedPajama-INCITE-Instruct-7B')
def make_prompt(code):
    """Wrap *code* in the Alpaca-style instruction/response template."""
    template = (
        'Below is an instruction that describes a task. '
        'Write a response that appropriately completes the request.'
        '\n\n### Instruction:\n{}\n\n### Response:\n'
    )
    return template.format(code)
def is_not_too_long(data):
    """Return True when the tokenized prompt for data['content'] fits
    strictly under the max_seq token budget.

    Intended as a predicate for datasets' ``filter``.
    """
    token_ids = tokenizer.encode(make_prompt(data['content']))
    return len(token_ids) < max_seq
def deduplicate_dicts(dicts):
    """Return the dicts in *dicts* with duplicates removed.

    Two dicts are considered duplicates when their 'content' values are
    equal. Order is preserved and the first occurrence wins. Dicts that
    lack a 'content' key all map to the shared key None, so only the
    first such dict is kept (unchanged from the original behavior).

    Args:
        dicts: iterable of dicts, each expected to carry a 'content' key.

    Returns:
        list of the first-seen dict for each distinct 'content' value.
    """
    # A set is the idiomatic membership structure; the original used a
    # dict with throwaway True values.
    seen = set()
    result = []
    for d in dicts:
        content = d.get('content')
        if content not in seen:
            seen.add(content)
            result.append(d)
    return result
# Pipeline: load the raw TypeScript chunks, drop rows whose rendered
# prompt would not fit in the model context window, deduplicate by
# content, then write the cleaned split back out as JSON.
raw_dataset = load_dataset('json', data_files='ts_parser/ts-chunks.jsonl')
within_limit = raw_dataset.filter(is_not_too_long)
unique_rows = deduplicate_dicts(within_limit['train'])
cleaned = Dataset.from_list(unique_rows)
print(cleaned)
cleaned.to_json('typescript-chunks.json')
|