import os
from huggingface_hub import hf_hub_download
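
# Download the MIRACL corpus shards for a few languages from the Hugging Face Hub,
# decompress them, and merge each language's shards into a single JSONL file
# under /home/dju/datasets/essir-xlir/miracl/.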
# ALL languages available in MIRACL, per split (kept here for reference).
langs = {
    'train': ['ar', 'bn', 'en', 'es', 'fa', 'fi', 'fr', 'hi', 'id', 'ja', 'ko', 'ru', 'sw', 'te', 'th', 'zh'],
    'dev': ['ar', 'bn', 'en', 'es', 'fa', 'fi', 'fr', 'hi', 'id', 'ja', 'ko', 'ru', 'sw', 'te', 'th', 'zh'],
    'testA': ['ar', 'bn', 'en', 'fi', 'id', 'ja', 'ko', 'ru', 'sw', 'te', 'th']
}

# The languages we used in ESSIR.
# The MIRACL corpus is sharded; this maps each language to its number of corpus shards.
languages2filesize = {'en': 66, 'fa': 5, 'ru': 20, 'zh': 10}
# Download every shard of each language's corpus.
for lang in languages2filesize.keys():
    for shard in range(languages2filesize[lang]):
        hf_hub_download('miracl/miracl-corpus',
                        filename=f'docs-{shard}.jsonl.gz',
                        subfolder=f'miracl-corpus-v1.0-{lang}',
                        repo_type='dataset',
                        cache_dir='/home/dju/datasets/essir-xlir/miracl/archived',
                        # save each shard under a predictable name instead of the hashed cache name
                        force_filename=f'miracl-{lang}-{shard}.jsonl.gz')

# Decompress all downloaded shards and clean up the hub lock files.
cmd = 'gunzip /home/dju/datasets/essir-xlir/miracl/archived/*gz'
os.system(cmd)
cmd = 'rm -rvf /home/dju/datasets/essir-xlir/miracl/archived/*lock'
os.system(cmd)

for lang in languages2filesize.keys():
    # Merge the language's shards into one big JSONL file.
    cmd = f'cat /home/dju/datasets/essir-xlir/miracl/archived/miracl-{lang}*.jsonl > /home/dju/datasets/essir-xlir/miracl/miracl-{lang}.jsonl'
    os.system(cmd)
    # Remove the now-merged shards.
    cmd = f'rm -rf /home/dju/datasets/essir-xlir/miracl/archived/miracl-{lang}*.jsonl'
    os.system(cmd)