#!/usr/bin/env bash
# Pipeline setup: build the `wet_lines` CommonCrawl extraction tool,
# then fetch and shard the SeamlessAlign enA-jpn speech-to-text data.
# (Stray markdown-table ` |` delimiters removed from every line.)
# --- Build the `wet_lines` tool from kpu/preprocess (wet branch) ---------
# Build dependencies: libcurl (downloads), bzip2 + xz (WET decompression).
sudo apt-get install -y libcurl4-openssl-dev libbz2-dev liblzma-dev

# Recent Boost from the boost-latest PPA; purge any stale system Boost
# first so the build links against a single consistent version.
sudo add-apt-repository ppa:boost-latest/ppa -y
sudo apt-get update
sudo apt-get purge -y 'boost*'   # quoted: keep the glob for apt, not the shell
sudo apt-get install -y libboost-all-dev

git clone https://github.com/kpu/preprocess
cd preprocess || exit 1          # do not keep going in the wrong directory
git checkout wet
git submodule update --init --recursive
mkdir -p build
cd build || exit 1
cmake ..
make -j4

# Aliases are NOT expanded in non-interactive shells by default; enable
# that explicitly so later pipeline stages can invoke `wet_lines` by name.
shopt -s expand_aliases
alias wet_lines="${PWD}/bin/wet_lines"
cd ../../
|
# --------------------------------------------------------------------------
# --- Fetch WET text for each metadata chunk, CHUNK_SIZE jobs in parallel ---
# DIRECTION_SPEECH / DIRECTION_TEXT select the enA-jpn direction; the
# download script below reads them from the environment.
export DIRECTION_SPEECH="enA"
export DIRECTION_TEXT="jpn"
export CHUNK_SIZE=20

python download_s2t_metadata.py

for ((i = 1; i <= CHUNK_SIZE; i++)); do
  # Keep only CommonCrawl rows, convert TSV to the space-separated form
  # wet_lines expects, and save each chunk's extracted text.
  grep -E '^crawl-data' \
      "seamless.dataset.metadata.public.${DIRECTION_SPEECH}-${DIRECTION_TEXT}.withduration.reordered.batch_${i}.tsv" \
    | tr '\t' ' ' \
    | wet_lines \
    | tee "metadata.${DIRECTION_SPEECH}-${DIRECTION_TEXT}.batch_${i}.tsv" &
done
# Bug fix: block until every background fetch finishes — the original ran
# format_text.py immediately, racing against the still-downloading chunks.
wait

python format_text.py
|
|
|
# --- Download the speech-to-text dataset in 60 shards of 2,500 rows each ---
# Loop-invariant settings hoisted out of the loop (the original re-exported
# them on every iteration). fetch_dataset_s2t.py reads all of these from
# the environment.
export N_POOL=15
export DIRECTION_SPEECH="enA"
export DIRECTION_TEXT="jpn"

for ((i = 1; i <= 60; i++)); do
  export DATASET_ID="${i}"
  # Half-open row range [LINE_NO_START, LINE_NO_END) for this shard.
  export LINE_NO_START=$(( (DATASET_ID - 1) * 2500 ))
  export LINE_NO_END=$(( DATASET_ID * 2500 ))
  echo "${LINE_NO_START}"
  python fetch_dataset_s2t.py
done
|
# --------------------------------------------------------------------------
# Pre-download the NLLB eng_Latn-jpn_Jpan parallel corpus into the local
# Hugging Face datasets cache so later stages can load it offline.
# (Removed the stray trailing `|` that left the command as a dangling pipe.)
python -c "from datasets import load_dataset; load_dataset('allenai/nllb', 'eng_Latn-jpn_Jpan')"
|