#!/bin/bash
# Parallel top-p sampling inference for the WMT2024 ja-zh test set:
# one background job per input shard, one shard per GPU.
set -eux

LLM_RECIPES_DIR=/code/llm-recipes
# Load the tokens used by the inference run (e.g. Hugging Face credentials).
source "${LLM_RECIPES_DIR}/scripts/wmt2024/tokens.sh"
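# The five input shards (*.jsonl.00 through *.jsonl.04) are assumed to exist
# already. Hypothetical sketch (not part of this script): they could be
# produced with GNU split, keeping lines intact, e.g.
#   split --numeric-suffixes --suffix-length=2 --number=l/5 \
#     wmttest2024.src.sentence_splited.with_template.ja-zh.ja.jsonl \
#     wmttest2024.src.sentence_splited.with_template.ja-zh.ja.jsonl.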
# Launch one inference job per shard in the background (-g: device index,
# so shard i runs on GPU i).
for i in $(seq 0 4); do
    python "${LLM_RECIPES_DIR}/tools/hf_inference.py" \
        --model /work/models/translation_finetuned_hf/llama2-ja-zh-continuous-pretrained-v0-dev-finetune-chunked-docs-cleaned-all-averaged-246-250 \
        -i /work/wmt2024_test/LLM/split/ja-zh/wmttest2024.src.sentence_splited.with_template.ja-zh.ja.jsonl.0${i} \
        -o /work/translation/wmt24_test/ja-zh/llama2-top-p-0.95/split_0${i} \
        -g "${i}" \
        -b 503 \
        --attn_implementation sdpa \
        --dynamic_max_new_token_ratio 2.0 \
        --num_return_sequences 20 \
        --do_sample \
        --top_p 0.95 \
        --max_input_tokens 503 &
done
# Block until all five background jobs have finished.
wait
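# Hypothetical post-processing sketch (not part of the original pipeline):
# if each -o target above is a single JSONL file, the five shards could be
# merged back into one file as below. Adjust if hf_inference.py instead
# writes a directory per split.
#   cat /work/translation/wmt24_test/ja-zh/llama2-top-p-0.95/split_0{0..4} \
#     > /work/translation/wmt24_test/ja-zh/llama2-top-p-0.95/merged.jsonl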