#!/usr/bin/env bash
# Copyright (c) Guangsheng Bao.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Set up the environment
echo $(date), Setting up the environment ...
set -e  # exit on first error
# Prepare output folders
exp_path=exp_main_ext
data_path=$exp_path/data
res_path=$exp_path/results
mkdir -p "$exp_path" "$data_path" "$res_path"
datasets="xsum squad writing"
source_models="bloom-7b1 opt-13b llama-13b llama2-13b"
# Prepare datasets
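# data_builder.py pairs human-written passages from each dataset with machine
# text generated by the given base model (500 pairs each, per --n_samples).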
for D in $datasets; do
  for M in $source_models; do
    echo $(date), Preparing dataset ${D}_${M} ...
    python scripts/data_builder.py --dataset $D --n_samples 500 --base_model_name $M --output_file $data_path/${D}_${M}
  done
done
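# NOTE: this 'exit' stops the script after dataset preparation;
# remove or comment it out to run the evaluations below.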
exit
# White-box Setting
echo $(date), Evaluate models in the white-box setting:
# evaluate Fast-DetectGPT and fast baselines
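# Here the source model $M serves as both the reference (sampling) and the
# scoring model, so fast_detect_gpt.py measures the conditional probability
# curvature under the same model that generated the text. baselines.py runs
# the fast zero-shot baselines (e.g. likelihood, log-rank, entropy) with the
# same scoring model.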
for D in $datasets; do
  for M in $source_models; do
    echo $(date), Evaluating Fast-DetectGPT on ${D}_${M} ...
    python scripts/fast_detect_gpt.py --reference_model_name $M --scoring_model_name $M --dataset $D \
      --dataset_file $data_path/${D}_${M} --output_file $res_path/${D}_${M}
    echo $(date), Evaluating baseline methods on ${D}_${M} ...
    python scripts/baselines.py --scoring_model_name $M --dataset $D \
      --dataset_file $data_path/${D}_${M} --output_file $res_path/${D}_${M}
  done
done
# evaluate DetectGPT and its improved variant DetectLLM
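# detect_gpt.py scores each passage by the drop in log probability under 100
# T5-3B mask-fill perturbations and saves the perturbed texts alongside the
# dataset; detect_llm.py then reuses that perturbation file to compute the
# DetectLLM scores (LRR and NPR).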
for D in $datasets; do
  for M in $source_models; do
    echo $(date), Evaluating DetectGPT on ${D}_${M} ...
    python scripts/detect_gpt.py --scoring_model_name $M --mask_filling_model_name t5-3b --n_perturbations 100 --dataset $D \
      --dataset_file $data_path/${D}_${M} --output_file $res_path/${D}_${M}
    # DetectLLM reuses the perturbations generated by detect_gpt.py above
    echo $(date), Evaluating DetectLLM methods on ${D}_${M} ...
    python scripts/detect_llm.py --scoring_model_name $M --dataset $D \
      --dataset_file $data_path/${D}_${M}.t5-3b.perturbation_100 --output_file $res_path/${D}_${M}
  done
done
# Black-box Setting
echo $(date), Evaluate models in the black-box setting:
scoring_models="gpt-neo-2.7B"
# evaluate Fast-DetectGPT
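# In the black-box setting the source model is assumed inaccessible, so
# surrogates stand in for it: GPT-J-6B as the sampling (reference) model and
# GPT-Neo-2.7B as the scoring model.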
for D in $datasets; do
  for M in $source_models; do
    M1=gpt-j-6B  # sampling model
    for M2 in $scoring_models; do
      echo $(date), Evaluating Fast-DetectGPT on ${D}_${M}.${M1}_${M2} ...
      python scripts/fast_detect_gpt.py --reference_model_name ${M1} --scoring_model_name ${M2} --dataset $D \
        --dataset_file $data_path/${D}_${M} --output_file $res_path/${D}_${M}.${M1}_${M2}
    done
  done
done
# evaluate DetectGPT and its improved variant DetectLLM
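# The black-box variants likewise swap in surrogates: T5-3B generates the
# perturbations and GPT-Neo-2.7B scores them, with no access to the source
# model $M that produced the text.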
for D in $datasets; do
  for M in $source_models; do
    M1=t5-3b  # perturbation model
    for M2 in $scoring_models; do
      echo $(date), Evaluating DetectGPT on ${D}_${M}.${M1}_${M2} ...
      python scripts/detect_gpt.py --mask_filling_model_name ${M1} --scoring_model_name ${M2} --n_perturbations 100 --dataset $D \
        --dataset_file $data_path/${D}_${M} --output_file $res_path/${D}_${M}.${M1}_${M2}
      # DetectLLM reuses the perturbations generated by detect_gpt.py above
      echo $(date), Evaluating DetectLLM methods on ${D}_${M}.${M1}_${M2} ...
      python scripts/detect_llm.py --scoring_model_name ${M2} --dataset $D \
        --dataset_file $data_path/${D}_${M}.${M1}.perturbation_100 --output_file $res_path/${D}_${M}.${M1}_${M2}
    done
  done
done