model_family: llama-7b
model_path: openlm-research/open_llama_7b
LoRA:
  r: 8
  alpha: 32
  dropout: 0.05
lr: 2.0e-05
split: WikiMIA_QA_256
data_path: lluvecwonv/WikiMIA_QA
gradient_accumulation_steps: 4
num_epochs: 10
forget_loss: grad_ascent
file_path: /root/kdu/output/openlm-research/open_llama_7b_lluvecwonv_WikiMIA_QA_WikiMIA_QA_256_logit/
unlearned_model: lluvecwonv/WikiMIA_QA_256_0_10
save_dir: ${model_family}/${split}_${unlearned_model}_${num_epochs}
overwrite_dir: true
weight_decay: 0.01
save_model: true
eval_while_train: false
eval_only: false
seed: 42
master_port: 18765
eval:
  model_path: ${..model_path}
  model_family: ${..model_family}
  save_dir: ${..save_dir}
  data_path: lluvecwonv/WikiMIA_QA
  split: ${..split}
  eval_task:
    - eval_log
    - eval_log_forget
  question_key:
    - Query
    - Query
  answer_key:
    - Answer
    - Answer
  generation:
    max_length: 200
    max_new_tokens: null
  save_generated_text: true
  ds_size: 300
  overwrite: true
  use_pretrained: false
  batch_size: 4
  retain_result: null
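The ${...} values above are OmegaConf-style interpolations: top-level references such as ${model_family} are expanded at access time, and the ${..key} forms inside eval are relative interpolations that resolve one level up, to the matching top-level key. A minimal sketch of how they resolve, assuming the config is loaded with OmegaConf (the filename config.yaml is hypothetical):

from omegaconf import OmegaConf

cfg = OmegaConf.load("config.yaml")

# ${..model_path} inside eval resolves one level up, to the top-level model_path.
assert cfg.eval.model_path == cfg.model_path  # openlm-research/open_llama_7b

# save_dir is composed from other top-level keys when accessed:
# llama-7b/WikiMIA_QA_256_lluvecwonv/WikiMIA_QA_256_0_10_10
print(cfg.save_dir)

# Dump the config with all interpolations expanded.
print(OmegaConf.to_yaml(cfg, resolve=True))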