base_model: NewEden/Erebus-Control-9B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer

hub_model_id: NewEden/Erebus-Control-9B-Final
hub_strategy: "all_checkpoints"
push_dataset_to_hub:
hf_use_auth_token: true

plugins:
  - axolotl.integrations.liger.LigerPlugin
liger_rope: true
liger_rms_norm: true
liger_swiglu: true
#liger_cross_entropy: true
liger_fused_linear_cross_entropy: true

load_in_8bit: false
load_in_4bit: false
strict: false

datasets:
  - path: NewEden/4chan-smol-sharegpt
    type: sharegpt
    conversation: chatml
  - path: anthracite-org/kalo-opus-instruct-22k-no-refusal
    type: sharegpt
    conversation: chatml
  - path: NewEden/Claude-Instruct-2.7k
    type: sharegpt
    conversation: chatml
  - path: Epiculous/Synthstruct-Gens-v1.1-Filtered-n-Cleaned
    type: sharegpt
    conversation: chatml
  - path: NewEden/Claude-Instruct-5K
    type: sharegpt
    conversation: chatml
  - path: NewEden/OpenCAI-ShareGPT
    type: sharegpt
    conversation: chatml
  - path: NewEden/Roleplay-Logs-Sharegpt-Ngram-cleaned
    type: sharegpt
    conversation: chatml
  - path: NewEden/PIPPA-Mega-Filtered
    type: sharegpt
    conversation: chatml

#chat_template: chatml
shuffle_merged_datasets: true
#default_system_message: "You are an assistant that responds to the user."
dataset_prepared_path: Control-9B
val_set_size: 0.0
output_dir: Control-9B

sequence_len: 8192
sample_packing: true
pad_to_sequence_len: true

wandb_project: Erebus-Control-9b
wandb_entity:
wandb_watch:
wandb_name: attempt-1
wandb_log_model:

gradient_accumulation_steps: 4
micro_batch_size: 1
num_epochs: 4
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.00001

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: false

gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true

warmup_steps: 10
evals_per_epoch:
eval_table_size:
eval_max_new_tokens:
saves_per_epoch: 2
debug:
deepspeed: deepspeed_configs/zero2.json
weight_decay: 0.001
fsdp:
fsdp_config:
special_tokens:
  pad_token:
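
# Usage note (an assumption, not part of the original config): with axolotl
# installed, a config like this is typically tokenized and then launched via
# accelerate, for example:
#
#   python -m axolotl.cli.preprocess erebus-control-9b.yaml
#   accelerate launch -m axolotl.cli.train erebus-control-9b.yaml
#
# The filename "erebus-control-9b.yaml" is illustrative; use whatever name
# this config is saved under. With deepspeed zero2 enabled above, accelerate
# handles the multi-GPU launch from the referenced JSON config.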