base_model: Qwen/Qwen2-7B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
trust_remote_code: true

load_in_8bit: false
load_in_4bit: false
strict: false

datasets:
  - path: PocketDoc/Dans-MemoryCore-CoreCurriculum-Small
    type: sharegpt
    conversation: chatml
  - path: NewEden/vanilla-backrooms-claude-sharegpt
    type: sharegpt
    conversation: chatml
  - path: anthracite-org/kalo_opus_misc_240827
    type: sharegpt
    conversation: chatml
  - path: AquaV/Chemical-Biological-Safety-Applications-Sharegpt
    type: sharegpt
    conversation: chatml
  - path: AquaV/Energetic-Materials-Sharegpt
    type: sharegpt
    conversation: chatml
  - path: lodrick-the-lafted/NopmWritingStruct
    type: sharegpt
    conversation: chatml
  - path: NewEden/Claude-Instruct-5k
    type: sharegpt
    conversation: chatml
  - path: lodrick-the-lafted/kalo-opus-instruct-3k-filtered
    type: sharegpt
    conversation: chatml
  - path: anthracite-org/kalo-opus-instruct-22k-no-refusal
    type: sharegpt
    conversation: chatml
  - path: NewEden/Stheno-Data-filtered-8k-subset
    type: sharegpt
    conversation: chatml
  - path: Epiculous/Synthstruct-Gens-v1.1-Filtered-n-Cleaned
    type: sharegpt
    conversation: chatml
  - path: PJMixers/lodrick-the-lafted_OpusStories-ShareGPT
    type: sharegpt
    conversation: chatml

chat_template: chatml

dataset_prepared_path:
val_set_size: 0.01
output_dir: ./outputs/out

sequence_len: 8192
sample_packing: true
eval_sample_packing: true
pad_to_sequence_len: true

adapter:
lora_model_dir:
lora_r:
lora_alpha:
lora_dropout:
lora_target_linear: true
lora_fan_in_fan_out:

wandb_project: henbane 7b-attempt2
wandb_entity:
wandb_watch:
wandb_name: henbane 7b-attempt2
wandb_log_model:

plugins:
  - axolotl.integrations.liger.LigerPlugin
liger_rope: true
liger_rms_norm: true
liger_swiglu: true
liger_fused_linear_cross_entropy: true

gradient_accumulation_steps: 32
micro_batch_size: 4
num_epochs: 2
optimizer: adamw_torch
lr_scheduler: cosine
learning_rate: 0.00002

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: true

gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: false
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true

warmup_steps: 10
evals_per_epoch: 4
saves_per_epoch: 1
debug:
weight_decay: 0.5
special_tokens:
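
# Notes (added for clarity; not part of the original run settings):
# - With the values above, the effective batch size per optimizer step is
#   micro_batch_size (4) x gradient_accumulation_steps (32) = 128 sequences
#   per device, multiplied by the number of GPUs under data parallelism.
# - A config like this is typically run with Axolotl's CLI, for example:
#     accelerate launch -m axolotl.cli.train this-config.yml
#   The filename is illustrative; check the CLI of your installed Axolotl
#   version.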