```yaml
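# Base model and tokenizer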
base_model: NewEden/Hamanasu-KTO-V2
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
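
# Weight loading: no 8-bit/4-bit quantization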
load_in_8bit: false
load_in_4bit: false
strict: false
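
# Training data: prose and adventure co-writing sets, all in the
# dan-chat-advanced prompt format; 2% of the merged data is held out for eval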
datasets:
  - path: PocketDoc/Dans-Prosemaxx-Cowriter-3-S
    type: dan-chat-advanced
  - path: PocketDoc/Dans-Prosemaxx-Adventure
    type: dan-chat-advanced
  - path: PocketDoc/Dans-Failuremaxx-Adventure-3
    type: dan-chat-advanced
  - path: PocketDoc/Dans-Prosemaxx-InstructWriter-ZeroShot-2
    type: dan-chat-advanced
  - path: PocketDoc/Dans-Prosemaxx-InstructWriter-ZeroShot-3
    type: dan-chat-advanced
  - path: PocketDoc/Dans-Prosemaxx-InstructWriter-Continue-2
    type: dan-chat-advanced
  - path: PocketDoc/Dans-Prosemaxx-Instructwriter-Long
    type: dan-chat-advanced
shuffle_merged_datasets: true
dataset_prepared_path: dataset_prepared
val_set_size: 0.02
output_dir: 4b-out-rslora
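
# Liger kernel plugin: fused Triton kernels (RoPE, RMSNorm, SwiGLU,
# fused linear cross-entropy) to reduce memory use and speed up training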
plugins:
  - axolotl.integrations.liger.LigerPlugin
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
liger_fused_linear_cross_entropy: true
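
# 32k context with sample packing for both training and eval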
sequence_len: 32768
sample_packing: true
eval_sample_packing: true
pad_to_sequence_len: true
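
# Rank-stabilized LoRA (rsLoRA scales updates by alpha/sqrt(r)) over all
# attention and MLP projections; embed_tokens and lm_head are trained
# fully and saved alongside the adapter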
adapter: lora
lora_model_dir:
lora_r: 64
lora_alpha: 32
lora_dropout: 0.1
lora_target_modules:
  - gate_proj
  - down_proj
  - up_proj
  - q_proj
  - v_proj
  - k_proj
  - o_proj
lora_fan_in_fan_out:
peft_use_rslora: true
lora_modules_to_save:
  - embed_tokens
  - lm_head
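
# Weights & Biases tracking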
wandb_project: tavbussy
wandb_entity:
wandb_watch:
wandb_name: adventure-v2
wandb_log_model:
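
# Evaluation cadence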
evals_per_epoch: 4
eval_table_size:
eval_max_new_tokens: 128
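
# Optimizer and schedule; effective batch size per GPU is
# micro_batch_size * gradient_accumulation_steps = 12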
gradient_accumulation_steps: 2
micro_batch_size: 6
num_epochs: 4
optimizer: paged_ademamix_8bit
lr_scheduler: cosine
learning_rate: 2.83e-5
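
# Mask prompt tokens out of the loss; bf16 is autodetected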
train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: false
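
# Gradient checkpointing and FlashAttention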
gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true
s2_attention:
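
# Warmup, checkpoint saves, DeepSpeed ZeRO-3, and padding token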
warmup_steps: 40
saves_per_epoch: 2
debug:
deepspeed: ./deepspeed_configs/zero3_bf16.json
weight_decay: 0.02
fsdp:
fsdp_config:
special_tokens:
  pad_token: <|finetune_right_pad_id|>
```
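
A minimal launch sketch for this config, assuming it is saved as `adventure-v2.yml` (a hypothetical filename) and run from an Axolotl checkout where `./deepspeed_configs/zero3_bf16.json` exists:

```bash
# Tokenize and pack the datasets once up front (written to dataset_prepared/)
python -m axolotl.cli.preprocess adventure-v2.yml

# Train across all visible GPUs; the DeepSpeed ZeRO-3 config is picked up
# from the `deepspeed:` key in the YAML
accelerate launch -m axolotl.cli.train adventure-v2.yml
```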