Update big.yml
big.yml CHANGED
@@ -1,10 +1,10 @@
-base_model:
+base_model: Qwen/Qwen2-72B-Instruct
 base_model_ignore_patterns: "*/*"
 model_type: AutoModelForCausalLM
 tokenizer_type: AutoTokenizer
 
 
-hub_model_id: NewEden/BigPicaro
+hub_model_id: NewEden/BigPicaro-qwen
 hub_strategy: "all_checkpoints"
 push_dataset_to_hub:
 hf_use_auth_token: true
@@ -56,9 +56,7 @@ lora_dropout: 0.05
 lora_target_linear: true
 lora_fan_in_fan_out:
 peft_use_rslora: true
-
- - embed_tokens
- - lm_head
+
 
 wandb_project: tavbussy
 wandb_entity:
@@ -68,10 +66,10 @@ wandb_log_model:
 
 gradient_accumulation_steps: 1
 micro_batch_size: 2
-num_epochs:
+num_epochs: 4
 optimizer: paged_adamw_8bit
 lr_scheduler: cosine
-learning_rate:
+learning_rate: 5e-5
 max_grad_norm: 0.2
 
 train_on_inputs: false
@@ -80,7 +78,7 @@ bf16: auto
 fp16:
 tf32: false
 
-gradient_checkpointing:
+gradient_checkpointing: unsloth
 early_stopping_patience:
 resume_from_checkpoint:
 local_rank:
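For reference, the keys this commit touches end up as sketched below, assembled from the "+" side of the diff. The comments are explanatory and not part of the committed file, and keys outside the visible hunks are omitted.

# Updated keys in big.yml after this commit (axolotl config)
base_model: Qwen/Qwen2-72B-Instruct   # previously empty
base_model_ignore_patterns: "*/*"     # skip files in subfolders when fetching the base model

hub_model_id: NewEden/BigPicaro-qwen  # renamed from NewEden/BigPicaro
hub_strategy: "all_checkpoints"       # push every saved checkpoint to the Hub

num_epochs: 4                         # previously unset
optimizer: paged_adamw_8bit           # bitsandbytes paged 8-bit AdamW
lr_scheduler: cosine
learning_rate: 5e-5                   # previously unset
max_grad_norm: 0.2

gradient_checkpointing: unsloth      # Unsloth's offloaded gradient checkpointing (previously unset)

Two side effects worth noting: dropping the " - embed_tokens" / " - lm_head" list entries (apparently the tail of a modules-to-save block, whose key is not visible in the hunk) leaves the embedding and output-head weights frozen during the LoRA run and keeps checkpoints adapter-sized, while "unsloth" gradient checkpointing trades a small amount of speed for a sizeable reduction in activation memory relative to the stock implementation.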