Muthuchamy Selvaraj Nithish committed
Commit · d915723
1 Parent(s): 28c33d7

weights update
Browse files

- llava-lrv+dino-siglip-phi3-lora-model+stage-align+x7/checkpoints/latest-checkpoint.pt +3 -0
- llava-lrv+dino-siglip-phi3-lora-model+stage-align+x7/config.json +58 -0
- llava-lrv+dino-siglip-phi3-lora-model+stage-align+x7/config.yaml +51 -0
- llava-lrv+dino-siglip-phi3-lora-model+stage-finetune+x7/checkpoints/latest-checkpoint.pt +3 -0
- llava-lrv+dino-siglip-phi3-lora-model+stage-finetune+x7/config.json +58 -0
- llava-lrv+dino-siglip-phi3-lora-model+stage-finetune+x7/config.yaml +51 -0
llava-lrv+dino-siglip-phi3-lora-model+stage-align+x7/checkpoints/latest-checkpoint.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5bd890d703a64a8a12659c7a38c9dcc5fa5da2e1617da198f26e6ef89638cbfa
+size 220525570
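The checkpoint itself lives in Git LFS storage; the three lines above are only a pointer recording the object's sha256 and byte size (~220 MB). A minimal sketch for checking a downloaded blob against such a pointer — the paths and the helper name are illustrative, not part of this repo:

```python
import hashlib
from pathlib import Path

def verify_lfs_object(pointer_path: str, blob_path: str) -> bool:
    """Check a downloaded file against its Git LFS pointer (oid + size)."""
    # Pointer files are "key value" lines: version, oid sha256:<hex>, size <bytes>.
    fields = dict(
        line.split(" ", 1)
        for line in Path(pointer_path).read_text().splitlines()
        if " " in line
    )
    expected_oid = fields["oid"].removeprefix("sha256:").strip()
    expected_size = int(fields["size"])

    blob = Path(blob_path)
    if blob.stat().st_size != expected_size:
        return False

    # Hash in 1 MiB chunks so the ~220 MB file never sits in memory whole.
    h = hashlib.sha256()
    with blob.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest() == expected_oid
```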
llava-lrv+dino-siglip-phi3-lora-model+stage-align+x7/config.json
ADDED
@@ -0,0 +1,58 @@
+{
+  "dataset": {
+    "align_stage_components": [
+      "download/llava-laion-cc-sbu-558k/chat.json",
+      "download/llava-laion-cc-sbu-558k"
+    ],
+    "dataset_id": "llava-lrv",
+    "dataset_root_dir": "/data/projects/12003782/training_data",
+    "finetune_stage_components": [
+      "download/llava-v1.5-instruct/llava_v1_5_lrv_mix1008k.json",
+      "download/llava-v1.5-instruct"
+    ],
+    "type": "llava-lrv"
+  },
+  "model": {
+    "align_epochs": 1,
+    "align_global_batch_size": 64,
+    "align_learning_rate": 0.001,
+    "align_lr_scheduler_type": "linear-warmup+cosine-decay",
+    "align_max_grad_norm": 1.0,
+    "align_max_steps": null,
+    "align_per_device_batch_size": 8,
+    "align_train_strategy": "fsdp-shard-grad-op",
+    "align_warmup_ratio": 0.03,
+    "align_weight_decay": 0.0,
+    "arch_specifier": "fused-gelu-mlp",
+    "enable_gradient_checkpointing": true,
+    "enable_mixed_precision_training": true,
+    "finetune_epochs": 1,
+    "finetune_global_batch_size": 32,
+    "finetune_learning_rate": 2e-05,
+    "finetune_lr_scheduler_type": "linear-warmup+cosine-decay",
+    "finetune_max_grad_norm": 1.0,
+    "finetune_max_steps": null,
+    "finetune_per_device_batch_size": 4,
+    "finetune_train_strategy": "fsdp-full-shard",
+    "finetune_warmup_ratio": 0.03,
+    "finetune_weight_decay": 0.1,
+    "image_resize_strategy": "resize-naive",
+    "llm_backbone_id": "phi3_base",
+    "llm_max_length": 2048,
+    "model_id": "dino-siglip-phi3-lora-model",
+    "reduce_in_full_precision": false,
+    "type": "dino-siglip-phi3-lora-model",
+    "vision_backbone_id": "dinosiglip-vit-so-384px"
+  },
+  "pretrained_checkpoint": null,
+  "run_id": "llava-lrv+dino-siglip-phi3-lora-model+stage-align+x7",
+  "run_root_dir": "/data/projects/12003782/model_weights/runs",
+  "seed": 7,
+  "stage": "align",
+  "trackers": [
+    "jsonl",
+    "wandb"
+  ],
+  "wandb_entity": null,
+  "wandb_project": "nscc-prismatic-phi3"
+}
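One relation in this config is implicit: for the align stage, the global batch of 64 is assembled from per-device micro-batches of 8, so on, say, 4 GPUs the trainer would need 64 / (8 × 4) = 2 gradient-accumulation steps. A minimal sketch, assuming the standard global = per_device × world_size × accumulation relation; the world size itself is not recorded in config.json:

```python
import json

with open("llava-lrv+dino-siglip-phi3-lora-model+stage-align+x7/config.json") as f:
    cfg = json.load(f)

model = cfg["model"]
world_size = 4  # assumption: number of GPUs; not stored in the config

grad_accum = model["align_global_batch_size"] // (
    model["align_per_device_batch_size"] * world_size
)
print(f"align: accumulate {grad_accum} micro-batches per optimizer step")
# 64 global / (8 per-device x 4 GPUs) -> 2 accumulation steps.
```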
llava-lrv+dino-siglip-phi3-lora-model+stage-align+x7/config.yaml
ADDED
@@ -0,0 +1,51 @@
+dataset:
+  align_stage_components:
+  - download/llava-laion-cc-sbu-558k/chat.json
+  - download/llava-laion-cc-sbu-558k
+  dataset_id: llava-lrv
+  dataset_root_dir: /data/projects/12003782/training_data
+  finetune_stage_components:
+  - download/llava-v1.5-instruct/llava_v1_5_lrv_mix1008k.json
+  - download/llava-v1.5-instruct
+  type: llava-lrv
+model:
+  align_epochs: 1
+  align_global_batch_size: 64
+  align_learning_rate: 0.001
+  align_lr_scheduler_type: linear-warmup+cosine-decay
+  align_max_grad_norm: 1.0
+  align_max_steps: null
+  align_per_device_batch_size: 8
+  align_train_strategy: fsdp-shard-grad-op
+  align_warmup_ratio: 0.03
+  align_weight_decay: 0.0
+  arch_specifier: fused-gelu-mlp
+  enable_gradient_checkpointing: true
+  enable_mixed_precision_training: true
+  finetune_epochs: 1
+  finetune_global_batch_size: 32
+  finetune_learning_rate: 2.0e-05
+  finetune_lr_scheduler_type: linear-warmup+cosine-decay
+  finetune_max_grad_norm: 1.0
+  finetune_max_steps: null
+  finetune_per_device_batch_size: 4
+  finetune_train_strategy: fsdp-full-shard
+  finetune_warmup_ratio: 0.03
+  finetune_weight_decay: 0.1
+  image_resize_strategy: resize-naive
+  llm_backbone_id: phi3_base
+  llm_max_length: 2048
+  model_id: dino-siglip-phi3-lora-model
+  reduce_in_full_precision: false
+  type: dino-siglip-phi3-lora-model
+  vision_backbone_id: dinosiglip-vit-so-384px
+pretrained_checkpoint: null
+run_id: llava-lrv+dino-siglip-phi3-lora-model+stage-align+x7
+run_root_dir: /data/projects/12003782/model_weights/runs
+seed: 7
+stage: align
+trackers:
+- jsonl
+- wandb
+wandb_entity: null
+wandb_project: nscc-prismatic-phi3
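config.yaml encodes the same tree as config.json; the only differences are cosmetic (e.g. 2e-05 vs 2.0e-05, which parse to the same float). A quick consistency check, assuming PyYAML is available:

```python
import json
import yaml  # pip install pyyaml

run_dir = "llava-lrv+dino-siglip-phi3-lora-model+stage-align+x7"
with open(f"{run_dir}/config.json") as f:
    as_json = json.load(f)
with open(f"{run_dir}/config.yaml") as f:
    as_yaml = yaml.safe_load(f)

# Both loaders yield plain dicts/lists/scalars, so equality compares values,
# not formatting; this should hold since the two files encode the same config.
assert as_json == as_yaml, "config.json and config.yaml have diverged"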
llava-lrv+dino-siglip-phi3-lora-model+stage-finetune+x7/checkpoints/latest-checkpoint.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aea9ed984634f0c9df7a3e9d640ac49c60fe3ae29eb4214cf35dcdfee45b28db
+size 16310332705
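At ~16.3 GB this checkpoint is roughly 74× the align-stage one (~220 MB), consistent with the align stage saving only the small projector while the finetune stage saves the larger model state; the exact layout is not documented here. A hedged sketch for inspecting the top-level keys on CPU without assuming any particular structure:

```python
import torch

path = ("llava-lrv+dino-siglip-phi3-lora-model+stage-finetune+x7/"
        "checkpoints/latest-checkpoint.pt")

# map_location="cpu" keeps the GPU out of it; the file is ~16.3 GB, so
# expect comparable host RAM usage while it deserializes. On PyTorch >= 2.6
# you may need weights_only=False if the checkpoint stores non-tensor objects.
ckpt = torch.load(path, map_location="cpu")
for key, value in ckpt.items():
    summary = list(value) if isinstance(value, dict) else type(value).__name__
    print(key, "->", summary)
```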
llava-lrv+dino-siglip-phi3-lora-model+stage-finetune+x7/config.json
ADDED
@@ -0,0 +1,58 @@
+{
+  "dataset": {
+    "align_stage_components": [
+      "download/llava-laion-cc-sbu-558k/chat.json",
+      "download/llava-laion-cc-sbu-558k"
+    ],
+    "dataset_id": "llava-lrv",
+    "dataset_root_dir": "/data/projects/12003782/training_data",
+    "finetune_stage_components": [
+      "download/llava-v1.5-instruct/llava_v1_5_lrv_mix1008k.json",
+      "download/llava-v1.5-instruct"
+    ],
+    "type": "llava-lrv"
+  },
+  "model": {
+    "align_epochs": 1,
+    "align_global_batch_size": 64,
+    "align_learning_rate": 0.001,
+    "align_lr_scheduler_type": "linear-warmup+cosine-decay",
+    "align_max_grad_norm": 1.0,
+    "align_max_steps": null,
+    "align_per_device_batch_size": 8,
+    "align_train_strategy": "fsdp-shard-grad-op",
+    "align_warmup_ratio": 0.03,
+    "align_weight_decay": 0.0,
+    "arch_specifier": "fused-gelu-mlp",
+    "enable_gradient_checkpointing": true,
+    "enable_mixed_precision_training": true,
+    "finetune_epochs": 1,
+    "finetune_global_batch_size": 32,
+    "finetune_learning_rate": 2e-05,
+    "finetune_lr_scheduler_type": "linear-warmup+cosine-decay",
+    "finetune_max_grad_norm": 1.0,
+    "finetune_max_steps": null,
+    "finetune_per_device_batch_size": 4,
+    "finetune_train_strategy": "fsdp-full-shard",
+    "finetune_warmup_ratio": 0.03,
+    "finetune_weight_decay": 0.1,
+    "image_resize_strategy": "resize-naive",
+    "llm_backbone_id": "phi3_base",
+    "llm_max_length": 2048,
+    "model_id": "dino-siglip-phi3-lora-model",
+    "reduce_in_full_precision": false,
+    "type": "dino-siglip-phi3-lora-model",
+    "vision_backbone_id": "dinosiglip-vit-so-384px"
+  },
+  "pretrained_checkpoint": "/data/projects/12003782/model_weights/runs/llava-lrv+dino-siglip-phi3-lora-model+stage-align+x7/checkpoints/latest-checkpoint.pt",
+  "run_id": "llava-lrv+dino-siglip-phi3-lora-model+stage-finetune+x7",
+  "run_root_dir": "/data/projects/12003782/model_weights/runs",
+  "seed": 7,
+  "stage": "finetune",
+  "trackers": [
+    "jsonl",
+    "wandb"
+  ],
+  "wandb_entity": null,
+  "wandb_project": "nscc-prismatic-phi3"
+}
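The only differences from the align-stage config are pretrained_checkpoint (now pointing at the align checkpoint), run_id, and stage. One derived quantity worth spelling out: with finetune_warmup_ratio 0.03 and a scheduler named linear-warmup+cosine-decay, the warmup length presumably scales with total optimizer steps. A back-of-envelope sketch, assuming the mix name llava_v1_5_lrv_mix1008k means roughly 1,008,000 examples and that the trainer rounds steps up per epoch (both assumptions):

```python
import math

# Illustrative numbers: dataset size is inferred from the mix filename,
# and the trainer's exact rounding rules are assumptions.
num_examples = 1_008_000
global_batch_size = 32   # finetune_global_batch_size
epochs = 1               # finetune_epochs
warmup_ratio = 0.03      # finetune_warmup_ratio

steps_per_epoch = math.ceil(num_examples / global_batch_size)
total_steps = steps_per_epoch * epochs
warmup_steps = int(warmup_ratio * total_steps)
print(f"{total_steps=} {warmup_steps=}")  # ~31,500 total, ~945 warmup
```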
llava-lrv+dino-siglip-phi3-lora-model+stage-finetune+x7/config.yaml
ADDED
@@ -0,0 +1,51 @@
+dataset:
+  align_stage_components:
+  - download/llava-laion-cc-sbu-558k/chat.json
+  - download/llava-laion-cc-sbu-558k
+  dataset_id: llava-lrv
+  dataset_root_dir: /data/projects/12003782/training_data
+  finetune_stage_components:
+  - download/llava-v1.5-instruct/llava_v1_5_lrv_mix1008k.json
+  - download/llava-v1.5-instruct
+  type: llava-lrv
+model:
+  align_epochs: 1
+  align_global_batch_size: 64
+  align_learning_rate: 0.001
+  align_lr_scheduler_type: linear-warmup+cosine-decay
+  align_max_grad_norm: 1.0
+  align_max_steps: null
+  align_per_device_batch_size: 8
+  align_train_strategy: fsdp-shard-grad-op
+  align_warmup_ratio: 0.03
+  align_weight_decay: 0.0
+  arch_specifier: fused-gelu-mlp
+  enable_gradient_checkpointing: true
+  enable_mixed_precision_training: true
+  finetune_epochs: 1
+  finetune_global_batch_size: 32
+  finetune_learning_rate: 2.0e-05
+  finetune_lr_scheduler_type: linear-warmup+cosine-decay
+  finetune_max_grad_norm: 1.0
+  finetune_max_steps: null
+  finetune_per_device_batch_size: 4
+  finetune_train_strategy: fsdp-full-shard
+  finetune_warmup_ratio: 0.03
+  finetune_weight_decay: 0.1
+  image_resize_strategy: resize-naive
+  llm_backbone_id: phi3_base
+  llm_max_length: 2048
+  model_id: dino-siglip-phi3-lora-model
+  reduce_in_full_precision: false
+  type: dino-siglip-phi3-lora-model
+  vision_backbone_id: dinosiglip-vit-so-384px
+pretrained_checkpoint: /data/projects/12003782/model_weights/runs/llava-lrv+dino-siglip-phi3-lora-model+stage-align+x7/checkpoints/latest-checkpoint.pt
+run_id: llava-lrv+dino-siglip-phi3-lora-model+stage-finetune+x7
+run_root_dir: /data/projects/12003782/model_weights/runs
+seed: 7
+stage: finetune
+trackers:
+- jsonl
+- wandb
+wandb_entity: null
+wandb_project: nscc-prismatic-phi3