liquid9212 committed
Commit ee332a8 • 1 Parent(s): 4d432ae

Upload folder using huggingface_hub

Files changed:
- README.md +15 -0
- meta_model_1.pt +3 -0
- training_config.yml +83 -0
README.md ADDED
@@ -0,0 +1,15 @@
+---
+license: mit
+tags:
+- any-to-any
+- omega
+- omegalabs
+- bittensor
+- agi
+---
+
+This is an Any-to-Any model checkpoint for the OMEGA Labs x Bittensor Any-to-Any subnet.
+
+Check out the [git repo](https://github.com/omegalabsinc/omegalabs-anytoany-bittensor) and find OMEGA on X: [@omegalabsai](https://x.com/omegalabsai).
+
+Trained by liquid9212
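
As a quick sanity check on the uploaded weights, the checkpoint should open as an ordinary PyTorch file once fetched from Git LFS. A minimal sketch in Python (assumption: `meta_model_1.pt` is a torch-serialized dict of parameter tensors in the Meta/Llama-3 layout, as the `FullModelMetaCheckpointer` in the training config suggests):

```python
import torch

# Load on CPU so a 16 GB checkpoint doesn't require GPU memory just to inspect.
# Assumption: the file is a plain dict of parameter tensors (Meta checkpoint layout).
state = torch.load("meta_model_1.pt", map_location="cpu")

# Print a few parameter names and shapes to confirm the layout
# before loading it into a model.
for name, tensor in list(state.items())[:5]:
    print(name, tuple(tensor.shape))
```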
meta_model_1.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:db0f797d8ce1bd87257071d8f33c3b0d6b78fa73e457618074cb6d234c0c120f
+size 16219158403
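
Note that the entry above is a Git LFS pointer, not the weights themselves; `git lfs pull` fetches the real file. Once downloaded, the file can be verified against the pointer's `oid` using only the standard library (a sketch; the local path is assumed):

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Hash the file in 1 MiB chunks so the 16 GB checkpoint never sits in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# The expected digest is the `oid sha256:` value from the LFS pointer above.
expected = "db0f797d8ce1bd87257071d8f33c3b0d6b78fa73e457618074cb6d234c0c120f"
assert sha256_of("meta_model_1.pt") == expected, "checkpoint does not match pointer"
```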
training_config.yml ADDED
@@ -0,0 +1,83 @@
+model:
+  _component_: models.lora_mmllama3_8b
+  lora_attn_modules:
+  - q_proj
+  - v_proj
+  apply_lora_to_mlp: false
+  apply_lora_to_output: false
+  lora_rank: 16
+  lora_alpha: 32
+  perception_tokens: 2
+  use_clip: false
+tokenizer:
+  _component_: models.a2a_tokenizer
+  path: models/tokenizer.model
+checkpointer:
+  _component_: torchtune.utils.FullModelMetaCheckpointer
+  checkpoint_dir: checkpoints/Meta-Llama-3-8B-Instruct/original
+  checkpoint_files:
+  - consolidated.00.pth
+  adapter_checkpoint: null
+  recipe_checkpoint: null
+  output_dir: output/Meta-Llama-3-8B-Instruct/
+  model_type: LLAMA3
+resume_from_checkpoint: false
+interim_checkpoint_steps: 5000
+interim_gen_steps: null
+max_new_tokens: 100
+temperature: 0.6
+top_k: 300
+dataset:
+  _component_: ds.EvenBatcher
+  buffer_size: 36
+  dataset:
+    _component_: ds.RoundRobinDataset
+    datasets:
+    - _component_: ds.OmegaVideoCaptionDataset
+      length: 500000
+    - _component_: ds.LlavaInstructDataset
+      dataset_path: ds/coco_llava_instruct/output.parquet
+      train_on_input: false
+    - _component_: ds.LlavaInstructDataset
+      dataset_path: ds/vision_flan/output.parquet
+      train_on_input: false
+    - _component_: ds.CaptionInstructDataset
+      dataset_path: ds/sam_llava/output.parquet
+      train_on_input: false
+seed: null
+shuffle: true
+batch_size: 4
+optimizer:
+  _component_: torch.optim.AdamW
+  weight_decay: 0.01
+  lr: 0.0003
+lr_scheduler:
+  _component_: torchtune.modules.get_cosine_schedule_with_warmup
+  num_warmup_steps: 100
+loss:
+  _component_: torch.nn.CrossEntropyLoss
+epochs: 1
+max_steps_per_epoch: null
+gradient_accumulation_steps: 64
+compile: false
+output_dir: /tmp/lora_finetune_output
+metric_logger:
+  _component_: torchtune.utils.metric_logging.DiskLogger
+  log_dir: ${output_dir}
+log_every_n_steps: null
+device: cuda
+dtype: bf16
+enable_activation_checkpointing: false
+profiler:
+  _component_: torchtune.utils.profiler
+  enabled: false
+inference:
+  prompt_template: 'Video:
+
+    {video}
+
+    Caption the previous video.'
+  max_new_tokens: 300
+  temperature: 0.6
+  top_k: 300
+quantizer: null
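
Two details of this config are easy to miss. First, the effective batch size is `batch_size` × `gradient_accumulation_steps` = 4 × 64 = 256 samples per optimizer step. Second, the learning rate ramps up linearly over 100 warmup steps and then follows a cosine decay. The sketch below reproduces that schedule shape with plain PyTorch rather than calling the `torchtune.modules.get_cosine_schedule_with_warmup` named in the config; the total step count is an assumption for illustration:

```python
import math
import torch

def cosine_with_warmup(optimizer, num_warmup_steps: int, num_training_steps: int):
    # Linear ramp from 0 to the base lr over the warmup steps,
    # then cosine decay from the base lr down to 0.
    def lr_lambda(step: int) -> float:
        if step < num_warmup_steps:
            return step / max(1, num_warmup_steps)
        progress = (step - num_warmup_steps) / max(1, num_training_steps - num_warmup_steps)
        return 0.5 * (1.0 + math.cos(math.pi * progress))
    return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda)

# Optimizer hyperparameters taken from training_config.yml;
# num_training_steps is a placeholder for the demo.
params = [torch.nn.Parameter(torch.zeros(1))]
optimizer = torch.optim.AdamW(params, lr=3e-4, weight_decay=0.01)
scheduler = cosine_with_warmup(optimizer, num_warmup_steps=100, num_training_steps=10_000)

for _ in range(200):  # each iteration stands in for one optimizer step (256 samples)
    optimizer.step()
    scheduler.step()
print(optimizer.param_groups[0]["lr"])  # lr past warmup, now on the cosine curve
```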