Xenova (HF staff) committed
Commit 8e4f7b9 · verified · 1 Parent(s): 9a76d4f

Upload 7 files
config.json ADDED
@@ -0,0 +1,80 @@
+{
+  "aligner_config": {
+    "cls": "MlpProjector",
+    "model_type": "aligner",
+    "params": {
+      "depth": 2,
+      "input_dim": 1024,
+      "n_embed": 2048,
+      "projector_type": "mlp_gelu"
+    }
+  },
+  "architectures": [
+    "MultiModalityCausalLM"
+  ],
+  "gen_aligner_config": {
+    "cls": "MlpProjector",
+    "model_type": "gen_aligner",
+    "params": {
+      "depth": 2,
+      "input_dim": 8,
+      "n_embed": 2048,
+      "projector_type": "mlp_gelu"
+    }
+  },
+  "gen_head_config": {
+    "cls": "vision_head",
+    "model_type": "gen_head",
+    "params": {
+      "image_token_embed": 2048,
+      "image_token_size": 16384,
+      "n_embed": 2048
+    }
+  },
+  "gen_vision_config": {
+    "cls": "VQ-16",
+    "model_type": "gen_vision",
+    "params": {
+      "image_token_size": 16384,
+      "n_embed": 8
+    }
+  },
+  "language_config": {
+    "hidden_size": 2048,
+    "intermediate_size": 5632,
+    "max_position_embeddings": 16384,
+    "model_type": "llama",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 24,
+    "num_key_value_heads": 16,
+    "torch_dtype": "bfloat16",
+    "vocab_size": 102400
+  },
+  "model_type": "multi_modality",
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.33.1",
+  "transformers.js_config": {
+    "kv_cache_dtype": {
+      "fp16": "float16",
+      "q4f16": "float16"
+    },
+    "dtype": {
+      "prepare_inputs_embeds": "fp32",
+      "language_model": "q4",
+      "lm_head": "fp32",
+      "gen_head": "fp32",
+      "gen_img_embeds": "fp32",
+      "image_decode": "fp32"
+    }
+  },
+  "vision_config": {
+    "cls": "CLIPVisionTower",
+    "model_type": "vision",
+    "params": {
+      "image_size": 384,
+      "model_name": "siglip_large_patch16_384",
+      "select_feature": "same",
+      "select_layer": -1
+    }
+  }
+}
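The `transformers.js_config` block above sets per-module precision for the ONNX export: the language model loads as 4-bit (`q4`) by default while the projection and decoding heads stay at `fp32`, and the KV cache is kept in `float16` for the `fp16`/`q4f16` variants. As a minimal sketch of how these defaults might be consumed or overridden, assuming this repo is loaded through Transformers.js v3 and that the library exposes a `MultiModalityCausalLM` class for the `multi_modality` model type (the repo id below is a placeholder, not confirmed by this diff):

import { AutoProcessor, MultiModalityCausalLM } from "@huggingface/transformers";

// Placeholder repo id: substitute the repository this commit was pushed to.
const model_id = "onnx-community/Janus-1.3B-ONNX";

const processor = await AutoProcessor.from_pretrained(model_id);

// Passing `dtype` overrides the "transformers.js_config.dtype" defaults above.
const model = await MultiModalityCausalLM.from_pretrained(model_id, {
  dtype: {
    prepare_inputs_embeds: "fp32",
    language_model: "q4", // 4-bit weights; fp16/q4f16 variants keep a float16 KV cache
    lm_head: "fp32",
    gen_head: "fp32",
    gen_img_embeds: "fp32",
    image_decode: "fp32",
  },
});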
generation_config.json ADDED
@@ -0,0 +1,9 @@
+{
+  "bos_token_id": 100000,
+  "eos_token_id": 100001,
+  "do_sample": true,
+  "num_image_tokens": 576,
+  "pad_token_id": 100015,
+  "temperature": 0.7,
+  "top_p": 0.95
+}
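These are the decoding defaults: nucleus sampling (`do_sample` with `temperature` 0.7 and `top_p` 0.95), plus `num_image_tokens` of 576, which matches the vision tower's 24 × 24 patch grid (384 px images with 16 px patches). Continuing the hedged Transformers.js sketch from above, a generation call could restate or override these per invocation:

// `inputs` is assumed to come from `processor(...)`; the sampling options
// below simply restate the defaults from generation_config.json.
const outputs = await model.generate({
  ...inputs,
  max_new_tokens: 150,
  do_sample: true,
  temperature: 0.7,
  top_p: 0.95,
});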
preprocessor_config.json ADDED
@@ -0,0 +1,23 @@
+{
+  "background_color": [
+    127,
+    127,
+    127
+  ],
+  "do_normalize": true,
+  "image_mean": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "image_processor_type": "VLMImageProcessor",
+  "image_size": 384,
+  "image_std": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "min_size": 14,
+  "processor_class": "VLChatProcessor",
+  "rescale_factor": 0.00392156862745098
+}
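The `rescale_factor` of 0.00392156862745098 is exactly 1/255, so preprocessing scales 8-bit pixels to [0, 1] and then normalizes with a per-channel mean and std of 0.5, mapping values into [-1, 1]; the mid-gray `background_color` of (127, 127, 127) used for padding lands near 0 after normalization. A minimal sketch of that arithmetic:

// Normalization implied by preprocessor_config.json:
// (pixel * rescale_factor - mean) / std, with mean = std = 0.5 per channel.
function normalizePixel(value: number): number {
  const rescale_factor = 0.00392156862745098; // = 1 / 255
  return (value * rescale_factor - 0.5) / 0.5;
}

normalizePixel(0);   // -1
normalizePixel(255); //  1
normalizePixel(127); // ≈ -0.004 (the padding gray sits almost exactly mid-scale)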
processor_config.json ADDED
@@ -0,0 +1,11 @@
+{
+  "add_special_token": false,
+  "ignore_id": -100,
+  "image_tag": "<image_placeholder>",
+  "image_start_tag": "<begin_of_image>",
+  "image_end_tag": "<end_of_image>",
+  "mask_prompt": true,
+  "num_image_tokens": 576,
+  "processor_class": "VLChatProcessor",
+  "sft_format": "deepseek"
+}
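`"sft_format": "deepseek"` selects the DeepSeek chat template, built around the `<|User|>`/`<|Assistant|>` role tokens declared in special_tokens_map.json below. As a purely illustrative sketch, not the actual template applied by `VLChatProcessor` (which is not part of this diff), a prompt might be assembled from these tags like so:

// Hypothetical prompt assembly from the tags above; the real "deepseek"
// template used by VLChatProcessor may differ.
function buildPrompt(question: string): string {
  const image_tag = "<image_placeholder>";
  return `<|User|>: ${image_tag}\n${question}\n\n<|Assistant|>:`;
}

// At preprocessing time the single image_tag is presumably expanded to
// num_image_tokens (576) image slots, delimited by the
// <begin_of_image> / <end_of_image> tags.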
special_tokens_map.json ADDED
@@ -0,0 +1,16 @@
+{
+  "additional_special_tokens": [
+    "<image_placeholder>",
+    "<patch_placeholder>",
+    "<|ref|>",
+    "<|/ref|>",
+    "<|det|>",
+    "<|/det|>",
+    "<|grounding|>",
+    "<|User|>",
+    "<|Assistant|>"
+  ],
+  "bos_token": "<|begin▁of▁sentence|>",
+  "eos_token": "<|end▁of▁sentence|>",
+  "pad_token": "<|▁pad▁|>"
+}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,10 @@
+{
+  "bos_token": "<|begin▁of▁sentence|>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|end▁of▁sentence|>",
+  "model_max_length": 16384,
+  "pad_token": null,
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": null,
+  "use_default_system_prompt": true
+}
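The tokenizer is a `LlamaTokenizer` whose `model_max_length` of 16384 matches `max_position_embeddings` in config.json, over the 102,400-entry vocabulary declared there; note that `pad_token` is null here even though generation_config.json sets `pad_token_id` 100015 and special_tokens_map.json declares `<|▁pad▁|>`. A hedged sketch of loading it with Transformers.js (repo id again a placeholder):

import { AutoTokenizer } from "@huggingface/transformers";

// Placeholder repo id, as in the earlier sketch.
const tokenizer = await AutoTokenizer.from_pretrained("onnx-community/Janus-1.3B-ONNX");

const { input_ids } = tokenizer("Hello world");
// If the files above are consistent, "<|begin▁of▁sentence|>" should map to
// id 100000 and "<|end▁of▁sentence|>" to 100001, the bos/eos ids in
// generation_config.json.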