Louisnguyen committed on
Commit f504fcf · verified · 1 Parent(s): aa94c26

Training in progress, step 500

adapter_config.json CHANGED
@@ -4,7 +4,7 @@
     "base_model_class": "LlavaNextForConditionalGeneration",
     "parent_library": "transformers.models.llava_next.modeling_llava_next"
   },
-  "base_model_name_or_path": "llava-hf/llava-v1.6-vicuna-7b-hf",
+  "base_model_name_or_path": "llava-hf/llava-v1.6-mistral-7b-hf",
   "bias": "none",
   "fan_in_fan_out": false,
   "inference_mode": true,
@@ -24,11 +24,10 @@
   "revision": null,
   "target_modules": [
     "k_proj",
+    "v_projmm_projectorup_proj",
+    "down_proj",
     "gate_proj",
-    "v_projfc1",
-    "q_proj",
-    "fc2up_proj",
-    "down_proj"
+    "q_proj"
   ],
   "task_type": null,
   "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8dedd5497cd5d4febc1e396f4ad8901b2133eb7b38e2ebd726486488b902bf52
-size 406901688
+oid sha256:169d8604dc8a0fc430534667d370722c402792cc4fb04bb9dfbc0a01ef688821
+size 436261832
added_tokens.json CHANGED
@@ -1,3 +1,4 @@
 {
-  "<image>": 32000
+  "<image>": 32000,
+  "<pad>": 32001
 }
runs/Jul26_15-16-35_ip-10-192-12-185/events.out.tfevents.1722006997.ip-10-192-12-185.22674.0 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:976601913cfc004bc21584200bd137aa25e643217694a2949dbbd8a9715f6ddb
-size 10538
+oid sha256:d705a192f658451ad529cfd9f5756bdda05eafc727814b9ceaf060090bfb380b
+size 10749
runs/Jul26_16-54-58_ip-10-192-12-185/events.out.tfevents.1722012900.ip-10-192-12-185.35787.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e937a0338b323a59e075fb62f72bc9dc694e070d878632c8f77ecccbddcbd828
+size 10676
special_tokens_map.json CHANGED
@@ -14,7 +14,7 @@
     "single_word": false
   },
   "pad_token": {
-    "content": "<unk>",
+    "content": "<pad>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer.model CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
-size 499723
+oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+size 493443
tokenizer_config.json CHANGED
@@ -1,7 +1,7 @@
 {
   "add_bos_token": true,
   "add_eos_token": false,
-  "add_prefix_space": true,
+  "add_prefix_space": null,
   "added_tokens_decoder": {
     "0": {
       "content": "<unk>",
@@ -34,16 +34,28 @@
       "rstrip": false,
       "single_word": false,
       "special": true
+    },
+    "32001": {
+      "content": "<pad>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
     }
   },
+  "additional_special_tokens": [],
   "bos_token": "<s>",
   "chat_template": "A chat between an user and an artificial intelligence assistant about Science Question Answering. The assistant gives helpful, detailed, and polite answers to the user's questions.\nBased on the image, question and hint, please choose one of the given choices that answer the question.\nGive yourself room to think by extracting the image, question and hint before choosing the choice.\nDon't return the thinking, only return the highest accuracy choice.\nMake sure your answers are as correct as possible.\n{% for tag, content in messages.items() %}\n{% if tag == 'sample_question' %}\nUse the following examples as reference for the ideal answer style.\n{% for message in content %}\n{% if message['role'] == 'user' %} \nExample\nUSER: {% else %}ASSISTANT: {% endif %}\n{% for item in message['content'] %}\n{% if item['type'] == 'text_question' %}\nQuestion: {{ item['question'] }}\n{% elif item['type'] == 'text_hint' %}\nHint: {{ item['hint'] }}\n{% elif item['type'] == 'text_choice' %}\nChoices: {{ item['choice'] }}\n{% elif item['type'] == 'text_solution' %}\nSolution: {{ item['solution'] }}\n{% elif item['type'] == 'text_answer' %}\nAnswer: {{ item['answer'] }}{% elif item['type'] == 'image' %}<image>\n{% endif %}\n{% endfor %}\n{% if message['role'] == 'user' %}\n{% else %}\n{{eos_token}}\n{% endif %}{% endfor %}{% endif %}\n\n{% if tag == 'real_question' %}\nNow use the following image and question to choose the choice:\n{% for message in content %}\n{% if message['role'] == 'user' %}USER: {% else %}ASSISTANT: {% endif %}\n{% for item in message['content'] %}\n{% if item['type'] == 'text_question' %}\nQuestion: {{ item['question'] }}\n{% elif item['type'] == 'text_hint' %}\nHint: {{ item['hint'] }}\n{% elif item['type'] == 'text_choice' %}\nChoices: {{ item['choice'] }}\n{% elif item['type'] == 'text_solution' %}\nSolution: {{ item['solution'] }}\n{% elif item['type'] == 'text_answer' %}\nAnswer: {{ item['answer'] }}{% elif item['type'] == 'image' %}<image>\n{% endif %}\n{% endfor %}\n{% if message['role'] == 'user' %}\n{% else %}\n{{eos_token}}\n{% endif %}{% endfor %}{% endif %}\n{% endfor %}",
   "clean_up_tokenization_spaces": false,
   "eos_token": "</s>",
-  "legacy": false,
-  "model_max_length": 4096,
-  "pad_token": "<unk>",
-  "padding_side": "right",
+  "legacy": true,
+  "max_length": null,
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_to_multiple_of": null,
+  "pad_token": "<pad>",
+  "pad_token_type_id": 0,
+  "padding_side": "left",
   "processor_class": "LlavaNextProcessor",
   "sp_model_kwargs": {},
   "spaces_between_special_tokens": false,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0865f4e9f64ef51679a056063f605c047cdfa2cba01be794dc2ec65f1563909a
+oid sha256:d07b90852007dee836f8471a303db9de6b17957abf8ae2c6ef661d759bf1b8cd
 size 5496