bobofrut committed on
Commit
81fc1f2
1 Parent(s): ed34469

Upload to fix corrupted files

README.md ADDED
@@ -0,0 +1,88 @@
+ ---
+ license: apache-2.0
+ language:
+ - en
+ tags:
+ - mistral
+ - text-generation-inference
+ - conversational
+ - finetuned
+ ---
+
+ # Ladybird-base-7B-v8
+
+ Welcome to the repository of Ladybird-base-7B-v8, a large language model (LLM) developed through extensive research and learning in artificial intelligence, with a particular focus on LLMs. This model marks a significant milestone in my journey to understand and contribute to the advancement of AI.
+
+ ## About the Creator
+
+ As an avid learner and researcher of AI, I set out not only to understand large language models but to contribute to the field by building and fine-tuning my own. That hands-on work let me engage deeply with the intricacies of these systems and culminated in Ladybird-base-7B-v8. This project reflects my dedication to learning and my passion for pushing the boundaries of what AI models can achieve.
+
+ ## Model Overview
+
+ Ladybird-base-7B-v8 is based on the Mistral architecture, known for its efficiency and effectiveness on complex language understanding and generation tasks. The model relies on several architectural choices that enhance its performance (see the sketch after this list):
+
+ - **Grouped-Query Attention**: Shares each key/value head across a group of query heads, shrinking the KV cache and reducing computational cost while maintaining model quality.
+ - **Sliding-Window Attention**: Improves the model's handling of long-range dependencies by restricting each token's attention to a local window of the input, enhancing understanding and coherence.
+ - **Byte-fallback BPE Tokenizer**: Offers robust tokenization by combining Byte-Pair Encoding (BPE) with a byte-level fallback for out-of-vocabulary sequences, ensuring comprehensive language coverage.
+
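+ As a rough illustration of the grouped-query attention bookkeeping (not the actual implementation, which lives in `transformers`' Mistral code), here is a minimal sketch using the head counts from this repo's `config.json` (32 query heads sharing 8 key/value heads, head dim 4096/32 = 128); the causal mask is omitted for brevity:
+
+ ```python
+ import torch
+
+ num_attention_heads = 32   # query heads, from config.json
+ num_key_value_heads = 8    # KV heads, from config.json
+ group = num_attention_heads // num_key_value_heads  # 4 query heads per KV head
+
+ batch, seq, head_dim = 1, 16, 128  # head_dim = hidden_size / num_attention_heads
+ q = torch.randn(batch, num_attention_heads, seq, head_dim)
+ k = torch.randn(batch, num_key_value_heads, seq, head_dim)   # KV cache is 4x smaller
+ v = torch.randn(batch, num_key_value_heads, seq, head_dim)
+
+ # Expand each KV head to its group of query heads, then attend as usual.
+ k = k.repeat_interleave(group, dim=1)
+ v = v.repeat_interleave(group, dim=1)
+ attn = torch.softmax(q @ k.transpose(-2, -1) / head_dim**0.5, dim=-1) @ v
+ print(attn.shape)  # torch.Size([1, 32, 16, 128])
+ ```
+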
+ ## Instruction Format
+
+ To fully leverage the capabilities of Ladybird-base-7B-v8, especially its instruction fine-tuning, prompts should follow a specific format. This format ensures prompts are processed effectively, yielding accurate, context-aware responses. Construct your prompts as follows:
+
+ ```
+ <s>[INST] Your specific instruction here [/INST]Additional context or instruction details go here</s>
+ ```
+
+ - Begin your instruction with `<s>[INST]` followed by your specific request or command.
+ - Close your instruction with `[/INST]`. If additional context or details are necessary, include them after the closing tag.
+ - Terminate the sequence with the end-of-sentence token `</s>`.
+
+ ### Example
+
+ ```
+ <s>[INST] What are the primary benefits of using renewable energy sources? [/INST]You are an AI trained to provide comprehensive and concise answers</s>
+ ```
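+
+ Below is a minimal sketch of running this prompt format with the `transformers` library. The repo id is assumed from this repository's name; adjust it to wherever the weights live. `add_special_tokens=False` avoids a duplicate `<s>`, since the prompt already includes one and the tokenizer prepends a BOS token by default:
+
+ ```python
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ model_id = "bobofrut/Ladybird-base-7B-v8"  # assumed repo id
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+ model = AutoModelForCausalLM.from_pretrained(
+     model_id, torch_dtype="auto", device_map="auto"  # device_map needs `accelerate`
+ )
+
+ prompt = (
+     "<s>[INST] What are the primary benefits of using renewable energy sources? "
+     "[/INST]You are an AI trained to provide comprehensive and concise answers</s>"
+ )
+ inputs = tokenizer(prompt, return_tensors="pt", add_special_tokens=False).to(model.device)
+ outputs = model.generate(**inputs, max_new_tokens=256)
+ # Decode only the newly generated tokens, skipping the prompt.
+ print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True))
+ ```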
+
+ ## Eval results
+
+ | Tasks |Version| Filter |n-shot| Metric |Value | |Stderr|
+ |-------------------------------|-------|----------------|------|-----------|-----:|---|-----:|
+ |winogrande | 1|none |None |acc |0.8272|± |0.0106|
+ |truthfulqa_mc2 | 2|none |0 |acc |0.7736|± |0.0139|
+ |truthfulqa_mc1 | 2|none |0 |acc |0.6242|± |0.0170|
+ |stem |N/A |none |None |acc |0.5109|± |0.0085|
+ | - abstract_algebra | 0|none |None |acc |0.2900|± |0.0456|
+ | - anatomy | 0|none |None |acc |0.5852|± |0.0426|
+ | - astronomy | 0|none |None |acc |0.6908|± |0.0376|
+ | - college_biology | 0|none |None |acc |0.6875|± |0.0388|
+ | - college_chemistry | 0|none |None |acc |0.4000|± |0.0492|
+ | - college_computer_science | 0|none |None |acc |0.5300|± |0.0502|
+ | - college_mathematics | 0|none |None |acc |0.2600|± |0.0441|
+ | - college_physics | 0|none |None |acc |0.4314|± |0.0493|
+ | - computer_security | 0|none |None |acc |0.7100|± |0.0456|
+ | - conceptual_physics | 0|none |None |acc |0.5702|± |0.0324|
+ | - electrical_engineering | 0|none |None |acc |0.5586|± |0.0414|
+ | - elementary_mathematics | 0|none |None |acc |0.4259|± |0.0255|
+ | - high_school_biology | 0|none |None |acc |0.7710|± |0.0239|
+ | - high_school_chemistry | 0|none |None |acc |0.4483|± |0.0350|
+ | - high_school_computer_science| 0|none |None |acc |0.7000|± |0.0461|
+ | - high_school_mathematics | 0|none |None |acc |0.3259|± |0.0286|
+ | - high_school_physics | 0|none |None |acc |0.3179|± |0.0380|
+ | - high_school_statistics | 0|none |None |acc |0.4491|± |0.0339|
+ | - machine_learning | 0|none |None |acc |0.5000|± |0.0475|
+ |hellaswag | 1|none |None |acc |0.7010|± |0.0046|
+ | | |none |None |acc_norm |0.8763|± |0.0033|
+ |gsm8k | 3|strict-match |5 |exact_match|0.7650|± |0.0117|
+ | | |flexible-extract|5 |exact_match|0.7695|± |0.0116|
+ |arc_challenge | 1|none |None |acc |0.6749|± |0.0137|
+ | | |none |None |acc_norm |0.6800|± |0.0136|
+
+ ### Contact
+
+ ---
config.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "architectures": [
+     "MistralForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 14336,
+   "max_position_embeddings": 32768,
+   "model_type": "mistral",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 8,
+   "rms_norm_eps": 1e-05,
+   "rope_theta": 10000.0,
+   "sliding_window": 4096,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.38.2",
+   "use_cache": true,
+   "vocab_size": 32000
+ }
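
A minimal sketch for inspecting these settings locally with `transformers` (the repo id is assumed from this repository's name):

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("bobofrut/Ladybird-base-7B-v8")  # assumed repo id
print(config.model_type)           # "mistral"
print(config.num_attention_heads)  # 32 query heads ...
print(config.num_key_value_heads)  # ... sharing 8 KV heads (grouped-query attention)
print(config.sliding_window)       # 4096-token sliding attention window
```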
model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4cd9bab6e57d62cf77fc6ac5a2c1a12320facf22aaee99aac0fbcf8844168b7d
+ size 9825524456
model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5a077459594d21fce62b4a5c2b98c822a0c61d838da485ff45163918668a54bd
+ size 4657973592
model.safetensors.index.json ADDED
@@ -0,0 +1 @@
+ {"metadata": {"mergekit_version": "0.0.4"}, "weight_map": {"model.layers.22.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.22.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.22.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.22.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.21.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.21.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.21.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", 
"model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", 
"model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.7.mlp.up_proj.weight": 
"model-00001-of-00002.safetensors", "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors", 
"model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors", "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors", "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors", "model.embed_tokens.weight": "model-00001-of-00002.safetensors", "lm_head.weight": "model-00002-of-00002.safetensors", "model.norm.weight": "model-00002-of-00002.safetensors", "model.layers.31.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.31.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.31.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.31.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.31.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.31.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", "model.layers.31.mlp.down_proj.weight": "model-00002-of-00002.safetensors", "model.layers.31.mlp.up_proj.weight": "model-00002-of-00002.safetensors", "model.layers.31.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.30.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.30.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.30.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.30.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.30.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.30.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", "model.layers.30.mlp.down_proj.weight": "model-00002-of-00002.safetensors", "model.layers.30.mlp.up_proj.weight": "model-00002-of-00002.safetensors", "model.layers.30.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.29.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.29.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.29.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.29.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.29.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.29.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", 
"model.layers.29.mlp.down_proj.weight": "model-00002-of-00002.safetensors", "model.layers.29.mlp.up_proj.weight": "model-00002-of-00002.safetensors", "model.layers.29.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.28.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.28.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.28.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.28.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.28.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.28.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", "model.layers.28.mlp.down_proj.weight": "model-00002-of-00002.safetensors", "model.layers.28.mlp.up_proj.weight": "model-00002-of-00002.safetensors", "model.layers.28.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors", "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors", "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors", "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors", "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors", "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors", "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", 
"model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors", "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors", "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors", "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors", "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors", "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors", "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors", "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors", "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors", "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors", "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors", "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00002.safetensors", "model.layers.10.mlp.down_proj.weight": "model-00002-of-00002.safetensors", "model.layers.10.input_layernorm.weight": "model-00002-of-00002.safetensors"}}
special_tokens_map.json ADDED
@@ -0,0 +1,35 @@
+ {
+   "additional_special_tokens": [
+     "<unk>",
+     "<s>",
+     "</s>"
+   ],
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     }
+   },
+   "additional_special_tokens": [
+     "<unk>",
+     "<s>",
+     "</s>"
+   ],
+   "bos_token": "<s>",
+   "chat_template": "{% for message in messages %}{{bos_token + message['role'] + '\n' + message['content'] + eos_token + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ bos_token + 'assistant\n' }}{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": true,
+   "model_max_length": 8192,
+   "pad_token": "</s>",
+   "padding_side": "left",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "split_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": true
+ }
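
A minimal sketch of what the `chat_template` above produces, assuming the repo id from this repository's name. Note that this role-per-line template differs from the `[INST]` format shown in the README, so pick one format and use it consistently:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bobofrut/Ladybird-base-7B-v8")  # assumed repo id
messages = [{"role": "user", "content": "Hello!"}]
text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(text)
# <s>user
# Hello!</s>
# <s>assistant
```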