abanm committed on
Commit
cc3210c
1 Parent(s): e0a8166

Update: Uploading latest local project files

Files changed (39)
  1. Dubs/v0.0.2/full_model/config.json +3 -3
  2. Dubs/v0.0.2/full_model/model-00001-of-00002.safetensors +2 -2
  3. Dubs/v0.0.2/full_model/model-00002-of-00002.safetensors +2 -2
  4. Dubs/v0.0.3/P_C/checkpoint-1180/README.md +0 -202
  5. Dubs/v0.0.3/P_C/checkpoint-1180/adapter_config.json +0 -32
  6. Dubs/v0.0.3/P_C/checkpoint-1180/adapter_model.safetensors +0 -3
  7. Dubs/v0.0.3/P_C/checkpoint-1180/added_tokens.json +0 -13
  8. Dubs/v0.0.3/P_C/checkpoint-1180/optimizer.pt +0 -3
  9. Dubs/v0.0.3/P_C/checkpoint-1180/rng_state.pth +0 -3
  10. Dubs/v0.0.3/P_C/checkpoint-1180/scheduler.pt +0 -3
  11. Dubs/v0.0.3/P_C/checkpoint-1180/special_tokens_map.json +0 -30
  12. Dubs/v0.0.3/P_C/checkpoint-1180/tokenizer.json +0 -0
  13. Dubs/v0.0.3/P_C/checkpoint-1180/tokenizer.model +0 -3
  14. Dubs/v0.0.3/P_C/checkpoint-1180/tokenizer_config.json +0 -132
  15. Dubs/v0.0.3/P_C/checkpoint-1180/trainer_state.json +0 -362
  16. Dubs/v0.0.3/P_C/checkpoint-1180/training_args.bin +0 -3
  17. Dubs/v0.0.3/P_C/runs/Dec15_03-01-10_a2e074eee72a/events.out.tfevents.1734232840.a2e074eee72a.11068.0 +0 -3
  18. Dubs/v0.0.3/P_C/runs/Dec16_04-30-14_a2e074eee72a/events.out.tfevents.1734323636.a2e074eee72a.21060.1 +0 -3
  19. Dubs/v0.0.3/P_C/runs/Dec16_04-39-16_a2e074eee72a/events.out.tfevents.1734323995.a2e074eee72a.23396.0 +0 -3
  20. Dubs/v0.0.3/P_C/runs/Dec16_04-41-02_a2e074eee72a/events.out.tfevents.1734324092.a2e074eee72a.23396.1 +0 -3
  21. Dubs/v0.0.3/P_C/runs/{Dec16_04-30-14_a2e074eee72a/events.out.tfevents.1734323590.a2e074eee72a.21060.0 → Dec16_10-32-06_a2e074eee72a/events.out.tfevents.1734345218.a2e074eee72a.92225.0} +2 -2
  22. Dubs/v0.0.3/adapter/README.md +0 -202
  23. Dubs/v0.0.3/adapter/adapter_config.json +0 -32
  24. Dubs/v0.0.3/adapter/adapter_model.safetensors +0 -3
  25. Dubs/v0.0.3/adapter/added_tokens.json +0 -13
  26. Dubs/v0.0.3/adapter/special_tokens_map.json +0 -30
  27. Dubs/v0.0.3/adapter/tokenizer.json +0 -0
  28. Dubs/v0.0.3/adapter/tokenizer.model +0 -3
  29. Dubs/v0.0.3/adapter/tokenizer_config.json +0 -132
  30. Dubs/v0.0.3/full_model/added_tokens.json +0 -13
  31. Dubs/v0.0.3/full_model/config.json +0 -139
  32. Dubs/v0.0.3/full_model/generation_config.json +0 -11
  33. Dubs/v0.0.3/full_model/model-00001-of-00002.safetensors +0 -3
  34. Dubs/v0.0.3/full_model/model-00002-of-00002.safetensors +0 -3
  35. Dubs/v0.0.3/full_model/model.safetensors.index.json +0 -202
  36. Dubs/v0.0.3/full_model/special_tokens_map.json +0 -24
  37. Dubs/v0.0.3/full_model/tokenizer.json +0 -0
  38. Dubs/v0.0.3/full_model/tokenizer.model +0 -3
  39. Dubs/v0.0.3/full_model/tokenizer_config.json +0 -132
Dubs/v0.0.2/full_model/config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "Dubs_0.0.3",
+  "_name_or_path": "Dubs_0.0.2",
   "architectures": [
     "Phi3ForCausalLM"
   ],
@@ -10,7 +10,7 @@
     "AutoModelForCausalLM": "microsoft/Phi-3-mini-128k-instruct--modeling_phi3.Phi3ForCausalLM"
   },
   "bos_token_id": 1,
-  "description": "Finetuned on RayBernard/leetcode, MAsad789565/Coding_GPT4_Data,abanm/preprocessed-leetcode-multi-shot",
+  "description": "LoRA fine-tuned version of Phi-3 with updated configuration.",
   "embd_pdrop": 0.0,
   "eos_token_id": 32000,
   "hidden_act": "silu",
@@ -132,7 +132,7 @@
   "rope_theta": 10000.0,
   "sliding_window": 262144,
   "tie_word_embeddings": false,
-  "torch_dtype": "float16",
+  "torch_dtype": "bfloat16",
   "transformers_version": "4.47.0",
   "use_cache": true,
   "vocab_size": 32064
Dubs/v0.0.2/full_model/model-00001-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:941a3751222a841f371980dfe36b1dbc2b4059f0ece911473e2f05875554c22c
-size 4972489200
+oid sha256:301fbd95011db347b6d798c97c613ff95842e79c7c36bd45ebe8927212419452
+size 4972489328
Dubs/v0.0.2/full_model/model-00002-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cd3b83ee0870ce9dae877fd342d61a131ace6d832bc1eb90d056220b69c92a4e
-size 2669692488
+oid sha256:05bee8e7c75e72dd00515b537ac0bee81b783c0640861604531167bc03e0a6e2
+size 2669692552
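
Note: these .safetensors entries are Git LFS pointers, which store only a SHA-256 oid and a byte size. A minimal sketch of verifying a downloaded shard against its pointer (values copied from the new pointer above):

import hashlib
from pathlib import Path

def verify_lfs_pointer(path: str, expected_oid: str, expected_size: int) -> bool:
    """True if the file's size and SHA-256 digest match the LFS pointer."""
    p = Path(path)
    if p.stat().st_size != expected_size:
        return False
    digest = hashlib.sha256()
    with p.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

ok = verify_lfs_pointer(
    "model-00002-of-00002.safetensors",
    "05bee8e7c75e72dd00515b537ac0bee81b783c0640861604531167bc03e0a6e2",
    2669692552,
)
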
Dubs/v0.0.3/P_C/checkpoint-1180/README.md DELETED
@@ -1,202 +0,0 @@
----
-base_model: /content/Ai_Reboot/Dubs/v0.0.2/full_model
-library_name: peft
----
-
-# Model Card for Model ID
-
-<!-- Provide a quick summary of what the model is/does. -->
-
-
-
-## Model Details
-
-### Model Description
-
-<!-- Provide a longer summary of what this model is. -->
-
-
-
-- **Developed by:** [More Information Needed]
-- **Funded by [optional]:** [More Information Needed]
-- **Shared by [optional]:** [More Information Needed]
-- **Model type:** [More Information Needed]
-- **Language(s) (NLP):** [More Information Needed]
-- **License:** [More Information Needed]
-- **Finetuned from model [optional]:** [More Information Needed]
-
-### Model Sources [optional]
-
-<!-- Provide the basic links for the model. -->
-
-- **Repository:** [More Information Needed]
-- **Paper [optional]:** [More Information Needed]
-- **Demo [optional]:** [More Information Needed]
-
-## Uses
-
-<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
-
-### Direct Use
-
-<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
-
-[More Information Needed]
-
-### Downstream Use [optional]
-
-<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
-
-[More Information Needed]
-
-### Out-of-Scope Use
-
-<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
-
-[More Information Needed]
-
-## Bias, Risks, and Limitations
-
-<!-- This section is meant to convey both technical and sociotechnical limitations. -->
-
-[More Information Needed]
-
-### Recommendations
-
-<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
-
-Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
-
-## How to Get Started with the Model
-
-Use the code below to get started with the model.
-
-[More Information Needed]
-
-## Training Details
-
-### Training Data
-
-<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
-
-[More Information Needed]
-
-### Training Procedure
-
-<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
-
-#### Preprocessing [optional]
-
-[More Information Needed]
-
-
-#### Training Hyperparameters
-
-- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
-
-#### Speeds, Sizes, Times [optional]
-
-<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
-
-[More Information Needed]
-
-## Evaluation
-
-<!-- This section describes the evaluation protocols and provides the results. -->
-
-### Testing Data, Factors & Metrics
-
-#### Testing Data
-
-<!-- This should link to a Dataset Card if possible. -->
-
-[More Information Needed]
-
-#### Factors
-
-<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
-
-[More Information Needed]
-
-#### Metrics
-
-<!-- These are the evaluation metrics being used, ideally with a description of why. -->
-
-[More Information Needed]
-
-### Results
-
-[More Information Needed]
-
-#### Summary
-
-
-
-## Model Examination [optional]
-
-<!-- Relevant interpretability work for the model goes here -->
-
-[More Information Needed]
-
-## Environmental Impact
-
-<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
-
-Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
-
-- **Hardware Type:** [More Information Needed]
-- **Hours used:** [More Information Needed]
-- **Cloud Provider:** [More Information Needed]
-- **Compute Region:** [More Information Needed]
-- **Carbon Emitted:** [More Information Needed]
-
-## Technical Specifications [optional]
-
-### Model Architecture and Objective
-
-[More Information Needed]
-
-### Compute Infrastructure
-
-[More Information Needed]
-
-#### Hardware
-
-[More Information Needed]
-
-#### Software
-
-[More Information Needed]
-
-## Citation [optional]
-
-<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
-
-**BibTeX:**
-
-[More Information Needed]
-
-**APA:**
-
-[More Information Needed]
-
-## Glossary [optional]
-
-<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
-
-[More Information Needed]
-
-## More Information [optional]
-
-[More Information Needed]
-
-## Model Card Authors [optional]
-
-[More Information Needed]
-
-## Model Card Contact
-
-[More Information Needed]
-### Framework versions
-
-- PEFT 0.14.0
Dubs/v0.0.3/P_C/checkpoint-1180/adapter_config.json DELETED
@@ -1,32 +0,0 @@
-{
-  "alpha_pattern": {},
-  "auto_mapping": null,
-  "base_model_name_or_path": "/content/Ai_Reboot/Dubs/v0.0.2/full_model",
-  "bias": "none",
-  "eva_config": null,
-  "exclude_modules": null,
-  "fan_in_fan_out": false,
-  "inference_mode": true,
-  "init_lora_weights": true,
-  "layer_replication": null,
-  "layers_pattern": null,
-  "layers_to_transform": null,
-  "loftq_config": {},
-  "lora_alpha": 32,
-  "lora_bias": false,
-  "lora_dropout": 0.1,
-  "megatron_config": null,
-  "megatron_core": "megatron.core",
-  "modules_to_save": null,
-  "peft_type": "LORA",
-  "r": 128,
-  "rank_pattern": {},
-  "revision": null,
-  "target_modules": [
-    "self_attn.qkv_proj",
-    "self_attn.o_proj"
-  ],
-  "task_type": "CAUSAL_LM",
-  "use_dora": false,
-  "use_rslora": false
-}
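
Note: a minimal PEFT sketch of the LoRA setup this deleted adapter_config.json describes, with values copied from the file (rank 128, alpha 32, dropout 0.1, attention projections only):

from peft import LoraConfig

lora_config = LoraConfig(
    r=128,           # "r" above
    lora_alpha=32,   # "lora_alpha" above
    lora_dropout=0.1,
    bias="none",
    target_modules=["self_attn.qkv_proj", "self_attn.o_proj"],
    task_type="CAUSAL_LM",
)
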
Dubs/v0.0.3/P_C/checkpoint-1180/adapter_model.safetensors DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:f0d958f98ec828c030528700230daeea209514f03658f2be540c3ee1f196abe9
-size 302007584
Dubs/v0.0.3/P_C/checkpoint-1180/added_tokens.json DELETED
@@ -1,13 +0,0 @@
-{
-  "<|assistant|>": 32001,
-  "<|endoftext|>": 32000,
-  "<|end|>": 32007,
-  "<|placeholder1|>": 32002,
-  "<|placeholder2|>": 32003,
-  "<|placeholder3|>": 32004,
-  "<|placeholder4|>": 32005,
-  "<|placeholder5|>": 32008,
-  "<|placeholder6|>": 32009,
-  "<|system|>": 32006,
-  "<|user|>": 32010
-}
Dubs/v0.0.3/P_C/checkpoint-1180/optimizer.pt DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:5f57df75d294eb3f0d5e52a9bd07f57478ce60669ec59e5292d3f02d66aa12a6
-size 604059386
Dubs/v0.0.3/P_C/checkpoint-1180/rng_state.pth DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:795cf4f259dca40dc81bafec6b5275b52f053a70f70595c2783d7b8a130ee50e
-size 14244
Dubs/v0.0.3/P_C/checkpoint-1180/scheduler.pt DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:168d0a918fdd6e13ff263bac0cbb778603a9daa4e88ab22cb146177136b2e02b
-size 1064
Dubs/v0.0.3/P_C/checkpoint-1180/special_tokens_map.json DELETED
@@ -1,30 +0,0 @@
-{
-  "bos_token": {
-    "content": "<s>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "eos_token": {
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "pad_token": {
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "unk_token": {
-    "content": "<unk>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  }
-}
Dubs/v0.0.3/P_C/checkpoint-1180/tokenizer.json DELETED
The diff for this file is too large to render. See raw diff
 
Dubs/v0.0.3/P_C/checkpoint-1180/tokenizer.model DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
-size 499723
Dubs/v0.0.3/P_C/checkpoint-1180/tokenizer_config.json DELETED
@@ -1,132 +0,0 @@
-{
-  "add_bos_token": false,
-  "add_eos_token": false,
-  "add_prefix_space": null,
-  "added_tokens_decoder": {
-    "0": {
-      "content": "<unk>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "1": {
-      "content": "<s>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "2": {
-      "content": "</s>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": false
-    },
-    "32000": {
-      "content": "<|endoftext|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "32001": {
-      "content": "<|assistant|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32002": {
-      "content": "<|placeholder1|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32003": {
-      "content": "<|placeholder2|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32004": {
-      "content": "<|placeholder3|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32005": {
-      "content": "<|placeholder4|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32006": {
-      "content": "<|system|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32007": {
-      "content": "<|end|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32008": {
-      "content": "<|placeholder5|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32009": {
-      "content": "<|placeholder6|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32010": {
-      "content": "<|user|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    }
-  },
-  "bos_token": "<s>",
-  "chat_template": "{% for message in messages %}{% if message['role'] == 'system' %}{{'<|system|>\n' + message['content'] + '<|end|>\n'}}{% elif message['role'] == 'user' %}{{'<|user|>\n' + message['content'] + '<|end|>\n'}}{% elif message['role'] == 'assistant' %}{{'<|assistant|>\n' + message['content'] + '<|end|>\n'}}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% else %}{{ eos_token }}{% endif %}",
-  "clean_up_tokenization_spaces": false,
-  "eos_token": "<|endoftext|>",
-  "extra_special_tokens": {},
-  "legacy": false,
-  "model_max_length": 131072,
-  "pad_token": "<|endoftext|>",
-  "padding_side": "right",
-  "sp_model_kwargs": {},
-  "tokenizer_class": "LlamaTokenizer",
-  "unk_token": "<unk>",
-  "use_default_system_prompt": false
-}
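
Note: the chat_template in this deleted tokenizer_config.json wraps each turn in <|system|>/<|user|>/<|assistant|> markers terminated by <|end|>. A minimal sketch of rendering a prompt with it, assuming the tokenizer files are available locally (the path below is hypothetical):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Dubs/v0.0.2/full_model")  # hypothetical path
messages = [
    {"role": "system", "content": "You are a coding assistant."},
    {"role": "user", "content": "Reverse a linked list."},
]
# add_generation_prompt=True appends "<|assistant|>\n" so the model answers as assistant
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
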
Dubs/v0.0.3/P_C/checkpoint-1180/trainer_state.json DELETED
@@ -1,362 +0,0 @@
-{
-  "best_metric": null,
-  "best_model_checkpoint": null,
-  "epoch": 1.0,
-  "eval_steps": 500,
-  "global_step": 1180,
-  "is_hyper_param_search": false,
-  "is_local_process_zero": true,
-  "is_world_process_zero": true,
-  "log_history": [
-    {
-      "epoch": 0.0211864406779661,
-      "grad_norm": 4.423079013824463,
-      "learning_rate": 3.1779661016949146e-05,
-      "loss": 13.741,
-      "step": 25
-    },
-    {
-      "epoch": 0.0423728813559322,
-      "grad_norm": 1.091511845588684,
-      "learning_rate": 6.228813559322034e-05,
-      "loss": 6.2476,
-      "step": 50
-    },
-    {
-      "epoch": 0.0635593220338983,
-      "grad_norm": 0.253110408782959,
-      "learning_rate": 9.406779661016948e-05,
-      "loss": 1.2771,
-      "step": 75
-    },
-    {
-      "epoch": 0.0847457627118644,
-      "grad_norm": 0.4360060691833496,
-      "learning_rate": 0.00012584745762711863,
-      "loss": 1.1627,
-      "step": 100
-    },
-    {
-      "epoch": 0.1059322033898305,
-      "grad_norm": 0.9220973253250122,
-      "learning_rate": 0.00014998818664978784,
-      "loss": 1.1255,
-      "step": 125
-    },
-    {
-      "epoch": 0.1271186440677966,
-      "grad_norm": 0.34438595175743103,
-      "learning_rate": 0.00014968486184124044,
-      "loss": 0.9374,
-      "step": 150
-    },
-    {
-      "epoch": 0.1483050847457627,
-      "grad_norm": 0.17265631258487701,
-      "learning_rate": 0.00014897325021132057,
-      "loss": 0.9722,
-      "step": 175
-    },
-    {
-      "epoch": 0.1694915254237288,
-      "grad_norm": 0.2722087800502777,
-      "learning_rate": 0.00014785724199485324,
-      "loss": 0.8952,
-      "step": 200
-    },
-    {
-      "epoch": 0.1906779661016949,
-      "grad_norm": 0.25106003880500793,
-      "learning_rate": 0.00014634293817985785,
-      "loss": 0.952,
-      "step": 225
-    },
-    {
-      "epoch": 0.211864406779661,
-      "grad_norm": 0.6425552368164062,
-      "learning_rate": 0.0001444386171546982,
-      "loss": 0.9457,
-      "step": 250
-    },
-    {
-      "epoch": 0.2330508474576271,
-      "grad_norm": 0.270170658826828,
-      "learning_rate": 0.00014215468945183118,
-      "loss": 0.8517,
-      "step": 275
-    },
-    {
-      "epoch": 0.2542372881355932,
-      "grad_norm": 0.5266702771186829,
-      "learning_rate": 0.00013950364083556102,
-      "loss": 0.9191,
-      "step": 300
-    },
-    {
-      "epoch": 0.2754237288135593,
-      "grad_norm": 0.16475124657154083,
-      "learning_rate": 0.00013649996404492692,
-      "loss": 1.048,
-      "step": 325
-    },
-    {
-      "epoch": 0.2966101694915254,
-      "grad_norm": 0.22005195915699005,
-      "learning_rate": 0.0001331600795648711,
-      "loss": 0.8598,
-      "step": 350
-    },
-    {
-      "epoch": 0.3177966101694915,
-      "grad_norm": 0.6684536933898926,
-      "learning_rate": 0.0001295022458588157,
-      "loss": 0.8269,
-      "step": 375
-    },
-    {
-      "epoch": 0.3389830508474576,
-      "grad_norm": 0.37612682580947876,
-      "learning_rate": 0.00012554645955338803,
-      "loss": 0.8911,
-      "step": 400
-    },
-    {
-      "epoch": 0.3601694915254237,
-      "grad_norm": 0.11635318398475647,
-      "learning_rate": 0.00012131434612096438,
-      "loss": 0.8436,
-      "step": 425
-    },
-    {
-      "epoch": 0.3813559322033898,
-      "grad_norm": 0.2107096016407013,
-      "learning_rate": 0.0001168290416576479,
-      "loss": 0.8583,
-      "step": 450
-    },
-    {
-      "epoch": 0.4025423728813559,
-      "grad_norm": 0.30154237151145935,
-      "learning_rate": 0.00011211506640297668,
-      "loss": 0.8846,
-      "step": 475
-    },
-    {
-      "epoch": 0.423728813559322,
-      "grad_norm": 0.17550069093704224,
-      "learning_rate": 0.00010719819069280337,
-      "loss": 0.8752,
-      "step": 500
-    },
-    {
-      "epoch": 0.4449152542372881,
-      "grad_norm": 0.4593244791030884,
-      "learning_rate": 0.00010210529407815477,
-      "loss": 0.8576,
-      "step": 525
-    },
-    {
-      "epoch": 0.4661016949152542,
-      "grad_norm": 0.11149267852306366,
-      "learning_rate": 9.686421838023943e-05,
-      "loss": 0.7814,
-      "step": 550
-    },
-    {
-      "epoch": 0.4872881355932203,
-      "grad_norm": 0.19643345475196838,
-      "learning_rate": 9.150361548492185e-05,
-      "loss": 0.8651,
-      "step": 575
-    },
-    {
-      "epoch": 0.5084745762711864,
-      "grad_norm": 0.29600661993026733,
-      "learning_rate": 8.605279070873881e-05,
-      "loss": 0.9697,
-      "step": 600
-    },
-    {
-      "epoch": 0.5296610169491526,
-      "grad_norm": 0.32854074239730835,
-      "learning_rate": 8.054154259274484e-05,
-      "loss": 0.7701,
-      "step": 625
-    },
-    {
-      "epoch": 0.5508474576271186,
-      "grad_norm": 1.0469460487365723,
-      "learning_rate": 7.5e-05,
-      "loss": 0.794,
-      "step": 650
-    },
-    {
-      "epoch": 0.5720338983050848,
-      "grad_norm": 0.18007223308086395,
-      "learning_rate": 6.945845740725515e-05,
-      "loss": 0.8436,
-      "step": 675
-    },
-    {
-      "epoch": 0.5932203389830508,
-      "grad_norm": 0.5170131325721741,
-      "learning_rate": 6.394720929126116e-05,
-      "loss": 0.8871,
-      "step": 700
-    },
-    {
-      "epoch": 0.614406779661017,
-      "grad_norm": 0.3806993067264557,
-      "learning_rate": 5.849638451507817e-05,
-      "loss": 0.9176,
-      "step": 725
-    },
-    {
-      "epoch": 0.635593220338983,
-      "grad_norm": 0.23310065269470215,
-      "learning_rate": 5.313578161976055e-05,
-      "loss": 0.8681,
-      "step": 750
-    },
-    {
-      "epoch": 0.6567796610169492,
-      "grad_norm": 0.4720727205276489,
-      "learning_rate": 4.789470592184522e-05,
-      "loss": 0.7799,
-      "step": 775
-    },
-    {
-      "epoch": 0.6779661016949152,
-      "grad_norm": 0.29976892471313477,
-      "learning_rate": 4.280180930719661e-05,
-      "loss": 0.7153,
-      "step": 800
-    },
-    {
-      "epoch": 0.6991525423728814,
-      "grad_norm": 0.5382867455482483,
-      "learning_rate": 3.788493359702329e-05,
-      "loss": 0.829,
-      "step": 825
-    },
-    {
-      "epoch": 0.7203389830508474,
-      "grad_norm": 0.1544274091720581,
-      "learning_rate": 3.31709583423521e-05,
-      "loss": 0.8372,
-      "step": 850
-    },
-    {
-      "epoch": 0.7415254237288136,
-      "grad_norm": 0.26499661803245544,
-      "learning_rate": 2.868565387903561e-05,
-      "loss": 0.821,
-      "step": 875
-    },
-    {
-      "epoch": 0.7627118644067796,
-      "grad_norm": 0.22001473605632782,
-      "learning_rate": 2.4453540446611943e-05,
-      "loss": 0.8232,
-      "step": 900
-    },
-    {
-      "epoch": 0.7838983050847458,
-      "grad_norm": 0.6087915897369385,
-      "learning_rate": 2.0497754141184302e-05,
-      "loss": 0.8384,
-      "step": 925
-    },
-    {
-      "epoch": 0.8050847457627118,
-      "grad_norm": 0.3740731179714203,
-      "learning_rate": 1.68399204351289e-05,
-      "loss": 0.8695,
-      "step": 950
-    },
-    {
-      "epoch": 0.826271186440678,
-      "grad_norm": 0.15192225575447083,
-      "learning_rate": 1.3500035955073088e-05,
-      "loss": 0.796,
-      "step": 975
-    },
-    {
-      "epoch": 0.847457627118644,
-      "grad_norm": 0.10293978452682495,
-      "learning_rate": 1.0496359164438988e-05,
-      "loss": 0.7925,
-      "step": 1000
-    },
-    {
-      "epoch": 0.8686440677966102,
-      "grad_norm": 0.34073716402053833,
-      "learning_rate": 7.845310548168824e-06,
-      "loss": 0.8325,
-      "step": 1025
-    },
-    {
-      "epoch": 0.8898305084745762,
-      "grad_norm": 0.2313806414604187,
-      "learning_rate": 5.561382845301771e-06,
-      "loss": 0.8819,
-      "step": 1050
-    },
-    {
-      "epoch": 0.9110169491525424,
-      "grad_norm": 0.1316712498664856,
-      "learning_rate": 3.657061820142132e-06,
-      "loss": 0.7955,
-      "step": 1075
-    },
-    {
-      "epoch": 0.9322033898305084,
-      "grad_norm": 0.1465558558702469,
-      "learning_rate": 2.142758005146761e-06,
-      "loss": 0.6769,
-      "step": 1100
-    },
-    {
-      "epoch": 0.9533898305084746,
-      "grad_norm": 0.2290423959493637,
-      "learning_rate": 1.0267497886794206e-06,
-      "loss": 0.838,
-      "step": 1125
-    },
-    {
-      "epoch": 0.9745762711864406,
-      "grad_norm": 0.15858058631420135,
-      "learning_rate": 3.1513815875957303e-07,
-      "loss": 0.8082,
-      "step": 1150
-    },
-    {
-      "epoch": 0.9957627118644068,
-      "grad_norm": 0.12132962793111801,
-      "learning_rate": 1.1813350212161455e-08,
-      "loss": 0.7888,
-      "step": 1175
-    }
-  ],
-  "logging_steps": 25,
-  "max_steps": 1180,
-  "num_input_tokens_seen": 0,
-  "num_train_epochs": 1,
-  "save_steps": 0,
-  "stateful_callbacks": {
-    "TrainerControl": {
-      "args": {
-        "should_epoch_stop": false,
-        "should_evaluate": false,
-        "should_log": false,
-        "should_save": true,
-        "should_training_stop": true
-      },
-      "attributes": {}
-    }
-  },
-  "total_flos": 5.504816495827354e+16,
-  "train_batch_size": 2,
-  "trial_name": null,
-  "trial_params": null
-}
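
Note: the "log_history" in this deleted trainer_state.json is the full training log for the run (loss falls from 13.741 at step 25 to 0.7888 at step 1175 over one epoch). A minimal sketch of pulling the loss curve out of such a file:

import json

with open("trainer_state.json") as f:  # hypothetical local copy of the deleted file
    state = json.load(f)

pairs = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
for step, loss in pairs[-3:]:  # print the last few logged points
    print(f"step {step}: loss {loss}")
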
Dubs/v0.0.3/P_C/checkpoint-1180/training_args.bin DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:f7edcce57d73eea8bb0aac0d1a0d76f32655a89c6363b59bd3da38146264d285
-size 5624
Dubs/v0.0.3/P_C/runs/Dec15_03-01-10_a2e074eee72a/events.out.tfevents.1734232840.a2e074eee72a.11068.0 DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:77cf5d4b620d7b583553b781b2edb43ab3abd38557a378e5206bd77ab11448b2
-size 874908
Dubs/v0.0.3/P_C/runs/Dec16_04-30-14_a2e074eee72a/events.out.tfevents.1734323636.a2e074eee72a.21060.1 DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:46e25145604e108a49344f36e4240d5fe58e478cbba9782c7aa8e66feffeff89
-size 8733
Dubs/v0.0.3/P_C/runs/Dec16_04-39-16_a2e074eee72a/events.out.tfevents.1734323995.a2e074eee72a.23396.0 DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:d875e661f09f86353b3205c966134645a0185b1e7ef42241643a3451ba2486be
-size 8940
Dubs/v0.0.3/P_C/runs/Dec16_04-41-02_a2e074eee72a/events.out.tfevents.1734324092.a2e074eee72a.23396.1 DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:d98bc402128520c72fe64c2a80d5dcfe28613434e3f0ac782d976030c89ff2b7
-size 18983
Dubs/v0.0.3/P_C/runs/{Dec16_04-30-14_a2e074eee72a/events.out.tfevents.1734323590.a2e074eee72a.21060.0 → Dec16_10-32-06_a2e074eee72a/events.out.tfevents.1734345218.a2e074eee72a.92225.0} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4c48056829ea891e573fcc5b206c6b01f327124b658e9a90fe16bcfd203c76a3
-size 8733
+oid sha256:6dcb27609442c6301b7163092cd947d1800f9daaa17c7c653e55f5521307fd98
+size 25378
Dubs/v0.0.3/adapter/README.md DELETED
@@ -1,202 +0,0 @@
----
-base_model: /content/Ai_Reboot/Dubs/v0.0.2/full_model
-library_name: peft
----
-
-# Model Card for Model ID
-
-<!-- Provide a quick summary of what the model is/does. -->
-
-
-
-## Model Details
-
-### Model Description
-
-<!-- Provide a longer summary of what this model is. -->
-
-
-
-- **Developed by:** [More Information Needed]
-- **Funded by [optional]:** [More Information Needed]
-- **Shared by [optional]:** [More Information Needed]
-- **Model type:** [More Information Needed]
-- **Language(s) (NLP):** [More Information Needed]
-- **License:** [More Information Needed]
-- **Finetuned from model [optional]:** [More Information Needed]
-
-### Model Sources [optional]
-
-<!-- Provide the basic links for the model. -->
-
-- **Repository:** [More Information Needed]
-- **Paper [optional]:** [More Information Needed]
-- **Demo [optional]:** [More Information Needed]
-
-## Uses
-
-<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
-
-### Direct Use
-
-<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
-
-[More Information Needed]
-
-### Downstream Use [optional]
-
-<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
-
-[More Information Needed]
-
-### Out-of-Scope Use
-
-<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
-
-[More Information Needed]
-
-## Bias, Risks, and Limitations
-
-<!-- This section is meant to convey both technical and sociotechnical limitations. -->
-
-[More Information Needed]
-
-### Recommendations
-
-<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
-
-Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
-
-## How to Get Started with the Model
-
-Use the code below to get started with the model.
-
-[More Information Needed]
-
-## Training Details
-
-### Training Data
-
-<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
-
-[More Information Needed]
-
-### Training Procedure
-
-<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
-
-#### Preprocessing [optional]
-
-[More Information Needed]
-
-
-#### Training Hyperparameters
-
-- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
-
-#### Speeds, Sizes, Times [optional]
-
-<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
-
-[More Information Needed]
-
-## Evaluation
-
-<!-- This section describes the evaluation protocols and provides the results. -->
-
-### Testing Data, Factors & Metrics
-
-#### Testing Data
-
-<!-- This should link to a Dataset Card if possible. -->
-
-[More Information Needed]
-
-#### Factors
-
-<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
-
-[More Information Needed]
-
-#### Metrics
-
-<!-- These are the evaluation metrics being used, ideally with a description of why. -->
-
-[More Information Needed]
-
-### Results
-
-[More Information Needed]
-
-#### Summary
-
-
-
-## Model Examination [optional]
-
-<!-- Relevant interpretability work for the model goes here -->
-
-[More Information Needed]
-
-## Environmental Impact
-
-<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
-
-Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
-
-- **Hardware Type:** [More Information Needed]
-- **Hours used:** [More Information Needed]
-- **Cloud Provider:** [More Information Needed]
-- **Compute Region:** [More Information Needed]
-- **Carbon Emitted:** [More Information Needed]
-
-## Technical Specifications [optional]
-
-### Model Architecture and Objective
-
-[More Information Needed]
-
-### Compute Infrastructure
-
-[More Information Needed]
-
-#### Hardware
-
-[More Information Needed]
-
-#### Software
-
-[More Information Needed]
-
-## Citation [optional]
-
-<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
-
-**BibTeX:**
-
-[More Information Needed]
-
-**APA:**
-
-[More Information Needed]
-
-## Glossary [optional]
-
-<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
-
-[More Information Needed]
-
-## More Information [optional]
-
-[More Information Needed]
-
-## Model Card Authors [optional]
-
-[More Information Needed]
-
-## Model Card Contact
-
-[More Information Needed]
-### Framework versions
-
-- PEFT 0.14.0
Dubs/v0.0.3/adapter/adapter_config.json DELETED
@@ -1,32 +0,0 @@
-{
-  "alpha_pattern": {},
-  "auto_mapping": null,
-  "base_model_name_or_path": "/content/Ai_Reboot/Dubs/v0.0.2/full_model",
-  "bias": "none",
-  "eva_config": null,
-  "exclude_modules": null,
-  "fan_in_fan_out": false,
-  "inference_mode": true,
-  "init_lora_weights": true,
-  "layer_replication": null,
-  "layers_pattern": null,
-  "layers_to_transform": null,
-  "loftq_config": {},
-  "lora_alpha": 32,
-  "lora_bias": false,
-  "lora_dropout": 0.1,
-  "megatron_config": null,
-  "megatron_core": "megatron.core",
-  "modules_to_save": null,
-  "peft_type": "LORA",
-  "r": 128,
-  "rank_pattern": {},
-  "revision": null,
-  "target_modules": [
-    "self_attn.qkv_proj",
-    "self_attn.o_proj"
-  ],
-  "task_type": "CAUSAL_LM",
-  "use_dora": false,
-  "use_rslora": false
-}
Dubs/v0.0.3/adapter/adapter_model.safetensors DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:f0d958f98ec828c030528700230daeea209514f03658f2be540c3ee1f196abe9
-size 302007584
Dubs/v0.0.3/adapter/added_tokens.json DELETED
@@ -1,13 +0,0 @@
-{
-  "<|assistant|>": 32001,
-  "<|endoftext|>": 32000,
-  "<|end|>": 32007,
-  "<|placeholder1|>": 32002,
-  "<|placeholder2|>": 32003,
-  "<|placeholder3|>": 32004,
-  "<|placeholder4|>": 32005,
-  "<|placeholder5|>": 32008,
-  "<|placeholder6|>": 32009,
-  "<|system|>": 32006,
-  "<|user|>": 32010
-}
Dubs/v0.0.3/adapter/special_tokens_map.json DELETED
@@ -1,30 +0,0 @@
-{
-  "bos_token": {
-    "content": "<s>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "eos_token": {
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "pad_token": {
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "unk_token": {
-    "content": "<unk>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  }
-}
Dubs/v0.0.3/adapter/tokenizer.json DELETED
The diff for this file is too large to render. See raw diff
 
Dubs/v0.0.3/adapter/tokenizer.model DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
-size 499723
Dubs/v0.0.3/adapter/tokenizer_config.json DELETED
@@ -1,132 +0,0 @@
-{
-  "add_bos_token": false,
-  "add_eos_token": false,
-  "add_prefix_space": null,
-  "added_tokens_decoder": {
-    "0": {
-      "content": "<unk>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "1": {
-      "content": "<s>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "2": {
-      "content": "</s>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": false
-    },
-    "32000": {
-      "content": "<|endoftext|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "32001": {
-      "content": "<|assistant|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32002": {
-      "content": "<|placeholder1|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32003": {
-      "content": "<|placeholder2|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32004": {
-      "content": "<|placeholder3|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32005": {
-      "content": "<|placeholder4|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32006": {
-      "content": "<|system|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32007": {
-      "content": "<|end|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32008": {
-      "content": "<|placeholder5|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32009": {
-      "content": "<|placeholder6|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32010": {
-      "content": "<|user|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    }
-  },
-  "bos_token": "<s>",
-  "chat_template": "{% for message in messages %}{% if message['role'] == 'system' %}{{'<|system|>\n' + message['content'] + '<|end|>\n'}}{% elif message['role'] == 'user' %}{{'<|user|>\n' + message['content'] + '<|end|>\n'}}{% elif message['role'] == 'assistant' %}{{'<|assistant|>\n' + message['content'] + '<|end|>\n'}}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% else %}{{ eos_token }}{% endif %}",
-  "clean_up_tokenization_spaces": false,
-  "eos_token": "<|endoftext|>",
-  "extra_special_tokens": {},
-  "legacy": false,
-  "model_max_length": 131072,
-  "pad_token": "<|endoftext|>",
-  "padding_side": "right",
-  "sp_model_kwargs": {},
-  "tokenizer_class": "LlamaTokenizer",
-  "unk_token": "<unk>",
-  "use_default_system_prompt": false
-}
Dubs/v0.0.3/full_model/added_tokens.json DELETED
@@ -1,13 +0,0 @@
-{
-  "<|assistant|>": 32001,
-  "<|endoftext|>": 32000,
-  "<|end|>": 32007,
-  "<|placeholder1|>": 32002,
-  "<|placeholder2|>": 32003,
-  "<|placeholder3|>": 32004,
-  "<|placeholder4|>": 32005,
-  "<|placeholder5|>": 32008,
-  "<|placeholder6|>": 32009,
-  "<|system|>": 32006,
-  "<|user|>": 32010
-}
Dubs/v0.0.3/full_model/config.json DELETED
@@ -1,139 +0,0 @@
-{
-  "_name_or_path": "Dubs_0.0.3",
-  "architectures": [
-    "Phi3ForCausalLM"
-  ],
-  "attention_bias": false,
-  "attention_dropout": 0.0,
-  "auto_map": {
-    "AutoConfig": "microsoft/Phi-3-mini-128k-instruct--configuration_phi3.Phi3Config",
-    "AutoModelForCausalLM": "microsoft/Phi-3-mini-128k-instruct--modeling_phi3.Phi3ForCausalLM"
-  },
-  "bos_token_id": 1,
-  "description": "Finetuned on RayBernard/leetcode, MAsad789565/Coding_GPT4_Data,abanm/preprocessed-leetcode-multi-shot",
-  "embd_pdrop": 0.0,
-  "eos_token_id": 32000,
-  "hidden_act": "silu",
-  "hidden_size": 3072,
-  "initializer_range": 0.02,
-  "intermediate_size": 8192,
-  "max_position_embeddings": 131072,
-  "model_type": "phi3",
-  "num_attention_heads": 32,
-  "num_hidden_layers": 32,
-  "num_key_value_heads": 32,
-  "original_max_position_embeddings": 4096,
-  "pad_token_id": 32000,
-  "resid_pdrop": 0.0,
-  "rms_norm_eps": 1e-05,
-  "rope_scaling": {
-    "long_factor": [
-      1.0700000524520874,
-      1.1200000047683716,
-      1.149999976158142,
-      1.4199999570846558,
-      1.5699999332427979,
-      1.7999999523162842,
-      2.129999876022339,
-      2.129999876022339,
-      3.009999990463257,
-      5.910000324249268,
-      6.950000286102295,
-      9.070000648498535,
-      9.930000305175781,
-      10.710000038146973,
-      11.130000114440918,
-      14.609999656677246,
-      15.409998893737793,
-      19.809999465942383,
-      37.279998779296875,
-      38.279998779296875,
-      38.599998474121094,
-      40.12000274658203,
-      46.20000457763672,
-      50.940006256103516,
-      53.66000747680664,
-      54.9373893737793,
-      56.89738845825195,
-      57.28738784790039,
-      59.98738479614258,
-      60.86738586425781,
-      60.887386322021484,
-      61.71739196777344,
-      62.91739273071289,
-      62.957393646240234,
-      63.41739273071289,
-      63.8173942565918,
-      63.83739471435547,
-      63.897396087646484,
-      63.93739700317383,
-      64.06739807128906,
-      64.11434936523438,
-      64.12435150146484,
-      64.15435028076172,
-      64.19435119628906,
-      64.24435424804688,
-      64.57435607910156,
-      64.69000244140625,
-      64.76000213623047
-    ],
-    "short_factor": [
-      1.1,
-      1.1,
-      1.1,
-      1.3000000000000003,
-      1.3500000000000003,
-      1.3500000000000003,
-      1.4000000000000004,
-      1.5500000000000005,
-      2.000000000000001,
-      2.000000000000001,
-      2.000000000000001,
-      2.000000000000001,
-      2.000000000000001,
-      2.000000000000001,
-      2.000000000000001,
-      2.000000000000001,
-      2.000000000000001,
-      2.000000000000001,
-      2.000000000000001,
-      2.000000000000001,
-      2.000000000000001,
-      2.000000000000001,
-      2.000000000000001,
-      2.000000000000001,
-      2.000000000000001,
-      2.0500000000000007,
-      2.0500000000000007,
-      2.0500000000000007,
-      2.0500000000000007,
-      2.0500000000000007,
-      2.0500000000000007,
-      2.1000000000000005,
-      2.1000000000000005,
-      2.1500000000000004,
-      2.25,
-      2.25,
-      2.25,
-      2.25,
-      2.25,
-      2.3999999999999995,
-      2.4499999999999993,
-      2.499999999999999,
-      2.6999999999999984,
-      2.6999999999999984,
-      2.7499999999999982,
-      2.799999999999998,
-      2.8999999999999977,
-      3.049999999999997
-    ],
-    "type": "longrope"
-  },
-  "rope_theta": 10000.0,
-  "sliding_window": 262144,
-  "tie_word_embeddings": false,
-  "torch_dtype": "float16",
-  "transformers_version": "4.47.0",
-  "use_cache": true,
-  "vocab_size": 32064
-}
Dubs/v0.0.3/full_model/generation_config.json DELETED
@@ -1,11 +0,0 @@
-{
-  "_from_model_config": true,
-  "bos_token_id": 1,
-  "eos_token_id": [
-    32000,
-    32001,
-    32007
-  ],
-  "pad_token_id": 32000,
-  "transformers_version": "4.47.0"
-}
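
Note: this deleted generation_config.json lists three stop ids. A minimal generation sketch requesting the same stopping behavior explicitly; it assumes `model` and `tokenizer` loaded as in the earlier config.json sketch (hypothetical local paths):

inputs = tokenizer("Write a binary search in Python.", return_tensors="pt")
outputs = model.generate(
    **inputs,
    max_new_tokens=128,
    eos_token_id=[32000, 32001, 32007],  # <|endoftext|>, <|assistant|>, <|end|>
    pad_token_id=32000,                  # matches "pad_token_id" above
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
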
Dubs/v0.0.3/full_model/model-00001-of-00002.safetensors DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:878793f3d6773c165f435e36b74c2bd5111f5a340d0eadfd920236a9e897795e
-size 4972489200
Dubs/v0.0.3/full_model/model-00002-of-00002.safetensors DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:6a22d77f240871a4df9910260b8214469c33b8ecaf2a4c76efd91311abc2fb16
-size 2669692488
Dubs/v0.0.3/full_model/model.safetensors.index.json DELETED
@@ -1,202 +0,0 @@
-{
-  "metadata": {
-    "total_size": 7642159104
-  },
-  "weight_map": {
-    "lm_head.weight": "model-00002-of-00002.safetensors",
-    "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
-    "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
-    "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.0.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
-    "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.0.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
-    "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.1.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
-    "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.1.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
-    "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.10.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
-    "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.10.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
-    "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.11.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
-    "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.11.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
-    "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.12.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
-    "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.12.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
-    "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.13.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
-    "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.13.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
-    "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.14.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
-    "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.14.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
-    "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.15.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
-    "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.15.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
-    "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.16.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
-    "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.16.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
-    "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.17.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
-    "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.17.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
-    "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.18.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
-    "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.18.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
-    "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.19.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
-    "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.19.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
-    "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.2.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
-    "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.2.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
-    "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.20.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
-    "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.20.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
-    "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.21.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-    "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.21.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
-    "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.22.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-    "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.22.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
-    "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.23.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-    "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.23.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
-    "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.24.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-    "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.24.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
-    "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.25.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-    "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.25.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
-    "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.26.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-    "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.26.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
-    "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.27.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-    "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.27.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.28.input_layernorm.weight": "model-00002-of-00002.safetensors",
-    "model.layers.28.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.28.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.28.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-    "model.layers.28.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.28.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.29.input_layernorm.weight": "model-00002-of-00002.safetensors",
-    "model.layers.29.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.29.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.29.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-    "model.layers.29.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.29.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
147
- "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
148
- "model.layers.3.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
149
- "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
150
- "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
151
- "model.layers.3.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
152
- "model.layers.30.input_layernorm.weight": "model-00002-of-00002.safetensors",
153
- "model.layers.30.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
154
- "model.layers.30.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
155
- "model.layers.30.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
156
- "model.layers.30.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
157
- "model.layers.30.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
158
- "model.layers.31.input_layernorm.weight": "model-00002-of-00002.safetensors",
159
- "model.layers.31.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
160
- "model.layers.31.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
161
- "model.layers.31.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
162
- "model.layers.31.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
163
- "model.layers.31.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
164
- "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
165
- "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
166
- "model.layers.4.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
167
- "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
168
- "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
169
- "model.layers.4.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
170
- "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
171
- "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
172
- "model.layers.5.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
173
- "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
174
- "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
175
- "model.layers.5.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
176
- "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
177
- "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
178
- "model.layers.6.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
179
- "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
180
- "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
181
- "model.layers.6.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
182
- "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
183
- "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
184
- "model.layers.7.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
185
- "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
186
- "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
187
- "model.layers.7.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
188
- "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
189
- "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
190
- "model.layers.8.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
191
- "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
192
- "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
193
- "model.layers.8.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
194
- "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
195
- "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
196
- "model.layers.9.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
197
- "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
198
- "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
199
- "model.layers.9.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
200
- "model.norm.weight": "model-00002-of-00002.safetensors"
201
- }
202
- }
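The weight map deleted above is the core of `model.safetensors.index.json`: every parameter name points at the shard file that stores it, so a loader can open only the shards it needs. Below is a minimal sketch of resolving tensors through such an index, assuming a local checkout of one of the remaining full-model directories (the `CKPT_DIR` path and the printed tensor name are assumptions for illustration):

```python
# Sketch: resolve tensors from a sharded safetensors checkpoint via its index.
import json
import os

from safetensors.torch import load_file  # pip install safetensors torch

CKPT_DIR = "Dubs/v0.0.2/full_model"  # assumed local checkout path

with open(os.path.join(CKPT_DIR, "model.safetensors.index.json")) as f:
    index = json.load(f)

weight_map = index["weight_map"]  # tensor name -> shard file name

# Group tensor names by shard so each shard file is read only once.
shards = {}
for name, shard_file in weight_map.items():
    shards.setdefault(shard_file, []).append(name)

state_dict = {}
for shard_file, names in shards.items():
    tensors = load_file(os.path.join(CKPT_DIR, shard_file))
    for name in names:
        state_dict[name] = tensors[name]

print(state_dict["model.norm.weight"].shape)
```

In practice `AutoModelForCausalLM.from_pretrained` performs this shard resolution automatically; the sketch only makes the index's indirection explicit.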
 
Dubs/v0.0.3/full_model/special_tokens_map.json DELETED
@@ -1,24 +0,0 @@
- {
- "bos_token": {
- "content": "<s>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false
- },
- "eos_token": {
- "content": "<|endoftext|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false
- },
- "pad_token": "<|endoftext|>",
- "unk_token": {
- "content": "<unk>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false
- }
- }
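For reference, `special_tokens_map.json` is what surfaces as the loaded tokenizer's `bos`/`eos`/`pad`/`unk` attributes. A short sketch of that mapping at runtime, assuming the base `microsoft/Phi-3-mini-128k-instruct` tokenizer (the model id is an assumption; the repo's own tokenizer files would resolve the same way):

```python
# Sketch: how transformers consumes special_tokens_map.json on load.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-128k-instruct")

# The bos/eos/pad/unk entries in the deleted map surface as attributes.
print(tok.bos_token)  # '<s>'
print(tok.eos_token)  # '<|endoftext|>'
print(tok.pad_token)  # '<|endoftext|>' (shared with eos, per the map)
print(tok.unk_token)  # '<unk>'
```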
 
Dubs/v0.0.3/full_model/tokenizer.json DELETED
The diff for this file is too large to render. See raw diff
 
Dubs/v0.0.3/full_model/tokenizer.model DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
- size 499723
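The three lines above are a Git LFS pointer, not the tokenizer itself: the real `tokenizer.model` blob is addressed by its `sha256` oid and fetched by LFS on checkout. A hedged sketch of parsing such a pointer file, where the file path is an assumption:

```python
# Sketch: a Git LFS pointer is a few "key value" lines; parse oid and size.
def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

ptr = parse_lfs_pointer("Dubs/v0.0.3/full_model/tokenizer.model")  # assumed path
assert ptr["version"] == "https://git-lfs.github.com/spec/v1"
print(ptr["oid"])   # sha256:9e55...
print(ptr["size"])  # 499723
```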
 
Dubs/v0.0.3/full_model/tokenizer_config.json DELETED
@@ -1,132 +0,0 @@
- {
- "add_bos_token": false,
- "add_eos_token": false,
- "add_prefix_space": null,
- "added_tokens_decoder": {
- "0": {
- "content": "<unk>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": true
- },
- "1": {
- "content": "<s>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": true
- },
- "2": {
- "content": "</s>",
- "lstrip": false,
- "normalized": false,
- "rstrip": true,
- "single_word": false,
- "special": false
- },
- "32000": {
- "content": "<|endoftext|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": true
- },
- "32001": {
- "content": "<|assistant|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": true,
- "single_word": false,
- "special": true
- },
- "32002": {
- "content": "<|placeholder1|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": true,
- "single_word": false,
- "special": true
- },
- "32003": {
- "content": "<|placeholder2|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": true,
- "single_word": false,
- "special": true
- },
- "32004": {
- "content": "<|placeholder3|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": true,
- "single_word": false,
- "special": true
- },
- "32005": {
- "content": "<|placeholder4|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": true,
- "single_word": false,
- "special": true
- },
- "32006": {
- "content": "<|system|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": true,
- "single_word": false,
- "special": true
- },
- "32007": {
- "content": "<|end|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": true,
- "single_word": false,
- "special": true
- },
- "32008": {
- "content": "<|placeholder5|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": true,
- "single_word": false,
- "special": true
- },
- "32009": {
- "content": "<|placeholder6|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": true,
- "single_word": false,
- "special": true
- },
- "32010": {
- "content": "<|user|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": true,
- "single_word": false,
- "special": true
- }
- },
- "bos_token": "<s>",
- "chat_template": "{% for message in messages %}{% if message['role'] == 'system' %}{{'<|system|>\n' + message['content'] + '<|end|>\n'}}{% elif message['role'] == 'user' %}{{'<|user|>\n' + message['content'] + '<|end|>\n'}}{% elif message['role'] == 'assistant' %}{{'<|assistant|>\n' + message['content'] + '<|end|>\n'}}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% else %}{{ eos_token }}{% endif %}",
- "clean_up_tokenization_spaces": false,
- "eos_token": "<|endoftext|>",
- "extra_special_tokens": {},
- "legacy": false,
- "model_max_length": 131072,
- "pad_token": "<|endoftext|>",
- "padding_side": "right",
- "sp_model_kwargs": {},
- "tokenizer_class": "LlamaTokenizer",
- "unk_token": "<unk>",
- "use_default_system_prompt": false
- }
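The `chat_template` field in the deleted config is a Jinja template that wraps each turn in `<|system|>` / `<|user|>` / `<|assistant|>` markers terminated by `<|end|>`. A minimal sketch of rendering it through `apply_chat_template`, assuming the base Phi-3 tokenizer (the model id and message contents are illustrative):

```python
# Sketch: render the Phi-3-style chat template shown above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-128k-instruct")

messages = [
    {"role": "system", "content": "You are a coding assistant."},
    {"role": "user", "content": "Reverse a linked list."},
]

prompt = tok.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)
# <|system|>
# You are a coding assistant.<|end|>
# <|user|>
# Reverse a linked list.<|end|>
# <|assistant|>
```

With `add_generation_prompt=True` the template appends a bare `<|assistant|>\n` to cue generation; otherwise, per the template's `{% else %}` branch, it appends the eos token instead.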