Marcio Lima Inácio committed on
Commit bd5119e · 1 Parent(s): 0174c19

Add trained models

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. GlorIA-1.3B-all/checkpoint-100/added_tokens.json +3 -0
  2. GlorIA-1.3B-all/checkpoint-100/config.json +86 -0
  3. GlorIA-1.3B-all/checkpoint-100/merges.txt +0 -0
  4. GlorIA-1.3B-all/checkpoint-100/model-00001-of-00002.safetensors +3 -0
  5. GlorIA-1.3B-all/checkpoint-100/model-00002-of-00002.safetensors +3 -0
  6. GlorIA-1.3B-all/checkpoint-100/model.safetensors.index.json +325 -0
  7. GlorIA-1.3B-all/checkpoint-100/optimizer.pt +3 -0
  8. GlorIA-1.3B-all/checkpoint-100/rng_state.pth +3 -0
  9. GlorIA-1.3B-all/checkpoint-100/scheduler.pt +3 -0
  10. GlorIA-1.3B-all/checkpoint-100/special_tokens_map.json +6 -0
  11. GlorIA-1.3B-all/checkpoint-100/tokenizer.json +0 -0
  12. GlorIA-1.3B-all/checkpoint-100/tokenizer_config.json +24 -0
  13. GlorIA-1.3B-all/checkpoint-100/trainer_state.json +47 -0
  14. GlorIA-1.3B-all/checkpoint-100/training_args.bin +3 -0
  15. GlorIA-1.3B-all/checkpoint-100/vocab.json +0 -0
  16. GlorIA-1.3B-all/checkpoint-200/added_tokens.json +3 -0
  17. GlorIA-1.3B-all/checkpoint-200/config.json +86 -0
  18. GlorIA-1.3B-all/checkpoint-200/merges.txt +0 -0
  19. GlorIA-1.3B-all/checkpoint-200/model-00001-of-00002.safetensors +3 -0
  20. GlorIA-1.3B-all/checkpoint-200/model-00002-of-00002.safetensors +3 -0
  21. GlorIA-1.3B-all/checkpoint-200/model.safetensors.index.json +325 -0
  22. GlorIA-1.3B-all/checkpoint-200/optimizer.pt +3 -0
  23. GlorIA-1.3B-all/checkpoint-200/rng_state.pth +3 -0
  24. GlorIA-1.3B-all/checkpoint-200/scheduler.pt +3 -0
  25. GlorIA-1.3B-all/checkpoint-200/special_tokens_map.json +6 -0
  26. GlorIA-1.3B-all/checkpoint-200/tokenizer.json +0 -0
  27. GlorIA-1.3B-all/checkpoint-200/tokenizer_config.json +24 -0
  28. GlorIA-1.3B-all/checkpoint-200/trainer_state.json +73 -0
  29. GlorIA-1.3B-all/checkpoint-200/training_args.bin +3 -0
  30. GlorIA-1.3B-all/checkpoint-200/vocab.json +0 -0
  31. GlorIA-1.3B-all/checkpoint-300/added_tokens.json +3 -0
  32. GlorIA-1.3B-all/checkpoint-300/config.json +86 -0
  33. GlorIA-1.3B-all/checkpoint-300/merges.txt +0 -0
  34. GlorIA-1.3B-all/checkpoint-300/model-00001-of-00002.safetensors +3 -0
  35. GlorIA-1.3B-all/checkpoint-300/model-00002-of-00002.safetensors +3 -0
  36. GlorIA-1.3B-all/checkpoint-300/model.safetensors.index.json +325 -0
  37. GlorIA-1.3B-all/checkpoint-300/optimizer.pt +3 -0
  38. GlorIA-1.3B-all/checkpoint-300/rng_state.pth +3 -0
  39. GlorIA-1.3B-all/checkpoint-300/scheduler.pt +3 -0
  40. GlorIA-1.3B-all/checkpoint-300/special_tokens_map.json +6 -0
  41. GlorIA-1.3B-all/checkpoint-300/tokenizer.json +0 -0
  42. GlorIA-1.3B-all/checkpoint-300/tokenizer_config.json +24 -0
  43. GlorIA-1.3B-all/checkpoint-300/trainer_state.json +99 -0
  44. GlorIA-1.3B-all/checkpoint-300/training_args.bin +3 -0
  45. GlorIA-1.3B-all/checkpoint-300/vocab.json +0 -0
  46. GlorIA-1.3B-positive/checkpoint-100/added_tokens.json +3 -0
  47. GlorIA-1.3B-positive/checkpoint-100/config.json +86 -0
  48. GlorIA-1.3B-positive/checkpoint-100/merges.txt +0 -0
  49. GlorIA-1.3B-positive/checkpoint-100/model-00001-of-00002.safetensors +3 -0
  50. GlorIA-1.3B-positive/checkpoint-100/model-00002-of-00002.safetensors +3 -0
GlorIA-1.3B-all/checkpoint-100/added_tokens.json ADDED
@@ -0,0 +1,3 @@
+{
+  "<|endoftext|>": 50257
+}
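
For orientation: this file records the single token appended beyond the base vocabulary, `<|endoftext|>` at id 50257, which lines up with `"vocab_size": 50258` in the config.json below. A quick consistency check (the local checkpoint path is illustrative):

```python
import json

ckpt = "GlorIA-1.3B-all/checkpoint-100"  # illustrative local path

added = json.load(open(f"{ckpt}/added_tokens.json"))
config = json.load(open(f"{ckpt}/config.json"))

# Base ids run 0..50256; the one added token sits at 50257.
assert config["vocab_size"] == max(added.values()) + 1
```
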
GlorIA-1.3B-all/checkpoint-100/config.json ADDED
@@ -0,0 +1,86 @@
+{
+  "_name_or_path": "NOVA-vision-language/GlorIA-1.3B",
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPTNeoForTokenClassification"
+  ],
+  "attention_dropout": 0,
+  "attention_layers": [
+    "global",
+    "local",
+    "global",
+    "local",
+    "global",
+    "local",
+    "global",
+    "local",
+    "global",
+    "local",
+    "global",
+    "local",
+    "global",
+    "local",
+    "global",
+    "local",
+    "global",
+    "local",
+    "global",
+    "local",
+    "global",
+    "local",
+    "global",
+    "local"
+  ],
+  "attention_types": [
+    [
+      [
+        "global",
+        "local"
+      ],
+      12
+    ]
+  ],
+  "bos_token_id": 50256,
+  "classifier_dropout": 0.1,
+  "do_sample": true,
+  "embed_dropout": 0,
+  "eos_token_id": 50256,
+  "gradient_checkpointing": false,
+  "hidden_size": 2048,
+  "id2label": {
+    "0": "Non-Pun",
+    "1": "Pun"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": null,
+  "label2id": {
+    "Non-Pun": 0,
+    "Pun": 1
+  },
+  "layer_norm_epsilon": 1e-05,
+  "max_length": 50,
+  "max_position_embeddings": 2048,
+  "model_type": "gpt_neo",
+  "num_heads": 16,
+  "num_layers": 24,
+  "resid_dropout": 0,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "task_specific_params": {
+    "text-generation": {
+      "do_sample": true,
+      "max_length": 50,
+      "temperature": 0.9
+    }
+  },
+  "temperature": 0.9,
+  "tokenizer_class": "GPT2Tokenizer",
+  "torch_dtype": "float32",
+  "transformers_version": "4.40.2",
+  "use_cache": true,
+  "vocab_size": 50258,
+  "window_size": 256
+}
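
The config declares a GPT-Neo 1.3B backbone fine-tuned as `GPTNeoForTokenClassification` with two labels (`Pun` / `Non-Pun`), so these checkpoints should load through the standard `transformers` auto classes. A minimal usage sketch, assuming the checkpoint directory has been fetched locally (the path and example sentence are illustrative):

```python
import torch
from transformers import AutoModelForTokenClassification, AutoTokenizer

ckpt = "GlorIA-1.3B-all/checkpoint-100"  # illustrative local path

tokenizer = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForTokenClassification.from_pretrained(ckpt)

inputs = tokenizer("Uma frase de exemplo.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, seq_len, 2)

# Map per-token predictions back to the labels declared in config.json.
print([model.config.id2label[i] for i in logits.argmax(-1)[0].tolist()])
```
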
GlorIA-1.3B-all/checkpoint-100/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
GlorIA-1.3B-all/checkpoint-100/model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:73b25a995964b586ae8e971629ff81e999981f63f3c725264be8f74061a1d3f8
+size 4993802376
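
What is committed here is a Git LFS pointer, not the shard itself: three plain-text lines giving the spec version, the SHA-256 of the real payload, and its size in bytes (about 5.0 GB for this first shard). A small sketch for sanity-checking a downloaded shard against its pointer (file names are illustrative):

```python
import hashlib
import os

def parse_lfs_pointer(path):
    # Pointer files look like: "version ...", "oid sha256:<hex>", "size <bytes>".
    fields = dict(line.split(" ", 1) for line in open(path).read().splitlines())
    return fields["oid"].split(":", 1)[1], int(fields["size"])

def verify(shard_path, pointer_path):
    oid, size = parse_lfs_pointer(pointer_path)
    if os.path.getsize(shard_path) != size:
        return False
    h = hashlib.sha256()
    with open(shard_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest() == oid
```
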
GlorIA-1.3B-all/checkpoint-100/model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:220e7be7ec3bd29de575aa3f2f5bbd514a2b9166771f5c560ae326f99df4dbc7
+size 268560328
GlorIA-1.3B-all/checkpoint-100/model.safetensors.index.json ADDED
@@ -0,0 +1,325 @@
+{
+  "metadata": {
+    "total_size": 5262327816
+  },
+  "weight_map": {
+    "classifier.bias": "model-00002-of-00002.safetensors",
+    "classifier.weight": "model-00002-of-00002.safetensors",
+    "transformer.h.0.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.0.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.0.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.0.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.0.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.0.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.0.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.0.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.0.ln_2.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.0.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.0.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.0.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.0.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.1.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.1.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.1.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.1.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.1.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.1.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.1.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.1.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.1.ln_2.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.1.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.1.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.1.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.1.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.10.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.10.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.10.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.10.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.10.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.10.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.10.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.10.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.10.ln_2.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.10.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.10.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.10.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.10.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.11.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.11.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.11.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.11.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.11.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.11.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.11.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.11.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.11.ln_2.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.11.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.11.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.11.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.11.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.12.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.12.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.12.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.12.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.12.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.12.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.12.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.12.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.12.ln_2.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.12.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.12.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.12.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.12.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.13.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.13.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.13.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.13.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.13.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.13.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.13.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.13.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.13.ln_2.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.13.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.13.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.13.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.13.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.14.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.14.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.14.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.14.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.14.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.14.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.14.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.14.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.14.ln_2.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.14.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.14.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.14.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.14.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.15.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.15.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.15.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.15.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.15.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.15.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.15.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.15.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.15.ln_2.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.15.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.15.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.15.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.15.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.16.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.16.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.16.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.16.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.16.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.16.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.16.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.16.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.16.ln_2.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.16.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.16.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.16.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.16.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.17.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.17.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.17.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.17.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.17.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.17.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.17.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.17.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.17.ln_2.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.17.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.17.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.17.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.17.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.18.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.18.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.18.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.18.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.18.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.18.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.18.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.18.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.18.ln_2.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.18.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.18.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.18.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.18.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.19.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.19.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.19.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.19.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.19.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.19.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.19.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.19.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.19.ln_2.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.19.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.19.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.19.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.19.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.2.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.2.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.2.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.2.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.2.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.2.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.2.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.2.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.2.ln_2.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.2.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.2.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.2.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.2.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.20.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.20.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.20.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.20.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.20.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.20.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.20.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.20.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.20.ln_2.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.20.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.20.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.20.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.20.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.21.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.21.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.21.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.21.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.21.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.21.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.21.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.21.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.21.ln_2.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.21.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.21.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.21.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.21.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.22.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.22.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.22.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.22.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.22.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.22.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.22.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.22.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.22.ln_2.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.22.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.22.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.22.mlp.c_proj.bias": "model-00002-of-00002.safetensors",
+    "transformer.h.22.mlp.c_proj.weight": "model-00002-of-00002.safetensors",
+    "transformer.h.23.attn.attention.k_proj.weight": "model-00002-of-00002.safetensors",
+    "transformer.h.23.attn.attention.out_proj.bias": "model-00002-of-00002.safetensors",
+    "transformer.h.23.attn.attention.out_proj.weight": "model-00002-of-00002.safetensors",
+    "transformer.h.23.attn.attention.q_proj.weight": "model-00002-of-00002.safetensors",
+    "transformer.h.23.attn.attention.v_proj.weight": "model-00002-of-00002.safetensors",
+    "transformer.h.23.ln_1.bias": "model-00002-of-00002.safetensors",
+    "transformer.h.23.ln_1.weight": "model-00002-of-00002.safetensors",
+    "transformer.h.23.ln_2.bias": "model-00002-of-00002.safetensors",
+    "transformer.h.23.ln_2.weight": "model-00002-of-00002.safetensors",
+    "transformer.h.23.mlp.c_fc.bias": "model-00002-of-00002.safetensors",
+    "transformer.h.23.mlp.c_fc.weight": "model-00002-of-00002.safetensors",
+    "transformer.h.23.mlp.c_proj.bias": "model-00002-of-00002.safetensors",
+    "transformer.h.23.mlp.c_proj.weight": "model-00002-of-00002.safetensors",
+    "transformer.h.3.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.3.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.3.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.3.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.3.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.3.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.3.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.3.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.3.ln_2.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.3.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.3.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.3.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.3.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.4.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.4.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.4.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.4.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.4.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.4.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.4.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.4.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.4.ln_2.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.4.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.4.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.4.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.4.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.5.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.5.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.5.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.5.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.5.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.5.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.5.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.5.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.5.ln_2.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.5.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.5.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.5.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.5.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.6.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.6.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.6.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.6.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.6.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.6.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.6.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.6.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.6.ln_2.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.6.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.6.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.6.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.6.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.7.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.7.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.7.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.7.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.7.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.7.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.7.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.7.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.7.ln_2.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.7.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.7.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.7.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.7.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.8.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.8.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.8.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.8.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.8.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.8.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.8.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.8.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.8.ln_2.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.8.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.8.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.8.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.8.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.9.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.9.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.9.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.9.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.9.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.9.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.9.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.9.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.9.ln_2.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.9.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.9.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.9.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.9.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.ln_f.bias": "model-00002-of-00002.safetensors",
+    "transformer.ln_f.weight": "model-00002-of-00002.safetensors",
+    "transformer.wpe.weight": "model-00001-of-00002.safetensors",
+    "transformer.wte.weight": "model-00001-of-00002.safetensors"
+  }
+}
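
This index is how the loader locates each tensor across the two shards: `metadata.total_size` is the summed size of all tensors in bytes, and `weight_map` sends every parameter name to the shard that stores it. Here layers 0-22 sit almost entirely in shard 1, while the tail of layer 22's MLP, all of layer 23, the final layer norm, and the classifier head sit in shard 2. A sketch of resolving one tensor by hand, assuming the shards are present locally:

```python
import json
from safetensors import safe_open

ckpt = "GlorIA-1.3B-all/checkpoint-100"  # illustrative local path

index = json.load(open(f"{ckpt}/model.safetensors.index.json"))
name = "classifier.weight"
shard = index["weight_map"][name]  # -> "model-00002-of-00002.safetensors"

with safe_open(f"{ckpt}/{shard}", framework="pt") as f:
    tensor = f.get_tensor(name)  # expected shape (2, 2048): one row per label

print(shard, tuple(tensor.shape))
```
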
GlorIA-1.3B-all/checkpoint-100/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e3a60bb5240f1c32adde1fb5f64a0d9b0f04a47d3d482ef2387dbad16e3cd126
+size 10524931458
GlorIA-1.3B-all/checkpoint-100/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f038db249d80ff27a0bc300d721fbbfd6aa33f22e42efc2048660f2d9f8c09f6
+size 14244
GlorIA-1.3B-all/checkpoint-100/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a49087019e82bb23b3f1ce76f4605708f8ce234de314b73586d95282d36e43d9
+size 1064
GlorIA-1.3B-all/checkpoint-100/special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+{
+  "bos_token": "<|endoftext|>",
+  "eos_token": "<|endoftext|>",
+  "pad_token": "<|endoftext|>",
+  "unk_token": "<|endoftext|>"
+}
GlorIA-1.3B-all/checkpoint-100/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
GlorIA-1.3B-all/checkpoint-100/tokenizer_config.json ADDED
@@ -0,0 +1,24 @@
+{
+  "add_prefix_space": true,
+  "added_tokens_decoder": {
+    "50257": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<|endoftext|>",
+  "clean_up_tokenization_spaces": true,
+  "eos_token": "<|endoftext|>",
+  "max_length": 512,
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "<|endoftext|>",
+  "stride": 0,
+  "tokenizer_class": "GPT2Tokenizer",
+  "truncation_side": "right",
+  "truncation_strategy": "longest_first",
+  "unk_token": "<|endoftext|>"
+}
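
Two details in this tokenizer config are worth flagging: `add_prefix_space: true`, which GPT-2's BPE needs so that the first word of a pre-tokenized input is encoded like any other word (relevant for token-level classification), and the enormous `model_max_length`, which is the sentinel `transformers` writes when no explicit limit was recorded; the practical ceiling is the model's `max_position_embeddings` of 2048 (and the `max_length: 512` stored here). A short alignment sketch under the same local-path assumption:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("GlorIA-1.3B-all/checkpoint-100")  # illustrative path

words = ["Uma", "frase", "de", "exemplo"]
enc = tok(words, is_split_into_words=True, truncation=True, max_length=512)

# word_ids() maps each sub-token back to its source word -- the usual step
# before propagating word-level pun labels to sub-token positions.
print(enc.word_ids())
```
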
GlorIA-1.3B-all/checkpoint-100/trainer_state.json ADDED
@@ -0,0 +1,47 @@
+{
+  "best_metric": 0.11806972324848175,
+  "best_model_checkpoint": "results/Gloria_all/GlorIA-1.3B/checkpoint-100",
+  "epoch": 1.1976047904191618,
+  "eval_steps": 100,
+  "global_step": 100,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.5988023952095808,
+      "grad_norm": 0.9138351082801819,
+      "learning_rate": 2.627329192546584e-05,
+      "loss": 0.1669,
+      "step": 50
+    },
+    {
+      "epoch": 1.1976047904191618,
+      "grad_norm": 0.5801578760147095,
+      "learning_rate": 2.1614906832298137e-05,
+      "loss": 0.1129,
+      "step": 100
+    },
+    {
+      "epoch": 1.1976047904191618,
+      "eval_accuracy": 0.9447699788056352,
+      "eval_f1": 0.4304367402071139,
+      "eval_loss": 0.11806972324848175,
+      "eval_precision": 0.4337568058076225,
+      "eval_recall": 0.42716711349419123,
+      "eval_runtime": 9.8143,
+      "eval_samples_per_second": 58.079,
+      "eval_steps_per_second": 2.445,
+      "step": 100
+    }
+  ],
+  "logging_steps": 50,
+  "max_steps": 332,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 4,
+  "save_steps": 100,
+  "total_flos": 1004476175736264.0,
+  "train_batch_size": 24,
+  "trial_name": null,
+  "trial_params": null
+}
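
trainer_state.json captures the Trainer's bookkeeping at this save point: evaluation every 100 steps, logging every 50, a 332-step schedule over 4 epochs with batch size 24, and a `best_metric` equal to the eval loss at step 100 (this checkpoint is also recorded as `best_model_checkpoint`). A sketch for pulling the eval history out of a checkpoint, under the same path assumption as above:

```python
import json

ckpt = "GlorIA-1.3B-all/checkpoint-100"  # illustrative local path

state = json.load(open(f"{ckpt}/trainer_state.json"))

for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(f"step {entry['step']}: eval_loss={entry['eval_loss']:.4f} "
              f"eval_f1={entry['eval_f1']:.4f}")

print("best:", state["best_model_checkpoint"], "->", state["best_metric"])
```
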
GlorIA-1.3B-all/checkpoint-100/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:05869643dba3d8000c840394135edb747c583a3773236fdcfc6185019c8f767e
+size 4920
GlorIA-1.3B-all/checkpoint-100/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
GlorIA-1.3B-all/checkpoint-200/added_tokens.json ADDED
@@ -0,0 +1,3 @@
+{
+  "<|endoftext|>": 50257
+}
GlorIA-1.3B-all/checkpoint-200/config.json ADDED
@@ -0,0 +1,86 @@
+{
+  "_name_or_path": "NOVA-vision-language/GlorIA-1.3B",
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPTNeoForTokenClassification"
+  ],
+  "attention_dropout": 0,
+  "attention_layers": [
+    "global",
+    "local",
+    "global",
+    "local",
+    "global",
+    "local",
+    "global",
+    "local",
+    "global",
+    "local",
+    "global",
+    "local",
+    "global",
+    "local",
+    "global",
+    "local",
+    "global",
+    "local",
+    "global",
+    "local",
+    "global",
+    "local",
+    "global",
+    "local"
+  ],
+  "attention_types": [
+    [
+      [
+        "global",
+        "local"
+      ],
+      12
+    ]
+  ],
+  "bos_token_id": 50256,
+  "classifier_dropout": 0.1,
+  "do_sample": true,
+  "embed_dropout": 0,
+  "eos_token_id": 50256,
+  "gradient_checkpointing": false,
+  "hidden_size": 2048,
+  "id2label": {
+    "0": "Non-Pun",
+    "1": "Pun"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": null,
+  "label2id": {
+    "Non-Pun": 0,
+    "Pun": 1
+  },
+  "layer_norm_epsilon": 1e-05,
+  "max_length": 50,
+  "max_position_embeddings": 2048,
+  "model_type": "gpt_neo",
+  "num_heads": 16,
+  "num_layers": 24,
+  "resid_dropout": 0,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "task_specific_params": {
+    "text-generation": {
+      "do_sample": true,
+      "max_length": 50,
+      "temperature": 0.9
+    }
+  },
+  "temperature": 0.9,
+  "tokenizer_class": "GPT2Tokenizer",
+  "torch_dtype": "float32",
+  "transformers_version": "4.40.2",
+  "use_cache": true,
+  "vocab_size": 50258,
+  "window_size": 256
+}
GlorIA-1.3B-all/checkpoint-200/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
GlorIA-1.3B-all/checkpoint-200/model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b3f58796d8ffa0f692fd079fa380d5b85644777b30827cd174472b1c737e56cd
+size 4993802376
GlorIA-1.3B-all/checkpoint-200/model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b925f7127a53449f15a039223922808f574952de8ad450b13fb3b21486079c92
+size 268560328
GlorIA-1.3B-all/checkpoint-200/model.safetensors.index.json ADDED
@@ -0,0 +1,325 @@
+{
+  "metadata": {
+    "total_size": 5262327816
+  },
+  "weight_map": {
+    "classifier.bias": "model-00002-of-00002.safetensors",
+    "classifier.weight": "model-00002-of-00002.safetensors",
+    "transformer.h.0.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.0.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.0.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.0.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.0.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.0.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.0.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.0.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.0.ln_2.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.0.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.0.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.0.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.0.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.1.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.1.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.1.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.1.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.1.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.1.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.1.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.1.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.1.ln_2.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.1.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.1.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.1.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.1.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.10.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.10.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.10.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.10.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.10.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.10.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.10.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.10.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.10.ln_2.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.10.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.10.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.10.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.10.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.11.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.11.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.11.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.11.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.11.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.11.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.11.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.11.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.11.ln_2.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.11.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.11.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.11.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.11.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.12.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.12.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.12.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.12.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.12.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.12.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.12.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.12.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.12.ln_2.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.12.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.12.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.12.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.12.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.13.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.13.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.13.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.13.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.13.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.13.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.13.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.13.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.13.ln_2.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.13.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.13.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.13.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.13.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.14.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.14.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.14.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.14.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.14.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.14.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.14.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.14.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.14.ln_2.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.14.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.14.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.14.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.14.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.15.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.15.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.15.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.15.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.15.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.15.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.15.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.15.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.15.ln_2.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.15.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.15.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.15.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.15.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.16.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.16.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.16.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.16.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.16.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.16.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.16.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.16.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.16.ln_2.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.16.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.16.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.16.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.16.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.17.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.17.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.17.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.17.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.17.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.17.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.17.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.17.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.17.ln_2.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.17.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.17.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.17.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.17.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.18.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.18.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.18.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.18.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.18.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.18.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.18.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.18.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.18.ln_2.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.18.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.18.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.18.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.18.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.19.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.19.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.19.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.19.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.19.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.19.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.19.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.19.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.19.ln_2.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.19.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.19.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.19.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.19.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.2.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.2.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.2.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.2.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.2.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.2.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.2.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.2.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.2.ln_2.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.2.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.2.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.2.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.2.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.20.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.20.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.20.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.20.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.20.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.20.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.20.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.20.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.20.ln_2.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.20.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.20.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.20.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.20.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.21.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.21.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.21.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.21.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.21.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.21.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.21.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.21.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.21.ln_2.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.21.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.21.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.21.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.21.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.22.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.22.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.22.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.22.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.22.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.22.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.22.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.22.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.22.ln_2.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.22.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.22.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.22.mlp.c_proj.bias": "model-00002-of-00002.safetensors",
+    "transformer.h.22.mlp.c_proj.weight": "model-00002-of-00002.safetensors",
+    "transformer.h.23.attn.attention.k_proj.weight": "model-00002-of-00002.safetensors",
+    "transformer.h.23.attn.attention.out_proj.bias": "model-00002-of-00002.safetensors",
+    "transformer.h.23.attn.attention.out_proj.weight": "model-00002-of-00002.safetensors",
+    "transformer.h.23.attn.attention.q_proj.weight": "model-00002-of-00002.safetensors",
+    "transformer.h.23.attn.attention.v_proj.weight": "model-00002-of-00002.safetensors",
+    "transformer.h.23.ln_1.bias": "model-00002-of-00002.safetensors",
+    "transformer.h.23.ln_1.weight": "model-00002-of-00002.safetensors",
+    "transformer.h.23.ln_2.bias": "model-00002-of-00002.safetensors",
+    "transformer.h.23.ln_2.weight": "model-00002-of-00002.safetensors",
+    "transformer.h.23.mlp.c_fc.bias": "model-00002-of-00002.safetensors",
+    "transformer.h.23.mlp.c_fc.weight": "model-00002-of-00002.safetensors",
+    "transformer.h.23.mlp.c_proj.bias": "model-00002-of-00002.safetensors",
+    "transformer.h.23.mlp.c_proj.weight": "model-00002-of-00002.safetensors",
+    "transformer.h.3.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.3.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.3.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.3.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.3.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.3.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.3.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.3.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.3.ln_2.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.3.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.3.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.3.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.3.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.4.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.4.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.4.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.4.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.4.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.4.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.4.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.4.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.4.ln_2.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.4.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.4.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.4.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.4.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.5.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.5.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.5.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.5.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.5.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.5.ln_1.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.5.ln_1.weight": "model-00001-of-00002.safetensors",
+    "transformer.h.5.ln_2.bias": "model-00001-of-00002.safetensors",
+    "transformer.h.5.ln_2.weight": "model-00001-of-00002.safetensors",
264
+ "transformer.h.5.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
265
+ "transformer.h.5.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
266
+ "transformer.h.5.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
267
+ "transformer.h.5.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
268
+ "transformer.h.6.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
269
+ "transformer.h.6.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
270
+ "transformer.h.6.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
271
+ "transformer.h.6.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
272
+ "transformer.h.6.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
273
+ "transformer.h.6.ln_1.bias": "model-00001-of-00002.safetensors",
274
+ "transformer.h.6.ln_1.weight": "model-00001-of-00002.safetensors",
275
+ "transformer.h.6.ln_2.bias": "model-00001-of-00002.safetensors",
276
+ "transformer.h.6.ln_2.weight": "model-00001-of-00002.safetensors",
277
+ "transformer.h.6.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
278
+ "transformer.h.6.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
279
+ "transformer.h.6.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
280
+ "transformer.h.6.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
281
+ "transformer.h.7.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
282
+ "transformer.h.7.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
283
+ "transformer.h.7.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
284
+ "transformer.h.7.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
285
+ "transformer.h.7.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
286
+ "transformer.h.7.ln_1.bias": "model-00001-of-00002.safetensors",
287
+ "transformer.h.7.ln_1.weight": "model-00001-of-00002.safetensors",
288
+ "transformer.h.7.ln_2.bias": "model-00001-of-00002.safetensors",
289
+ "transformer.h.7.ln_2.weight": "model-00001-of-00002.safetensors",
290
+ "transformer.h.7.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
291
+ "transformer.h.7.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
292
+ "transformer.h.7.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
293
+ "transformer.h.7.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
294
+ "transformer.h.8.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
295
+ "transformer.h.8.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
296
+ "transformer.h.8.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
297
+ "transformer.h.8.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
298
+ "transformer.h.8.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
299
+ "transformer.h.8.ln_1.bias": "model-00001-of-00002.safetensors",
300
+ "transformer.h.8.ln_1.weight": "model-00001-of-00002.safetensors",
301
+ "transformer.h.8.ln_2.bias": "model-00001-of-00002.safetensors",
302
+ "transformer.h.8.ln_2.weight": "model-00001-of-00002.safetensors",
303
+ "transformer.h.8.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
304
+ "transformer.h.8.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
305
+ "transformer.h.8.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
306
+ "transformer.h.8.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
307
+ "transformer.h.9.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
308
+ "transformer.h.9.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
309
+ "transformer.h.9.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
310
+ "transformer.h.9.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
311
+ "transformer.h.9.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
312
+ "transformer.h.9.ln_1.bias": "model-00001-of-00002.safetensors",
313
+ "transformer.h.9.ln_1.weight": "model-00001-of-00002.safetensors",
314
+ "transformer.h.9.ln_2.bias": "model-00001-of-00002.safetensors",
315
+ "transformer.h.9.ln_2.weight": "model-00001-of-00002.safetensors",
316
+ "transformer.h.9.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
317
+ "transformer.h.9.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
318
+ "transformer.h.9.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
319
+ "transformer.h.9.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
320
+ "transformer.ln_f.bias": "model-00002-of-00002.safetensors",
321
+ "transformer.ln_f.weight": "model-00002-of-00002.safetensors",
322
+ "transformer.wpe.weight": "model-00001-of-00002.safetensors",
323
+ "transformer.wte.weight": "model-00001-of-00002.safetensors"
324
+ }
325
+ }
GlorIA-1.3B-all/checkpoint-200/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8ec993943488399196384844739bd708348d650a8fe0a9f13d5b740600dff256
+ size 10524931458
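
Note: this entry (like the other binaries in this commit) is stored as a Git LFS pointer rather than the blob itself: version identifies the pointer spec, oid sha256 is the content hash, and size is the byte count of the real file. At ~10.5 GB the optimizer state is roughly twice the ~5.26 GB fp32 model, as expected for an Adam-style optimizer keeping two moment buffers per parameter. To materialize a single checkpoint locally one would fetch the objects with something like `git lfs pull --include="GlorIA-1.3B-all/checkpoint-200/*"`.
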
GlorIA-1.3B-all/checkpoint-200/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2370664a207da4c2bdd1dd94a13cc4caf1852e69baf63968a33e13715ea772cb
+ size 14244
GlorIA-1.3B-all/checkpoint-200/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fc8a78682527ae46a5bfacd57e3fc75f835cc516aa231ca4e4a10c95af826227
+ size 1064
GlorIA-1.3B-all/checkpoint-200/special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "bos_token": "<|endoftext|>",
+ "eos_token": "<|endoftext|>",
+ "pad_token": "<|endoftext|>",
+ "unk_token": "<|endoftext|>"
+ }
GlorIA-1.3B-all/checkpoint-200/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
GlorIA-1.3B-all/checkpoint-200/tokenizer_config.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "add_prefix_space": true,
+ "added_tokens_decoder": {
+ "50257": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<|endoftext|>",
+ "clean_up_tokenization_spaces": true,
+ "eos_token": "<|endoftext|>",
+ "max_length": 512,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "<|endoftext|>",
+ "stride": 0,
+ "tokenizer_class": "GPT2Tokenizer",
+ "truncation_side": "right",
+ "truncation_strategy": "longest_first",
+ "unk_token": "<|endoftext|>"
+ }
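
Note: both special_tokens_map.json and tokenizer_config.json map all four special tokens (bos/eos/pad/unk) to the single added token <|endoftext|> at id 50257 — the usual GPT-2-style setup where padding reuses the EOS token instead of a dedicated pad token. A quick sanity check, assuming the checkpoint directory is available locally:

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("GlorIA-1.3B-all/checkpoint-200")
    print(tok.pad_token, tok.pad_token_id)        # <|endoftext|> 50257
    print(tok.eos_token_id == tok.pad_token_id)   # True
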
GlorIA-1.3B-all/checkpoint-200/trainer_state.json ADDED
@@ -0,0 +1,73 @@
+ {
+ "best_metric": 0.11806972324848175,
+ "best_model_checkpoint": "results/Gloria_all/GlorIA-1.3B/checkpoint-100",
+ "epoch": 2.3952095808383236,
+ "eval_steps": 100,
+ "global_step": 200,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.5988023952095808,
+ "grad_norm": 0.9138351082801819,
+ "learning_rate": 2.627329192546584e-05,
+ "loss": 0.1669,
+ "step": 50
+ },
+ {
+ "epoch": 1.1976047904191618,
+ "grad_norm": 0.5801578760147095,
+ "learning_rate": 2.1614906832298137e-05,
+ "loss": 0.1129,
+ "step": 100
+ },
+ {
+ "epoch": 1.1976047904191618,
+ "eval_accuracy": 0.9447699788056352,
+ "eval_f1": 0.4304367402071139,
+ "eval_loss": 0.11806972324848175,
+ "eval_precision": 0.4337568058076225,
+ "eval_recall": 0.42716711349419123,
+ "eval_runtime": 9.8143,
+ "eval_samples_per_second": 58.079,
+ "eval_steps_per_second": 2.445,
+ "step": 100
+ },
+ {
+ "epoch": 1.7964071856287425,
+ "grad_norm": 0.3607296645641327,
+ "learning_rate": 1.6956521739130433e-05,
+ "loss": 0.0915,
+ "step": 150
+ },
+ {
+ "epoch": 2.3952095808383236,
+ "grad_norm": 0.4452211260795593,
+ "learning_rate": 1.2298136645962733e-05,
+ "loss": 0.063,
+ "step": 200
+ },
+ {
+ "epoch": 2.3952095808383236,
+ "eval_accuracy": 0.9409051240493704,
+ "eval_f1": 0.40241075567918405,
+ "eval_loss": 0.18597252666950226,
+ "eval_precision": 0.41811175337186895,
+ "eval_recall": 0.387846291331546,
+ "eval_runtime": 9.9454,
+ "eval_samples_per_second": 57.313,
+ "eval_steps_per_second": 2.413,
+ "step": 200
+ }
+ ],
+ "logging_steps": 50,
+ "max_steps": 332,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 4,
+ "save_steps": 100,
+ "total_flos": 2009952955744776.0,
+ "train_batch_size": 24,
+ "trial_name": null,
+ "trial_params": null
+ }
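
Note: trainer_state.json records why best_model_checkpoint still points at checkpoint-100 — eval_loss is 0.118 at step 100 but 0.186 at step 200 while the training loss keeps falling, a classic overfitting signature. The best checkpoint can be recovered programmatically from log_history; a small sketch, assuming a local copy of the file:

    import json

    with open("GlorIA-1.3B-all/checkpoint-200/trainer_state.json") as f:
        state = json.load(f)

    # keep only the evaluation entries and pick the lowest eval_loss
    evals = [e for e in state["log_history"] if "eval_loss" in e]
    best = min(evals, key=lambda e: e["eval_loss"])
    print(best["step"], round(best["eval_loss"], 4))  # 100 0.1181
    print(state["best_model_checkpoint"])             # .../checkpoint-100
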
GlorIA-1.3B-all/checkpoint-200/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:05869643dba3d8000c840394135edb747c583a3773236fdcfc6185019c8f767e
+ size 4920
GlorIA-1.3B-all/checkpoint-200/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
GlorIA-1.3B-all/checkpoint-300/added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+ "<|endoftext|>": 50257
+ }
GlorIA-1.3B-all/checkpoint-300/config.json ADDED
@@ -0,0 +1,86 @@
+ {
+ "_name_or_path": "NOVA-vision-language/GlorIA-1.3B",
+ "activation_function": "gelu_new",
+ "architectures": [
+ "GPTNeoForTokenClassification"
+ ],
+ "attention_dropout": 0,
+ "attention_layers": [
+ "global",
+ "local",
+ "global",
+ "local",
+ "global",
+ "local",
+ "global",
+ "local",
+ "global",
+ "local",
+ "global",
+ "local",
+ "global",
+ "local",
+ "global",
+ "local",
+ "global",
+ "local",
+ "global",
+ "local",
+ "global",
+ "local",
+ "global",
+ "local"
+ ],
+ "attention_types": [
+ [
+ [
+ "global",
+ "local"
+ ],
+ 12
+ ]
+ ],
+ "bos_token_id": 50256,
+ "classifier_dropout": 0.1,
+ "do_sample": true,
+ "embed_dropout": 0,
+ "eos_token_id": 50256,
+ "gradient_checkpointing": false,
+ "hidden_size": 2048,
+ "id2label": {
+ "0": "Non-Pun",
+ "1": "Pun"
+ },
+ "initializer_range": 0.02,
+ "intermediate_size": null,
+ "label2id": {
+ "Non-Pun": 0,
+ "Pun": 1
+ },
+ "layer_norm_epsilon": 1e-05,
+ "max_length": 50,
+ "max_position_embeddings": 2048,
+ "model_type": "gpt_neo",
+ "num_heads": 16,
+ "num_layers": 24,
+ "resid_dropout": 0,
+ "summary_activation": null,
+ "summary_first_dropout": 0.1,
+ "summary_proj_to_labels": true,
+ "summary_type": "cls_index",
+ "summary_use_proj": true,
+ "task_specific_params": {
+ "text-generation": {
+ "do_sample": true,
+ "max_length": 50,
+ "temperature": 0.9
+ }
+ },
+ "temperature": 0.9,
+ "tokenizer_class": "GPT2Tokenizer",
+ "torch_dtype": "float32",
+ "transformers_version": "4.40.2",
+ "use_cache": true,
+ "vocab_size": 50258,
+ "window_size": 256
+ }
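
Note: the config keeps GlorIA's GPT-Neo backbone (24 layers alternating global/local attention, hidden size 2048) but swaps the head — architectures lists GPTNeoForTokenClassification with two labels, Pun vs Non-Pun, so the model tags each token rather than generating text. A minimal inference sketch, assuming a local checkpoint and a transformers version matching the config (4.40.2); the example sentence is arbitrary:

    import torch
    from transformers import AutoModelForTokenClassification, AutoTokenizer

    ckpt = "GlorIA-1.3B-all/checkpoint-300"  # assumes a local clone of this repo
    tok = AutoTokenizer.from_pretrained(ckpt)
    model = AutoModelForTokenClassification.from_pretrained(ckpt).eval()

    inputs = tok("uma frase de exemplo", return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits              # shape (1, seq_len, 2)
    pred = logits.argmax(-1)[0].tolist()
    print([model.config.id2label[i] for i in pred])  # "Pun" / "Non-Pun" per token
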
GlorIA-1.3B-all/checkpoint-300/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
GlorIA-1.3B-all/checkpoint-300/model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:51ccbd8ee3a7bcd714595ff76c2c84bcf870f64454d193024d8e9e7808e20d5f
+ size 4993802376
GlorIA-1.3B-all/checkpoint-300/model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:967f7433dba88e011a7b9d8311bce3e15a06c755c86cbeb63632759a9a959cff
+ size 268560328
GlorIA-1.3B-all/checkpoint-300/model.safetensors.index.json ADDED
@@ -0,0 +1,325 @@
+ {
+ "metadata": {
+ "total_size": 5262327816
+ },
+ "weight_map": {
+ "classifier.bias": "model-00002-of-00002.safetensors",
+ "classifier.weight": "model-00002-of-00002.safetensors",
+ "transformer.h.0.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.0.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.0.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.0.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.0.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.0.ln_1.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.0.ln_1.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.0.ln_2.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.0.ln_2.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.0.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.0.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.0.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.0.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.1.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.1.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.1.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.1.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.1.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.1.ln_1.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.1.ln_1.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.1.ln_2.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.1.ln_2.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.1.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.1.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.1.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.1.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.10.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.10.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.10.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.10.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.10.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.10.ln_1.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.10.ln_1.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.10.ln_2.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.10.ln_2.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.10.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.10.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.10.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.10.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.11.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.11.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.11.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.11.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.11.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.11.ln_1.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.11.ln_1.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.11.ln_2.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.11.ln_2.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.11.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.11.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.11.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.11.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.12.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.12.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.12.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.12.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.12.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.12.ln_1.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.12.ln_1.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.12.ln_2.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.12.ln_2.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.12.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.12.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.12.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.12.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.13.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.13.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.13.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.13.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.13.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.13.ln_1.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.13.ln_1.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.13.ln_2.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.13.ln_2.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.13.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.13.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.13.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.13.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.14.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.14.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.14.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.14.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.14.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.14.ln_1.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.14.ln_1.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.14.ln_2.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.14.ln_2.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.14.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.14.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.14.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.14.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.15.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.15.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.15.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.15.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.15.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.15.ln_1.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.15.ln_1.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.15.ln_2.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.15.ln_2.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.15.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.15.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.15.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.15.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.16.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.16.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.16.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.16.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.16.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.16.ln_1.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.16.ln_1.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.16.ln_2.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.16.ln_2.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.16.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.16.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.16.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.16.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.17.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.17.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.17.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.17.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.17.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.17.ln_1.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.17.ln_1.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.17.ln_2.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.17.ln_2.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.17.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.17.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.17.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.17.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.18.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.18.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.18.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.18.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.18.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.18.ln_1.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.18.ln_1.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.18.ln_2.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.18.ln_2.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.18.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.18.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.18.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.18.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.19.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.19.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.19.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.19.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.19.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.19.ln_1.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.19.ln_1.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.19.ln_2.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.19.ln_2.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.19.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.19.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.19.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.19.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.2.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.2.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.2.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.2.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.2.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.2.ln_1.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.2.ln_1.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.2.ln_2.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.2.ln_2.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.2.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.2.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.2.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.2.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.20.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.20.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.20.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.20.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.20.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.20.ln_1.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.20.ln_1.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.20.ln_2.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.20.ln_2.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.20.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.20.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.20.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.20.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.21.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.21.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.21.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.21.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.21.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.21.ln_1.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.21.ln_1.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.21.ln_2.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.21.ln_2.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.21.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.21.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.21.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.21.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.22.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.22.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.22.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.22.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.22.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.22.ln_1.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.22.ln_1.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.22.ln_2.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.22.ln_2.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.22.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.22.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.22.mlp.c_proj.bias": "model-00002-of-00002.safetensors",
+ "transformer.h.22.mlp.c_proj.weight": "model-00002-of-00002.safetensors",
+ "transformer.h.23.attn.attention.k_proj.weight": "model-00002-of-00002.safetensors",
+ "transformer.h.23.attn.attention.out_proj.bias": "model-00002-of-00002.safetensors",
+ "transformer.h.23.attn.attention.out_proj.weight": "model-00002-of-00002.safetensors",
+ "transformer.h.23.attn.attention.q_proj.weight": "model-00002-of-00002.safetensors",
+ "transformer.h.23.attn.attention.v_proj.weight": "model-00002-of-00002.safetensors",
+ "transformer.h.23.ln_1.bias": "model-00002-of-00002.safetensors",
+ "transformer.h.23.ln_1.weight": "model-00002-of-00002.safetensors",
+ "transformer.h.23.ln_2.bias": "model-00002-of-00002.safetensors",
+ "transformer.h.23.ln_2.weight": "model-00002-of-00002.safetensors",
+ "transformer.h.23.mlp.c_fc.bias": "model-00002-of-00002.safetensors",
+ "transformer.h.23.mlp.c_fc.weight": "model-00002-of-00002.safetensors",
+ "transformer.h.23.mlp.c_proj.bias": "model-00002-of-00002.safetensors",
+ "transformer.h.23.mlp.c_proj.weight": "model-00002-of-00002.safetensors",
+ "transformer.h.3.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.3.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.3.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.3.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.3.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.3.ln_1.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.3.ln_1.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.3.ln_2.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.3.ln_2.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.3.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.3.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.3.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.3.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.4.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.4.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.4.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.4.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.4.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.4.ln_1.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.4.ln_1.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.4.ln_2.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.4.ln_2.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.4.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.4.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.4.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.4.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.5.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.5.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.5.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.5.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.5.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.5.ln_1.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.5.ln_1.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.5.ln_2.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.5.ln_2.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.5.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.5.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.5.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.5.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.6.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.6.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.6.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.6.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.6.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.6.ln_1.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.6.ln_1.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.6.ln_2.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.6.ln_2.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.6.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.6.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.6.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.6.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.7.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.7.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.7.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.7.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.7.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.7.ln_1.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.7.ln_1.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.7.ln_2.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.7.ln_2.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.7.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.7.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.7.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.7.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.8.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.8.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.8.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.8.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.8.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.8.ln_1.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.8.ln_1.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.8.ln_2.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.8.ln_2.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.8.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.8.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.8.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.8.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.9.attn.attention.k_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.9.attn.attention.out_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.9.attn.attention.out_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.9.attn.attention.q_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.9.attn.attention.v_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.9.ln_1.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.9.ln_1.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.9.ln_2.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.9.ln_2.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.9.mlp.c_fc.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.9.mlp.c_fc.weight": "model-00001-of-00002.safetensors",
+ "transformer.h.9.mlp.c_proj.bias": "model-00001-of-00002.safetensors",
+ "transformer.h.9.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
+ "transformer.ln_f.bias": "model-00002-of-00002.safetensors",
+ "transformer.ln_f.weight": "model-00002-of-00002.safetensors",
+ "transformer.wpe.weight": "model-00001-of-00002.safetensors",
+ "transformer.wte.weight": "model-00001-of-00002.safetensors"
+ }
+ }
GlorIA-1.3B-all/checkpoint-300/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:259779368b471b3857945542415196adb7c18431215ff66a01b84bad405672fb
+ size 10524931458
GlorIA-1.3B-all/checkpoint-300/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:da984250da42e29c169566973464f20eb13559ac7514308b48c172f6c5f3beea
+ size 14244
GlorIA-1.3B-all/checkpoint-300/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b55a77f0e3a97e931e1003efe5938b313a0bb124e4fdfe7b9cda381a55663851
+ size 1064
GlorIA-1.3B-all/checkpoint-300/special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "bos_token": "<|endoftext|>",
+ "eos_token": "<|endoftext|>",
+ "pad_token": "<|endoftext|>",
+ "unk_token": "<|endoftext|>"
+ }
GlorIA-1.3B-all/checkpoint-300/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
GlorIA-1.3B-all/checkpoint-300/tokenizer_config.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "add_prefix_space": true,
+ "added_tokens_decoder": {
+ "50257": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<|endoftext|>",
+ "clean_up_tokenization_spaces": true,
+ "eos_token": "<|endoftext|>",
+ "max_length": 512,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "<|endoftext|>",
+ "stride": 0,
+ "tokenizer_class": "GPT2Tokenizer",
+ "truncation_side": "right",
+ "truncation_strategy": "longest_first",
+ "unk_token": "<|endoftext|>"
+ }
GlorIA-1.3B-all/checkpoint-300/trainer_state.json ADDED
@@ -0,0 +1,99 @@
+ {
+ "best_metric": 0.11806972324848175,
+ "best_model_checkpoint": "results/Gloria_all/GlorIA-1.3B/checkpoint-100",
+ "epoch": 3.592814371257485,
+ "eval_steps": 100,
+ "global_step": 300,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.5988023952095808,
+ "grad_norm": 0.9138351082801819,
+ "learning_rate": 2.627329192546584e-05,
+ "loss": 0.1669,
+ "step": 50
+ },
+ {
+ "epoch": 1.1976047904191618,
+ "grad_norm": 0.5801578760147095,
+ "learning_rate": 2.1614906832298137e-05,
+ "loss": 0.1129,
+ "step": 100
+ },
+ {
+ "epoch": 1.1976047904191618,
+ "eval_accuracy": 0.9447699788056352,
+ "eval_f1": 0.4304367402071139,
+ "eval_loss": 0.11806972324848175,
+ "eval_precision": 0.4337568058076225,
+ "eval_recall": 0.42716711349419123,
+ "eval_runtime": 9.8143,
+ "eval_samples_per_second": 58.079,
+ "eval_steps_per_second": 2.445,
+ "step": 100
+ },
+ {
+ "epoch": 1.7964071856287425,
+ "grad_norm": 0.3607296645641327,
+ "learning_rate": 1.6956521739130433e-05,
+ "loss": 0.0915,
+ "step": 150
+ },
+ {
+ "epoch": 2.3952095808383236,
+ "grad_norm": 0.4452211260795593,
+ "learning_rate": 1.2298136645962733e-05,
+ "loss": 0.063,
+ "step": 200
+ },
+ {
+ "epoch": 2.3952095808383236,
+ "eval_accuracy": 0.9409051240493704,
+ "eval_f1": 0.40241075567918405,
+ "eval_loss": 0.18597252666950226,
+ "eval_precision": 0.41811175337186895,
+ "eval_recall": 0.387846291331546,
+ "eval_runtime": 9.9454,
+ "eval_samples_per_second": 57.313,
+ "eval_steps_per_second": 2.413,
+ "step": 200
+ },
+ {
+ "epoch": 2.9940119760479043,
+ "grad_norm": 0.5937349200248718,
+ "learning_rate": 7.63975155279503e-06,
+ "loss": 0.0551,
+ "step": 250
+ },
+ {
+ "epoch": 3.592814371257485,
+ "grad_norm": 0.6216064691543579,
+ "learning_rate": 2.981366459627329e-06,
+ "loss": 0.0292,
+ "step": 300
+ },
+ {
+ "epoch": 3.592814371257485,
+ "eval_accuracy": 0.940157087644932,
+ "eval_f1": 0.4222124283825474,
+ "eval_loss": 0.20892249047756195,
+ "eval_precision": 0.4165217391304348,
+ "eval_recall": 0.4280607685433423,
+ "eval_runtime": 9.707,
+ "eval_samples_per_second": 58.721,
+ "eval_steps_per_second": 2.472,
+ "step": 300
+ }
+ ],
+ "logging_steps": 50,
+ "max_steps": 332,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 4,
+ "save_steps": 100,
+ "total_flos": 3032048467579320.0,
+ "train_batch_size": 24,
+ "trial_name": null,
+ "trial_params": null
+ }
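
Note: across both trainer states the logged learning rates fall by about 4.66e-6 every 50 steps (2.627e-5 at step 50, 2.161e-5 at step 100, down to 2.981e-6 at step 300), i.e. roughly 9.32e-8 per step, which extrapolates to zero exactly at step 332 = max_steps. This is consistent with the Trainer's default linear decay schedule from an implied peak learning rate of about 3.09e-5 at step 0. The eval_loss also keeps rising here (0.209 at step 300 vs 0.118 at step 100), so best_model_checkpoint remains checkpoint-100.
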
GlorIA-1.3B-all/checkpoint-300/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:05869643dba3d8000c840394135edb747c583a3773236fdcfc6185019c8f767e
+ size 4920
GlorIA-1.3B-all/checkpoint-300/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
GlorIA-1.3B-positive/checkpoint-100/added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+ "<|endoftext|>": 50257
+ }
GlorIA-1.3B-positive/checkpoint-100/config.json ADDED
@@ -0,0 +1,86 @@
+ {
+ "_name_or_path": "NOVA-vision-language/GlorIA-1.3B",
+ "activation_function": "gelu_new",
+ "architectures": [
+ "GPTNeoForTokenClassification"
+ ],
+ "attention_dropout": 0,
+ "attention_layers": [
+ "global",
+ "local",
+ "global",
+ "local",
+ "global",
+ "local",
+ "global",
+ "local",
+ "global",
+ "local",
+ "global",
+ "local",
+ "global",
+ "local",
+ "global",
+ "local",
+ "global",
+ "local",
+ "global",
+ "local",
+ "global",
+ "local",
+ "global",
+ "local"
+ ],
+ "attention_types": [
+ [
+ [
+ "global",
+ "local"
+ ],
+ 12
+ ]
+ ],
+ "bos_token_id": 50256,
+ "classifier_dropout": 0.1,
+ "do_sample": true,
+ "embed_dropout": 0,
+ "eos_token_id": 50256,
+ "gradient_checkpointing": false,
+ "hidden_size": 2048,
+ "id2label": {
+ "0": "Non-Pun",
+ "1": "Pun"
+ },
+ "initializer_range": 0.02,
+ "intermediate_size": null,
+ "label2id": {
+ "Non-Pun": 0,
+ "Pun": 1
+ },
+ "layer_norm_epsilon": 1e-05,
+ "max_length": 50,
+ "max_position_embeddings": 2048,
+ "model_type": "gpt_neo",
+ "num_heads": 16,
+ "num_layers": 24,
+ "resid_dropout": 0,
+ "summary_activation": null,
+ "summary_first_dropout": 0.1,
+ "summary_proj_to_labels": true,
+ "summary_type": "cls_index",
+ "summary_use_proj": true,
+ "task_specific_params": {
+ "text-generation": {
+ "do_sample": true,
+ "max_length": 50,
+ "temperature": 0.9
+ }
+ },
+ "temperature": 0.9,
+ "tokenizer_class": "GPT2Tokenizer",
+ "torch_dtype": "float32",
+ "transformers_version": "4.40.2",
+ "use_cache": true,
+ "vocab_size": 50258,
+ "window_size": 256
+ }
GlorIA-1.3B-positive/checkpoint-100/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
GlorIA-1.3B-positive/checkpoint-100/model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a6f98c3ddac41ef533e08e341232e25c42abb9a0e4711cb5284c4b1f16d19b91
+ size 4993802376
GlorIA-1.3B-positive/checkpoint-100/model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:97cdf80b4de968a5945c3660b53db49213fa656d3cb9081ac691adbd20ae0ed6
+ size 268560328