swritchie committed on
Commit f465717
1 Parent(s): 88a4919

Upload folder using huggingface_hub

README.md ADDED
@@ -0,0 +1,19 @@
+ ---
+ library_name: keras-nlp
+ pipeline_tag: text-generation
+ ---
+ This is a [`GPT2` model](https://keras.io/api/keras_nlp/models/gpt2) uploaded using the KerasNLP library; it can be used with the JAX, TensorFlow, and PyTorch backends.
+ This model is intended for the `CausalLM` task.
+
+ Model config:
+ * **name:** gpt2_backbone
+ * **trainable:** True
+ * **vocabulary_size:** 50257
+ * **num_layers:** 12
+ * **num_heads:** 12
+ * **hidden_dim:** 768
+ * **intermediate_dim:** 3072
+ * **dropout:** 0.1
+ * **max_sequence_length:** 1024
+
+ This model card has been generated automatically and should be completed by the model author. See [Model Cards documentation](https://huggingface.co/docs/hub/model-cards) for more information.
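For reference, a minimal loading sketch (not part of the upload). The repo id `swritchie/gpt2-keras` is a placeholder for wherever this commit lives; the backend switch and `from_preset`/`generate` calls are standard Keras 3 / KerasNLP usage.

```python
import os

# Keras 3 picks its backend from this variable; any of the three works.
os.environ["KERAS_BACKEND"] = "jax"  # or "tensorflow", "torch"

import keras_nlp

# `from_preset` with an `hf://` URI pulls the task config, preprocessor,
# tokenizer assets, and weights uploaded in this commit.
# NOTE: "swritchie/gpt2-keras" is a placeholder repo id.
causal_lm = keras_nlp.models.GPT2CausalLM.from_preset("hf://swritchie/gpt2-keras")
print(causal_lm.generate("The weather today is", max_length=30))
```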
assets/tokenizer/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
assets/tokenizer/vocabulary.json ADDED
The diff for this file is too large to render. See raw diff
 
config.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "module": "keras_nlp.src.models.gpt2.gpt2_backbone",
+   "class_name": "GPT2Backbone",
+   "config": {
+     "name": "gpt2_backbone",
+     "trainable": true,
+     "vocabulary_size": 50257,
+     "num_layers": 12,
+     "num_heads": 12,
+     "hidden_dim": 768,
+     "intermediate_dim": 3072,
+     "dropout": 0.1,
+     "max_sequence_length": 1024
+   },
+   "registered_name": "keras_nlp>GPT2Backbone"
+ }
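The `config` block maps one-to-one onto the `GPT2Backbone` constructor. A sketch of building the same architecture directly (randomly initialized; the trained weights live in `model.weights.h5`):

```python
import keras_nlp

# Same architecture as config.json; each key is a constructor argument.
backbone = keras_nlp.models.GPT2Backbone(
    vocabulary_size=50257,
    num_layers=12,
    num_heads=12,
    hidden_dim=768,
    intermediate_dim=3072,
    dropout=0.1,
    max_sequence_length=1024,
)
backbone.summary()  # should report roughly 124.4M parameters
```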
metadata.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "keras_version": "3.4.1",
+   "keras_nlp_version": "0.14.4",
+   "parameter_count": 124439808,
+   "date_saved": "2024-08-21@13:35:30"
+ }
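The `parameter_count` is consistent with the standard GPT-2 small layout implied by config.json. A back-of-the-envelope check (my arithmetic, not part of the upload):

```python
# GPT-2 small parameter count from the config.json hyperparameters.
vocab, seq, h, ff, layers = 50257, 1024, 768, 3072, 12

embeddings = vocab * h + seq * h                 # token + position tables
attention  = (h * 3 * h + 3 * h) + (h * h + h)   # fused QKV + output proj
mlp        = (h * ff + ff) + (ff * h + h)        # up- and down-projection
layernorms = 2 * (2 * h)                         # two LayerNorms per block
per_block  = attention + mlp + layernorms

total = embeddings + layers * per_block + 2 * h  # + final LayerNorm
print(total)  # 124439808, matching parameter_count above
```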
model.weights.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:81049319cc471db61f93acc6bebcb185299a263050b214df13974b66a1981393
+ size 498160592
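The file committed here is only a Git LFS pointer; the ~498 MB HDF5 blob it references is fetched on checkout or download. A sketch of verifying a downloaded copy against the `oid` digest above:

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file so the ~498 MB blob never sits fully in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(chunk_size):
            digest.update(chunk)
    return digest.hexdigest()

# Path assumes a local download of the resolved (non-pointer) file.
expected = "81049319cc471db61f93acc6bebcb185299a263050b214df13974b66a1981393"
assert sha256_of("model.weights.h5") == expected
```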
preprocessor.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "module": "keras_nlp.src.models.gpt2.gpt2_causal_lm_preprocessor",
+   "class_name": "GPT2CausalLMPreprocessor",
+   "config": {
+     "name": "gpt2_causal_lm_preprocessor",
+     "trainable": true,
+     "dtype": {
+       "module": "keras",
+       "class_name": "DTypePolicy",
+       "config": {
+         "name": "float32"
+       },
+       "registered_name": null
+     },
+     "tokenizer": {
+       "module": "keras_nlp.src.models.gpt2.gpt2_tokenizer",
+       "class_name": "GPT2Tokenizer",
+       "config": {
+         "name": "gpt2_tokenizer",
+         "trainable": true,
+         "dtype": {
+           "module": "keras",
+           "class_name": "DTypePolicy",
+           "config": {
+             "name": "int32"
+           },
+           "registered_name": null
+         },
+         "sequence_length": null,
+         "add_prefix_space": false
+       },
+       "registered_name": "keras_nlp>GPT2Tokenizer"
+     },
+     "sequence_length": 1024,
+     "add_start_token": true,
+     "add_end_token": true
+   },
+   "registered_name": "keras_nlp>GPT2CausalLMPreprocessor"
+ }
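A sketch of what this preprocessor config does at runtime: raw strings in, packed causal-LM training tuples out. The `tokenizer` variable is assumed to be a `GPT2Tokenizer` already loaded from `assets/tokenizer/` (see the sketch under tokenizer.json below).

```python
import keras_nlp

# Mirrors the "config" block above: pad/truncate to 1024 tokens and
# wrap each sample in the GPT-2 start/end token.
preprocessor = keras_nlp.models.GPT2CausalLMPreprocessor(
    tokenizer=tokenizer,  # assumed: a loaded GPT2Tokenizer
    sequence_length=1024,
    add_start_token=True,
    add_end_token=True,
)

x, y, sample_weight = preprocessor(["a quick brown fox"])
# x["token_ids"] / x["padding_mask"] feed the backbone; y is the same
# token stream shifted one position for next-token prediction.
```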
task.json ADDED
@@ -0,0 +1,63 @@
+ {
+   "module": "keras_nlp.src.models.gpt2.gpt2_causal_lm",
+   "class_name": "GPT2CausalLM",
+   "config": {
+     "backbone": {
+       "module": "keras_nlp.src.models.gpt2.gpt2_backbone",
+       "class_name": "GPT2Backbone",
+       "config": {
+         "name": "gpt2_backbone",
+         "trainable": true,
+         "vocabulary_size": 50257,
+         "num_layers": 12,
+         "num_heads": 12,
+         "hidden_dim": 768,
+         "intermediate_dim": 3072,
+         "dropout": 0.1,
+         "max_sequence_length": 1024
+       },
+       "registered_name": "keras_nlp>GPT2Backbone"
+     },
+     "preprocessor": {
+       "module": "keras_nlp.src.models.gpt2.gpt2_causal_lm_preprocessor",
+       "class_name": "GPT2CausalLMPreprocessor",
+       "config": {
+         "name": "gpt2_causal_lm_preprocessor",
+         "trainable": true,
+         "dtype": {
+           "module": "keras",
+           "class_name": "DTypePolicy",
+           "config": {
+             "name": "float32"
+           },
+           "registered_name": null
+         },
+         "tokenizer": {
+           "module": "keras_nlp.src.models.gpt2.gpt2_tokenizer",
+           "class_name": "GPT2Tokenizer",
+           "config": {
+             "name": "gpt2_tokenizer",
+             "trainable": true,
+             "dtype": {
+               "module": "keras",
+               "class_name": "DTypePolicy",
+               "config": {
+                 "name": "int32"
+               },
+               "registered_name": null
+             },
+             "sequence_length": null,
+             "add_prefix_space": false
+           },
+           "registered_name": "keras_nlp>GPT2Tokenizer"
+         },
+         "sequence_length": 1024,
+         "add_start_token": true,
+         "add_end_token": true
+       },
+       "registered_name": "keras_nlp>GPT2CausalLMPreprocessor"
+     },
+     "name": "gpt2_causal_lm"
+   },
+   "registered_name": "keras_nlp>GPT2CausalLM"
+ }
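task.json is a standard Keras 3 serialization payload, so in principle the whole `GPT2CausalLM` (backbone plus preprocessor config) can be rebuilt with the generic deserializer. This is a speculative sketch of that path; normally `from_preset` handles all of it, including attaching the tokenizer assets and weights:

```python
import json

import keras
import keras_nlp  # noqa: F401  (registers the "keras_nlp>..." classes)

with open("task.json") as f:
    task_spec = json.load(f)

# Rebuilds the backbone + preprocessor structure from the config above.
causal_lm = keras.saving.deserialize_keras_object(task_spec)
# The trained weights are stored separately, keyed to the backbone.
causal_lm.backbone.load_weights("model.weights.h5")
```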
tokenizer.json ADDED
@@ -0,0 +1,19 @@
+ {
+   "module": "keras_nlp.src.models.gpt2.gpt2_tokenizer",
+   "class_name": "GPT2Tokenizer",
+   "config": {
+     "name": "gpt2_tokenizer",
+     "trainable": true,
+     "dtype": {
+       "module": "keras",
+       "class_name": "DTypePolicy",
+       "config": {
+         "name": "int32"
+       },
+       "registered_name": null
+     },
+     "sequence_length": null,
+     "add_prefix_space": false
+   },
+   "registered_name": "keras_nlp>GPT2Tokenizer"
+ }
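The tokenizer config is completed by the BPE assets committed under `assets/tokenizer/` (the vocabulary and merges diffs were too large to render above). A sketch of loading it standalone from a local checkout:

```python
import keras_nlp

# `vocabulary` and `merges` accept file paths to the committed assets.
tokenizer = keras_nlp.models.GPT2Tokenizer(
    vocabulary="assets/tokenizer/vocabulary.json",
    merges="assets/tokenizer/merges.txt",
)

ids = tokenizer("a quick brown fox")
# With "sequence_length": null the output is unpadded; detokenize
# round-trips back to the original string.
print(tokenizer.detokenize(ids))
```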