matthoffner committed
Commit 7b38f15 · 1 Parent(s): ecca34d

Upload 25 files

Files changed (26)
  1. .gitattributes +1 -0
  2. gpt_bigcode-santacoder-q3f16_0/gpt_bigcode-santacoder-q3f16_0-metal.so +3 -0
  3. gpt_bigcode-santacoder-q3f16_0/mod_cache_before_build_metal.pkl +3 -0
  4. gpt_bigcode-santacoder-q3f16_0/params/mlc-chat-config.json +14 -0
  5. gpt_bigcode-santacoder-q3f16_0/params/ndarray-cache.json +0 -0
  6. gpt_bigcode-santacoder-q3f16_0/params/params_shard_0.bin +3 -0
  7. gpt_bigcode-santacoder-q3f16_0/params/params_shard_1.bin +3 -0
  8. gpt_bigcode-santacoder-q3f16_0/params/params_shard_10.bin +3 -0
  9. gpt_bigcode-santacoder-q3f16_0/params/params_shard_11.bin +3 -0
  10. gpt_bigcode-santacoder-q3f16_0/params/params_shard_12.bin +3 -0
  11. gpt_bigcode-santacoder-q3f16_0/params/params_shard_13.bin +3 -0
  12. gpt_bigcode-santacoder-q3f16_0/params/params_shard_14.bin +3 -0
  13. gpt_bigcode-santacoder-q3f16_0/params/params_shard_15.bin +3 -0
  14. gpt_bigcode-santacoder-q3f16_0/params/params_shard_16.bin +3 -0
  15. gpt_bigcode-santacoder-q3f16_0/params/params_shard_17.bin +3 -0
  16. gpt_bigcode-santacoder-q3f16_0/params/params_shard_18.bin +3 -0
  17. gpt_bigcode-santacoder-q3f16_0/params/params_shard_2.bin +3 -0
  18. gpt_bigcode-santacoder-q3f16_0/params/params_shard_3.bin +3 -0
  19. gpt_bigcode-santacoder-q3f16_0/params/params_shard_4.bin +3 -0
  20. gpt_bigcode-santacoder-q3f16_0/params/params_shard_5.bin +3 -0
  21. gpt_bigcode-santacoder-q3f16_0/params/params_shard_6.bin +3 -0
  22. gpt_bigcode-santacoder-q3f16_0/params/params_shard_7.bin +3 -0
  23. gpt_bigcode-santacoder-q3f16_0/params/params_shard_8.bin +3 -0
  24. gpt_bigcode-santacoder-q3f16_0/params/params_shard_9.bin +3 -0
  25. gpt_bigcode-santacoder-q3f16_0/params/tokenizer.json +0 -0
  26. gpt_bigcode-santacoder-q3f16_0/params/tokenizer_config.json +7 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+gpt_bigcode-santacoder-q3f16_0/gpt_bigcode-santacoder-q3f16_0-metal.so filter=lfs diff=lfs merge=lfs -text
gpt_bigcode-santacoder-q3f16_0/gpt_bigcode-santacoder-q3f16_0-metal.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:292b1ced49569dc0ee43d5ab9ac4a513eb62bdf4def80b12196168a0bf3a771d
+size 2149172
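
Every binary in this commit is stored with Git LFS, so the repository itself only carries a small pointer file: a `version` line naming the LFS spec, an `oid` with the sha256 of the real object, and its `size` in bytes (the `.gitattributes` entry above is what routes the `.so` through the LFS filter). The sketch below is a minimal Python illustration of checking a fetched file against such a pointer; the file paths are hypothetical placeholders, not part of this repo's tooling.

```python
import hashlib

def read_lfs_pointer(path):
    """Parse a Git LFS pointer file into its key/value fields
    (version, oid, size)."""
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

def matches_pointer(pointer_path, object_path):
    """Return True if object_path has the sha256 and byte size
    recorded in the LFS pointer at pointer_path."""
    fields = read_lfs_pointer(pointer_path)
    algo, _, expected = fields["oid"].partition(":")
    assert algo == "sha256", "only sha256 oids are expected here"
    digest, size = hashlib.sha256(), 0
    with open(object_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected and size == int(fields["size"])

# Hypothetical paths: after `git lfs pull` the checked-out file is the real
# binary, so the raw pointer would have to come from git history or the LFS
# API rather than the working tree.
# print(matches_pointer("pointer.txt", "gpt_bigcode-santacoder-q3f16_0-metal.so"))
```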
gpt_bigcode-santacoder-q3f16_0/mod_cache_before_build_metal.pkl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a7eed956ab15d8f23e5a18cb92d97c97dfcce1d7fc38730a99d6a503549de96f
+size 9008423
gpt_bigcode-santacoder-q3f16_0/params/mlc-chat-config.json ADDED
@@ -0,0 +1,14 @@
+{
+    "model_lib": "gpt_bigcode-santacoder-q3f16_0",
+    "local_id": "gpt_bigcode-santacoder-q3f16_0",
+    "conv_template": "code_gpt",
+    "temperature": 0.7,
+    "repetition_penalty": 1.0,
+    "top_p": 0.95,
+    "mean_gen_len": 128,
+    "max_gen_len": 512,
+    "shift_fill_factor": 0.3,
+    "tokenizer_files": [
+        "tokenizer.json"
+    ]
+}
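
mlc-chat-config.json is the file the MLC-LLM chat runtime reads at load time: `model_lib` names the compiled library, `conv_template` selects the `code_gpt` conversation template, and `temperature`/`top_p`/`repetition_penalty` plus `mean_gen_len`/`max_gen_len` set the default sampling and generation-length behaviour. Below is a minimal sketch of loading this folder with the `mlc_chat` Python `ChatModule`; it assumes the package is installed and the repo is cloned locally, and the exact argument names may differ between MLC-LLM releases.

```python
# Minimal sketch, assuming the mlc_chat Python package and a local clone
# of this repo; ChatModule argument names vary across MLC-LLM releases.
from mlc_chat import ChatModule

cm = ChatModule(
    # Directory containing mlc-chat-config.json and the params shards.
    model="gpt_bigcode-santacoder-q3f16_0/params",
    # Compiled Metal library uploaded in this commit.
    model_lib_path="gpt_bigcode-santacoder-q3f16_0/"
                   "gpt_bigcode-santacoder-q3f16_0-metal.so",
)

# Sampling defaults come from mlc-chat-config.json
# (temperature=0.7, top_p=0.95, max_gen_len=512).
print(cm.generate(prompt="def fibonacci(n):"))
```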
gpt_bigcode-santacoder-q3f16_0/params/ndarray-cache.json ADDED
The diff for this file is too large to render. See raw diff
 
gpt_bigcode-santacoder-q3f16_0/params/params_shard_0.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3ee61f95736c09631b31685d4a52d5da42289c2ab90ed27f9e20893b100b4172
+size 41000960
gpt_bigcode-santacoder-q3f16_0/params/params_shard_1.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b48615b2cb94a25d4e7d02ad61b7dda90543f19392333599bdeff91239e747f
+size 33074176
gpt_bigcode-santacoder-q3f16_0/params/params_shard_10.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2ed367e46ade206c541ae2bb6d2acea02391215ffba020e48a092426aa8146eb
+size 26966016
gpt_bigcode-santacoder-q3f16_0/params/params_shard_11.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:304cccdfca816d52ad059b075cbf680b7982a4a11d1de9675d7eda49410c66bd
+size 30928896
gpt_bigcode-santacoder-q3f16_0/params/params_shard_12.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e991f8f595c864454e12ead4775d5cdcaec5cb88705e555f96e4d54d7df2e794
+size 26966016
gpt_bigcode-santacoder-q3f16_0/params/params_shard_13.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8609178472d794db69ba4ebd1efa158ebd32c11ec16cb4c815b1d2bbbcb23826
+size 30928896
gpt_bigcode-santacoder-q3f16_0/params/params_shard_14.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e41a2d9a7be988e0cede91ea2df186a2ae3af8689ad953e8eef7639fe95141c8
+size 26966016
gpt_bigcode-santacoder-q3f16_0/params/params_shard_15.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:07d1ad3d4442f84f4ce056573832e630eb04dd890eabd29acc0130e826d5e595
+size 30928896
gpt_bigcode-santacoder-q3f16_0/params/params_shard_16.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53ede5ba4af58386e33e8a430429acbb082981a829e5103455bd5d3114482d8b
+size 26966016
gpt_bigcode-santacoder-q3f16_0/params/params_shard_17.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5fe7ef511905b0b5f045838196b666a9fee40b9de811e7a66c768d080bc559fa
+size 41000960
gpt_bigcode-santacoder-q3f16_0/params/params_shard_18.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:115078168c669d76c2b172b8290ee17f246a1cf515ccd6479e9bab16d231a737
+size 12682240
gpt_bigcode-santacoder-q3f16_0/params/params_shard_2.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3566a763b93a5c193b6c9f64a6c270792cc0a0cdeb8ed6b94a434404bf93b327
+size 31879168
gpt_bigcode-santacoder-q3f16_0/params/params_shard_3.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7452ff1b6aebc05658034e2a1fbef791cd8cdb407b14e271b62d2a2296ef1eec
+size 30928896
gpt_bigcode-santacoder-q3f16_0/params/params_shard_4.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e738b4f91855d9cc1d1f5538694572b04493a835d63d6c3b5bfc35d91b02a049
+size 26966016
gpt_bigcode-santacoder-q3f16_0/params/params_shard_5.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:436f4617e195f6f3cff3cc4cf5d18b2214acedec619a34f5eef7715a4493636a
+size 30928896
gpt_bigcode-santacoder-q3f16_0/params/params_shard_6.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6277280fa5ca88d640240280468be1647038ca0a781cef803643ba747f913a14
+size 26966016
gpt_bigcode-santacoder-q3f16_0/params/params_shard_7.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d894cb1d8d433e4b16e139ac766eac90befcd4cf946edb511d600cf9a99f2857
+size 30928896
gpt_bigcode-santacoder-q3f16_0/params/params_shard_8.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:18ff7eaa1fa721aab976bbac60564fcbe5e7a17730f6da55a6f0970b8303454c
+size 26966016
gpt_bigcode-santacoder-q3f16_0/params/params_shard_9.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23aab59d2c77afbb865a9b0283217dda08cdf2f322e3dc0cc53837438fa23eb9
+size 30928896
gpt_bigcode-santacoder-q3f16_0/params/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
gpt_bigcode-santacoder-q3f16_0/params/tokenizer_config.json ADDED
@@ -0,0 +1,7 @@
+{
+    "errors": "replace",
+    "tokenizer_class": "GPT2TokenizerFast",
+    "bos_token": "<|endoftext|>",
+    "eos_token": "<|endoftext|>",
+    "model_max_length": 2048
+}
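
tokenizer_config.json declares a GPT2TokenizerFast with <|endoftext|> serving as both BOS and EOS token and a 2048-token model_max_length; the tokenizer itself ships as the single tokenizer.json listed under tokenizer_files in mlc-chat-config.json. A quick way to exercise it, assuming the Hugging Face `tokenizers` package and a local clone of this repo (the path below is illustrative):

```python
# Minimal sketch, assuming the Hugging Face `tokenizers` package and a
# local clone of this repo; the path is illustrative.
from tokenizers import Tokenizer

tok = Tokenizer.from_file("gpt_bigcode-santacoder-q3f16_0/params/tokenizer.json")

ids = tok.encode("def hello_world():").ids
print(ids)              # token ids fed to the model
print(tok.decode(ids))  # round-trips back to the original text
```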