sharpenb committed on
Commit b6d43bd · verified · 1 Parent(s): 268d9b5

cf7e1418fc7b5cb0c81900b108b8f3fd2b5f6484d3a398c1a2b45b8179c49f7b

.locks/models--distributed--optimized-gpt2-1b/2525dfc6bc73a28af3c0e9c91ec6e51d2d06ed5043373d6f34559ff9de54c7ff.lock ADDED
File without changes
config.json CHANGED
@@ -1,5 +1,5 @@
 {
- "_name_or_path": "/covalent/.cache/models/tmpk1t5rgu6v17wvlsb",
+ "_name_or_path": "/covalent/.cache/models/tmp40i0g00bxd3z4zcp",
 "activation_function": "gelu_new",
 "architectures": [
 "GPTOptim"
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:7a9316cb534bf66e7e13d432916c9d943119734ba5ed1df39a00f821c9fa3380
+ oid sha256:6fe716862891eb09b200da6d0eb2214796809985c13fcbe14d2c3c27f542762f
 size 1207575528
models--distributed--optimized-gpt2-1b/.no_exist/0f5f2c85309718087017f97adaabc63a05d6a493/adapter_config.json ADDED
File without changes
models--distributed--optimized-gpt2-1b/refs/main CHANGED
@@ -1 +1 @@
- fba79423a8549ee57e7ae92c54c57628e4a3b012
+ 0f5f2c85309718087017f97adaabc63a05d6a493
models--distributed--optimized-gpt2-1b/snapshots/0f5f2c85309718087017f97adaabc63a05d6a493/config.json ADDED
@@ -0,0 +1,37 @@
+ {
+ "_name_or_path": "distributed/optimized-gpt2-1b",
+ "activation_function": "gelu_new",
+ "architectures": [
+ "GPTOptim"
+ ],
+ "attn_pdrop": 0.1,
+ "auto_map": {
+ "AutoConfig": "distributed/optimized-gpt2-500m--configuration_gpt_optimized.GPTOptimConfig",
+ "AutoModelForCausalLM": "distributed/optimized-gpt2-500m--modeling_gpt_optimized.GPTOptim"
+ },
+ "block_size": 1024,
+ "bos_token_id": 50256,
+ "embd_pdrop": 0.1,
+ "eos_token_id": 50256,
+ "initializer_range": 0.02,
+ "layer_norm_epsilon": 1e-05,
+ "model_type": "gpt_optimized",
+ "n_embd": 1280,
+ "n_head": 32,
+ "n_inner": null,
+ "n_layer": 48,
+ "n_positions": 1024,
+ "reorder_and_upcast_attn": false,
+ "resid_pdrop": 0.1,
+ "scale_attn_by_inverse_layer_idx": false,
+ "scale_attn_weights": true,
+ "summary_activation": null,
+ "summary_first_dropout": 0.1,
+ "summary_proj_to_labels": true,
+ "summary_type": "cls_index",
+ "summary_use_proj": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.39.3",
+ "use_cache": true,
+ "vocab_size": 50257
+ }
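Note: the added snapshot config resolves the custom GPTOptim architecture through its auto_map entries, which point at code hosted in the distributed/optimized-gpt2-500m repo, so loading it requires trust_remote_code=True. A minimal sketch of loading this exact snapshot (pinning the revision to the commit hash that refs/main now points to; the pin itself is optional and shown only for illustration):

# Sketch: load the snapshot added in this commit with transformers.
# trust_remote_code=True is needed because the config's auto_map
# resolves GPTOptim from a remote code repository.
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "distributed/optimized-gpt2-1b",
    revision="0f5f2c85309718087017f97adaabc63a05d6a493",  # new refs/main target
    trust_remote_code=True,
)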
smash_config.json CHANGED
@@ -28,7 +28,7 @@
 "quant_llm-int8_weight_bits": 8,
 "max_batch_size": 1,
 "device": "cuda",
- "cache_dir": "/covalent/.cache/models/tmpk1t5rgu6",
+ "cache_dir": "/covalent/.cache/models/tmp40i0g00b",
 "task": "",
 "save_load_fn": "bitsandbytes",
 "save_load_fn_args": {