Upload folder using huggingface_hub

#2
by sharpenb - opened
Files changed (3)
  1. config.json +2 -2
  2. model.safetensors +1 -1
  3. smash_config.json +1 -1
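For context, commits titled this way are typically produced by the `upload_folder` helper in the `huggingface_hub` Python library. A minimal sketch of such an upload, where the local path and target repo ID are illustrative placeholders rather than values from this PR:

```python
# Sketch of the kind of call that produces a commit like this one.
# folder_path and repo_id are hypothetical placeholders.
from huggingface_hub import upload_folder

upload_folder(
    folder_path="./smashed-model",           # local dir with config.json, model.safetensors, ...
    repo_id="PrunaAI/some-smashed-model",    # hypothetical target repo
    commit_message="Upload folder using huggingface_hub",
)
```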
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/covalent/.cache/models/tmpr3420g4lh57h91sr",
+  "_name_or_path": "/covalent/.cache/models/tmpkr35rmgmpl_t5ug3",
   "architectures": [
     "KPhi3ForCausalLM"
   ],
@@ -17,7 +17,7 @@
   "hidden_size": 1024,
   "initializer_range": 0.02,
   "intermediate_size": 4096,
-  "max_position_embeddings": 4096,
+  "max_position_embeddings": 131072,
   "min_channels_per_group": 256,
   "model_type": "kphi3",
   "num_attention_heads": 16,
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cf49667941746452f04268166edffd3edc4b03c58917859b3645c7ce8d473645
+oid sha256:6780fb69785033d69c37121447886f71f389f63dd7b239634cd5a27ea7fb3c09
 size 370576136
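Only the Git LFS pointer appears in the diff: the `oid` is the SHA-256 of the actual weights blob, and the byte size is unchanged, consistent with re-saving tensors of identical shape. A downloaded file can be checked against the new pointer like so:

```python
# Sketch: verify a downloaded model.safetensors against the LFS pointer's oid.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Expected digest, taken from the new pointer in this commit:
expected = "6780fb69785033d69c37121447886f71f389f63dd7b239634cd5a27ea7fb3c09"
assert sha256_of("model.safetensors") == expected
```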
smash_config.json CHANGED
@@ -28,7 +28,7 @@
   "quant_llm-int8_weight_bits": 8,
   "max_batch_size": 1,
   "device": "cuda",
-  "cache_dir": "/covalent/.cache/models/tmpr3420g4l",
+  "cache_dir": "/covalent/.cache/models/tmpkr35rmgm",
   "task": "",
   "save_load_fn": "bitsandbytes",
   "save_load_fn_args": {}