danielhanchen committed
Commit c0b164f · verified · Parent: 6f71fac

Add files using upload-large-folder tool

Files changed (2):
  1. config.json (+21 -1)
  2. generation_config.json (+1 -1)
config.json CHANGED
```diff
@@ -21,13 +21,33 @@
   "num_hidden_layers": 48,
   "num_key_value_heads": 8,
   "pad_token_id": 151654,
+  "quantization_config": {
+    "_load_in_4bit": true,
+    "_load_in_8bit": false,
+    "bnb_4bit_compute_dtype": "bfloat16",
+    "bnb_4bit_quant_storage": "uint8",
+    "bnb_4bit_quant_type": "nf4",
+    "bnb_4bit_use_double_quant": true,
+    "llm_int8_enable_fp32_cpu_offload": false,
+    "llm_int8_has_fp16_weight": false,
+    "llm_int8_skip_modules": [
+      "lm_head",
+      "multi_modal_projector",
+      "merger",
+      "modality_projection"
+    ],
+    "llm_int8_threshold": 6.0,
+    "load_in_4bit": true,
+    "load_in_8bit": false,
+    "quant_method": "bitsandbytes"
+  },
   "rms_norm_eps": 1e-05,
   "rope_scaling": null,
   "rope_theta": 10000000.0,
   "sliding_window": null,
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
-  "transformers_version": "4.48.1",
+  "transformers_version": "4.49.0.dev0",
   "unsloth_fixed": true,
   "use_cache": true,
   "use_sliding_window": false,
```
generation_config.json CHANGED
```diff
@@ -11,5 +11,5 @@
   "temperature": 0.7,
   "top_k": 20,
   "top_p": 0.8,
-  "transformers_version": "4.48.1"
+  "transformers_version": "4.49.0.dev0"
 }
```
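generation_config.json already pins the sampling defaults (temperature 0.7, top_k 20, top_p 0.8); only the transformers version changes here. For reference, a sketch of passing those settings explicitly, reusing `model` from the snippet above; `do_sample=True` is an assumption, since these knobs only take effect when sampling is enabled.

```python
# Minimal sketch: sampling settings from generation_config.json.
# Tokenizer repo id is the same hypothetical placeholder as above.
from transformers import AutoTokenizer, GenerationConfig

tokenizer = AutoTokenizer.from_pretrained("your-org/your-model")
inputs = tokenizer("Hello!", return_tensors="pt").to(model.device)

gen_config = GenerationConfig(
    do_sample=True,   # assumption: temperature/top_k/top_p are inert when greedy decoding
    temperature=0.7,  # from generation_config.json
    top_k=20,
    top_p=0.8,
)
outputs = model.generate(**inputs, generation_config=gen_config)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```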