|
llama_model_loader: loaded meta data with 24 key-value pairs and 291 tensors from llm-compiler-7b-ftd-IMat-GGUF/llm-compiler-7b-ftd.Q8_0.gguf.hardlink.gguf (version GGUF V3 (latest)) |
|
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output. |
|
llama_model_loader: - kv 0: general.architecture str = llama |
|
llama_model_loader: - kv 1: general.name str = llm-compiler-7b-ftd |
|
llama_model_loader: - kv 2: llama.block_count u32 = 32 |
|
llama_model_loader: - kv 3: llama.context_length u32 = 16384 |
|
llama_model_loader: - kv 4: llama.embedding_length u32 = 4096 |
|
llama_model_loader: - kv 5: llama.feed_forward_length u32 = 11008 |
|
llama_model_loader: - kv 6: llama.attention.head_count u32 = 32 |
|
llama_model_loader: - kv 7: llama.attention.head_count_kv u32 = 32 |
|
llama_model_loader: - kv 8: llama.rope.freq_base f32 = 1000000.000000 |
|
llama_model_loader: - kv 9: llama.attention.layer_norm_rms_epsilon f32 = 0.000010 |
|
llama_model_loader: - kv 10: general.file_type u32 = 7 |
|
llama_model_loader: - kv 11: llama.vocab_size u32 = 32000 |
|
llama_model_loader: - kv 12: llama.rope.dimension_count u32 = 128 |
|
llama_model_loader: - kv 13: tokenizer.ggml.model str = llama |
|
llama_model_loader: - kv 14: tokenizer.ggml.pre str = default |
|
llama_model_loader: - kv 15: tokenizer.ggml.tokens arr[str,32000] = ["<unk>", "<s>", "</s>", "<0x00>", "<... |
|
llama_model_loader: - kv 16: tokenizer.ggml.scores arr[f32,32000] = [0.000000, 0.000000, 0.000000, 0.0000... |
|
llama_model_loader: - kv 17: tokenizer.ggml.token_type arr[i32,32000] = [2, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6, ... |
|
llama_model_loader: - kv 18: tokenizer.ggml.bos_token_id u32 = 1 |
|
llama_model_loader: - kv 19: tokenizer.ggml.eos_token_id u32 = 2 |
|
llama_model_loader: - kv 20: tokenizer.ggml.unknown_token_id u32 = 0 |
|
llama_model_loader: - kv 21: tokenizer.ggml.add_bos_token bool = true |
|
llama_model_loader: - kv 22: tokenizer.ggml.add_eos_token bool = false |
|
llama_model_loader: - kv 23: general.quantization_version u32 = 2 |
|
llama_model_loader: - type f32: 65 tensors |
|
llama_model_loader: - type q8_0: 226 tensors |
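
Everything up to this point is llama.cpp's dump of the GGUF header: 24 metadata key-value pairs plus 291 tensor descriptors, of which the 65 small norm tensors (two per layer plus the output norm) stay at f32 and the 226 weight matrices are Q8_0. The same header can be inspected without loading the weights; below is a minimal Python sketch assuming the gguf package published from llama.cpp's gguf-py directory (pip install gguf), with attribute names as in that library:

from gguf import GGUFReader  # gguf-py, ships with the llama.cpp repo

reader = GGUFReader("llm-compiler-7b-ftd.Q8_0.gguf")

# Metadata keys, matching the "kv" lines above
for name in reader.fields:
    print(name)

# Tensor descriptors: name, shape, quantization type
for t in reader.tensors:
    print(t.name, t.shape, t.tensor_type)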
|
llm_load_vocab: special tokens cache size = 259 |
|
llm_load_vocab: token to piece cache size = 0.1684 MB |
|
llm_load_print_meta: format = GGUF V3 (latest) |
|
llm_load_print_meta: arch = llama |
|
llm_load_print_meta: vocab type = SPM |
|
llm_load_print_meta: n_vocab = 32000 |
|
llm_load_print_meta: n_merges = 0 |
|
llm_load_print_meta: n_ctx_train = 16384 |
|
llm_load_print_meta: n_embd = 4096 |
|
llm_load_print_meta: n_head = 32 |
|
llm_load_print_meta: n_head_kv = 32 |
|
llm_load_print_meta: n_layer = 32 |
|
llm_load_print_meta: n_rot = 128 |
|
llm_load_print_meta: n_embd_head_k = 128 |
|
llm_load_print_meta: n_embd_head_v = 128 |
|
llm_load_print_meta: n_gqa = 1 |
|
llm_load_print_meta: n_embd_k_gqa = 4096 |
|
llm_load_print_meta: n_embd_v_gqa = 4096 |
|
llm_load_print_meta: f_norm_eps = 0.0e+00 |
|
llm_load_print_meta: f_norm_rms_eps = 1.0e-05 |
|
llm_load_print_meta: f_clamp_kqv = 0.0e+00 |
|
llm_load_print_meta: f_max_alibi_bias = 0.0e+00 |
|
llm_load_print_meta: f_logit_scale = 0.0e+00 |
|
llm_load_print_meta: n_ff = 11008 |
|
llm_load_print_meta: n_expert = 0 |
|
llm_load_print_meta: n_expert_used = 0 |
|
llm_load_print_meta: causal attn = 1 |
|
llm_load_print_meta: pooling type = 0 |
|
llm_load_print_meta: rope type = 0 |
|
llm_load_print_meta: rope scaling = linear |
|
llm_load_print_meta: freq_base_train = 1000000.0 |
|
llm_load_print_meta: freq_scale_train = 1 |
|
llm_load_print_meta: n_ctx_orig_yarn = 16384 |
|
llm_load_print_meta: rope_finetuned = unknown |
|
llm_load_print_meta: ssm_d_conv = 0 |
|
llm_load_print_meta: ssm_d_inner = 0 |
|
llm_load_print_meta: ssm_d_state = 0 |
|
llm_load_print_meta: ssm_dt_rank = 0 |
|
llm_load_print_meta: model type = 7B |
|
llm_load_print_meta: model ftype = Q8_0 |
|
llm_load_print_meta: model params = 6.74 B |
|
llm_load_print_meta: model size = 6.67 GiB (8.50 BPW) |
|
llm_load_print_meta: general.name = llm-compiler-7b-ftd |
|
llm_load_print_meta: BOS token = 1 '<s>' |
|
llm_load_print_meta: EOS token = 2 '</s>' |
|
llm_load_print_meta: UNK token = 0 '<unk>' |
|
llm_load_print_meta: LF token = 13 '<0x0A>' |
|
llm_load_print_meta: max token length = 48 |
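
The reported size is internally consistent: Q8_0 stores each block of 32 weights as 32 int8 values plus one f16 scale, i.e. (32*8 + 16)/32 = 8.5 bits per weight, and 6.74 B parameters at 8.50 BPW is the 6.67 GiB shown. A quick check:

params = 6.74e9
bpw = 8.5                          # Q8_0: (32*8 + 16) bits per 32-weight block
print(params * bpw / 8 / 2**30)    # ~6.67 GiB, matching "model size" above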
|
ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no |
|
ggml_cuda_init: GGML_CUDA_FORCE_CUBLAS: no |
|
ggml_cuda_init: found 1 CUDA devices: |
|
Device 0: NVIDIA GeForce RTX 4090, compute capability 8.9, VMM: yes |
|
llm_load_tensors: ggml ctx size = 0.27 MiB |
|
llm_load_tensors: offloading 32 repeating layers to GPU |
|
llm_load_tensors: offloading non-repeating layers to GPU |
|
llm_load_tensors: offloaded 33/33 layers to GPU |
|
llm_load_tensors: CPU buffer size = 132.81 MiB |
|
llm_load_tensors: CUDA0 buffer size = 6695.84 MiB |
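
The 132.81 MiB CPU buffer that remains despite full offload is consistent with the 32000 x 4096 token-embedding table staying on the host, which llama.cpp does even at 33/33 layers offloaded; at Q8_0 (34 bytes per 32-weight block) that table is exactly this size:

vocab, n_embd = 32000, 4096
blocks = vocab * n_embd // 32   # Q8_0 packs 32 weights per block
print(blocks * 34 / 2**20)      # 132.8125 MiB -> the "CPU buffer size" above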
|
................................................................................................... |
|
llama_new_context_with_model: n_ctx = 512 |
|
llama_new_context_with_model: n_batch = 512 |
|
llama_new_context_with_model: n_ubatch = 512 |
|
llama_new_context_with_model: flash_attn = 0 |
|
llama_new_context_with_model: freq_base = 1000000.0 |
|
llama_new_context_with_model: freq_scale = 1 |
|
llama_kv_cache_init: CUDA0 KV buffer size = 256.00 MiB |
|
llama_new_context_with_model: KV self size = 256.00 MiB, K (f16): 128.00 MiB, V (f16): 128.00 MiB |
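
The KV-cache size follows directly from the parameters printed earlier: this model uses no GQA (n_head_kv = n_head, so n_embd_k_gqa = n_embd_v_gqa = 4096), and each of K and V holds n_ctx * n_layer * 4096 f16 values:

n_ctx, n_layer, n_embd_kv = 512, 32, 4096
per_side = n_ctx * n_layer * n_embd_kv * 2  # f16 = 2 bytes
print(per_side / 2**20)                     # 128 MiB for K, 128 MiB for V -> 256 MiB total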
|
llama_new_context_with_model: CUDA_Host output buffer size = 0.12 MiB |
|
llama_new_context_with_model: CUDA0 compute buffer size = 70.50 MiB |
|
llama_new_context_with_model: CUDA_Host compute buffer size = 9.01 MiB |
|
llama_new_context_with_model: graph nodes = 1030 |
|
llama_new_context_with_model: graph splits = 2 |
|
|
|
system_info: n_threads = 25 / 32 | AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 1 | AVX512_VBMI = 1 | AVX512_VNNI = 1 | AVX512_BF16 = 1 | FMA = 1 | NEON = 0 | SVE = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | LLAMAFILE = 1 | |
|
compute_imatrix: tokenizing the input .. |
|
compute_imatrix: tokenization took 95.108 ms |
|
compute_imatrix: computing over 151 chunks with batch_size 512 |
|
compute_imatrix: 0.54 seconds per pass - ETA 1.35 minutes |
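
151 chunks at the 512-token context means the calibration file tokenized to 151 * 512 = 77,312 tokens, which is exactly the prompt-eval count that llama_print_timings reports at the end of this log:

chunks, chunk_tokens = 151, 512
print(chunks * chunk_tokens)   # 77312, matching the prompt-eval token count below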
|
[1]8.5363,[2]6.5180,[3]6.5837,[4]8.0179,[5]9.0085,[6]9.0519,[7]8.4522,[8]9.2578,[9]9.5049, |
|
save_imatrix: stored collected data after 10 chunks in llm-compiler-7b-ftd-IMat-GGUF/imatrix.dat |
|
[10]10.1415,[11]10.0765,[12]8.7815,[13]8.5609,[14]8.9764,[15]9.5567,[16]9.6316,[17]10.0254,[18]10.2615,[19]10.4075, |
|
save_imatrix: stored collected data after 20 chunks in llm-compiler-7b-ftd-IMat-GGUF/imatrix.dat |
|
[20]10.5242,[21]10.8305,[22]10.2302,[23]9.6849,[24]9.7514,[25]9.9103,[26]9.8096,[27]9.5890,[28]9.7533,[29]9.9361, |
|
save_imatrix: stored collected data after 30 chunks in llm-compiler-7b-ftd-IMat-GGUF/imatrix.dat |
|
[30]10.1287,[31]10.1454,[32]10.3399,[33]10.4551,[34]10.7579,[35]10.8765,[36]10.7794,[37]10.2467,[38]9.8992,[39]9.7896, |
|
save_imatrix: stored collected data after 40 chunks in llm-compiler-7b-ftd-IMat-GGUF/imatrix.dat |
|
[40]9.7003,[41]9.5599,[42]9.4412,[43]9.2317,[44]9.1229,[45]8.9791,[46]9.0007,[47]9.0452,[48]9.1178,[49]9.2314, |
|
save_imatrix: stored collected data after 50 chunks in llm-compiler-7b-ftd-IMat-GGUF/imatrix.dat |
|
[50]9.2559,[51]9.5228,[52]9.7619,[53]9.9972,[54]10.2238,[55]10.3586,[56]10.2935,[57]10.1973,[58]10.2496,[59]10.3353, |
|
save_imatrix: stored collected data after 60 chunks in llm-compiler-7b-ftd-IMat-GGUF/imatrix.dat |
|
[60]10.4602,[61]10.3213,[62]10.3514,[63]10.4496,[64]10.5597,[65]10.6374,[66]10.6801,[67]10.7588,[68]10.8062,[69]10.7740, |
|
save_imatrix: stored collected data after 70 chunks in llm-compiler-7b-ftd-IMat-GGUF/imatrix.dat |
|
[70]10.7875,[71]10.7903,[72]10.7101,[73]10.6545,[74]10.5994,[75]10.5572,[76]10.5601,[77]10.5771,[78]10.5642,[79]10.5723, |
|
save_imatrix: stored collected data after 80 chunks in llm-compiler-7b-ftd-IMat-GGUF/imatrix.dat |
|
[80]10.6077,[81]10.5792,[82]10.5634,[83]10.4786,[84]10.4957,[85]10.5038,[86]10.4922,[87]10.5227,[88]10.5129,[89]10.4940, |
|
save_imatrix: stored collected data after 90 chunks in llm-compiler-7b-ftd-IMat-GGUF/imatrix.dat |
|
[90]10.4698,[91]10.4809,[92]10.4164,[93]10.4059,[94]10.3597,[95]10.3050,[96]10.3275,[97]10.3215,[98]10.3394,[99]10.3072, |
|
save_imatrix: stored collected data after 100 chunks in llm-compiler-7b-ftd-IMat-GGUF/imatrix.dat |
|
[100]10.2922,[101]10.3336,[102]10.2897,[103]10.2420,[104]10.2165,[105]10.2516,[106]10.2479,[107]10.2669,[108]10.3052,[109]10.1909, |
|
save_imatrix: stored collected data after 110 chunks in llm-compiler-7b-ftd-IMat-GGUF/imatrix.dat |
|
[110]10.1019,[111]10.0060,[112]9.9080,[113]9.8094,[114]9.7172,[115]9.6301,[116]9.5456,[117]9.4842,[118]9.5120,[119]9.5214, |
|
save_imatrix: stored collected data after 120 chunks in llm-compiler-7b-ftd-IMat-GGUF/imatrix.dat |
|
[120]9.5756,[121]9.6436,[122]9.7038,[123]9.7723,[124]9.8896,[125]10.0112,[126]10.0204,[127]10.0489,[128]9.9467,[129]9.9451, |
|
save_imatrix: stored collected data after 130 chunks in llm-compiler-7b-ftd-IMat-GGUF/imatrix.dat |
|
[130]9.9290,[131]9.9153,[132]9.8805,[133]9.8694,[134]9.8900,[135]9.9156,[136]9.9020,[137]9.8942,[138]9.9130,[139]9.9265, |
|
save_imatrix: stored collected data after 140 chunks in llm-compiler-7b-ftd-IMat-GGUF/imatrix.dat |
|
[140]9.9498,[141]9.9494,[142]9.9487,[143]9.9227,[144]9.8980,[145]9.9167,[146]9.9522,[147]9.9990,[148]10.0373,[149]10.0841, |
|
save_imatrix: stored collected data after 150 chunks in llm-compiler-7b-ftd-IMat-GGUF/imatrix.dat |
|
[150]10.1300,[151]10.1805, |
|
save_imatrix: stored collected data after 151 chunks in llm-compiler-7b-ftd-IMat-GGUF/imatrix.dat |
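
The finished imatrix.dat holds running per-channel activation statistics for every weight matrix; llama.cpp's quantize tool can consume it via its --imatrix option so that low-bit quantization spends its precision on the channels calibration showed to matter. A toy sketch of the underlying idea (an assumed illustration, not llama.cpp's actual kernel): pick the quantization scale that minimizes importance-weighted squared error.

import numpy as np

def weighted_scale_q8(w: np.ndarray, imp: np.ndarray) -> float:
    # Toy importance-weighted scale search: imp[i] plays the role of the
    # mean squared activation feeding weight i, as collected in the imatrix.
    base = np.abs(w).max() / 127.0
    if base == 0.0:
        return 1.0
    best_s, best_err = base, np.inf
    for s in base * np.linspace(0.8, 1.2, 41):
        q = np.clip(np.round(w / s), -127, 127)
        err = float(np.sum(imp * (w - q * s) ** 2))
        if err < best_err:
            best_s, best_err = s, err
    return best_s

Channels with large average activations get their rounding error penalized more, so the search settles on a scale that protects them; that is the point of collecting the importance matrix before quantizing to very low bit widths.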
|
|
|
llama_print_timings: load time = 1845.29 ms |
|
llama_print_timings: sample time = 0.00 ms / 1 runs ( 0.00 ms per token, inf tokens per second) |
|
llama_print_timings: prompt eval time = 75945.00 ms / 77312 tokens ( 0.98 ms per token, 1018.00 tokens per second) |
|
llama_print_timings: eval time = 0.00 ms / 1 runs ( 0.00 ms per token, inf tokens per second) |
|
llama_print_timings: total time = 77714.38 ms / 77313 tokens |
|
|
|
Final estimate: PPL = 10.1805 +/- 0.14161 |
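
Perplexity here is exp of the mean per-token negative log-likelihood over all 151 chunks. The +/- 0.14161 is a statistical uncertainty on that estimate; a common way to derive it (assumed here, not verified against llama.cpp's exact formula) is the delta method, propagating the standard error of the mean NLL through the exponential:

import numpy as np

def ppl_with_se(nll: np.ndarray):
    # nll: per-token negative log-likelihoods in nats
    mean = nll.mean()
    se = nll.std(ddof=1) / np.sqrt(nll.size)  # standard error of the mean
    ppl = np.exp(mean)
    return ppl, ppl * se                      # delta method: d/dx exp(x) = exp(x)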
|
|