yuekai committed
Commit 7576105
1 parent: b50ab74

Upload folder using huggingface_hub

Qwen2_1.5B_merged/config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "_name_or_path": "/home/scratch.yuekaiz_wwfo_1/Qwen2-1.5B-Instruct",
+   "architectures": [
+     "Qwen2ForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 151643,
+   "eos_token_id": 151645,
+   "hidden_act": "silu",
+   "hidden_size": 1536,
+   "initializer_range": 0.02,
+   "intermediate_size": 8960,
+   "max_position_embeddings": 32768,
+   "max_window_layers": 28,
+   "model_type": "qwen2",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 28,
+   "num_key_value_heads": 2,
+   "rms_norm_eps": 1e-06,
+   "rope_theta": 1000000.0,
+   "sliding_window": 32768,
+   "tie_word_embeddings": true,
+   "torch_dtype": "float16",
+   "transformers_version": "4.42.4",
+   "use_cache": true,
+   "use_sliding_window": false,
+   "vocab_size": 151936
+ }
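For orientation: with hidden_size 1536 and num_attention_heads 12, the per-head dimension is 1536 / 12 = 128, and num_key_value_heads 2 means the model uses grouped-query attention, with six query heads sharing each key/value head.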
Qwen2_1.5B_merged/generation_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "bos_token_id": 151643,
+   "do_sample": true,
+   "eos_token_id": [
+     151645,
+     151643
+   ],
+   "pad_token_id": 151643,
+   "repetition_penalty": 1.1,
+   "temperature": 0.7,
+   "top_k": 20,
+   "top_p": 0.8,
+   "transformers_version": "4.42.4"
+ }
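transformers picks up these sampling defaults (do_sample, temperature 0.7, top_p 0.8, top_k 20, repetition_penalty 1.1) automatically at generation time. A minimal sketch, assuming the Qwen2_1.5B_merged folder from this commit is available locally (the path and prompt are illustrative):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_dir = "Qwen2_1.5B_merged"  # illustrative local path
tokenizer = AutoTokenizer.from_pretrained(model_dir)
model = AutoModelForCausalLM.from_pretrained(model_dir, torch_dtype=torch.float16)

inputs = tokenizer("How are you?", return_tensors="pt")
# Sampling parameters come from generation_config.json unless overridden here.
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))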
Qwen2_1.5B_merged/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cd9913bcefe5f587335ad14d182a3d624abd87db893389398493c3a5cee3d30d
+ size 3087466808
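As a consistency check on the LFS pointer above: 3,087,466,808 bytes at 2 bytes per float16 parameter is roughly 1.54 B parameters, which matches a merged Qwen2-1.5B checkpoint.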
Qwen2_1.5B_merged/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
Qwen2_1.5B_merged/tokenizer_config.json ADDED
@@ -0,0 +1,40 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "151643": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151644": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151645": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": ["<|im_start|>", "<|im_end|>"],
+   "bos_token": null,
+   "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "errors": "replace",
+   "model_max_length": 32768,
+   "pad_token": "<|endoftext|>",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
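The chat_template above is ChatML-style: it injects a default system prompt when the first message is not a system message and wraps every turn in <|im_start|>/<|im_end|>. A minimal sketch of how it renders (the local path is illustrative):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Qwen2_1.5B_merged")  # illustrative path
messages = [{"role": "user", "content": "Transcribe the audio."}]
text = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(text)
# Expected rendering:
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# Transcribe the audio.<|im_end|>
# <|im_start|>assistant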
Qwen2_1.5B_merged/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
build_qwen.sh ADDED
@@ -0,0 +1,16 @@
+ # You need to merge Qwen2-1.5B-Instruct with the fine-tuned LoRA adapter first; see merge_lora.py.
+ # Then you can build the engine from the merged model:
+ # python3 convert_checkpoint.py --model_dir ${model_dir} \
+ #                               --output_dir ${checkpoint_dir} \
+ #                               --dtype float16
+ # We have already merged the two models and converted the result for TensorRT-LLM into the checkpoint below:
+ checkpoint_dir=tllm_checkpoint_1gpu_fp16_qwen2_1.5B_instruct_merged
+ # output engine directory
+ engine_dir=qwen2_1.5B_instruct_fp16_merged
+ 
+ # max_prompt_embedding_table_size should be >= max_batch_size * speech_embedding_seq_length
+ trtllm-build --checkpoint_dir ${checkpoint_dir} \
+              --output_dir ${engine_dir} \
+              --max_prompt_embedding_table_size 4096 \
+              --max_batch_size 16 \
+              --gemm_plugin float16
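As a worked example of the constraint in the comment: with --max_batch_size 16, a prompt-embedding table of 4096 entries allows up to 4096 / 16 = 256 speech-embedding positions per request.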
build_whisper_encoder.sh ADDED
@@ -0,0 +1,21 @@
+ 
+ INFERENCE_PRECISION=float16
+ MAX_BEAM_WIDTH=4
+ MAX_BATCH_SIZE=64
+ checkpoint_dir=whisper_multi_zh_tllm_checkpoint
+ output_dir=whisper_multi_zh
+ 
+ # Download the fine-tuned model: https://huggingface.co/yuekai/icefall_asr_multi-hans-zh_whisper/blob/main/v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt
+ # python3 convert_checkpoint.py \
+ #         --output_dir whisper_multi_zh_tllm_checkpoint \
+ #         --model_name large-v2
+ 
+ trtllm-build --checkpoint_dir ${checkpoint_dir}/encoder \
+              --output_dir ${output_dir}/encoder \
+              --moe_plugin disable \
+              --enable_xqa disable \
+              --max_batch_size ${MAX_BATCH_SIZE} \
+              --gemm_plugin disable \
+              --bert_attention_plugin ${INFERENCE_PRECISION} \
+              --max_input_len 3000 --max_seq_len=3000 \
+              --remove_input_padding disable --paged_kv_cache disable
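For context: --max_input_len 3000 corresponds to Whisper's 30-second window (30 s x 100 mel frames/s = 3000 frames), which the encoder's stride-2 convolutional front end downsamples to the 1500 positions reported as n_audio_ctx in the encoder config below.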
icefall_asr_multi-hans_whisper_qwen2_1.5B/epoch-2-avg-6.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c20e8745dace75339447806d5f9177e0b719a9eb796512542c268868f8d8bebb
+ size 375542023
merge_lora.py ADDED
@@ -0,0 +1,76 @@
+ import torch
+ from torch import nn
+ from transformers.trainer_pt_utils import LabelSmoother
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
+ 
+ IGNORE_TOKEN_ID = LabelSmoother.ignore_index
+ DEFAULT_SPEECH_TOKEN = "<speech>"
+ 
+ 
+ class SPEECH_LLM(nn.Module):
+     """
+     The speech-to-text model. It consists of an encoder, a language model, and an encoder projector.
+     The encoder extracts speech features from the input speech signal.
+     The encoder projector projects the encoder outputs to the same dimension as the language model.
+     The language model generates text from the projected speech features.
+     Args:
+         encoder (:obj:`nn.Module`): The encoder module.
+         llm (:obj:`nn.Module`): The language model module.
+         encoder_projector (:obj:`nn.Module`): The encoder projector module.
+     """
+ 
+     def __init__(
+         self,
+         encoder: nn.Module = None,
+         llm: nn.Module = None,
+         encoder_projector: nn.Module = None,
+     ):
+         super().__init__()
+         self.encoder = encoder
+         self.llm = llm
+         self.encoder_projector = encoder_projector
+ 
+ 
+ if __name__ == "__main__":
+     speech_encoder_dim = 1280
+     encoder_projector_ds_rate = 8
+     llm_config_hidden_size = 1536
+ 
+     adapter_dir = "/home/scratch.yuekaiz_wwfo_1/icefall_asr_multi-hans_whisper_qwen2_1.5B/epoch-2-avg-6.pt"
+     llm_dir = "/home/scratch.yuekaiz_wwfo_1/Qwen2-1.5B-Instruct"
+     target_dir = "/home/scratch.yuekaiz_wwfo_1/Qwen2_1.5B_merged"
+ 
+     # Load the base LLM in fp16 and wrap it with the LoRA config used during fine-tuning.
+     llm = AutoModelForCausalLM.from_pretrained(
+         llm_dir,
+         torch_dtype=torch.float16,
+     )
+     lora_config = LoraConfig(
+         r=64,
+         lora_alpha=16,
+         target_modules=[
+             "q_proj",
+             "k_proj",
+             "v_proj",
+             "o_proj",
+             "up_proj",
+             "gate_proj",
+             "down_proj",
+         ],
+         task_type="CAUSAL_LM",
+     )
+     llm = get_peft_model(llm, lora_config)
+     model = SPEECH_LLM(
+         llm=llm,
+     )
+ 
+     # strict=False: the checkpoint also holds encoder/projector weights
+     # that this wrapper does not instantiate here.
+     checkpoint = torch.load(adapter_dir, map_location="cpu")
+     missing_keys, unexpected_keys = model.load_state_dict(checkpoint, strict=False)
+     print(missing_keys, unexpected_keys)
+ 
+     # Fold the LoRA deltas into the base weights and save a plain HF checkpoint.
+     llm_merged = model.llm.merge_and_unload()
+     llm_merged.save_pretrained(target_dir)
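After the script runs, target_dir holds a plain fp16 Qwen2 checkpoint with the LoRA deltas folded in; the Qwen2_1.5B_merged/config.json at the top of this commit is its output. A quick sanity check, assuming the same target_dir as in the script:

from transformers import AutoModelForCausalLM

merged = AutoModelForCausalLM.from_pretrained(
    "/home/scratch.yuekaiz_wwfo_1/Qwen2_1.5B_merged"  # same target_dir as above
)
# merge_and_unload() folds the LoRA weights into the base matrices,
# so no LoRA parameters should remain in the saved model.
assert not any("lora" in name for name in merged.state_dict())
print(merged.config.architectures)  # ['Qwen2ForCausalLM']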
tllm_checkpoint_1gpu_fp16_qwen2_1.5B_instruct_merged/config.json ADDED
@@ -0,0 +1,53 @@
+ {
+   "mlp_bias": false,
+   "attn_bias": true,
+   "rotary_base": 1000000.0,
+   "rotary_scaling": null,
+   "disable_weight_only_quant_plugin": false,
+   "moe": {
+     "num_experts": 0,
+     "top_k": 0,
+     "normalization_mode": 0,
+     "tp_mode": 0
+   },
+   "architecture": "Qwen2ForCausalLM",
+   "dtype": "float16",
+   "vocab_size": 151936,
+   "hidden_size": 1536,
+   "num_hidden_layers": 28,
+   "num_attention_heads": 12,
+   "hidden_act": "silu",
+   "logits_dtype": "float32",
+   "norm_epsilon": 1e-06,
+   "position_embedding_type": "rope_gpt_neox",
+   "max_position_embeddings": 32768,
+   "num_key_value_heads": 2,
+   "intermediate_size": 8960,
+   "mapping": {
+     "world_size": 1,
+     "gpus_per_node": 8,
+     "cp_size": 1,
+     "tp_size": 1,
+     "pp_size": 1,
+     "moe_tp_size": 1,
+     "moe_ep_size": 1
+   },
+   "quantization": {
+     "quant_algo": null,
+     "kv_cache_quant_algo": null,
+     "group_size": 128,
+     "smoothquant_val": 0.5,
+     "clamp_val": null,
+     "has_zero_point": false,
+     "pre_quant_scale": false,
+     "exclude_modules": null
+   },
+   "use_parallel_embedding": false,
+   "embedding_sharding_dim": 0,
+   "share_embedding_table": false,
+   "head_size": 128,
+   "qk_layernorm": false,
+   "qwen_type": "qwen2",
+   "moe_intermediate_size": 0,
+   "moe_shared_expert_intermediate_size": 0
+ }
tllm_checkpoint_1gpu_fp16_qwen2_1.5B_instruct_merged/rank0.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2d7c0035fb7e28fffd625e180be6238dccb44f763a763fe8843f0f0db2c0aada
+ size 3587757176
whisper_multi_zh_tllm_checkpoint/encoder/config.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "architecture": "WhisperEncoder",
+   "dtype": "float16",
+   "num_hidden_layers": 32,
+   "num_attention_heads": 20,
+   "hidden_size": 1280,
+   "n_mels": 80,
+   "n_audio_ctx": 1500,
+   "vocab_size": 51865,
+   "hidden_act": "gelu",
+   "num_languages": 99,
+   "quantization": {
+     "quant_algo": null
+   }
+ }
whisper_multi_zh_tllm_checkpoint/encoder/rank0.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:34c8b445a098fcd90eda7bfba8b9ff2fe7c34fb0b3589f80bc048b7411a6248d
+ size 1277533656