import torch
import torch.nn as nn
from transformers import AutoTokenizer, AutoModelForCausalLM

source_dir = "/mnt/str/models/qwen2-0.5b-instruct"
target_dir = "/mnt/str/models/llama3-70b-instruct"
output_dir = "/mnt/str/temp/transplant"

# Load model and tokenizers
model = AutoModelForCausalLM.from_pretrained(source_dir, device_map = "auto")
tokenizer_source = AutoTokenizer.from_pretrained(source_dir)
tokenizer_target = AutoTokenizer.from_pretrained(target_dir)
tied = model.config.tie_word_embeddings
target_vocab_size = max(tokenizer_target.get_vocab().values()) + 1  # the vocab_size attribute is unreliable (it may exclude added tokens)
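
# Optional sanity check: the derived size must cover at least the reported base vocab
# (e.g. for Llama 3 the full id range is 128256 while vocab_size reports only the
# 128000 base tokens)
assert target_vocab_size >= tokenizer_target.vocab_size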

# Embedding tensor
old_emb = model.model.embed_tokens.weight
new_emb = torch.empty((target_vocab_size, model.config.hidden_size),
                      dtype = old_emb.dtype, device = old_emb.device)

# Head tensor
old_head = model.lm_head.weight
new_head = torch.empty((target_vocab_size, model.config.hidden_size),
                       dtype = old_head.dtype, device = old_head.device)

# Initialize the new tensors: decode each target token to its string (keeping special
# tokens as literal text) and re-encode it with the source tokenizer, then average the
# corresponding source rows
for idx in range(target_vocab_size):
    decode = tokenizer_target.decode([idx], skip_special_tokens = False)
    encode = tokenizer_source.encode(decode, add_special_tokens = False, return_tensors = "pt")
    ids = encode.flatten()
    if ids.numel() == 0:
        ids = torch.zeros(1, dtype = torch.long)  # guard: some ids decode to nothing, which would give a NaN mean
    new_emb[idx] = old_emb[ids].mean(dim = 0)
    new_head[idx] = old_head[ids].mean(dim = 0)
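
# E.g. (hypothetical ids): if the target token "  Hello" re-encodes to source ids
# [220, 9906], its new embedding row becomes (old_emb[220] + old_emb[9906]) / 2;
# tokens shared verbatim by both vocabularies reduce to a copy of a single source row.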

# Replace embedding tensor
model.model.embed_tokens.weight = nn.Parameter(new_emb, requires_grad = False)
model.model.embed_tokens.num_embeddings = target_vocab_size

# Replace head tensor
model.lm_head.weight = nn.Parameter(new_head, requires_grad = False)
model.lm_head.out_features = target_vocab_size
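
# Note: model.resize_token_embeddings() could resize both layers in one call, but it
# initializes the new rows without any mapping between the two vocabularies, which is
# why the weights are assigned directly above.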

# Update model
model.vocab_size = target_vocab_size
model.config.vocab_size = target_vocab_size
model.config.bos_token_id = tokenizer_target.bos_token_id
model.config.eos_token_id = tokenizer_target.eos_token_id

# Save
model.save_pretrained(output_dir, tie_word_embeddings = tied)
tokenizer_target.save_pretrained(output_dir)

# Alternative: saving the state dict directly is more reliable, since save_pretrained
# seems to give you a broken model with some architectures, but it requires manually
# copying and modifying config.json etc.:
#
#   import os
#   from safetensors.torch import save_file
#   save_file(model.state_dict(), os.path.join(output_dir, "model.safetensors"), metadata = {'format': 'pt'})
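
# Optional sanity check (a minimal sketch, reusing the paths above): reload the
# transplanted model with the target tokenizer and confirm it still generates text:
#
#   check_model = AutoModelForCausalLM.from_pretrained(output_dir, device_map = "auto")
#   check_tok = AutoTokenizer.from_pretrained(output_dir)
#   inputs = check_tok("The quick brown fox", return_tensors = "pt").to(check_model.device)
#   print(check_tok.decode(check_model.generate(**inputs, max_new_tokens = 20)[0]))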