diff --git a/.gitattributes b/.gitattributes index c7d9f3332a950355d5a77d85000f05e6f45435ea..fa0ada747c9d2ecf8c8e9cd4495d4ccf0609f682 100644 --- a/.gitattributes +++ b/.gitattributes @@ -32,3 +32,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text *.zst filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text +cleaners/JapaneseCleaner.dll filter=lfs diff=lfs merge=lfs -text +cleaners/sys.dic filter=lfs diff=lfs merge=lfs -text diff --git a/README.md b/README.md index 32897cd3e640101ba184f8c4ccd896981de3804a..5dff1202fc4a2059b8dedea05953f2d01b5fb122 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,13 @@ --- +title: Chatbot With Vits +emoji: 🚀 +colorFrom: purple +colorTo: indigo +sdk: gradio +sdk_version: 3.23.0 +app_file: app.py +pinned: false license: mit --- + +Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/app.py b/app.py new file mode 100644 index 0000000000000000000000000000000000000000..b031ebd991a89850c0b4746e7ad28d2de4dab081 --- /dev/null +++ b/app.py @@ -0,0 +1,251 @@ +import logging +logging.getLogger('numba').setLevel(logging.WARNING) +logging.getLogger('matplotlib').setLevel(logging.WARNING) +logging.getLogger('urllib3').setLevel(logging.WARNING) +from text import text_to_sequence +import numpy as np +from scipy.io import wavfile +import torch +import json +import commons +import utils +import sys +import pathlib +import onnxruntime as ort +import gradio as gr +import argparse +import time +import os +import io +from scipy.io.wavfile import write +from flask import Flask, request +from threading import Thread +import openai +import requests +class VitsGradio: + def __init__(self): + self.lan = ["中文","日文","自动"] + self.chatapi = ["gpt-3.5-turbo","gpt3"] + self.modelPaths = [] + for root,dirs,files in os.walk("checkpoints"): + for dir in dirs: + self.modelPaths.append(dir) + with gr.Blocks() as self.Vits: + with gr.Tab("调试用"): + with gr.Row(): + with gr.Column(): + with gr.Row(): + with gr.Column(): + self.text = gr.TextArea(label="Text", value="你好") + with gr.Accordion(label="测试api", open=False): + self.local_chat1 = gr.Checkbox(value=False, label="使用网址+文本进行模拟") + self.url_input = gr.TextArea(label="键入测试", value="http://127.0.0.1:8080/chat?Text=") + butto = gr.Button("测试从网页端获取文本") + btnVC = gr.Button("测试tts+对话程序") + with gr.Column(): + output2 = gr.TextArea(label="回复") + output1 = gr.Audio(label="采样率22050") + output3 = gr.outputs.File(label="44100hz: output.wav") + butto.click(self.Simul, inputs=[self.text, self.url_input], outputs=[output2,output3]) + btnVC.click(self.tts_fn, inputs=[self.text], outputs=[output1,output2]) + with gr.Tab("控制面板"): + with gr.Row(): + with gr.Column(): + with gr.Row(): + with gr.Column(): + self.api_input1 = gr.TextArea(label="输入api-key或ChATGLM模型的路径", value="https://platform.openai.com/account/api-keys") + with gr.Accordion(label="chatbot选择", open=False): + self.api_input2 = gr.Checkbox(value=True, label="采用gpt3.5") + self.local_chat1 = gr.Checkbox(value=False, label="启动本地chatbot") + self.local_chat2 = gr.Checkbox(value=True, label="是否量化") + res = gr.TextArea() + Botselection = gr.Button("聊天机器人选择") + Botselection.click(self.check_bot, inputs=[self.api_input1,self.api_input2,self.local_chat1,self.local_chat2], outputs = [res]) + self.input1 = gr.Dropdown(label = "vits模型加载", choices = self.modelPaths, value = self.modelPaths[0], type = "value") + self.input2 = gr.Dropdown(label="Language", choices=self.lan, 
value="自动", interactive=True) + with gr.Column(): + btnVC = gr.Button("Submit") + self.input3 = gr.Dropdown(label="Speaker", choices=list(range(101)), value=0, interactive=True) + self.input4 = gr.Slider(minimum=0, maximum=1.0, label="更改噪声比例(noise scale),以控制情感", value=0.267) + self.input5 = gr.Slider(minimum=0, maximum=1.0, label="更改噪声偏差(noise scale w),以控制音素长短", value=0.7) + self.input6 = gr.Slider(minimum=0.1, maximum=10, label="duration", value=1) + statusa = gr.TextArea() + btnVC.click(self.create_tts_fn, inputs=[self.input1, self.input2, self.input3, self.input4, self.input5, self.input6], outputs = [statusa]) + + def Simul(self,text,url_input): + web = url_input + text + res = requests.get(web) + music = res.content + with open('output.wav', 'wb') as code: + code.write(music) + file_path = "output.wav" + return web,file_path + + + def chatgpt(self,text): + self.messages.append({"role": "user", "content": text},) + chat = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages= self.messages) + reply = chat.choices[0].message.content + return reply + + def ChATGLM(self,text): + if text == 'clear': + self.history = [] + response, new_history = self.model.chat(self.tokenizer, text, self.history) + response = response.replace(" ",'').replace("\n",'.') + self.history = new_history + return response + + def gpt3_chat(self,text): + call_name = "Waifu" + openai.api_key = args.key + identity = "" + start_sequence = '\n'+str(call_name)+':' + restart_sequence = "\nYou: " + if 1 == 1: + prompt0 = text #当期prompt + if text == 'quit': + return prompt0 + prompt = identity + prompt0 + start_sequence + response = openai.Completion.create( + model="text-davinci-003", + prompt=prompt, + temperature=0.5, + max_tokens=1000, + top_p=1.0, + frequency_penalty=0.5, + presence_penalty=0.0, + stop=["\nYou:"] + ) + return response['choices'][0]['text'].strip() + + def check_bot(self,api_input1,api_input2,local_chat1,local_chat2): + if local_chat1: + from transformers import AutoTokenizer, AutoModel + self.tokenizer = AutoTokenizer.from_pretrained(api_input1, trust_remote_code=True) + if local_chat2: + self.model = AutoModel.from_pretrained(api_input1, trust_remote_code=True).half().quantize(4).cuda() + else: + self.model = AutoModel.from_pretrained(api_input1, trust_remote_code=True) + self.history = [] + else: + self.messages = [] + openai.api_key = api_input1 + return "Finished" + + def is_japanese(self,string): + for ch in string: + if ord(ch) > 0x3040 and ord(ch) < 0x30FF: + return True + return False + + def is_english(self,string): + import re + pattern = re.compile('^[A-Za-z0-9.,:;!?()_*"\' ]+$') + if pattern.fullmatch(string): + return True + else: + return False + + def get_symbols_from_json(self,path): + assert os.path.isfile(path) + with open(path, 'r') as f: + data = json.load(f) + return data['symbols'] + + def sle(self,language,text): + text = text.replace('\n','。').replace(' ',',') + if language == "中文": + tts_input1 = "[ZH]" + text + "[ZH]" + return tts_input1 + elif language == "自动": + tts_input1 = f"[JA]{text}[JA]" if self.is_japanese(text) else f"[ZH]{text}[ZH]" + return tts_input1 + elif language == "日文": + tts_input1 = "[JA]" + text + "[JA]" + return tts_input1 + + def get_text(self,text,hps_ms): + text_norm = text_to_sequence(text,hps_ms.data.text_cleaners) + if hps_ms.data.add_blank: + text_norm = commons.intersperse(text_norm, 0) + text_norm = torch.LongTensor(text_norm) + return text_norm + + def create_tts_fn(self,path, input2, input3, n_scale= 0.667,n_scale_w = 0.8, l_scale = 1 ): 
+ self.symbols = self.get_symbols_from_json(f"checkpoints/{path}/config.json") + self.hps = utils.get_hparams_from_file(f"checkpoints/{path}/config.json") + phone_dict = { + symbol: i for i, symbol in enumerate(self.symbols) + } + self.ort_sess = ort.InferenceSession(f"checkpoints/{path}/model.onnx") + self.language = input2 + self.speaker_id = input3 + self.n_scale = n_scale + self.n_scale_w = n_scale_w + self.l_scale = l_scale + print(self.language,self.speaker_id,self.n_scale) + return 'success' + + def tts_fn(self,text): + if self.local_chat1: + text = self.chatgpt(text) + elif self.api_input2: + text = self.ChATGLM(text) + else: + text = self.gpt3_chat(text) + print(text) + text =self.sle(self.language,text) + seq = text_to_sequence(text, cleaner_names=self.hps.data.text_cleaners) + if self.hps.data.add_blank: + seq = commons.intersperse(seq, 0) + with torch.no_grad(): + x = np.array([seq], dtype=np.int64) + x_len = np.array([x.shape[1]], dtype=np.int64) + sid = np.array([self.speaker_id], dtype=np.int64) + scales = np.array([self.n_scale, self.n_scale_w, self.l_scale], dtype=np.float32) + scales.resize(1, 3) + ort_inputs = { + 'input': x, + 'input_lengths': x_len, + 'scales': scales, + 'sid': sid + } + t1 = time.time() + audio = np.squeeze(self.ort_sess.run(None, ort_inputs)) + audio *= 32767.0 / max(0.01, np.max(np.abs(audio))) * 0.6 + audio = np.clip(audio, -32767.0, 32767.0) + t2 = time.time() + spending_time = "推理时间:"+str(t2-t1)+"s" + print(spending_time) + bytes_wav = bytes() + byte_io = io.BytesIO(bytes_wav) + wavfile.write('moe/temp1.wav',self.hps.data.sampling_rate, audio.astype(np.int16)) + cmd = 'ffmpeg -y -i ' + 'moe/temp1.wav' + ' -ar 44100 ' + 'moe/temp2.wav' + os.system(cmd) + return (self.hps.data.sampling_rate, audio),text.replace('[JA]','').replace('[ZH]','') + +app = Flask(__name__) +print("开始部署") +grVits = VitsGradio() + +@app.route('/chat') +def text_api(): + message = request.args.get('Text','') + audio,text = grVits.tts_fn(message) + text = text.replace('[JA]','').replace('[ZH]','') + with open('moe/temp2.wav','rb') as bit: + wav_bytes = bit.read() + headers = { + 'Content-Type': 'audio/wav', + 'Text': text.encode('utf-8')} + return wav_bytes, 200, headers + +def gradio_interface(): + return grVits.Vits.launch() + +if __name__ == '__main__': + api_thread = Thread(target=app.run, args=("0.0.0.0", 8080)) + gradio_thread = Thread(target=gradio_interface) + api_thread.start() + gradio_thread.start() \ No newline at end of file diff --git a/attentions.py b/attentions.py new file mode 100644 index 0000000000000000000000000000000000000000..f8e5112051bae41715bed99a7b6e14ef54b18f60 --- /dev/null +++ b/attentions.py @@ -0,0 +1,392 @@ +import math + +import torch +from torch import nn +from torch.nn import functional as F + +import commons +from modules import LayerNorm + + +class Encoder(nn.Module): + def __init__(self, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size=1, + p_dropout=0., + window_size=4, + **kwargs): + super().__init__() + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.window_size = window_size + + self.drop = nn.Dropout(p_dropout) + self.attn_layers = nn.ModuleList() + self.norm_layers_1 = nn.ModuleList() + self.ffn_layers = nn.ModuleList() + self.norm_layers_2 = nn.ModuleList() + for i in range(self.n_layers): + self.attn_layers.append( + MultiHeadAttention(hidden_channels, + 
hidden_channels, + n_heads, + p_dropout=p_dropout, + window_size=window_size)) + self.norm_layers_1.append(LayerNorm(hidden_channels)) + self.ffn_layers.append( + FFN(hidden_channels, + hidden_channels, + filter_channels, + kernel_size, + p_dropout=p_dropout)) + self.norm_layers_2.append(LayerNorm(hidden_channels)) + + def forward(self, x, x_mask): + attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) + x = x * x_mask + for i in range(self.n_layers): + y = self.attn_layers[i](x, x, attn_mask) + y = self.drop(y) + x = self.norm_layers_1[i](x + y) + + y = self.ffn_layers[i](x, x_mask) + y = self.drop(y) + x = self.norm_layers_2[i](x + y) + x = x * x_mask + return x + + +class Decoder(nn.Module): + def __init__(self, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size=1, + p_dropout=0., + proximal_bias=False, + proximal_init=True, + **kwargs): + super().__init__() + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.proximal_bias = proximal_bias + self.proximal_init = proximal_init + + self.drop = nn.Dropout(p_dropout) + self.self_attn_layers = nn.ModuleList() + self.norm_layers_0 = nn.ModuleList() + self.encdec_attn_layers = nn.ModuleList() + self.norm_layers_1 = nn.ModuleList() + self.ffn_layers = nn.ModuleList() + self.norm_layers_2 = nn.ModuleList() + for i in range(self.n_layers): + self.self_attn_layers.append( + MultiHeadAttention(hidden_channels, + hidden_channels, + n_heads, + p_dropout=p_dropout, + proximal_bias=proximal_bias, + proximal_init=proximal_init)) + self.norm_layers_0.append(LayerNorm(hidden_channels)) + self.encdec_attn_layers.append( + MultiHeadAttention(hidden_channels, + hidden_channels, + n_heads, + p_dropout=p_dropout)) + self.norm_layers_1.append(LayerNorm(hidden_channels)) + self.ffn_layers.append( + FFN(hidden_channels, + hidden_channels, + filter_channels, + kernel_size, + p_dropout=p_dropout, + causal=True)) + self.norm_layers_2.append(LayerNorm(hidden_channels)) + + def forward(self, x, x_mask, h, h_mask): + """ + x: decoder input + h: encoder output + """ + self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to( + device=x.device, dtype=x.dtype) + encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) + x = x * x_mask + for i in range(self.n_layers): + y = self.self_attn_layers[i](x, x, self_attn_mask) + y = self.drop(y) + x = self.norm_layers_0[i](x + y) + + y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) + y = self.drop(y) + x = self.norm_layers_1[i](x + y) + + y = self.ffn_layers[i](x, x_mask) + y = self.drop(y) + x = self.norm_layers_2[i](x + y) + x = x * x_mask + return x + + +class MultiHeadAttention(nn.Module): + def __init__(self, + channels, + out_channels, + n_heads, + p_dropout=0., + window_size=None, + heads_share=True, + block_length=None, + proximal_bias=False, + proximal_init=False): + super().__init__() + assert channels % n_heads == 0 + + self.channels = channels + self.out_channels = out_channels + self.n_heads = n_heads + self.p_dropout = p_dropout + self.window_size = window_size + self.heads_share = heads_share + self.block_length = block_length + self.proximal_bias = proximal_bias + self.proximal_init = proximal_init + self.attn = None + + self.k_channels = channels // n_heads + self.conv_q = nn.Conv1d(channels, channels, 1) + self.conv_k = nn.Conv1d(channels, channels, 1) + self.conv_v = nn.Conv1d(channels, channels, 1) + self.conv_o = 
nn.Conv1d(channels, out_channels, 1) + self.drop = nn.Dropout(p_dropout) + + if window_size is not None: + n_heads_rel = 1 if heads_share else n_heads + rel_stddev = self.k_channels**-0.5 + self.emb_rel_k = nn.Parameter( + torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) + * rel_stddev) + self.emb_rel_v = nn.Parameter( + torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) + * rel_stddev) + + nn.init.xavier_uniform_(self.conv_q.weight) + nn.init.xavier_uniform_(self.conv_k.weight) + nn.init.xavier_uniform_(self.conv_v.weight) + if proximal_init: + with torch.no_grad(): + self.conv_k.weight.copy_(self.conv_q.weight) + self.conv_k.bias.copy_(self.conv_q.bias) + + def forward(self, x, c, attn_mask=None): + q = self.conv_q(x) + k = self.conv_k(c) + v = self.conv_v(c) + + x, self.attn = self.attention(q, k, v, mask=attn_mask) + + x = self.conv_o(x) + return x + + def attention(self, query, key, value, mask=None): + # reshape [b, d, t] -> [b, n_h, t, d_k] + b, d, t_s, t_t = (*key.size(), query.size(2)) + query = query.view(b, self.n_heads, self.k_channels, + t_t).transpose(2, 3) + key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) + value = value.view(b, self.n_heads, self.k_channels, + t_s).transpose(2, 3) + + scores = torch.matmul(query / math.sqrt(self.k_channels), + key.transpose(-2, -1)) + if self.window_size is not None: + msg = "Relative attention is only available for self-attention." + assert t_s == t_t, msg + key_relative_embeddings = self._get_relative_embeddings( + self.emb_rel_k, t_s) + rel_logits = self._matmul_with_relative_keys( + query / math.sqrt(self.k_channels), key_relative_embeddings) + scores_local = self._relative_position_to_absolute_position( + rel_logits) + scores = scores + scores_local + if self.proximal_bias: + msg = "Proximal bias is only available for self-attention." + assert t_s == t_t, msg + scores = scores + self._attention_bias_proximal(t_s).to( + device=scores.device, dtype=scores.dtype) + if mask is not None: + scores = scores.masked_fill(mask == 0, -1e4) + if self.block_length is not None: + msg = "Local attention is only available for self-attention." + assert t_s == t_t, msg + block_mask = torch.ones_like(scores).triu( + -self.block_length).tril(self.block_length) + scores = scores.masked_fill(block_mask == 0, -1e4) + p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] + p_attn = self.drop(p_attn) + output = torch.matmul(p_attn, value) + if self.window_size is not None: + relative_weights = self._absolute_position_to_relative_position( + p_attn) + value_relative_embeddings = self._get_relative_embeddings( + self.emb_rel_v, t_s) + output = output + self._matmul_with_relative_values( + relative_weights, value_relative_embeddings) + output = output.transpose(2, 3).contiguous().view( + b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] + return output, p_attn + + def _matmul_with_relative_values(self, x, y): + """ + x: [b, h, l, m] + y: [h or 1, m, d] + ret: [b, h, l, d] + """ + ret = torch.matmul(x, y.unsqueeze(0)) + return ret + + def _matmul_with_relative_keys(self, x, y): + """ + x: [b, h, l, d] + y: [h or 1, m, d] + ret: [b, h, l, m] + """ + ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) + return ret + + def _get_relative_embeddings(self, relative_embeddings, length): + max_relative_position = 2 * self.window_size + 1 + # Pad first before slice to avoid using cond ops. 
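        # emb_rel_k / emb_rel_v hold 2*window_size+1 rows, one per relative offset in
        # -window_size .. +window_size. Attention over `length` positions needs offsets
        # -(length-1) .. +(length-1), i.e. 2*length-1 rows, so the table is zero-padded
        # out to that width (when length > window_size+1) and a centred slice of
        # 2*length-1 rows is taken below.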
+ pad_length = max(length - (self.window_size + 1), 0) + slice_start_position = max((self.window_size + 1) - length, 0) + slice_end_position = slice_start_position + 2 * length - 1 + if pad_length > 0: + padded_relative_embeddings = F.pad( + relative_embeddings, + commons.convert_pad_shape([[0, 0], [pad_length, pad_length], + [0, 0]])) + else: + padded_relative_embeddings = relative_embeddings + used_relative_embeddings = padded_relative_embeddings[:, + slice_start_position: + slice_end_position] + return used_relative_embeddings + + def _relative_position_to_absolute_position(self, x): + """ + x: [b, h, l, 2*l-1] + ret: [b, h, l, l] + """ + batch, heads, length, _ = x.size() + # Concat columns of pad to shift from relative to absolute indexing. + x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, + 1]])) + + # Concat extra elements so to add up to shape (len+1, 2*len-1). + x_flat = x.view([batch, heads, length * 2 * length]) + x_flat = F.pad( + x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, + length - 1]])) + + # Reshape and slice out the padded elements. + x_final = x_flat.view([batch, heads, length + 1, + 2 * length - 1])[:, :, :length, length - 1:] + return x_final + + def _absolute_position_to_relative_position(self, x): + """ + x: [b, h, l, l] + ret: [b, h, l, 2*l-1] + """ + batch, heads, length, _ = x.size() + # padd along column + x = F.pad( + x, + commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, + length - 1]])) + x_flat = x.view([batch, heads, length**2 + length * (length - 1)]) + # add 0's in the beginning that will skew the elements after reshape + x_flat = F.pad( + x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) + x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] + return x_final + + def _attention_bias_proximal(self, length): + """Bias for self-attention to encourage attention to close positions. + Args: + length: an integer scalar. 
+ Returns: + a Tensor with shape [1, 1, length, length] + """ + r = torch.arange(length, dtype=torch.float32) + diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) + return torch.unsqueeze( + torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) + + +class FFN(nn.Module): + def __init__(self, + in_channels, + out_channels, + filter_channels, + kernel_size, + p_dropout=0., + activation=None, + causal=False): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.filter_channels = filter_channels + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.activation = activation + self.causal = causal + + if causal: + self.padding = self._causal_padding + else: + self.padding = self._same_padding + + self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) + self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) + self.drop = nn.Dropout(p_dropout) + + def forward(self, x, x_mask): + x = self.conv_1(self.padding(x * x_mask)) + if self.activation == "gelu": + x = x * torch.sigmoid(1.702 * x) + else: + x = torch.relu(x) + x = self.drop(x) + x = self.conv_2(self.padding(x * x_mask)) + return x * x_mask + + def _causal_padding(self, x): + if self.kernel_size == 1: + return x + pad_l = self.kernel_size - 1 + pad_r = 0 + padding = [[0, 0], [0, 0], [pad_l, pad_r]] + x = F.pad(x, commons.convert_pad_shape(padding)) + return x + + def _same_padding(self, x): + if self.kernel_size == 1: + return x + pad_l = (self.kernel_size - 1) // 2 + pad_r = self.kernel_size // 2 + padding = [[0, 0], [0, 0], [pad_l, pad_r]] + x = F.pad(x, commons.convert_pad_shape(padding)) + return x diff --git a/checkpoints/Default/config.json b/checkpoints/Default/config.json new file mode 100644 index 0000000000000000000000000000000000000000..1087a33c6ec52fe0eb883c0f352e72af20babc15 --- /dev/null +++ b/checkpoints/Default/config.json @@ -0,0 +1,35 @@ +{ + "train": { + "segment_size": 8192 + }, + "data": { + "text_cleaners":["zh_ja_mixture_cleaners"], + "max_wav_value": 32768.0, + "sampling_rate": 22050, + "filter_length": 1024, + "hop_length": 256, + "win_length": 1024, + "add_blank": true, + "n_speakers": 5 + }, + "model": { + "inter_channels": 192, + "hidden_channels": 192, + "filter_channels": 768, + "n_heads": 2, + "n_layers": 6, + "kernel_size": 3, + "p_dropout": 0.1, + "resblock": "1", + "resblock_kernel_sizes": [3,7,11], + "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], + "upsample_rates": [8,8,2,2], + "upsample_initial_channel": 512, + "upsample_kernel_sizes": [16,16,4,4], + "n_layers_q": 3, + "use_spectral_norm": false, + "gin_channels": 256 + }, + "speakers": ["\u7dbe\u5730\u5be7\u3005", "\u5728\u539f\u4e03\u6d77", "\u5c0f\u8338", "\u5510\u4e50\u541f"], + "symbols": ["_", ",", ".", "!", "?", "-", "~", "\u2026", "A", "E", "I", "N", "O", "Q", "U", "a", "b", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "r", "s", "t", "u", "v", "w", "y", "z", "\u0283", "\u02a7", "\u02a6", "\u026f", "\u0279", "\u0259", "\u0265", "\u207c", "\u02b0", "`", "\u2192", "\u2193", "\u2191", " "] +} diff --git a/checkpoints/Default/model.onnx b/checkpoints/Default/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..90e50ed41ab7b856959257d0254a3c0bf66e76cd --- /dev/null +++ b/checkpoints/Default/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ac210365de160dd5db134f9333525e4ff38426a6a24fcb73e24375b09bef15e +size 121090654 diff --git a/checkpoints/info.json b/checkpoints/info.json new file 
mode 100644 index 0000000000000000000000000000000000000000..96e6c4c5c32c0c259d42b36a9d503aed9fcb3542 --- /dev/null +++ b/checkpoints/info.json @@ -0,0 +1,221 @@ +{ + "Nijigasaki High School":{ + "speakers":{ + "高咲侑":{ + "sid": 0, + "setting": "只选一个做不到啊", + "name": "高咲侑" + }, + "歩夢":{ + "sid": 1, + "setting": "只选一个做不到啊", + "name": "歩夢" + }, + "かすみ":{ + "sid": 2, + "setting": "只选一个做不到啊", + "name": "かすみ" + }, + "しずく":{ + "sid": 3, + "setting": "只选一个做不到啊", + "name": "しずく" + }, + "果林":{ + "sid": 4, + "setting": "只选一个做不到啊", + "name": "果林" + }, + "愛":{ + "sid": 5, + "setting": "只选一个做不到啊", + "name": "愛" + }, + "せつ菜":{ + "sid": 7, + "setting": "只选一个做不到啊", + "name": "せつ菜" + }, + "エマ":{ + "sid": 8, + "setting": "只选一个做不到啊", + "name": "エマ" + }, + "璃奈":{ + "sid": 9, + "setting": "只选一个做不到啊", + "name": "璃奈" + }, + "栞子":{ + "sid": 10, + "setting": "只选一个做不到啊", + "name": "栞子" + }, + "ランジュ":{ + "sid": 11, + "setting": "只选一个做不到啊", + "name": "ランジュ" + }, + "ミア":{ + "sid": 12, + "setting": "只选一个做不到啊", + "name": "ミア" + } + }, + "checkpoint": "checkpoints/Nijigasaki/model.onnx" + }, + "Seisho Music Academy":{ + "speakers":{ + "華恋":{ + "sid": 21, + "setting": "只选一个做不到啊", + "name": "華恋" + }, + "まひる":{ + "sid": 22, + "setting": "只选一个做不到啊", + "name": "まひる" + }, + "なな":{ + "sid": 23, + "setting": "只选一个做不到啊", + "name": "なな" + }, + "クロディーヌ":{ + "sid": 24, + "setting": "只选一个做不到啊", + "name": "クロディーヌ" + }, + "ひかり":{ + "sid": 25, + "setting": "只选一个做不到啊", + "name": "ひかり" + }, + "純那":{ + "sid": 26, + "setting": "只选一个做不到啊", + "name": "純那" + }, + "香子":{ + "sid": 27, + "setting": "只选一个做不到啊", + "name": "香子" + }, + "真矢":{ + "sid": 28, + "setting": "只选一个做不到啊", + "name": "真矢" + }, + "双葉":{ + "sid": 29, + "setting": "只选一个做不到啊", + "name": "双葉" + } + }, + "checkpoint": "checkpoints/Starlight/model.onnx" + }, + "Rinmeikan Girls School":{ + "speakers":{ + "珠緒":{ + "sid": 37, + "setting": "只选一个做不到啊", + "name": "珠緒" + }, + "塁":{ + "sid": 36, + "setting": "只选一个做不到啊", + "name": "塁" + }, + "ゆゆ子":{ + "sid": 35, + "setting": "只选一个做不到啊", + "name": "ゆゆ子" + }, + "いちえ":{ + "sid": 34, + "setting": "只选一个做不到啊", + "name": "いちえ" + } + }, + "checkpoint": "checkpoints/Starlight/model.onnx" + + }, + "Frontier School of Arts":{ + "speakers":{ + "あるる":{ + "sid": 38, + "setting": "只选一个做不到啊", + "name": "あるる" + }, + "ララフィン":{ + "sid": 39, + "setting": "只选一个做不到啊", + "name": "ララフィン" + }, + "美空":{ + "sid": 40, + "setting": "只选一个做不到啊", + "name": "美空" + }, + "静羽":{ + "sid": 41, + "setting": "只选一个做不到啊", + "name": "静羽" + } + }, + "checkpoint": "checkpoints/Nijigasaki/model.onnx" + + }, + "Siegfeld Institute of Music":{ + "speakers":{ + "ミチル":{ + "sid": 30, + "setting": "只选一个做不到啊", + "name": "ミチル" + }, + "メイファン":{ + "sid": 31, + "setting": "只选一个做不到啊", + "name": "メイファン" + }, + "やちよ":{ + "sid": 32, + "setting": "只选一个做不到啊", + "name": "やちよ" + }, + "晶":{ + "sid": 33, + "setting": "只选一个做不到啊", + "name": "晶" + } + }, + "checkpoint": "checkpoints/Starlight/model.onnx" + + }, + "Youzusoft":{ + "speakers":{ + "宁宁":{ + "sid": 0, + "setting": "只选一个做不到啊", + "name": "宁宁" + }, + "在原七海":{ + "sid": 1, + "setting": "只选一个做不到啊", + "name": "在原七海" + }, + "小茸":{ + "sid": 2, + "setting": "只选一个做不到啊", + "name": "小茸" + }, + "唐乐吟":{ + "sid": 3, + "setting": "只选一个做不到啊", + "name": "唐乐吟" + } + }, + "checkpoint": "checkpoints/Default/model.onnx" + + } +} \ No newline at end of file diff --git a/cleaners/JapaneseCleaner.dll b/cleaners/JapaneseCleaner.dll new file mode 100644 index 0000000000000000000000000000000000000000..42ac45fb90957d960232fdfdb4878fd9cf9bc7ba --- /dev/null +++ 
b/cleaners/JapaneseCleaner.dll @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a659eb68d12d4a88ef7dfde6086b9974cd4d43634f7e4bfe710d5537cdd61a75 +size 3097600 diff --git a/cleaners/char.bin b/cleaners/char.bin new file mode 100644 index 0000000000000000000000000000000000000000..9f6369901c7383d9038c9c49939279e47f6a4db9 --- /dev/null +++ b/cleaners/char.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:888ee94c5a8a7a26d24ab3f1b7155441351954fd51ea06b4a2f78bd742492b2f +size 262496 diff --git a/cleaners/matrix.bin b/cleaners/matrix.bin new file mode 100644 index 0000000000000000000000000000000000000000..bf7a464c5c6768b506986b1f01f8c82bb707b2b8 --- /dev/null +++ b/cleaners/matrix.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:62fd16b4f64c851d5dc352ef0d5740c5fc83ddc7c203b2b0b1fc5271969a14ce +size 3792262 diff --git a/cleaners/sys.dic b/cleaners/sys.dic new file mode 100644 index 0000000000000000000000000000000000000000..700526dd4ccbd412d38d057307f5980d1be5a422 --- /dev/null +++ b/cleaners/sys.dic @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ca57d9029691a70a5dfb99afc2844180256161d7130da65b1a867510e129b9a6 +size 103073776 diff --git a/cleaners/unk.dic b/cleaners/unk.dic new file mode 100644 index 0000000000000000000000000000000000000000..2635af4613f6c6012259743bad97594ab0c85bc0 Binary files /dev/null and b/cleaners/unk.dic differ diff --git a/commons.py b/commons.py new file mode 100644 index 0000000000000000000000000000000000000000..33ec83a7986a12b237d28d5e610222881d6b42ae --- /dev/null +++ b/commons.py @@ -0,0 +1,161 @@ +import math + +import torch +from torch.nn import functional as F + + +def init_weights(m, mean=0.0, std=0.01): + classname = m.__class__.__name__ + if classname.find("Conv") != -1: + m.weight.data.normal_(mean, std) + + +def get_padding(kernel_size, dilation=1): + return int((kernel_size * dilation - dilation) / 2) + + +def convert_pad_shape(pad_shape): + pad_shape = [item for sublist in reversed(pad_shape) for item in sublist] + return pad_shape + + +def intersperse(lst, item): + result = [item] * (len(lst) * 2 + 1) + result[1::2] = lst + return result + + +def kl_divergence(m_p, logs_p, m_q, logs_q): + """KL(P||Q)""" + kl = (logs_q - logs_p) - 0.5 + kl += 0.5 * (torch.exp(2. * logs_p) + + ((m_p - m_q)**2)) * torch.exp(-2. 
* logs_q) + return kl + + +def rand_gumbel(shape): + """Sample from the Gumbel distribution, protect from overflows.""" + uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 + return -torch.log(-torch.log(uniform_samples)) + + +def rand_gumbel_like(x): + g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) + return g + + +def slice_segments(x, ids_str, segment_size=4): + ret = torch.zeros_like(x[:, :, :segment_size]) + for i in range(x.size(0)): + idx_str = ids_str[i] + idx_end = idx_str + segment_size + ret[i] = x[i, :, idx_str:idx_end] + return ret + + +def rand_slice_segments(x, x_lengths=None, segment_size=4): + b, d, t = x.size() + if x_lengths is None: + x_lengths = t + ids_str_max = x_lengths - segment_size + 1 + ids_str = (torch.rand([b]).to(device=x.device) * + ids_str_max).to(dtype=torch.long) + ret = slice_segments(x, ids_str, segment_size) + return ret, ids_str + + +def get_timing_signal_1d(length, + channels, + min_timescale=1.0, + max_timescale=1.0e4): + position = torch.arange(length, dtype=torch.float) + num_timescales = channels // 2 + log_timescale_increment = ( + math.log(float(max_timescale) / float(min_timescale)) / + (num_timescales - 1)) + inv_timescales = min_timescale * torch.exp( + torch.arange(num_timescales, dtype=torch.float) * + -log_timescale_increment) + scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) + signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) + signal = F.pad(signal, [0, 0, 0, channels % 2]) + signal = signal.view(1, channels, length) + return signal + + +def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): + b, channels, length = x.size() + signal = get_timing_signal_1d(length, channels, min_timescale, + max_timescale) + return x + signal.to(dtype=x.dtype, device=x.device) + + +def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): + b, channels, length = x.size() + signal = get_timing_signal_1d(length, channels, min_timescale, + max_timescale) + return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) + + +def subsequent_mask(length): + mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) + return mask + + +@torch.jit.script +def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): + n_channels_int = n_channels[0] + in_act = input_a + input_b + t_act = torch.tanh(in_act[:, :n_channels_int, :]) + s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) + acts = t_act * s_act + return acts + + +def shift_1d(x): + x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] + return x + + +def sequence_mask(length, max_length=None): + if max_length is None: + max_length = length.max() + x = torch.arange(max_length, dtype=length.dtype, device=length.device) + return x.unsqueeze(0) < length.unsqueeze(1) + + +def generate_path(duration, mask): + """ + duration: [b, 1, t_x] + mask: [b, 1, t_y, t_x] + """ + device = duration.device + + b, _, t_y, t_x = mask.shape + cum_duration = torch.cumsum(duration, -1) + + cum_duration_flat = cum_duration.view(b * t_x) + path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) + path = path.view(b, t_x, t_y) + path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0] + ]))[:, :-1] + path = path.unsqueeze(1).transpose(2, 3) * mask + return path + + +def clip_grad_value_(parameters, clip_value, norm_type=2): + if isinstance(parameters, torch.Tensor): + parameters = [parameters] + parameters = list(filter(lambda p: p.grad is not None, parameters)) + norm_type = 
float(norm_type) + if clip_value is not None: + clip_value = float(clip_value) + + total_norm = 0 + for p in parameters: + param_norm = p.grad.data.norm(norm_type) + total_norm += param_norm.item()**norm_type + if clip_value is not None: + p.grad.data.clamp_(min=-clip_value, max=clip_value) + total_norm = total_norm**(1. / norm_type) + return total_norm diff --git a/data_utils.py b/data_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..1d10e95398af4e76ae61de15fca8e0e80d85282d --- /dev/null +++ b/data_utils.py @@ -0,0 +1,307 @@ +import os +import random + +import torch +import torchaudio +import torch.utils.data + +import commons +from mel_processing import spectrogram_torch +from utils import load_filepaths_and_text + + +class TextAudioSpeakerLoader(torch.utils.data.Dataset): + """ + 1) loads audio, speaker_id, text pairs + 2) normalizes text and converts them to sequences of integers + 3) computes spectrograms from audio files. + """ + def __init__(self, audiopaths_sid_text, hparams): + self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text) + # self.text_cleaners = hparams.text_cleaners + self.max_wav_value = hparams.max_wav_value + self.sampling_rate = hparams.sampling_rate + self.filter_length = hparams.filter_length + self.hop_length = hparams.hop_length + self.win_length = hparams.win_length + self.sampling_rate = hparams.sampling_rate + self.src_sampling_rate = getattr(hparams, "src_sampling_rate", + self.sampling_rate) + + self.cleaned_text = getattr(hparams, "cleaned_text", False) + + self.add_blank = hparams.add_blank + self.min_text_len = getattr(hparams, "min_text_len", 1) + self.max_text_len = getattr(hparams, "max_text_len", 190) + + phone_file = getattr(hparams, "phone_table", None) + self.phone_dict = None + if phone_file is not None: + self.phone_dict = {} + with open(phone_file) as fin: + for line in fin: + arr = line.strip().split() + self.phone_dict[arr[0]] = int(arr[1]) + + speaker_file = getattr(hparams, "speaker_table", None) + self.speaker_dict = None + if speaker_file is not None: + self.speaker_dict = {} + with open(speaker_file) as fin: + for line in fin: + arr = line.strip().split() + self.speaker_dict[arr[0]] = int(arr[1]) + + random.seed(1234) + random.shuffle(self.audiopaths_sid_text) + self._filter() + + def _filter(self): + """ + Filter text & store spec lengths + """ + # Store spectrogram lengths for Bucketing + # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2) + # spec_length = wav_length // hop_length + + audiopaths_sid_text_new = [] + lengths = [] + for item in self.audiopaths_sid_text: + audiopath = item[0] + # filename|text or filename|speaker|text + text = item[1] if len(item) == 2 else item[2] + if self.min_text_len <= len(text) and len( + text) <= self.max_text_len: + audiopaths_sid_text_new.append(item) + lengths.append( + int( + os.path.getsize(audiopath) * self.sampling_rate / + self.src_sampling_rate) // (2 * self.hop_length)) + self.audiopaths_sid_text = audiopaths_sid_text_new + self.lengths = lengths + + def get_audio_text_speaker_pair(self, audiopath_sid_text): + audiopath = audiopath_sid_text[0] + if len(audiopath_sid_text) == 2: # filename|text + sid = 0 + text = audiopath_sid_text[1] + else: # filename|speaker|text + sid = self.speaker_dict[audiopath_sid_text[1]] + text = audiopath_sid_text[2] + text = self.get_text(text) + spec, wav = self.get_audio(audiopath) + sid = self.get_sid(sid) + return (text, spec, wav, sid) + + def get_audio(self, filename): + audio, 
sampling_rate = torchaudio.load(filename, normalize=False) + if sampling_rate != self.sampling_rate: + audio = audio.to(torch.float) + audio = torchaudio.transforms.Resample(sampling_rate, + self.sampling_rate)(audio) + audio = audio.to(torch.int16) + audio = audio[0] # Get the first channel + audio_norm = audio / self.max_wav_value + audio_norm = audio_norm.unsqueeze(0) + spec = spectrogram_torch(audio_norm, + self.filter_length, + self.sampling_rate, + self.hop_length, + self.win_length, + center=False) + spec = torch.squeeze(spec, 0) + return spec, audio_norm + + def get_text(self, text): + text_norm = [self.phone_dict[phone] for phone in text.split()] + if self.add_blank: + text_norm = commons.intersperse(text_norm, 0) + text_norm = torch.LongTensor(text_norm) + return text_norm + + def get_sid(self, sid): + sid = torch.LongTensor([int(sid)]) + return sid + + def __getitem__(self, index): + return self.get_audio_text_speaker_pair( + self.audiopaths_sid_text[index]) + + def __len__(self): + return len(self.audiopaths_sid_text) + + +class TextAudioSpeakerCollate(): + """ Zero-pads model inputs and targets + """ + def __init__(self, return_ids=False): + self.return_ids = return_ids + + def __call__(self, batch): + """Collate's training batch from normalized text, audio and speaker identities + PARAMS + ------ + batch: [text_normalized, spec_normalized, wav_normalized, sid] + """ + # Right zero-pad all one-hot text sequences to max input length + _, ids_sorted_decreasing = torch.sort(torch.LongTensor( + [x[1].size(1) for x in batch]), + dim=0, + descending=True) + + max_text_len = max([len(x[0]) for x in batch]) + max_spec_len = max([x[1].size(1) for x in batch]) + max_wav_len = max([x[2].size(1) for x in batch]) + + text_lengths = torch.LongTensor(len(batch)) + spec_lengths = torch.LongTensor(len(batch)) + wav_lengths = torch.LongTensor(len(batch)) + sid = torch.LongTensor(len(batch)) + + text_padded = torch.LongTensor(len(batch), max_text_len) + spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), + max_spec_len) + wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len) + text_padded.zero_() + spec_padded.zero_() + wav_padded.zero_() + for i in range(len(ids_sorted_decreasing)): + row = batch[ids_sorted_decreasing[i]] + + text = row[0] + text_padded[i, :text.size(0)] = text + text_lengths[i] = text.size(0) + + spec = row[1] + spec_padded[i, :, :spec.size(1)] = spec + spec_lengths[i] = spec.size(1) + + wav = row[2] + wav_padded[i, :, :wav.size(1)] = wav + wav_lengths[i] = wav.size(1) + + sid[i] = row[3] + + if self.return_ids: + return (text_padded, text_lengths, spec_padded, spec_lengths, + wav_padded, wav_lengths, sid, ids_sorted_decreasing) + return (text_padded, text_lengths, spec_padded, spec_lengths, + wav_padded, wav_lengths, sid) + + +class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler + ): + """ + Maintain similar input lengths in a batch. + Length groups are specified by boundaries. + Ex) boundaries = [b1, b2, b3] -> any batch is included either + {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}. + + It removes samples which are not included in the boundaries. + Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 + or length(x) > b3 are discarded. 
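      Ex) (illustrative values) boundaries = [32, 300, 400], spec lengths = [100, 350, 500]
          -> 100 falls in bucket (32, 300], 350 in (300, 400],
             and 500 is discarded because it exceeds 400.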
+ """ + def __init__(self, + dataset, + batch_size, + boundaries, + num_replicas=None, + rank=None, + shuffle=True): + super().__init__(dataset, + num_replicas=num_replicas, + rank=rank, + shuffle=shuffle) + self.lengths = dataset.lengths + self.batch_size = batch_size + self.boundaries = boundaries + + self.buckets, self.num_samples_per_bucket = self._create_buckets() + self.total_size = sum(self.num_samples_per_bucket) + self.num_samples = self.total_size // self.num_replicas + + def _create_buckets(self): + buckets = [[] for _ in range(len(self.boundaries) - 1)] + for i in range(len(self.lengths)): + length = self.lengths[i] + idx_bucket = self._bisect(length) + if idx_bucket != -1: + buckets[idx_bucket].append(i) + + for i in range(len(buckets) - 1, 0, -1): + if len(buckets[i]) == 0: + buckets.pop(i) + self.boundaries.pop(i + 1) + + num_samples_per_bucket = [] + for i in range(len(buckets)): + len_bucket = len(buckets[i]) + total_batch_size = self.num_replicas * self.batch_size + rem = (total_batch_size - + (len_bucket % total_batch_size)) % total_batch_size + num_samples_per_bucket.append(len_bucket + rem) + return buckets, num_samples_per_bucket + + def __iter__(self): + # deterministically shuffle based on epoch + g = torch.Generator() + g.manual_seed(self.epoch) + + indices = [] + if self.shuffle: + for bucket in self.buckets: + indices.append( + torch.randperm(len(bucket), generator=g).tolist()) + else: + for bucket in self.buckets: + indices.append(list(range(len(bucket)))) + + batches = [] + for i in range(len(self.buckets)): + bucket = self.buckets[i] + len_bucket = len(bucket) + ids_bucket = indices[i] + num_samples_bucket = self.num_samples_per_bucket[i] + + # add extra samples to make it evenly divisible + rem = num_samples_bucket - len_bucket + ids_bucket = ids_bucket + ids_bucket * ( + rem // len_bucket) + ids_bucket[:(rem % len_bucket)] + + # subsample + ids_bucket = ids_bucket[self.rank::self.num_replicas] + + # batching + for j in range(len(ids_bucket) // self.batch_size): + batch = [ + bucket[idx] + for idx in ids_bucket[j * self.batch_size:(j + 1) * + self.batch_size] + ] + batches.append(batch) + + if self.shuffle: + batch_ids = torch.randperm(len(batches), generator=g).tolist() + batches = [batches[i] for i in batch_ids] + self.batches = batches + + assert len(self.batches) * self.batch_size == self.num_samples + return iter(self.batches) + + def _bisect(self, x, lo=0, hi=None): + if hi is None: + hi = len(self.boundaries) - 1 + + if hi > lo: + mid = (hi + lo) // 2 + if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]: + return mid + elif x <= self.boundaries[mid]: + return self._bisect(x, lo, mid) + else: + return self._bisect(x, mid + 1, hi) + else: + return -1 + + def __len__(self): + return self.num_samples // self.batch_size diff --git a/export_onnx.py b/export_onnx.py new file mode 100644 index 0000000000000000000000000000000000000000..1834d24bc581ee43af038c2f56668d7a8bb1e425 --- /dev/null +++ b/export_onnx.py @@ -0,0 +1,140 @@ +# Copyright (c) 2022, Yongqiang Li (yongqiangli@alumni.hust.edu.cn) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import json +import os +import sys + +import torch + +from models import SynthesizerTrn +import utils + +try: + import onnxruntime as ort +except ImportError: + print('Please install onnxruntime!') + sys.exit(1) + + +def to_numpy(tensor): + return tensor.detach().cpu().numpy() if tensor.requires_grad \ + else tensor.detach().numpy() + + +def get_args(): + parser = argparse.ArgumentParser(description='export onnx model') + parser.add_argument('--checkpoint', required=True, help='checkpoint') + parser.add_argument('--cfg', required=True, help='config file') + parser.add_argument('--onnx_model', required=True, help='onnx model name') + # parser.add_argument('--phone_table', + # required=True, + # help='input phone dict') + # parser.add_argument('--speaker_table', default=None, help='speaker table') + # parser.add_argument("--speaker_num", required=True, + # type=int, help="speaker num") + parser.add_argument( + '--providers', + required=False, + default='CPUExecutionProvider', + choices=['CUDAExecutionProvider', 'CPUExecutionProvider'], + help='the model to send request to') + args = parser.parse_args() + return args + + +def get_data_from_cfg(cfg_path: str): + assert os.path.isfile(cfg_path) + with open(cfg_path, 'r') as f: + data = json.load(f) + symbols = data["symbols"] + speaker_num = data["data"]["n_speakers"] + return len(symbols), speaker_num + + +def main(): + args = get_args() + os.environ['CUDA_VISIBLE_DEVICES'] = '0' + + hps = utils.get_hparams_from_file(args.cfg) + # with open(args.phone_table) as p_f: + # phone_num = len(p_f.readlines()) + 1 + # num_speakers = 1 + # if args.speaker_table is not None: + # num_speakers = len(open(args.speaker_table).readlines()) + 1 + phone_num, num_speakers = get_data_from_cfg(args.cfg) + net_g = SynthesizerTrn(phone_num, + hps.data.filter_length // 2 + 1, + hps.train.segment_size // hps.data.hop_length, + n_speakers=num_speakers, + **hps.model) + utils.load_checkpoint(args.checkpoint, net_g, None) + net_g.forward = net_g.export_forward + net_g.eval() + + seq = torch.randint(low=0, high=phone_num, size=(1, 10), dtype=torch.long) + seq_len = torch.IntTensor([seq.size(1)]).long() + + # noise(可用于控制感情等变化程度) lenth(可用于控制整体语速) noisew(控制音素发音长度变化程度) + # 参考 https://github.com/gbxh/genshinTTS + scales = torch.FloatTensor([0.667, 1.0, 0.8]) + # make triton dynamic shape happy + scales = scales.unsqueeze(0) + sid = torch.IntTensor([0]).long() + + dummy_input = (seq, seq_len, scales, sid) + torch.onnx.export(model=net_g, + args=dummy_input, + f=args.onnx_model, + input_names=['input', 'input_lengths', 'scales', 'sid'], + output_names=['output'], + dynamic_axes={ + 'input': { + 0: 'batch', + 1: 'phonemes' + }, + 'input_lengths': { + 0: 'batch' + }, + 'scales': { + 0: 'batch' + }, + 'sid': { + 0: 'batch' + }, + 'output': { + 0: 'batch', + 1: 'audio', + 2: 'audio_length' + } + }, + opset_version=13, + verbose=False) + + # Verify onnx precision + torch_output = net_g(seq, seq_len, scales, sid) + providers = [args.providers] + ort_sess = ort.InferenceSession(args.onnx_model, providers=providers) + ort_inputs = { + 'input': to_numpy(seq), + 'input_lengths': to_numpy(seq_len), + 'scales': to_numpy(scales), + 'sid': to_numpy(sid), + } + onnx_output = ort_sess.run(None, ort_inputs) + + +if __name__ == '__main__': + main() diff --git a/losses.py b/losses.py new file mode 100644 index 
0000000000000000000000000000000000000000..f835539a16b49e1065fef4e4a1efb259b88dcf64 --- /dev/null +++ b/losses.py @@ -0,0 +1,58 @@ +import torch + + +def feature_loss(fmap_r, fmap_g): + loss = 0 + for dr, dg in zip(fmap_r, fmap_g): + for rl, gl in zip(dr, dg): + rl = rl.float().detach() + gl = gl.float() + loss += torch.mean(torch.abs(rl - gl)) + + return loss * 2 + + +def discriminator_loss(disc_real_outputs, disc_generated_outputs): + loss = 0 + r_losses = [] + g_losses = [] + for dr, dg in zip(disc_real_outputs, disc_generated_outputs): + dr = dr.float() + dg = dg.float() + r_loss = torch.mean((1 - dr)**2) + g_loss = torch.mean(dg**2) + loss += (r_loss + g_loss) + r_losses.append(r_loss.item()) + g_losses.append(g_loss.item()) + + return loss, r_losses, g_losses + + +def generator_loss(disc_outputs): + loss = 0 + gen_losses = [] + for dg in disc_outputs: + dg = dg.float() + l = torch.mean((1 - dg)**2) + gen_losses.append(l) + loss += l + + return loss, gen_losses + + +def kl_loss(z_p, logs_q, m_p, logs_p, z_mask): + """ + z_p, logs_q: [b, h, t_t] + m_p, logs_p: [b, h, t_t] + """ + z_p = z_p.float() + logs_q = logs_q.float() + m_p = m_p.float() + logs_p = logs_p.float() + z_mask = z_mask.float() + + kl = logs_p - logs_q - 0.5 + kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. * logs_p) + kl = torch.sum(kl * z_mask) + l = kl / torch.sum(z_mask) + return l diff --git a/main.py b/main.py new file mode 100644 index 0000000000000000000000000000000000000000..54d633252ac3087ff8bfa4753af5d4997acd8e5f --- /dev/null +++ b/main.py @@ -0,0 +1,255 @@ +import logging +logging.getLogger('numba').setLevel(logging.WARNING) +logging.getLogger('matplotlib').setLevel(logging.WARNING) +logging.getLogger('urllib3').setLevel(logging.WARNING) +opj = input("是否启用pyopenjtalk?封装版无法保证非japanese cleaners推理日语时的质量(Y/N)") +if opj == "N": + from TEXTS import text_to_sequence +else: + from text import text_to_sequence +import numpy as np +from scipy.io import wavfile +import torch +import json +import commons +import utils +import sys +import pathlib +import onnxruntime as ort +import gradio as gr +import argparse +import time +import os +import io +from scipy.io.wavfile import write +from flask import Flask, request +from threading import Thread +import openai +import requests +class VitsGradio: + def __init__(self): + self.lan = ["中文","日文","自动"] + self.chatapi = ["gpt-3.5-turbo","gpt3"] + self.modelPaths = [] + for root,dirs,files in os.walk("checkpoints"): + for dir in dirs: + self.modelPaths.append(dir) + with gr.Blocks() as self.Vits: + with gr.Tab("调试用"): + with gr.Row(): + with gr.Column(): + with gr.Row(): + with gr.Column(): + self.text = gr.TextArea(label="Text", value="你好") + with gr.Accordion(label="测试api", open=False): + self.local_chat1 = gr.Checkbox(value=False, label="使用网址+文本进行模拟") + self.url_input = gr.TextArea(label="键入测试", value="http://127.0.0.1:8080/chat?Text=") + butto = gr.Button("测试从网页端获取文本") + btnVC = gr.Button("测试tts+对话程序") + with gr.Column(): + output2 = gr.TextArea(label="回复") + output1 = gr.Audio(label="采样率22050") + output3 = gr.outputs.File(label="44100hz: output.wav") + butto.click(self.Simul, inputs=[self.text, self.url_input], outputs=[output2,output3]) + btnVC.click(self.tts_fn, inputs=[self.text], outputs=[output1,output2]) + with gr.Tab("控制面板"): + with gr.Row(): + with gr.Column(): + with gr.Row(): + with gr.Column(): + self.api_input1 = gr.TextArea(label="输入api-key或本地存储说话模型的路径", value="https://platform.openai.com/account/api-keys") + with gr.Accordion(label="chatbot选择", open=False): + 
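                            # check_bot() reads these controls: with 启动本地chatbot ticked it loads a local
                            # ChatGLM-style checkpoint from the path above via transformers (4-bit quantized on
                            # CUDA when 是否量化 is ticked); otherwise the text above is stored as the OpenAI api-key.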
self.api_input2 = gr.Checkbox(value=True, label="采用gpt3.5") + self.local_chat1 = gr.Checkbox(value=False, label="启动本地chatbot") + self.local_chat2 = gr.Checkbox(value=True, label="是否量化") + res = gr.TextArea() + Botselection = gr.Button("确认模型") + Botselection.click(self.check_bot, inputs=[self.api_input1,self.api_input2,self.local_chat1,self.local_chat2], outputs = [res]) + self.input1 = gr.Dropdown(label = "模型", choices = self.modelPaths, value = self.modelPaths[0], type = "value") + self.input2 = gr.Dropdown(label="Language", choices=self.lan, value="自动", interactive=True) + with gr.Column(): + btnVC = gr.Button("Submit") + self.input3 = gr.Dropdown(label="Speaker", choices=list(range(101)), value=0, interactive=True) + self.input4 = gr.Slider(minimum=0, maximum=1.0, label="更改噪声比例(noise scale),以控制情感", value=0.267) + self.input5 = gr.Slider(minimum=0, maximum=1.0, label="更改噪声偏差(noise scale w),以控制音素长短", value=0.7) + self.input6 = gr.Slider(minimum=0.1, maximum=10, label="duration", value=1) + statusa = gr.TextArea() + btnVC.click(self.create_tts_fn, inputs=[self.input1, self.input2, self.input3, self.input4, self.input5, self.input6], outputs = [statusa]) + + def Simul(self,text,url_input): + web = url_input + text + res = requests.get(web) + music = res.content + with open('output.wav', 'wb') as code: + code.write(music) + file_path = "output.wav" + return web,file_path + + + def chatgpt(self,text): + self.messages.append({"role": "user", "content": text},) + chat = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages= self.messages) + reply = chat.choices[0].message.content + return reply + + def ChATGLM(self,text): + if text == 'clear': + self.history = [] + response, new_history = self.model.chat(self.tokenizer, text, self.history) + response = response.replace(" ",'').replace("\n",'.') + self.history = new_history + return response + + def gpt3_chat(self,text): + call_name = "Waifu" + openai.api_key = args.key + identity = "" + start_sequence = '\n'+str(call_name)+':' + restart_sequence = "\nYou: " + if 1 == 1: + prompt0 = text #当期prompt + if text == 'quit': + return prompt0 + prompt = identity + prompt0 + start_sequence + response = openai.Completion.create( + model="text-davinci-003", + prompt=prompt, + temperature=0.5, + max_tokens=1000, + top_p=1.0, + frequency_penalty=0.5, + presence_penalty=0.0, + stop=["\nYou:"] + ) + return response['choices'][0]['text'].strip() + + def check_bot(self,api_input1,api_input2,local_chat1,local_chat2): + if local_chat1: + from transformers import AutoTokenizer, AutoModel + self.tokenizer = AutoTokenizer.from_pretrained(api_input1, trust_remote_code=True) + if local_chat2: + self.model = AutoModel.from_pretrained(api_input1, trust_remote_code=True).half().quantize(4).cuda() + else: + self.model = AutoModel.from_pretrained(api_input1, trust_remote_code=True) + self.history = [] + else: + self.messages = [] + openai.api_key = api_input1 + return "Finished" + + def is_japanese(self,string): + for ch in string: + if ord(ch) > 0x3040 and ord(ch) < 0x30FF: + return True + return False + + def is_english(self,string): + import re + pattern = re.compile('^[A-Za-z0-9.,:;!?()_*"\' ]+$') + if pattern.fullmatch(string): + return True + else: + return False + + def get_symbols_from_json(self,path): + assert os.path.isfile(path) + with open(path, 'r') as f: + data = json.load(f) + return data['symbols'] + + def sle(self,language,text): + text = text.replace('\n','。').replace(' ',',') + if language == "中文": + tts_input1 = "[ZH]" + text + "[ZH]" + return 
tts_input1 + elif language == "自动": + tts_input1 = f"[JA]{text}[JA]" if self.is_japanese(text) else f"[ZH]{text}[ZH]" + return tts_input1 + elif language == "日文": + tts_input1 = "[JA]" + text + "[JA]" + return tts_input1 + + def get_text(self,text,hps_ms): + text_norm = text_to_sequence(text,hps_ms.data.text_cleaners) + if hps_ms.data.add_blank: + text_norm = commons.intersperse(text_norm, 0) + text_norm = torch.LongTensor(text_norm) + return text_norm + + def create_tts_fn(self,path, input2, input3, n_scale= 0.667,n_scale_w = 0.8, l_scale = 1 ): + self.symbols = self.get_symbols_from_json(f"checkpoints/{path}/config.json") + self.hps = utils.get_hparams_from_file(f"checkpoints/{path}/config.json") + phone_dict = { + symbol: i for i, symbol in enumerate(self.symbols) + } + self.ort_sess = ort.InferenceSession(f"checkpoints/{path}/model.onnx") + self.language = input2 + self.speaker_id = input3 + self.n_scale = n_scale + self.n_scale_w = n_scale_w + self.l_scale = l_scale + print(self.language,self.speaker_id,self.n_scale) + return 'success' + + def tts_fn(self,text): + if self.local_chat1: + text = self.chatgpt(text) + elif self.api_input2: + text = self.ChATGLM(text) + else: + text = self.gpt3_chat(text) + print(text) + text =self.sle(self.language,text) + seq = text_to_sequence(text, cleaner_names=self.hps.data.text_cleaners) + if self.hps.data.add_blank: + seq = commons.intersperse(seq, 0) + with torch.no_grad(): + x = np.array([seq], dtype=np.int64) + x_len = np.array([x.shape[1]], dtype=np.int64) + sid = np.array([self.speaker_id], dtype=np.int64) + scales = np.array([self.n_scale, self.n_scale_w, self.l_scale], dtype=np.float32) + scales.resize(1, 3) + ort_inputs = { + 'input': x, + 'input_lengths': x_len, + 'scales': scales, + 'sid': sid + } + t1 = time.time() + audio = np.squeeze(self.ort_sess.run(None, ort_inputs)) + audio *= 32767.0 / max(0.01, np.max(np.abs(audio))) * 0.6 + audio = np.clip(audio, -32767.0, 32767.0) + t2 = time.time() + spending_time = "推理时间:"+str(t2-t1)+"s" + print(spending_time) + bytes_wav = bytes() + byte_io = io.BytesIO(bytes_wav) + wavfile.write('moe/temp1.wav',self.hps.data.sampling_rate, audio.astype(np.int16)) + cmd = 'ffmpeg -y -i ' + 'moe/temp1.wav' + ' -ar 44100 ' + 'moe/temp2.wav' + os.system(cmd) + return (self.hps.data.sampling_rate, audio),text.replace('[JA]','').replace('[ZH]','') + +app = Flask(__name__) +print("开始部署") +grVits = VitsGradio() + +@app.route('/chat') +def text_api(): + message = request.args.get('Text','') + audio,text = grVits.tts_fn(message) + text = text.replace('[JA]','').replace('[ZH]','') + with open('moe/temp2.wav','rb') as bit: + wav_bytes = bit.read() + headers = { + 'Content-Type': 'audio/wav', + 'Text': text.encode('utf-8')} + return wav_bytes, 200, headers + +def gradio_interface(): + return grVits.Vits.launch() + +if __name__ == '__main__': + api_thread = Thread(target=app.run, args=("0.0.0.0", 8080)) + gradio_thread = Thread(target=gradio_interface) + api_thread.start() + gradio_thread.start() \ No newline at end of file diff --git a/mel_processing.py b/mel_processing.py new file mode 100644 index 0000000000000000000000000000000000000000..575ee5ef554ce4ef12cfee0cfbae87c869c1be05 --- /dev/null +++ b/mel_processing.py @@ -0,0 +1,137 @@ +import torch +import torch.nn.functional as F +import torch.utils.data +from librosa.filters import mel as librosa_mel_fn + +MAX_WAV_VALUE = 32768.0 + + +def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): + """ + PARAMS + ------ + C: compression factor + """ + return 
torch.log(torch.clamp(x, min=clip_val) * C) + + +def dynamic_range_decompression_torch(x, C=1): + """ + PARAMS + ------ + C: compression factor used to compress + """ + return torch.exp(x) / C + + +def spectral_normalize_torch(magnitudes): + output = dynamic_range_compression_torch(magnitudes) + return output + + +def spectral_de_normalize_torch(magnitudes): + output = dynamic_range_decompression_torch(magnitudes) + return output + + +mel_basis = {} +hann_window = {} + + +def spectrogram_torch(y, + n_fft, + sampling_rate, + hop_size, + win_size, + center=False): + if torch.min(y) < -1.: + print('min value is ', torch.min(y)) + if torch.max(y) > 1.: + print('max value is ', torch.max(y)) + + global hann_window + dtype_device = str(y.dtype) + '_' + str(y.device) + wnsize_dtype_device = str(win_size) + '_' + dtype_device + if wnsize_dtype_device not in hann_window: + hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to( + dtype=y.dtype, device=y.device) + + y = F.pad(y.unsqueeze(1), + (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), + mode='reflect') + y = y.squeeze(1) + + spec = torch.stft(y, + n_fft, + hop_length=hop_size, + win_length=win_size, + window=hann_window[wnsize_dtype_device], + center=center, + pad_mode='reflect', + normalized=False, + onesided=True) + + spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) + return spec + + +def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): + global mel_basis + dtype_device = str(spec.dtype) + '_' + str(spec.device) + fmax_dtype_device = str(fmax) + '_' + dtype_device + if fmax_dtype_device not in mel_basis: + mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) + mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to( + dtype=spec.dtype, device=spec.device) + spec = torch.matmul(mel_basis[fmax_dtype_device], spec) + spec = spectral_normalize_torch(spec) + return spec + + +def mel_spectrogram_torch(y, + n_fft, + num_mels, + sampling_rate, + hop_size, + win_size, + fmin, + fmax, + center=False): + if torch.min(y) < -1.: + print('min value is ', torch.min(y)) + if torch.max(y) > 1.: + print('max value is ', torch.max(y)) + + global mel_basis, hann_window + dtype_device = str(y.dtype) + '_' + str(y.device) + fmax_dtype_device = str(fmax) + '_' + dtype_device + wnsize_dtype_device = str(win_size) + '_' + dtype_device + if fmax_dtype_device not in mel_basis: + mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) + mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to( + dtype=y.dtype, device=y.device) + if wnsize_dtype_device not in hann_window: + hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to( + dtype=y.dtype, device=y.device) + + y = F.pad(y.unsqueeze(1), + (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), + mode='reflect') + y = y.squeeze(1) + + spec = torch.stft(y, + n_fft, + hop_length=hop_size, + win_length=win_size, + window=hann_window[wnsize_dtype_device], + center=center, + pad_mode='reflect', + normalized=False, + onesided=True) + + spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) + + spec = torch.matmul(mel_basis[fmax_dtype_device], spec) + spec = spectral_normalize_torch(spec) + + return spec diff --git a/models.py b/models.py new file mode 100644 index 0000000000000000000000000000000000000000..7757ad7afecddf87b29e120ba3784e9bed42d713 --- /dev/null +++ b/models.py @@ -0,0 +1,672 @@ +import math + +import torch +from torch import nn +from torch.nn import functional as F +from torch.nn import Conv1d, ConvTranspose1d, Conv2d +from torch.nn.utils 
import weight_norm, remove_weight_norm, spectral_norm +import monotonic_align + +import commons +import modules +import attentions +from commons import init_weights, get_padding + + +class StochasticDurationPredictor(nn.Module): + def __init__(self, + in_channels, + filter_channels, + kernel_size, + p_dropout, + n_flows=4, + gin_channels=0): + super().__init__() + filter_channels = in_channels # it needs to be removed from future version. + self.in_channels = in_channels + self.filter_channels = filter_channels + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.n_flows = n_flows + self.gin_channels = gin_channels + + self.log_flow = modules.Log() + self.flows = nn.ModuleList() + self.flows.append(modules.ElementwiseAffine(2)) + for i in range(n_flows): + self.flows.append( + modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) + self.flows.append(modules.Flip()) + + self.post_pre = nn.Conv1d(1, filter_channels, 1) + self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) + self.post_convs = modules.DDSConv(filter_channels, + kernel_size, + n_layers=3, + p_dropout=p_dropout) + self.post_flows = nn.ModuleList() + self.post_flows.append(modules.ElementwiseAffine(2)) + for i in range(4): + self.post_flows.append( + modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) + self.post_flows.append(modules.Flip()) + + self.pre = nn.Conv1d(in_channels, filter_channels, 1) + self.proj = nn.Conv1d(filter_channels, filter_channels, 1) + self.convs = modules.DDSConv(filter_channels, + kernel_size, + n_layers=3, + p_dropout=p_dropout) + if gin_channels != 0: + self.cond = nn.Conv1d(gin_channels, filter_channels, 1) + + def forward(self, + x, + x_mask, + w=None, + g=None, + reverse=False, + noise_scale=1.0): + x = torch.detach(x) + x = self.pre(x) + if g is not None: + g = torch.detach(g) + x = x + self.cond(g) + x = self.convs(x, x_mask) + x = self.proj(x) * x_mask + + if not reverse: + flows = self.flows + assert w is not None + + logdet_tot_q = 0 + h_w = self.post_pre(w) + h_w = self.post_convs(h_w, x_mask) + h_w = self.post_proj(h_w) * x_mask + e_q = torch.randn(w.size(0), 2, w.size(2)).to( + device=x.device, dtype=x.dtype) * x_mask + z_q = e_q + for flow in self.post_flows: + z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) + logdet_tot_q += logdet_q + z_u, z1 = torch.split(z_q, [1, 1], 1) + u = torch.sigmoid(z_u) * x_mask + z0 = (w - u) * x_mask + logdet_tot_q += torch.sum( + (F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1, 2]) + logq = torch.sum( + -0.5 * (math.log(2 * math.pi) + + (e_q**2)) * x_mask, [1, 2]) - logdet_tot_q + + logdet_tot = 0 + z0, logdet = self.log_flow(z0, x_mask) + logdet_tot += logdet + z = torch.cat([z0, z1], 1) + for flow in flows: + z, logdet = flow(z, x_mask, g=x, reverse=reverse) + logdet_tot = logdet_tot + logdet + nll = torch.sum(0.5 * (math.log(2 * math.pi) + + (z**2)) * x_mask, [1, 2]) - logdet_tot + return nll + logq # [b] + else: + flows = list(reversed(self.flows)) + flows = flows[:-2] + [flows[-1]] # remove a useless vflow + z = torch.randn(x.size(0), 2, x.size(2)).to( + device=x.device, dtype=x.dtype) * noise_scale + for flow in flows: + z = flow(z, x_mask, g=x, reverse=reverse) + z0, z1 = torch.split(z, [1, 1], 1) + logw = z0 + return logw + + +class DurationPredictor(nn.Module): + def __init__(self, + in_channels, + filter_channels, + kernel_size, + p_dropout, + gin_channels=0): + super().__init__() + + self.in_channels = in_channels + self.filter_channels = filter_channels + self.kernel_size = kernel_size + 
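+        # Deterministic duration predictor: two Conv1d -> ReLU -> LayerNorm ->
+        # Dropout stages followed by a 1x1 projection to a single channel of
+        # log-durations. SynthesizerTrn below selects this branch only when
+        # use_sdp=False; otherwise the flow-based StochasticDurationPredictor
+        # defined above is used.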
self.p_dropout = p_dropout + self.gin_channels = gin_channels + + self.drop = nn.Dropout(p_dropout) + self.conv_1 = nn.Conv1d(in_channels, + filter_channels, + kernel_size, + padding=kernel_size // 2) + self.norm_1 = modules.LayerNorm(filter_channels) + self.conv_2 = nn.Conv1d(filter_channels, + filter_channels, + kernel_size, + padding=kernel_size // 2) + self.norm_2 = modules.LayerNorm(filter_channels) + self.proj = nn.Conv1d(filter_channels, 1, 1) + + if gin_channels != 0: + self.cond = nn.Conv1d(gin_channels, in_channels, 1) + + def forward(self, x, x_mask, g=None): + x = torch.detach(x) + if g is not None: + g = torch.detach(g) + x = x + self.cond(g) + x = self.conv_1(x * x_mask) + x = torch.relu(x) + x = self.norm_1(x) + x = self.drop(x) + x = self.conv_2(x * x_mask) + x = torch.relu(x) + x = self.norm_2(x) + x = self.drop(x) + x = self.proj(x * x_mask) + return x * x_mask + + +class TextEncoder(nn.Module): + def __init__(self, n_vocab, out_channels, hidden_channels, filter_channels, + n_heads, n_layers, kernel_size, p_dropout): + super().__init__() + self.n_vocab = n_vocab + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + + self.emb = nn.Embedding(n_vocab, hidden_channels) + nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5) + + self.encoder = attentions.Encoder(hidden_channels, filter_channels, + n_heads, n_layers, kernel_size, + p_dropout) + self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) + + def forward(self, x, x_lengths): + x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] + x = torch.transpose(x, 1, -1) # [b, h, t] + x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), + 1).to(x.dtype) + + x = self.encoder(x * x_mask, x_mask) + stats = self.proj(x) * x_mask + + m, logs = torch.split(stats, self.out_channels, dim=1) + return x, m, logs, x_mask + + +class ResidualCouplingBlock(nn.Module): + def __init__(self, + channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + n_flows=4, + gin_channels=0): + super().__init__() + self.channels = channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.n_flows = n_flows + self.gin_channels = gin_channels + + self.flows = nn.ModuleList() + for i in range(n_flows): + self.flows.append( + modules.ResidualCouplingLayer(channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=gin_channels, + mean_only=True)) + self.flows.append(modules.Flip()) + + def forward(self, x, x_mask, g=None, reverse=False): + if not reverse: + for flow in self.flows: + x, _ = flow(x, x_mask, g=g, reverse=reverse) + else: + for flow in reversed(self.flows): + x = flow(x, x_mask, g=g, reverse=reverse) + return x + + +class PosteriorEncoder(nn.Module): + def __init__(self, + in_channels, + out_channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=0): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.gin_channels = gin_channels + + self.pre = nn.Conv1d(in_channels, hidden_channels, 1) + self.enc = modules.WN(hidden_channels, + kernel_size, + dilation_rate, + n_layers, + 
gin_channels=gin_channels) + self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) + + def forward(self, x, x_lengths, g=None): + x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), + 1).to(x.dtype) + x = self.pre(x) * x_mask + x = self.enc(x, x_mask, g=g) + stats = self.proj(x) * x_mask + m, logs = torch.split(stats, self.out_channels, dim=1) + z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask + return z, m, logs, x_mask + + +class Generator(torch.nn.Module): + def __init__(self, + initial_channel, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + gin_channels=0): + super(Generator, self).__init__() + self.num_kernels = len(resblock_kernel_sizes) + self.num_upsamples = len(upsample_rates) + self.conv_pre = Conv1d(initial_channel, + upsample_initial_channel, + 7, + 1, + padding=3) + resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 + + self.ups = nn.ModuleList() + for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): + self.ups.append( + weight_norm( + ConvTranspose1d(upsample_initial_channel // (2**i), + upsample_initial_channel // (2**(i + 1)), + k, + u, + padding=(k - u) // 2))) + + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = upsample_initial_channel // (2**(i + 1)) + for j, (k, d) in enumerate( + zip(resblock_kernel_sizes, resblock_dilation_sizes)): + self.resblocks.append(resblock(ch, k, d)) + + self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) + self.ups.apply(init_weights) + + if gin_channels != 0: + self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) + + def forward(self, x, g=None): + x = self.conv_pre(x) + if g is not None: + x = x + self.cond(g) + + for i in range(self.num_upsamples): + x = F.leaky_relu(x, modules.LRELU_SLOPE) + x = self.ups[i](x) + xs = None + for j in range(self.num_kernels): + if xs is None: + xs = self.resblocks[i * self.num_kernels + j](x) + else: + xs += self.resblocks[i * self.num_kernels + j](x) + x = xs / self.num_kernels + x = F.leaky_relu(x) + x = self.conv_post(x) + x = torch.tanh(x) + + return x + + def remove_weight_norm(self): + print('Removing weight norm...') + for l in self.ups: + remove_weight_norm(l) + for l in self.resblocks: + l.remove_weight_norm() + + +class DiscriminatorP(torch.nn.Module): + def __init__(self, + period, + kernel_size=5, + stride=3, + use_spectral_norm=False): + super(DiscriminatorP, self).__init__() + self.period = period + self.use_spectral_norm = use_spectral_norm + norm_f = weight_norm if use_spectral_norm is False else spectral_norm + self.convs = nn.ModuleList([ + norm_f( + Conv2d(1, + 32, (kernel_size, 1), (stride, 1), + padding=(get_padding(kernel_size, 1), 0))), + norm_f( + Conv2d(32, + 128, (kernel_size, 1), (stride, 1), + padding=(get_padding(kernel_size, 1), 0))), + norm_f( + Conv2d(128, + 512, (kernel_size, 1), (stride, 1), + padding=(get_padding(kernel_size, 1), 0))), + norm_f( + Conv2d(512, + 1024, (kernel_size, 1), (stride, 1), + padding=(get_padding(kernel_size, 1), 0))), + norm_f( + Conv2d(1024, + 1024, (kernel_size, 1), + 1, + padding=(get_padding(kernel_size, 1), 0))), + ]) + self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) + + def forward(self, x): + fmap = [] + + # 1d to 2d + b, c, t = x.shape + if t % self.period != 0: # pad first + n_pad = self.period - (t % self.period) + x = F.pad(x, (0, n_pad), "reflect") + t = t + n_pad + x = x.view(b, c, t // self.period, self.period) + 
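+        # The 1-D waveform (b, 1, t) has been folded into a 2-D map of shape
+        # (b, 1, t // period, period), so the 2-D convolutions below compare
+        # samples that lie exactly `period` steps apart. For example, with
+        # t = 100 and period = 3 the signal is reflect-padded to t = 102 and
+        # viewed as (b, 1, 34, 3). The periods used by MultiPeriodDiscriminator
+        # (2, 3, 5, 7, 11) are pairwise coprime, so each sub-discriminator
+        # attends to a different periodic structure in the audio.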
+ for l in self.convs: + x = l(x) + x = F.leaky_relu(x, modules.LRELU_SLOPE) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap + + +class DiscriminatorS(torch.nn.Module): + def __init__(self, use_spectral_norm=False): + super(DiscriminatorS, self).__init__() + norm_f = weight_norm if use_spectral_norm is False else spectral_norm + self.convs = nn.ModuleList([ + norm_f(Conv1d(1, 16, 15, 1, padding=7)), + norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), + norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), + norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), + norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), + norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), + ]) + self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) + + def forward(self, x): + fmap = [] + + for l in self.convs: + x = l(x) + x = F.leaky_relu(x, modules.LRELU_SLOPE) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap + + +class MultiPeriodDiscriminator(torch.nn.Module): + def __init__(self, use_spectral_norm=False): + super(MultiPeriodDiscriminator, self).__init__() + periods = [2, 3, 5, 7, 11] + + discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] + discs = discs + [ + DiscriminatorP(i, use_spectral_norm=use_spectral_norm) + for i in periods + ] + self.discriminators = nn.ModuleList(discs) + + def forward(self, y, y_hat): + y_d_rs = [] + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + for i, d in enumerate(self.discriminators): + y_d_r, fmap_r = d(y) + y_d_g, fmap_g = d(y_hat) + y_d_rs.append(y_d_r) + y_d_gs.append(y_d_g) + fmap_rs.append(fmap_r) + fmap_gs.append(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + + +class SynthesizerTrn(nn.Module): + """ + Synthesizer for Training + """ + def __init__(self, + n_vocab, + spec_channels, + segment_size, + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + n_speakers=0, + gin_channels=0, + use_sdp=True, + **kwargs): + + super().__init__() + self.n_vocab = n_vocab + self.spec_channels = spec_channels + self.inter_channels = inter_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.resblock = resblock + self.resblock_kernel_sizes = resblock_kernel_sizes + self.resblock_dilation_sizes = resblock_dilation_sizes + self.upsample_rates = upsample_rates + self.upsample_initial_channel = upsample_initial_channel + self.upsample_kernel_sizes = upsample_kernel_sizes + self.segment_size = segment_size + self.n_speakers = n_speakers + self.gin_channels = gin_channels + if self.n_speakers != 0: + message = "gin_channels must be none zero for multiple speakers" + assert gin_channels != 0, message + + self.use_sdp = use_sdp + + self.enc_p = TextEncoder(n_vocab, inter_channels, hidden_channels, + filter_channels, n_heads, n_layers, + kernel_size, p_dropout) + self.dec = Generator(inter_channels, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + gin_channels=gin_channels) + self.enc_q = PosteriorEncoder(spec_channels, + inter_channels, + hidden_channels, + 5, + 1, + 16, + gin_channels=gin_channels) + self.flow = 
ResidualCouplingBlock(inter_channels, + hidden_channels, + 5, + 1, + 4, + gin_channels=gin_channels) + + if use_sdp: + self.dp = StochasticDurationPredictor(hidden_channels, + 192, + 3, + 0.5, + 4, + gin_channels=gin_channels) + else: + self.dp = DurationPredictor(hidden_channels, + 256, + 3, + 0.5, + gin_channels=gin_channels) + + if n_speakers > 1: + self.emb_g = nn.Embedding(n_speakers, gin_channels) + + def forward(self, x, x_lengths, y, y_lengths, sid=None): + + x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) + if self.n_speakers > 0: + g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] + else: + g = None + + z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) + z_p = self.flow(z, y_mask, g=g) + + with torch.no_grad(): + # negative cross-entropy + s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t] + neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], + keepdim=True) # [b, 1, t_s] + neg_cent2 = torch.matmul( + -0.5 * (z_p**2).transpose(1, 2), + s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] + neg_cent3 = torch.matmul( + z_p.transpose(1, 2), + (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] + neg_cent4 = torch.sum(-0.5 * (m_p**2) * s_p_sq_r, [1], + keepdim=True) # [b, 1, t_s] + neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 + + attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze( + y_mask, -1) + attn = monotonic_align.maximum_path( + neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach() + + w = attn.sum(2) + if self.use_sdp: + l_length = self.dp(x, x_mask, w, g=g) + l_length = l_length / torch.sum(x_mask) + else: + logw_ = torch.log(w + 1e-6) * x_mask + logw = self.dp(x, x_mask, g=g) + l_length = torch.sum( + (logw - logw_)**2, [1, 2]) / torch.sum(x_mask) # for averaging + + # expand prior + m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, + 2)).transpose(1, 2) + logs_p = torch.matmul(attn.squeeze(1), + logs_p.transpose(1, 2)).transpose(1, 2) + + z_slice, ids_slice = commons.rand_slice_segments( + z, y_lengths, self.segment_size) + o = self.dec(z_slice, g=g) + return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, + logs_p, m_q, + logs_q) + + def infer(self, + x, + x_lengths, + sid=None, + noise_scale=1, + length_scale=1, + noise_scale_w=1., + max_len=None): + x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) + if self.n_speakers > 0: + g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] + else: + g = None + + if self.use_sdp: + logw = self.dp(x, + x_mask, + g=g, + reverse=True, + noise_scale=noise_scale_w) + else: + logw = self.dp(x, x_mask, g=g) + w = torch.exp(logw) * x_mask * length_scale + w_ceil = torch.ceil(w) + y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() + y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), + 1).to(x_mask.dtype) + attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) + attn = commons.generate_path(w_ceil, attn_mask) + + m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose( + 1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] + logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose( + 1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] + + z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale + z = self.flow(z_p, y_mask, g=g, reverse=True) + o = self.dec((z * y_mask)[:, :, :max_len], g=g) + return o, attn, y_mask, (z, z_p, m_p, logs_p) + + def export_forward(self, x, x_lengths, scales, sid): + # shape of scales: Bx3, make triton happy + audio, *_ = self.infer(x, + x_lengths, + sid, + noise_scale=scales[0][0], + 
length_scale=scales[0][1], + noise_scale_w=scales[0][2]) + return audio + + def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): + assert self.n_speakers > 0, "n_speakers have to be larger than 0." + g_src = self.emb_g(sid_src).unsqueeze(-1) + g_tgt = self.emb_g(sid_tgt).unsqueeze(-1) + z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src) + z_p = self.flow(z, y_mask, g=g_src) + z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True) + o_hat = self.dec(z_hat * y_mask, g=g_tgt) + return o_hat, y_mask, (z, z_p, z_hat) diff --git a/modules.py b/modules.py new file mode 100644 index 0000000000000000000000000000000000000000..0f9286cd02b123d54f38e5c2bb144cc01056d080 --- /dev/null +++ b/modules.py @@ -0,0 +1,469 @@ +import math + +import torch +from torch import nn +from torch.nn import functional as F +from torch.nn import Conv1d +from torch.nn.utils import weight_norm, remove_weight_norm + +import commons +from commons import init_weights, get_padding +from transforms import piecewise_rational_quadratic_transform + +LRELU_SLOPE = 0.1 + + +class LayerNorm(nn.Module): + def __init__(self, channels, eps=1e-5): + super().__init__() + self.channels = channels + self.eps = eps + + self.gamma = nn.Parameter(torch.ones(channels)) + self.beta = nn.Parameter(torch.zeros(channels)) + + def forward(self, x): + x = x.transpose(1, -1) + x = F.layer_norm(x, (self.channels, ), self.gamma, self.beta, self.eps) + return x.transpose(1, -1) + + +class ConvReluNorm(nn.Module): + def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, + n_layers, p_dropout): + super().__init__() + self.in_channels = in_channels + self.hidden_channels = hidden_channels + self.out_channels = out_channels + self.kernel_size = kernel_size + self.n_layers = n_layers + self.p_dropout = p_dropout + assert n_layers > 1, "Number of layers should be larger than 0." 
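+        # Note: self.proj below is zero-initialized, so at initialization this
+        # block is an identity map on masked inputs: forward() returns
+        # (x_org + self.proj(x)) * x_mask with self.proj(x) == 0 until training
+        # updates the projection weights.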
+ + self.conv_layers = nn.ModuleList() + self.norm_layers = nn.ModuleList() + self.conv_layers.append( + nn.Conv1d(in_channels, + hidden_channels, + kernel_size, + padding=kernel_size // 2)) + self.norm_layers.append(LayerNorm(hidden_channels)) + self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout)) + for _ in range(n_layers - 1): + self.conv_layers.append( + nn.Conv1d(hidden_channels, + hidden_channels, + kernel_size, + padding=kernel_size // 2)) + self.norm_layers.append(LayerNorm(hidden_channels)) + self.proj = nn.Conv1d(hidden_channels, out_channels, 1) + self.proj.weight.data.zero_() + self.proj.bias.data.zero_() + + def forward(self, x, x_mask): + x_org = x + for i in range(self.n_layers): + x = self.conv_layers[i](x * x_mask) + x = self.norm_layers[i](x) + x = self.relu_drop(x) + x = x_org + self.proj(x) + return x * x_mask + + +class DDSConv(nn.Module): + """ + Dialted and Depth-Separable Convolution + """ + def __init__(self, channels, kernel_size, n_layers, p_dropout=0.): + super().__init__() + self.channels = channels + self.kernel_size = kernel_size + self.n_layers = n_layers + self.p_dropout = p_dropout + + self.drop = nn.Dropout(p_dropout) + self.convs_sep = nn.ModuleList() + self.convs_1x1 = nn.ModuleList() + self.norms_1 = nn.ModuleList() + self.norms_2 = nn.ModuleList() + for i in range(n_layers): + dilation = kernel_size**i + padding = (kernel_size * dilation - dilation) // 2 + self.convs_sep.append( + nn.Conv1d(channels, + channels, + kernel_size, + groups=channels, + dilation=dilation, + padding=padding)) + self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) + self.norms_1.append(LayerNorm(channels)) + self.norms_2.append(LayerNorm(channels)) + + def forward(self, x, x_mask, g=None): + if g is not None: + x = x + g + for i in range(self.n_layers): + y = self.convs_sep[i](x * x_mask) + y = self.norms_1[i](y) + y = F.gelu(y) + y = self.convs_1x1[i](y) + y = self.norms_2[i](y) + y = F.gelu(y) + y = self.drop(y) + x = x + y + return x * x_mask + + +class WN(torch.nn.Module): + def __init__(self, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=0, + p_dropout=0): + super(WN, self).__init__() + assert (kernel_size % 2 == 1) + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size, + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.gin_channels = gin_channels + self.p_dropout = p_dropout + + self.in_layers = torch.nn.ModuleList() + self.res_skip_layers = torch.nn.ModuleList() + self.drop = nn.Dropout(p_dropout) + + if gin_channels != 0: + cond_layer = torch.nn.Conv1d(gin_channels, + 2 * hidden_channels * n_layers, 1) + self.cond_layer = torch.nn.utils.weight_norm(cond_layer, + name='weight') + + for i in range(n_layers): + dilation = dilation_rate**i + padding = int((kernel_size * dilation - dilation) / 2) + in_layer = torch.nn.Conv1d(hidden_channels, + 2 * hidden_channels, + kernel_size, + dilation=dilation, + padding=padding) + in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') + self.in_layers.append(in_layer) + + # last one is not necessary + if i < n_layers - 1: + res_skip_channels = 2 * hidden_channels + else: + res_skip_channels = hidden_channels + + res_skip_layer = torch.nn.Conv1d(hidden_channels, + res_skip_channels, 1) + res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, + name='weight') + self.res_skip_layers.append(res_skip_layer) + + def forward(self, x, x_mask, g=None, **kwargs): + output = torch.zeros_like(x) + n_channels_tensor = 
torch.IntTensor([self.hidden_channels]) + + if g is not None: + g = self.cond_layer(g) + + for i in range(self.n_layers): + x_in = self.in_layers[i](x) + if g is not None: + cond_offset = i * 2 * self.hidden_channels + g_l = g[:, + cond_offset:cond_offset + 2 * self.hidden_channels, :] + else: + g_l = torch.zeros_like(x_in) + + acts = commons.fused_add_tanh_sigmoid_multiply( + x_in, g_l, n_channels_tensor) + acts = self.drop(acts) + + res_skip_acts = self.res_skip_layers[i](acts) + if i < self.n_layers - 1: + res_acts = res_skip_acts[:, :self.hidden_channels, :] + x = (x + res_acts) * x_mask + output = output + res_skip_acts[:, self.hidden_channels:, :] + else: + output = output + res_skip_acts + return output * x_mask + + def remove_weight_norm(self): + if self.gin_channels != 0: + torch.nn.utils.remove_weight_norm(self.cond_layer) + for l in self.in_layers: + torch.nn.utils.remove_weight_norm(l) + for l in self.res_skip_layers: + torch.nn.utils.remove_weight_norm(l) + + +class ResBlock1(torch.nn.Module): + def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): + super(ResBlock1, self).__init__() + self.convs1 = nn.ModuleList([ + weight_norm( + Conv1d(channels, + channels, + kernel_size, + 1, + dilation=dilation[0], + padding=get_padding(kernel_size, dilation[0]))), + weight_norm( + Conv1d(channels, + channels, + kernel_size, + 1, + dilation=dilation[1], + padding=get_padding(kernel_size, dilation[1]))), + weight_norm( + Conv1d(channels, + channels, + kernel_size, + 1, + dilation=dilation[2], + padding=get_padding(kernel_size, dilation[2]))) + ]) + self.convs1.apply(init_weights) + + self.convs2 = nn.ModuleList([ + weight_norm( + Conv1d(channels, + channels, + kernel_size, + 1, + dilation=1, + padding=get_padding(kernel_size, 1))), + weight_norm( + Conv1d(channels, + channels, + kernel_size, + 1, + dilation=1, + padding=get_padding(kernel_size, 1))), + weight_norm( + Conv1d(channels, + channels, + kernel_size, + 1, + dilation=1, + padding=get_padding(kernel_size, 1))) + ]) + self.convs2.apply(init_weights) + + def forward(self, x, x_mask=None): + for c1, c2 in zip(self.convs1, self.convs2): + xt = F.leaky_relu(x, LRELU_SLOPE) + if x_mask is not None: + xt = xt * x_mask + xt = c1(xt) + xt = F.leaky_relu(xt, LRELU_SLOPE) + if x_mask is not None: + xt = xt * x_mask + xt = c2(xt) + x = xt + x + if x_mask is not None: + x = x * x_mask + return x + + def remove_weight_norm(self): + for l in self.convs1: + remove_weight_norm(l) + for l in self.convs2: + remove_weight_norm(l) + + +class ResBlock2(torch.nn.Module): + def __init__(self, channels, kernel_size=3, dilation=(1, 3)): + super(ResBlock2, self).__init__() + self.convs = nn.ModuleList([ + weight_norm( + Conv1d(channels, + channels, + kernel_size, + 1, + dilation=dilation[0], + padding=get_padding(kernel_size, dilation[0]))), + weight_norm( + Conv1d(channels, + channels, + kernel_size, + 1, + dilation=dilation[1], + padding=get_padding(kernel_size, dilation[1]))) + ]) + self.convs.apply(init_weights) + + def forward(self, x, x_mask=None): + for c in self.convs: + xt = F.leaky_relu(x, LRELU_SLOPE) + if x_mask is not None: + xt = xt * x_mask + xt = c(xt) + x = xt + x + if x_mask is not None: + x = x * x_mask + return x + + def remove_weight_norm(self): + for l in self.convs: + remove_weight_norm(l) + + +class Log(nn.Module): + def forward(self, x, x_mask, reverse=False, **kwargs): + if not reverse: + y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask + logdet = torch.sum(-y, [1, 2]) + return y, logdet + else: + x = torch.exp(x) * 
x_mask + return x + + +class Flip(nn.Module): + def forward(self, x, *args, reverse=False, **kwargs): + x = torch.flip(x, [1]) + if not reverse: + logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) + return x, logdet + else: + return x + + +class ElementwiseAffine(nn.Module): + def __init__(self, channels): + super().__init__() + self.channels = channels + self.m = nn.Parameter(torch.zeros(channels, 1)) + self.logs = nn.Parameter(torch.zeros(channels, 1)) + + def forward(self, x, x_mask, reverse=False, **kwargs): + if not reverse: + y = self.m + torch.exp(self.logs) * x + y = y * x_mask + logdet = torch.sum(self.logs * x_mask, [1, 2]) + return y, logdet + else: + x = (x - self.m) * torch.exp(-self.logs) * x_mask + return x + + +class ResidualCouplingLayer(nn.Module): + def __init__(self, + channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + p_dropout=0, + gin_channels=0, + mean_only=False): + assert channels % 2 == 0, "channels should be divisible by 2" + super().__init__() + self.channels = channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.half_channels = channels // 2 + self.mean_only = mean_only + + self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) + self.enc = WN(hidden_channels, + kernel_size, + dilation_rate, + n_layers, + p_dropout=p_dropout, + gin_channels=gin_channels) + self.post = nn.Conv1d(hidden_channels, + self.half_channels * (2 - mean_only), 1) + self.post.weight.data.zero_() + self.post.bias.data.zero_() + + def forward(self, x, x_mask, g=None, reverse=False): + x0, x1 = torch.split(x, [self.half_channels] * 2, 1) + h = self.pre(x0) * x_mask + h = self.enc(h, x_mask, g=g) + stats = self.post(h) * x_mask + if not self.mean_only: + m, logs = torch.split(stats, [self.half_channels] * 2, 1) + else: + m = stats + logs = torch.zeros_like(m) + + if not reverse: + x1 = m + x1 * torch.exp(logs) * x_mask + x = torch.cat([x0, x1], 1) + logdet = torch.sum(logs, [1, 2]) + return x, logdet + else: + x1 = (x1 - m) * torch.exp(-logs) * x_mask + x = torch.cat([x0, x1], 1) + return x + + +class ConvFlow(nn.Module): + def __init__(self, + in_channels, + filter_channels, + kernel_size, + n_layers, + num_bins=10, + tail_bound=5.0): + super().__init__() + self.in_channels = in_channels + self.filter_channels = filter_channels + self.kernel_size = kernel_size + self.n_layers = n_layers + self.num_bins = num_bins + self.tail_bound = tail_bound + self.half_channels = in_channels // 2 + + self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) + self.convs = DDSConv(filter_channels, + kernel_size, + n_layers, + p_dropout=0.) + self.proj = nn.Conv1d(filter_channels, + self.half_channels * (num_bins * 3 - 1), 1) + self.proj.weight.data.zero_() + self.proj.bias.data.zero_() + + def forward(self, x, x_mask, g=None, reverse=False): + x0, x1 = torch.split(x, [self.half_channels] * 2, 1) + h = self.pre(x0) + h = self.convs(h, x_mask, g=g) + h = self.proj(h) * x_mask + + b, c, t = x0.shape + h = h.reshape(b, c, -1, t).permute(0, 1, 3, + 2) # [b, cx?, t] -> [b, c, t, ?] 
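+        # h is split along the last axis into the spline parameters:
+        # num_bins unnormalized widths, num_bins unnormalized heights and
+        # num_bins - 1 unnormalized derivatives (3 * num_bins - 1 values per
+        # half-channel, matching the output size of self.proj). They
+        # parameterize piecewise_rational_quadratic_transform, a monotonic
+        # rational-quadratic spline applied to x1 inside
+        # [-tail_bound, tail_bound] and the identity outside ('linear' tails).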
+ + unnormalized_widths = h[..., :self.num_bins] / math.sqrt( + self.filter_channels) + unnormalized_heights = h[..., + self.num_bins:2 * self.num_bins] / math.sqrt( + self.filter_channels) + unnormalized_derivatives = h[..., 2 * self.num_bins:] + + x1, logabsdet = piecewise_rational_quadratic_transform( + x1, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=reverse, + tails='linear', + tail_bound=self.tail_bound) + + x = torch.cat([x0, x1], 1) * x_mask + logdet = torch.sum(logabsdet * x_mask, [1, 2]) + if not reverse: + return x, logdet + else: + return x diff --git a/moe/config.json b/moe/config.json new file mode 100644 index 0000000000000000000000000000000000000000..a907a4d270fc3456b076018f28b57b9925e56750 --- /dev/null +++ b/moe/config.json @@ -0,0 +1,25 @@ +{ + "_name_or_path": "THUDM/chatglm-6b", + "architectures": [ + "ChatGLMModel" + ], + "auto_map": { + "AutoConfig": "configuration_chatglm.ChatGLMConfig", + "AutoModel": "modeling_chatglm.ChatGLMForConditionalGeneration", + "AutoModelForSeq2SeqLM": "modeling_chatglm.ChatGLMForConditionalGeneration" + }, + "bos_token_id": 150004, + "eos_token_id": 150005, + "hidden_size": 4096, + "inner_hidden_size": 16384, + "layernorm_epsilon": 1e-05, + "max_sequence_length": 2048, + "model_type": "chatglm", + "num_attention_heads": 32, + "num_layers": 28, + "position_encoding_2d": true, + "torch_dtype": "float16", + "transformers_version": "4.23.1", + "use_cache": true, + "vocab_size": 150528 +} diff --git a/moe/configuration_chatglm.py b/moe/configuration_chatglm.py new file mode 100644 index 0000000000000000000000000000000000000000..b4b196a51c922f44936b6f6bd2bbd80980f1755d --- /dev/null +++ b/moe/configuration_chatglm.py @@ -0,0 +1,92 @@ +""" ChatGLM model configuration """ + +from transformers.configuration_utils import PretrainedConfig +from transformers.utils import logging + +logger = logging.get_logger(__name__) + + +class ChatGLMConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`~ChatGLMModel`]. + It is used to instantiate an ChatGLM model according to the specified arguments, defining the model + architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of + the ChatGLM-6B [THUDM/ChatGLM-6B](https://huggingface.co/THUDM/chatglm-6b) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used + to control the model outputs. Read the documentation from [`PretrainedConfig`] + for more information. + + + Args: + vocab_size (`int`, *optional*, defaults to 150528): + Vocabulary size of the ChatGLM-6B model. Defines the number of different tokens that can be represented by the + `inputs_ids` passed when calling [`~ChatGLMModel`] or + [`~TFChatGLMModel`]. + hidden_size (`int`, *optional*, defaults to 4096): + Dimension of the encoder layers and the pooler layer. + num_hidden_layers (`int`, *optional*, defaults to 28): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 32): + Number of attention heads for each attention layer in the Transformer encoder. + inner_hidden_size (`int`, *optional*, defaults to 16384): + Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. + max_sequence_length (`int`, *optional*, defaults to 512): + The maximum sequence length that this model might ever be used with. + Typically set this to something large just in case (e.g., 512 or 1024 or 2048). 
+ layernorm_epsilon (`float`, *optional*, defaults to 1e-5): + The epsilon used by the layer normalization layers. + use_cache (`bool`, *optional*, defaults to `True`): + Whether the model should return the last key/values attentions (not used by all models). + Example: + + ```python + >>> from configuration_chatglm import ChatGLMConfig + >>> from modeling_chatglm import ChatGLMModel + + >>> # Initializing a ChatGLM-6B THUDM/ChatGLM-6B style configuration + >>> configuration = ChatGLMConfig() + + >>> # Initializing a model from the THUDM/ChatGLM-6B style configuration + >>> model = ChatGLMModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ``` +""" + model_type = "chatglm" + + def __init__( + self, + vocab_size=150528, + hidden_size=4096, + num_layers=28, + num_attention_heads=32, + layernorm_epsilon=1e-5, + use_cache=False, + bos_token_id=150004, + eos_token_id=150005, + pad_token_id=0, + max_sequence_length=2048, + inner_hidden_size=16384, + position_encoding_2d=True, + **kwargs + ): + self.num_layers = num_layers + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_attention_heads = num_attention_heads + self.max_sequence_length = max_sequence_length + self.layernorm_epsilon = layernorm_epsilon + self.inner_hidden_size = inner_hidden_size + self.use_cache = use_cache + self.bos_token_id = bos_token_id + self.eos_token_id = eos_token_id + self.pad_token_id = pad_token_id + self.position_encoding_2d = position_encoding_2d + super().__init__( + pad_token_id=pad_token_id, + bos_token_id=bos_token_id, + eos_token_id=eos_token_id, + **kwargs + ) diff --git a/moe/modeling_chatglm.py b/moe/modeling_chatglm.py new file mode 100644 index 0000000000000000000000000000000000000000..1429dbb820ce1d625e69c471a772f868b867e529 --- /dev/null +++ b/moe/modeling_chatglm.py @@ -0,0 +1,1157 @@ +""" PyTorch ChatGLM model. """ + +import math +import copy +import os + +import torch +import torch.utils.checkpoint +import torch.nn.functional as F +from torch import nn +from torch.nn import CrossEntropyLoss, LayerNorm +from torch.nn.utils import skip_init +from typing import Optional, Tuple, Union, List + +from transformers.utils import ( + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, +) +from transformers.modeling_outputs import ( + BaseModelOutputWithPast, + CausalLMOutputWithPast, + BaseModelOutputWithPastAndCrossAttentions, +) +from transformers.modeling_utils import PreTrainedModel + +from transformers.utils import logging +from .configuration_chatglm import ChatGLMConfig + +# flags required to enable jit fusion kernels +torch._C._jit_set_profiling_mode(False) +torch._C._jit_set_profiling_executor(False) +torch._C._jit_override_can_fuse_on_cpu(True) +torch._C._jit_override_can_fuse_on_gpu(True) + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "THUDM/ChatGLM-6B" +_CONFIG_FOR_DOC = "ChatGLM6BConfig" + +CHATGLM_6B_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "THUDM/chatglm-6b", + # See all ChatGLM-6B models at https://huggingface.co/models?filter=chatglm +] + + +def load_tf_weights_in_chatglm_6b(model, config, tf_checkpoint_path): + """Load tf checkpoints in a pytorch model.""" + try: + import re + + import numpy as np + import tensorflow as tf + except ImportError: + logger.error( + "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " + "https://www.tensorflow.org/install/ for installation instructions." 
+ ) + raise + tf_path = os.path.abspath(tf_checkpoint_path) + logger.info(f"Converting TensorFlow checkpoint from {tf_path}") + # Load weights from TF model + init_vars = tf.train.list_variables(tf_path) + names = [] + arrays = [] + for name, shape in init_vars: + logger.info(f"Loading TF weight {name} with shape {shape}") + array = tf.train.load_variable(tf_path, name) + names.append(name) + arrays.append(array) + + for name, array in zip(names, arrays): + name = name.split("/") + # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v + # which are not required for using pretrained model + if any( + n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"] + for n in name + ): + logger.info(f"Skipping {'/'.join(name)}") + continue + pointer = model + for m_name in name: + if re.fullmatch(r"[A-Za-z]+_\d+", m_name): + scope_names = re.split(r"_(\d+)", m_name) + else: + scope_names = [m_name] + if scope_names[0] == "kernel" or scope_names[0] == "gamma": + pointer = getattr(pointer, "weight") + elif scope_names[0] == "output_bias" or scope_names[0] == "beta": + pointer = getattr(pointer, "bias") + elif scope_names[0] == "output_weights": + pointer = getattr(pointer, "weight") + elif scope_names[0] == "squad": + pointer = getattr(pointer, "classifier") + else: + try: + pointer = getattr(pointer, scope_names[0]) + except AttributeError: + logger.info(f"Skipping {'/'.join(name)}") + continue + if len(scope_names) >= 2: + num = int(scope_names[1]) + pointer = pointer[num] + if m_name[-11:] == "_embeddings": + pointer = getattr(pointer, "weight") + elif m_name == "kernel": + array = np.transpose(array) + try: + assert ( + pointer.shape == array.shape + ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" + except AssertionError as e: + e.args += (pointer.shape, array.shape) + raise + logger.info(f"Initialize PyTorch weight {name}") + pointer.data = torch.from_numpy(array) + return model + + +@torch.jit.script +def gelu_impl(x): + """OpenAI's gelu implementation.""" + return 0.5 * x * (1.0 + torch.tanh(0.7978845608028654 * x * + (1.0 + 0.044715 * x * x))) + + +def gelu(x): + return gelu_impl(x) + + +class RotaryEmbedding(torch.nn.Module): + def __init__(self, dim, base=10000, precision=torch.half, learnable=False): + super().__init__() + inv_freq = 1. 
/ (base ** (torch.arange(0, dim, 2).float() / dim)) + inv_freq = inv_freq.half() + self.learnable = learnable + if learnable: + self.inv_freq = torch.nn.Parameter(inv_freq) + self.max_seq_len_cached = None + else: + self.register_buffer('inv_freq', inv_freq) + self.max_seq_len_cached = None + self.cos_cached = None + self.sin_cached = None + self.precision = precision + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, + error_msgs): + pass + + def forward(self, x, seq_dim=1, seq_len=None): + if seq_len is None: + seq_len = x.shape[seq_dim] + if self.max_seq_len_cached is None or (seq_len > self.max_seq_len_cached): + self.max_seq_len_cached = None if self.learnable else seq_len + t = torch.arange(seq_len, device=x.device, dtype=self.inv_freq.dtype) + freqs = torch.einsum('i,j->ij', t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1).to(x.device) + if self.precision == torch.bfloat16: + emb = emb.float() + + # [sx, 1 (b * np), hn] + cos_cached = emb.cos()[:, None, :] + sin_cached = emb.sin()[:, None, :] + if self.precision == torch.bfloat16: + cos_cached = cos_cached.bfloat16() + sin_cached = sin_cached.bfloat16() + if self.learnable: + return cos_cached, sin_cached + self.cos_cached, self.sin_cached = cos_cached, sin_cached + return self.cos_cached[:seq_len, ...], self.sin_cached[:seq_len, ...] + + +def rotate_half(x): + x1, x2 = x[..., :x.shape[-1] // 2], x[..., x.shape[-1] // 2:] + return torch.cat((-x2, x1), dim=x1.ndim - 1) # dim=-1 triggers a bug in earlier torch versions + + +@torch.jit.script +def apply_rotary_pos_emb_index(q, k, cos, sin, position_id): + # position_id: [sq, b], q, k: [sq, b, np, hn], cos: [sq, 1, hn] -> [sq, b, 1, hn] + cos, sin = F.embedding(position_id, cos.squeeze(1)).unsqueeze(2), \ + F.embedding(position_id, sin.squeeze(1)).unsqueeze(2) + q, k = (q * cos) + (rotate_half(q) * sin), (k * cos) + (rotate_half(k) * sin) + return q, k + + +def attention_fn( + self, + query_layer, + key_layer, + value_layer, + attention_mask, + hidden_size_per_partition, + layer_id, + layer_past=None, + scaling_attention_score=True, + use_cache=False, +): + if layer_past is not None: + past_key, past_value = layer_past + key_layer = torch.cat((past_key, key_layer), dim=0) + value_layer = torch.cat((past_value, value_layer), dim=0) + + # seqlen, batch, num_attention_heads, hidden_size_per_attention_head + seq_len, b, nh, hidden_size = key_layer.shape + + if use_cache: + present = (key_layer, value_layer) + else: + present = None + + query_key_layer_scaling_coeff = float(layer_id + 1) + if scaling_attention_score: + query_layer = query_layer / (math.sqrt(hidden_size) * query_key_layer_scaling_coeff) + + # =================================== + # Raw attention scores. 
[b, np, s, s] + # =================================== + + # [b, np, sq, sk] + output_size = (query_layer.size(1), query_layer.size(2), query_layer.size(0), key_layer.size(0)) + + # [sq, b, np, hn] -> [sq, b * np, hn] + query_layer = query_layer.view(output_size[2], output_size[0] * output_size[1], -1) + # [sk, b, np, hn] -> [sk, b * np, hn] + key_layer = key_layer.view(output_size[3], output_size[0] * output_size[1], -1) + + matmul_result = torch.empty( + output_size[0] * output_size[1], + output_size[2], + output_size[3], + dtype=query_layer.dtype, + device=query_layer.device, + ) + + matmul_result = torch.baddbmm( + matmul_result, + query_layer.transpose(0, 1), # [b * np, sq, hn] + key_layer.transpose(0, 1).transpose(1, 2), # [b * np, hn, sk] + beta=0.0, + alpha=1.0, + ) + + # change view to [b, np, sq, sk] + attention_scores = matmul_result.view(*output_size) + + if self.scale_mask_softmax: + self.scale_mask_softmax.scale = query_key_layer_scaling_coeff + attention_probs = self.scale_mask_softmax(attention_scores, attention_mask.contiguous()) + else: + if not (attention_mask == 0).all(): + # if auto-regressive, skip + attention_scores.masked_fill_(attention_mask, -10000.0) + dtype = attention_scores.type() + attention_scores = attention_scores.float() + attention_scores = attention_scores * query_key_layer_scaling_coeff + + attention_probs = F.softmax(attention_scores, dim=-1) + + attention_probs = attention_probs.type(dtype) + + # ========================= + # Context layer. [sq, b, hp] + # ========================= + + # value_layer -> context layer. + # [sk, b, np, hn] --> [b, np, sq, hn] + + # context layer shape: [b, np, sq, hn] + output_size = (value_layer.size(1), value_layer.size(2), query_layer.size(0), value_layer.size(3)) + + # change view [sk, b * np, hn] + value_layer = value_layer.view(value_layer.size(0), output_size[0] * output_size[1], -1) + + # change view [b * np, sq, sk] + attention_probs = attention_probs.view(output_size[0] * output_size[1], output_size[2], -1) + + # matmul: [b * np, sq, hn] + context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1)) + + # change view [b, np, sq, hn] + context_layer = context_layer.view(*output_size) + + # [b, np, sq, hn] --> [sq, b, np, hn] + context_layer = context_layer.permute(2, 0, 1, 3).contiguous() + + # [sq, b, np, hn] --> [sq, b, hp] + new_context_layer_shape = context_layer.size()[:-2] + (hidden_size_per_partition,) + context_layer = context_layer.view(*new_context_layer_shape) + + outputs = (context_layer, present, attention_probs) + + return outputs + + +class SelfAttention(torch.nn.Module): + def __init__(self, hidden_size, num_attention_heads, + layer_id, hidden_size_per_attention_head=None, bias=True, + params_dtype=torch.float, position_encoding_2d=True): + super(SelfAttention, self).__init__() + + self.layer_id = layer_id + self.hidden_size = hidden_size + self.hidden_size_per_partition = hidden_size + self.num_attention_heads = num_attention_heads + self.num_attention_heads_per_partition = num_attention_heads + self.position_encoding_2d = position_encoding_2d + self.rotary_emb = RotaryEmbedding( + self.hidden_size // (self.num_attention_heads * 2) + if position_encoding_2d + else self.hidden_size // self.num_attention_heads, + base=10000, + precision=torch.half, + learnable=False, + ) + + self.scale_mask_softmax = None + + if hidden_size_per_attention_head is None: + self.hidden_size_per_attention_head = hidden_size // num_attention_heads + else: + self.hidden_size_per_attention_head = 
hidden_size_per_attention_head + + self.inner_hidden_size = num_attention_heads * self.hidden_size_per_attention_head + + # Strided linear layer. + self.query_key_value = skip_init( + torch.nn.Linear, + hidden_size, + 3 * self.inner_hidden_size, + bias=bias, + dtype=params_dtype, + ) + + self.dense = skip_init( + torch.nn.Linear, + self.inner_hidden_size, + hidden_size, + bias=bias, + dtype=params_dtype, + ) + + @staticmethod + def attention_mask_func(attention_scores, attention_mask): + attention_scores.masked_fill_(attention_mask, -10000.0) + return attention_scores + + def split_tensor_along_last_dim(self, tensor, num_partitions, + contiguous_split_chunks=False): + """Split a tensor along its last dimension. + Arguments: + tensor: input tensor. + num_partitions: number of partitions to split the tensor + contiguous_split_chunks: If True, make each chunk contiguous + in memory. + """ + # Get the size and dimension. + last_dim = tensor.dim() - 1 + last_dim_size = tensor.size()[last_dim] // num_partitions + # Split. + tensor_list = torch.split(tensor, last_dim_size, dim=last_dim) + # Note: torch.split does not create contiguous tensors by default. + if contiguous_split_chunks: + return tuple(chunk.contiguous() for chunk in tensor_list) + + return tensor_list + + def forward( + self, + hidden_states: torch.Tensor, + position_ids, + attention_mask: torch.Tensor, + layer_id, + layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + use_cache: bool = False, + output_attentions: bool = False, + ): + """ + hidden_states: [seq_len, batch, hidden_size] + attention_mask: [(1, 1), seq_len, seq_len] + """ + + # [seq_len, batch, 3 * hidden_size] + mixed_raw_layer = self.query_key_value(hidden_states) + + # [seq_len, batch, 3 * hidden_size] --> [seq_len, batch, num_attention_heads, 3 * hidden_size_per_attention_head] + new_tensor_shape = mixed_raw_layer.size()[:-1] + ( + self.num_attention_heads_per_partition, + 3 * self.hidden_size_per_attention_head, + ) + mixed_raw_layer = mixed_raw_layer.view(*new_tensor_shape) + + # [seq_len, batch, num_attention_heads, hidden_size_per_attention_head] + (query_layer, key_layer, value_layer) = self.split_tensor_along_last_dim(mixed_raw_layer, 3) + + if self.position_encoding_2d: + q1, q2 = query_layer.chunk(2, dim=(query_layer.ndim - 1)) + k1, k2 = key_layer.chunk(2, dim=(key_layer.ndim - 1)) + cos, sin = self.rotary_emb(q1, seq_len=position_ids.max() + 1) + position_ids, block_position_ids = position_ids[:, 0, :].transpose(0, 1).contiguous(), \ + position_ids[:, 1, :].transpose(0, 1).contiguous() + q1, k1 = apply_rotary_pos_emb_index(q1, k1, cos, sin, position_ids) + q2, k2 = apply_rotary_pos_emb_index(q2, k2, cos, sin, block_position_ids) + query_layer = torch.concat([q1, q2], dim=(q1.ndim - 1)) + key_layer = torch.concat([k1, k2], dim=(k1.ndim - 1)) + else: + position_ids = position_ids.transpose(0, 1) + cos, sin = self.rotary_emb(value_layer, seq_len=position_ids.max() + 1) + # [seq_len, batch, num_attention_heads, hidden_size_per_attention_head] + query_layer, key_layer = apply_rotary_pos_emb_index(query_layer, key_layer, cos, sin, position_ids) + + # [seq_len, batch, hidden_size] + context_layer, present, attention_probs = attention_fn( + self=self, + query_layer=query_layer, + key_layer=key_layer, + value_layer=value_layer, + attention_mask=attention_mask, + hidden_size_per_partition=self.hidden_size_per_partition, + layer_id=layer_id, + layer_past=layer_past, + use_cache=use_cache + ) + + output = self.dense(context_layer) + + outputs = (output, 
present) + + if output_attentions: + outputs += (attention_probs,) + + return outputs # output, present, attention_probs + + +class GEGLU(torch.nn.Module): + def __init__(self): + super().__init__() + self.activation_fn = F.gelu + + def forward(self, x): + # dim=-1 breaks in jit for pt<1.10 + x1, x2 = x.chunk(2, dim=(x.ndim - 1)) + return x1 * self.activation_fn(x2) + + +class GLU(torch.nn.Module): + def __init__(self, hidden_size, inner_hidden_size=None, + layer_id=None, bias=True, activation_func=gelu, params_dtype=torch.float): + super(GLU, self).__init__() + self.layer_id = layer_id + self.activation_func = activation_func + + # Project to 4h. + self.hidden_size = hidden_size + if inner_hidden_size is None: + inner_hidden_size = 4 * hidden_size + self.inner_hidden_size = inner_hidden_size + self.dense_h_to_4h = skip_init( + torch.nn.Linear, + self.hidden_size, + self.inner_hidden_size, + bias=bias, + dtype=params_dtype, + ) + # Project back to h. + self.dense_4h_to_h = skip_init( + torch.nn.Linear, + self.inner_hidden_size, + self.hidden_size, + bias=bias, + dtype=params_dtype, + ) + + def forward(self, hidden_states): + """ + hidden_states: [seq_len, batch, hidden_size] + """ + + # [seq_len, batch, inner_hidden_size] + intermediate_parallel = self.dense_h_to_4h(hidden_states) + + intermediate_parallel = self.activation_func(intermediate_parallel) + + output = self.dense_4h_to_h(intermediate_parallel) + + return output + + +class GLMBlock(torch.nn.Module): + def __init__( + self, + hidden_size, + num_attention_heads, + layernorm_epsilon, + layer_id, + inner_hidden_size=None, + hidden_size_per_attention_head=None, + layernorm=LayerNorm, + use_bias=True, + params_dtype=torch.float, + num_layers=28, + position_encoding_2d=True + ): + super(GLMBlock, self).__init__() + # Set output layer initialization if not provided. + + self.layer_id = layer_id + + # Layernorm on the input data. + self.input_layernorm = layernorm(hidden_size, eps=layernorm_epsilon) + + self.position_encoding_2d = position_encoding_2d + + # Self attention. + self.attention = SelfAttention( + hidden_size, + num_attention_heads, + layer_id, + hidden_size_per_attention_head=hidden_size_per_attention_head, + bias=use_bias, + params_dtype=params_dtype, + position_encoding_2d=self.position_encoding_2d + ) + + # Layernorm on the input data. + self.post_attention_layernorm = layernorm(hidden_size, eps=layernorm_epsilon) + + self.num_layers = num_layers + + # GLU + self.mlp = GLU( + hidden_size, + inner_hidden_size=inner_hidden_size, + bias=use_bias, + layer_id=layer_id, + params_dtype=params_dtype, + ) + + def forward( + self, + hidden_states: torch.Tensor, + position_ids, + attention_mask: torch.Tensor, + layer_id, + layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + use_cache: bool = False, + output_attentions: bool = False, + ): + """ + hidden_states: [seq_len, batch, hidden_size] + attention_mask: [(1, 1), seq_len, seq_len] + """ + + # Layer norm at the begining of the transformer layer. + # [seq_len, batch, hidden_size] + attention_input = self.input_layernorm(hidden_states) + + # Self attention. + attention_outputs = self.attention( + attention_input, + position_ids, + attention_mask=attention_mask, + layer_id=layer_id, + layer_past=layer_past, + use_cache=use_cache, + output_attentions=output_attentions + ) + + attention_output = attention_outputs[0] + + outputs = attention_outputs[1:] + + # Residual connection. 
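+        # GLM uses a scaled residual: the (post-LayerNorm) block input is
+        # multiplied by alpha = sqrt(2 * num_layers) before the sublayer output
+        # is added; for the 28-layer ChatGLM-6B this gives alpha ~= 7.48. Note
+        # that the residual is taken from attention_input / mlp_input (the
+        # LayerNorm outputs), not from the raw hidden_states.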
+ alpha = (2 * self.num_layers) ** 0.5 + hidden_states = attention_input * alpha + attention_output + + mlp_input = self.post_attention_layernorm(hidden_states) + + # MLP. + mlp_output = self.mlp(mlp_input) + + # Second residual connection. + output = mlp_input * alpha + mlp_output + + if use_cache: + outputs = (output,) + outputs + else: + outputs = (output,) + outputs[1:] + + return outputs # hidden_states, present, attentions + + +class ChatGLMPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and + a simple interface for downloading and loading pretrained models. + """ + + is_parallelizable = True + supports_gradient_checkpointing = False + config_class = ChatGLMConfig + base_model_prefix = "transformer" + _no_split_modules = ["GLM6BBlock"] + + def __init__(self, *inputs, **kwargs): + super().__init__(*inputs, **kwargs) + + def _init_weights(self, module: nn.Module): + """Initialize the weights.""" + return + + +CHATGLM_6B_START_DOCSTRING = r""" + This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general + usage and behavior. + + Parameters: + config ([`~ChatGLM6BConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the configuration. + Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +CHATGLM_6B_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `({0})`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`ChatGLM6BTokenizer`]. + See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: + + - 0 corresponds to a *sentence A* token, + - 1 corresponds to a *sentence B* token. + + [What are token type IDs?](../glossary#token-type-ids) + position_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. + Selected in the range `[0, config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert *input_ids* indices into associated vectors + than the model's internal embedding lookup matrix. 
+ output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@add_start_docstrings( + "The bare ChatGLM-6B Model transformer outputting raw hidden-states without any specific head on top.", + CHATGLM_6B_START_DOCSTRING, +) +class ChatGLMModel(ChatGLMPreTrainedModel): + """ + + The model can behave as an encoder (with only self-attention) as well + as a decoder, in which case a layer of cross-attention is added between + the self-attention layers, following the architecture described in [Attention is + all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, + Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. + + To behave as an decoder the model needs to be initialized with the + `is_decoder` argument of the configuration set to `True`. + To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` + argument and `add_cross_attention` set to `True`; an + `encoder_hidden_states` is then expected as an input to the forward pass. + """ + + def __init__(self, config: ChatGLMConfig): + super().__init__(config) + + # recording parameters + self.max_sequence_length = config.max_sequence_length + self.hidden_size = config.hidden_size + self.params_dtype = torch.half + self.num_attention_heads = config.num_attention_heads + self.vocab_size = config.vocab_size + self.num_layers = config.num_layers + self.layernorm_epsilon = config.layernorm_epsilon + self.inner_hidden_size = config.inner_hidden_size + self.hidden_size_per_attention_head = self.hidden_size // self.num_attention_heads + self.position_encoding_2d = config.position_encoding_2d + + self.word_embeddings = skip_init( + torch.nn.Embedding, + num_embeddings=self.vocab_size, embedding_dim=self.hidden_size, + dtype=self.params_dtype + ) + + def get_layer(layer_id): + return GLMBlock( + self.hidden_size, + self.num_attention_heads, + self.layernorm_epsilon, + layer_id, + inner_hidden_size=self.inner_hidden_size, + hidden_size_per_attention_head=self.hidden_size_per_attention_head, + layernorm=LayerNorm, + use_bias=True, + params_dtype=self.params_dtype, + position_encoding_2d=self.position_encoding_2d, + ) + + self.layers = torch.nn.ModuleList( + [get_layer(layer_id) for layer_id in range(self.num_layers)] + ) + + # Final layer norm before output. 
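+        # The LayerNorm below normalizes the output of the last GLMBlock; its result
+        # is the hidden state that ChatGLMForConditionalGeneration later feeds to the LM head.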
+ self.final_layernorm = LayerNorm(self.hidden_size, eps=self.layernorm_epsilon) + + def get_input_embeddings(self): + return self.word_embeddings + + def set_input_embeddings(self, new_embeddings: torch.Tensor): + self.word_embeddings = new_embeddings + + @staticmethod + def get_masks(seq, device): + context_length = seq.index(150004) + 1 + + attention_mask = torch.ones((1, len(seq), len(seq)), device=device) + attention_mask.tril_() + attention_mask[..., :context_length - 1] = 1 + attention_mask.unsqueeze_(1) + attention_mask = (attention_mask < 0.5).bool() + + return attention_mask + + def get_position_ids(self, seq, mask_position, device, gmask=False): + context_length = seq.index(150004) + 1 + if self.position_encoding_2d: + seq_length = seq.index(150004) + position_ids = torch.arange(context_length, dtype=torch.long, device=device) + if not gmask: + position_ids[seq_length:] = mask_position + block_position_ids = torch.cat(( + torch.zeros(seq_length, dtype=torch.long, device=device), + torch.arange(context_length - seq_length, dtype=torch.long, device=device) + 1 + )) + position_ids = torch.stack((position_ids, block_position_ids), dim=0) + else: + position_ids = torch.arange(context_length, dtype=torch.long, device=device) + if not gmask: + position_ids[context_length - 1:] = mask_position + + position_ids = position_ids.unsqueeze(0) + + return position_ids + + @add_start_docstrings_to_model_forward(CHATGLM_6B_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=BaseModelOutputWithPastAndCrossAttentions, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, + inputs_embeds: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor, ...], BaseModelOutputWithPast]: + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + batch_size, seq_length = input_ids.shape[:2] + elif inputs_embeds is not None: + batch_size, seq_length, _ = inputs_embeds.shape[:2] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + if past_key_values is None: + past_key_values = tuple([None] * len(self.layers)) + + MASK, gMASK = 150000, 150001 + mask_token = MASK if MASK in input_ids else gMASK + use_gmask = False if MASK in input_ids else gMASK + seq = input_ids[0].tolist() + + mask_position = seq.index(mask_token) + + if attention_mask is None: + attention_mask = self.get_masks( + seq=seq, + device=input_ids.device + ) + + if position_ids is None: + position_ids = self.get_position_ids( + seq=seq, + mask_position=mask_position, + device=input_ids.device, + gmask=use_gmask + ) 
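+        # What the two helpers above produce in this implementation:
+        #  * get_masks returns a boolean (1, 1, seq_len, seq_len) mask where True marks
+        #    a position to be ignored; every token before the bos token (id 150004)
+        #    is fully visible, and attention is causal from there on.
+        #  * get_position_ids, with position_encoding_2d, returns a (1, 2, context_length)
+        #    tensor: row 0 holds absolute positions, row 1 holds block positions,
+        #    matching GLM's 2D rotary position encoding.
+        # Worked example with a hypothetical 4-token prompt [x, y, gMASK, 150004]:
+        #   position_ids[0, 0] == [0, 1, 2, 3]   # absolute positions
+        #   position_ids[0, 1] == [0, 0, 0, 1]   # block positions (start counting at bos)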
+ + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + + # [seq_len, batch, hidden_size] + hidden_states = inputs_embeds.transpose(0, 1) + + presents = () if use_cache else None + all_self_attentions = () if output_attentions else None + all_hidden_states = () if output_hidden_states else None + + seq_length_with_past = seq_length + past_key_values_length = 0 + if past_key_values[0] is not None: + past_key_values_length = past_key_values[0][0].shape[0] + seq_length_with_past = seq_length_with_past + past_key_values_length + if attention_mask is None: + attention_mask = torch.zeros(1, 1, device=input_ids.device).bool() + + else: + attention_mask = attention_mask.to(input_ids.device) + + for i, layer in enumerate(self.layers): + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_ret = layer( + hidden_states, + position_ids=position_ids, + attention_mask=attention_mask, + layer_id=torch.tensor(i), + layer_past=past_key_values[i], + use_cache=use_cache, + output_attentions=output_attentions + ) + + hidden_states = layer_ret[0] + + if use_cache: + presents = presents + (layer_ret[1],) + + if output_attentions: + all_self_attentions = all_self_attentions + (layer_ret[2 if use_cache else 1],) + + # Final layer norm. + hidden_states = self.final_layernorm(hidden_states) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None) + + return BaseModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=presents, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + ) + + +class ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel): + def __init__(self, config): + super().__init__(config) + + # self.hidden_size = config.hidden_size + # self.params_dtype = torch.half + # self.vocab_size = config.vocab_size + self.max_sequence_length = config.max_sequence_length + + self.position_encoding_2d = config.position_encoding_2d + + self.transformer = ChatGLMModel(config) + + self.lm_head = skip_init( + nn.Linear, + config.hidden_size, + config.vocab_size, + bias=False, + dtype=torch.half + ) + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def get_masks_and_position_ids(self, seq, mask_position, context_length, device, gmask=False): + attention_mask = torch.ones((1, context_length, context_length), device=device) + attention_mask.tril_() + attention_mask[..., :context_length - 1] = 1 + attention_mask.unsqueeze_(1) + attention_mask = (attention_mask < 0.5).bool() + + if self.position_encoding_2d: + seq_length = seq.index(150004) + position_ids = torch.arange(context_length, dtype=torch.long, device=device) + if not gmask: + position_ids[seq_length:] = mask_position + block_position_ids = torch.cat(( + torch.zeros(seq_length, dtype=torch.long, device=device), + torch.arange(context_length - seq_length, dtype=torch.long, device=device) + 1 + )) + position_ids = torch.stack((position_ids, block_position_ids), dim=0) + else: + position_ids = torch.arange(context_length, dtype=torch.long, device=device) + if not gmask: + position_ids[context_length - 1:] = mask_position + + position_ids = position_ids.unsqueeze(0) + + return attention_mask, position_ids + + def prepare_inputs_for_generation( + self, + input_ids: torch.LongTensor, + past: 
Optional[torch.Tensor] = None, + past_key_values: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + **kwargs + ) -> dict: + + MASK, gMASK = 150000, 150001 + mask_token = MASK if MASK in input_ids else gMASK + use_gmask = False if MASK in input_ids else gMASK + seq = input_ids[0].tolist() + mask_position = seq.index(mask_token) + + if mask_token not in seq: + raise ValueError("You have to add either [MASK] or [gMASK] in your input") + + # only last token for input_ids if past is not None + if past is not None or past_key_values is not None: + context_length = seq.index(150004) + last_token = input_ids[:, -1].unsqueeze(-1) + if self.position_encoding_2d: + position_ids = torch.tensor([[[mask_position], [len(seq) - context_length]]], dtype=torch.long, + device=input_ids.device) + else: + position_ids = torch.tensor([[mask_position]], dtype=torch.long, device=input_ids.device) + + if past is None: + past = past_key_values + return { + "input_ids": last_token, + "past_key_values": past, + "position_ids": position_ids, + } + else: + attention_mask, position_ids = self.get_masks_and_position_ids( + seq=seq, + mask_position=mask_position, + context_length=len(seq), + device=input_ids.device, + gmask=use_gmask + ) + + return { + "input_ids": input_ids, + "past_key_values": past, + "position_ids": position_ids, + "attention_mask": attention_mask + } + + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[Tuple[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ): + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + transformer_outputs = self.transformer( + input_ids=input_ids, + position_ids=position_ids, + attention_mask=attention_mask, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = transformer_outputs[0] + + lm_logits = self.lm_head(hidden_states).permute(1, 0, 2).contiguous() + + loss = None + if labels is not None: + lm_logits = lm_logits.to(torch.float32) + + # Shift so that tokens < n predict n + shift_logits = lm_logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss() + loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) + + lm_logits = lm_logits.to(hidden_states.dtype) + loss = loss.to(hidden_states.dtype) + + if not return_dict: + output = (lm_logits,) + transformer_outputs[1:] + return ((loss,) + output) if loss is not None else output + + return CausalLMOutputWithPast( + loss=loss, + logits=lm_logits, + past_key_values=transformer_outputs.past_key_values, + hidden_states=transformer_outputs.hidden_states, + attentions=transformer_outputs.attentions, + ) + + @staticmethod + def _reorder_cache( + past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor + ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]: + """ + This function is used to re-order the `past_key_values` cache if 
[`~PreTrainedModel.beam_search`] or + [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct + beam_idx at every generation step. + + Output shares the same memory storage as `past`. + """ + return tuple( + ( + layer_past[0].index_select(1, beam_idx.to(layer_past[0].device)), + layer_past[1].index_select(1, beam_idx.to(layer_past[1].device)), + ) + for layer_past in past + ) + + @torch.no_grad() + def chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, max_length: int = 2048, num_beams=1, + do_sample=True, top_p=0.7, temperature=0.95, **kwargs): + if history is None: + history = [] + gen_kwargs = {"max_length": max_length, "num_beams": num_beams, "do_sample": do_sample, "top_p": top_p, + "temperature": temperature, **kwargs} + if not history: + prompt = query + else: + prompt = "" + for i, (old_query, response) in enumerate(history): + prompt += "[Round {}]\n问:{}\n答:{}\n".format(i, old_query, response) + prompt += "[Round {}]\n问:{}\n答:".format(len(history), query) + input_ids = tokenizer([prompt], return_tensors="pt", padding=True) + input_ids = input_ids.to(self.device) + outputs = self.generate(**input_ids, **gen_kwargs) + outputs = outputs.tolist()[0][len(input_ids["input_ids"][0]) - 2:] + response = tokenizer.decode(outputs) + response = response.strip() + response = response.replace("[[训练时间]]", "2023年") + history = history + [(query, response)] + return response, history + + @torch.no_grad() + def generate( + self, + **kwargs, + ): + MASK, gMASK = 150000, 150001 + bos, eos = 150004, 150005 + + if "eos_token_id" not in kwargs: + kwargs["eos_token_id"] = eos + + stop = False + + return_seqs = [] + + while True: + output_ids = super().generate(**kwargs) + + return_seqs = [] + max_length = 0 + + for i in range(output_ids.shape[0]): + output_seq = output_ids[i].tolist() + mask_token = MASK if MASK in output_seq else gMASK + mask_position = output_seq.index(mask_token) + bos_position = output_seq.index(bos) + if eos in output_seq: + eos_position = output_seq.index(eos) + else: + eos_position = len(output_seq) + + return_seq = output_seq[:mask_position] + output_seq[bos_position + 1:eos_position] + output_seq[ + mask_position + 1:bos_position] + max_length = max(max_length, len(return_seq)) + return_seqs.append(return_seq) + + for i in range(output_ids.shape[0]): + return_seqs[i] = [0] * (max_length - len(return_seqs[i])) + return_seqs[i] # padding + if mask_token not in return_seqs[i]: + stop = True + + if stop: + break + + for return_seq in return_seqs: + return_seq += [bos] + + kwargs['input_ids'] = torch.tensor(return_seqs, dtype=torch.long, device=kwargs['input_ids'].device) + + return torch.tensor(return_seqs, dtype=torch.long, device=kwargs['input_ids'].device) + + def quantize(self, bits: int): + from .quantization import quantize + self.transformer = quantize(self.transformer, bits) + return self diff --git a/moe/pytorch_model.bin.index.json b/moe/pytorch_model.bin.index.json new file mode 100644 index 0000000000000000000000000000000000000000..b8ada2bdf39c8297dc2b3159270227c587bd13e9 --- /dev/null +++ b/moe/pytorch_model.bin.index.json @@ -0,0 +1,375 @@ +{ + "metadata": { + "total_size": 13744473856 + }, + "weight_map": { + "lm_head.weight": "pytorch_model-00008-of-00008.bin", + "transformer.final_layernorm.bias": "pytorch_model-00007-of-00008.bin", + "transformer.final_layernorm.weight": "pytorch_model-00007-of-00008.bin", + "transformer.layers.0.attention.dense.bias": "pytorch_model-00001-of-00008.bin", + 
"transformer.layers.0.attention.dense.weight": "pytorch_model-00001-of-00008.bin", + "transformer.layers.0.attention.query_key_value.bias": "pytorch_model-00001-of-00008.bin", + "transformer.layers.0.attention.query_key_value.weight": "pytorch_model-00001-of-00008.bin", + "transformer.layers.0.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00008.bin", + "transformer.layers.0.input_layernorm.bias": "pytorch_model-00001-of-00008.bin", + "transformer.layers.0.input_layernorm.weight": "pytorch_model-00001-of-00008.bin", + "transformer.layers.0.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00008.bin", + "transformer.layers.0.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00008.bin", + "transformer.layers.0.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00008.bin", + "transformer.layers.0.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00008.bin", + "transformer.layers.0.post_attention_layernorm.bias": "pytorch_model-00001-of-00008.bin", + "transformer.layers.0.post_attention_layernorm.weight": "pytorch_model-00001-of-00008.bin", + "transformer.layers.1.attention.dense.bias": "pytorch_model-00001-of-00008.bin", + "transformer.layers.1.attention.dense.weight": "pytorch_model-00001-of-00008.bin", + "transformer.layers.1.attention.query_key_value.bias": "pytorch_model-00001-of-00008.bin", + "transformer.layers.1.attention.query_key_value.weight": "pytorch_model-00001-of-00008.bin", + "transformer.layers.1.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00008.bin", + "transformer.layers.1.input_layernorm.bias": "pytorch_model-00001-of-00008.bin", + "transformer.layers.1.input_layernorm.weight": "pytorch_model-00001-of-00008.bin", + "transformer.layers.1.mlp.dense_4h_to_h.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.1.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.1.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00008.bin", + "transformer.layers.1.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00008.bin", + "transformer.layers.1.post_attention_layernorm.bias": "pytorch_model-00001-of-00008.bin", + "transformer.layers.1.post_attention_layernorm.weight": "pytorch_model-00001-of-00008.bin", + "transformer.layers.10.attention.dense.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.10.attention.dense.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.10.attention.query_key_value.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.10.attention.query_key_value.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.10.attention.rotary_emb.inv_freq": "pytorch_model-00003-of-00008.bin", + "transformer.layers.10.input_layernorm.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.10.input_layernorm.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.10.mlp.dense_4h_to_h.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.10.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.10.mlp.dense_h_to_4h.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.10.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.10.post_attention_layernorm.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.10.post_attention_layernorm.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.11.attention.dense.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.11.attention.dense.weight": "pytorch_model-00004-of-00008.bin", + 
"transformer.layers.11.attention.query_key_value.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.11.attention.query_key_value.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.11.attention.rotary_emb.inv_freq": "pytorch_model-00003-of-00008.bin", + "transformer.layers.11.input_layernorm.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.11.input_layernorm.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.11.mlp.dense_4h_to_h.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.11.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.11.mlp.dense_h_to_4h.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.11.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.11.post_attention_layernorm.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.11.post_attention_layernorm.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.12.attention.dense.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.12.attention.dense.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.12.attention.query_key_value.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.12.attention.query_key_value.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.12.attention.rotary_emb.inv_freq": "pytorch_model-00004-of-00008.bin", + "transformer.layers.12.input_layernorm.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.12.input_layernorm.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.12.mlp.dense_4h_to_h.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.12.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.12.mlp.dense_h_to_4h.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.12.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.12.post_attention_layernorm.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.12.post_attention_layernorm.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.13.attention.dense.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.13.attention.dense.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.13.attention.query_key_value.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.13.attention.query_key_value.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.13.attention.rotary_emb.inv_freq": "pytorch_model-00004-of-00008.bin", + "transformer.layers.13.input_layernorm.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.13.input_layernorm.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.13.mlp.dense_4h_to_h.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.13.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.13.mlp.dense_h_to_4h.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.13.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.13.post_attention_layernorm.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.13.post_attention_layernorm.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.14.attention.dense.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.14.attention.dense.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.14.attention.query_key_value.bias": 
"pytorch_model-00004-of-00008.bin", + "transformer.layers.14.attention.query_key_value.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.14.attention.rotary_emb.inv_freq": "pytorch_model-00004-of-00008.bin", + "transformer.layers.14.input_layernorm.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.14.input_layernorm.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.14.mlp.dense_4h_to_h.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.14.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.14.mlp.dense_h_to_4h.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.14.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.14.post_attention_layernorm.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.14.post_attention_layernorm.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.15.attention.dense.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.15.attention.dense.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.15.attention.query_key_value.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.15.attention.query_key_value.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.15.attention.rotary_emb.inv_freq": "pytorch_model-00004-of-00008.bin", + "transformer.layers.15.input_layernorm.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.15.input_layernorm.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.15.mlp.dense_4h_to_h.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.15.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.15.mlp.dense_h_to_4h.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.15.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.15.post_attention_layernorm.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.15.post_attention_layernorm.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.16.attention.dense.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.16.attention.dense.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.16.attention.query_key_value.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.16.attention.query_key_value.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.16.attention.rotary_emb.inv_freq": "pytorch_model-00004-of-00008.bin", + "transformer.layers.16.input_layernorm.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.16.input_layernorm.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.16.mlp.dense_4h_to_h.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.16.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.16.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.16.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.16.post_attention_layernorm.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.16.post_attention_layernorm.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.17.attention.dense.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.17.attention.dense.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.17.attention.query_key_value.bias": "pytorch_model-00005-of-00008.bin", + 
"transformer.layers.17.attention.query_key_value.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.17.attention.rotary_emb.inv_freq": "pytorch_model-00005-of-00008.bin", + "transformer.layers.17.input_layernorm.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.17.input_layernorm.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.17.mlp.dense_4h_to_h.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.17.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.17.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.17.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.17.post_attention_layernorm.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.17.post_attention_layernorm.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.18.attention.dense.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.18.attention.dense.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.18.attention.query_key_value.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.18.attention.query_key_value.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.18.attention.rotary_emb.inv_freq": "pytorch_model-00005-of-00008.bin", + "transformer.layers.18.input_layernorm.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.18.input_layernorm.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.18.mlp.dense_4h_to_h.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.18.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.18.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.18.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.18.post_attention_layernorm.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.18.post_attention_layernorm.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.19.attention.dense.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.19.attention.dense.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.19.attention.query_key_value.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.19.attention.query_key_value.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.19.attention.rotary_emb.inv_freq": "pytorch_model-00005-of-00008.bin", + "transformer.layers.19.input_layernorm.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.19.input_layernorm.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.19.mlp.dense_4h_to_h.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.19.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.19.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.19.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.19.post_attention_layernorm.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.19.post_attention_layernorm.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.2.attention.dense.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.2.attention.dense.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.2.attention.query_key_value.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.2.attention.query_key_value.weight": 
"pytorch_model-00002-of-00008.bin", + "transformer.layers.2.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00008.bin", + "transformer.layers.2.input_layernorm.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.2.input_layernorm.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.2.mlp.dense_4h_to_h.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.2.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.2.mlp.dense_h_to_4h.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.2.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.2.post_attention_layernorm.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.2.post_attention_layernorm.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.20.attention.dense.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.20.attention.dense.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.20.attention.query_key_value.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.20.attention.query_key_value.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.20.attention.rotary_emb.inv_freq": "pytorch_model-00005-of-00008.bin", + "transformer.layers.20.input_layernorm.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.20.input_layernorm.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.20.mlp.dense_4h_to_h.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.20.mlp.dense_4h_to_h.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.20.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.20.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.20.post_attention_layernorm.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.20.post_attention_layernorm.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.21.attention.dense.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.21.attention.dense.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.21.attention.query_key_value.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.21.attention.query_key_value.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.21.attention.rotary_emb.inv_freq": "pytorch_model-00006-of-00008.bin", + "transformer.layers.21.input_layernorm.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.21.input_layernorm.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.21.mlp.dense_4h_to_h.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.21.mlp.dense_4h_to_h.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.21.mlp.dense_h_to_4h.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.21.mlp.dense_h_to_4h.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.21.post_attention_layernorm.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.21.post_attention_layernorm.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.22.attention.dense.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.22.attention.dense.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.22.attention.query_key_value.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.22.attention.query_key_value.weight": "pytorch_model-00006-of-00008.bin", + 
"transformer.layers.22.attention.rotary_emb.inv_freq": "pytorch_model-00006-of-00008.bin", + "transformer.layers.22.input_layernorm.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.22.input_layernorm.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.22.mlp.dense_4h_to_h.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.22.mlp.dense_4h_to_h.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.22.mlp.dense_h_to_4h.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.22.mlp.dense_h_to_4h.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.22.post_attention_layernorm.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.22.post_attention_layernorm.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.23.attention.dense.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.23.attention.dense.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.23.attention.query_key_value.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.23.attention.query_key_value.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.23.attention.rotary_emb.inv_freq": "pytorch_model-00006-of-00008.bin", + "transformer.layers.23.input_layernorm.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.23.input_layernorm.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.23.mlp.dense_4h_to_h.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.23.mlp.dense_4h_to_h.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.23.mlp.dense_h_to_4h.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.23.mlp.dense_h_to_4h.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.23.post_attention_layernorm.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.23.post_attention_layernorm.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.24.attention.dense.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.24.attention.dense.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.24.attention.query_key_value.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.24.attention.query_key_value.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.24.attention.rotary_emb.inv_freq": "pytorch_model-00006-of-00008.bin", + "transformer.layers.24.input_layernorm.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.24.input_layernorm.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.24.mlp.dense_4h_to_h.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.24.mlp.dense_4h_to_h.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.24.mlp.dense_h_to_4h.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.24.mlp.dense_h_to_4h.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.24.post_attention_layernorm.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.24.post_attention_layernorm.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.25.attention.dense.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.25.attention.dense.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.25.attention.query_key_value.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.25.attention.query_key_value.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.25.attention.rotary_emb.inv_freq": 
"pytorch_model-00006-of-00008.bin", + "transformer.layers.25.input_layernorm.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.25.input_layernorm.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.25.mlp.dense_4h_to_h.bias": "pytorch_model-00007-of-00008.bin", + "transformer.layers.25.mlp.dense_4h_to_h.weight": "pytorch_model-00007-of-00008.bin", + "transformer.layers.25.mlp.dense_h_to_4h.bias": "pytorch_model-00007-of-00008.bin", + "transformer.layers.25.mlp.dense_h_to_4h.weight": "pytorch_model-00007-of-00008.bin", + "transformer.layers.25.post_attention_layernorm.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.25.post_attention_layernorm.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.26.attention.dense.bias": "pytorch_model-00007-of-00008.bin", + "transformer.layers.26.attention.dense.weight": "pytorch_model-00007-of-00008.bin", + "transformer.layers.26.attention.query_key_value.bias": "pytorch_model-00007-of-00008.bin", + "transformer.layers.26.attention.query_key_value.weight": "pytorch_model-00007-of-00008.bin", + "transformer.layers.26.attention.rotary_emb.inv_freq": "pytorch_model-00007-of-00008.bin", + "transformer.layers.26.input_layernorm.bias": "pytorch_model-00007-of-00008.bin", + "transformer.layers.26.input_layernorm.weight": "pytorch_model-00007-of-00008.bin", + "transformer.layers.26.mlp.dense_4h_to_h.bias": "pytorch_model-00007-of-00008.bin", + "transformer.layers.26.mlp.dense_4h_to_h.weight": "pytorch_model-00007-of-00008.bin", + "transformer.layers.26.mlp.dense_h_to_4h.bias": "pytorch_model-00007-of-00008.bin", + "transformer.layers.26.mlp.dense_h_to_4h.weight": "pytorch_model-00007-of-00008.bin", + "transformer.layers.26.post_attention_layernorm.bias": "pytorch_model-00007-of-00008.bin", + "transformer.layers.26.post_attention_layernorm.weight": "pytorch_model-00007-of-00008.bin", + "transformer.layers.27.attention.dense.bias": "pytorch_model-00007-of-00008.bin", + "transformer.layers.27.attention.dense.weight": "pytorch_model-00007-of-00008.bin", + "transformer.layers.27.attention.query_key_value.bias": "pytorch_model-00007-of-00008.bin", + "transformer.layers.27.attention.query_key_value.weight": "pytorch_model-00007-of-00008.bin", + "transformer.layers.27.attention.rotary_emb.inv_freq": "pytorch_model-00007-of-00008.bin", + "transformer.layers.27.input_layernorm.bias": "pytorch_model-00007-of-00008.bin", + "transformer.layers.27.input_layernorm.weight": "pytorch_model-00007-of-00008.bin", + "transformer.layers.27.mlp.dense_4h_to_h.bias": "pytorch_model-00007-of-00008.bin", + "transformer.layers.27.mlp.dense_4h_to_h.weight": "pytorch_model-00007-of-00008.bin", + "transformer.layers.27.mlp.dense_h_to_4h.bias": "pytorch_model-00007-of-00008.bin", + "transformer.layers.27.mlp.dense_h_to_4h.weight": "pytorch_model-00007-of-00008.bin", + "transformer.layers.27.post_attention_layernorm.bias": "pytorch_model-00007-of-00008.bin", + "transformer.layers.27.post_attention_layernorm.weight": "pytorch_model-00007-of-00008.bin", + "transformer.layers.3.attention.dense.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.3.attention.dense.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.3.attention.query_key_value.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.3.attention.query_key_value.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.3.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00008.bin", + "transformer.layers.3.input_layernorm.bias": 
"pytorch_model-00002-of-00008.bin", + "transformer.layers.3.input_layernorm.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.3.mlp.dense_4h_to_h.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.3.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.3.mlp.dense_h_to_4h.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.3.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.3.post_attention_layernorm.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.3.post_attention_layernorm.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.4.attention.dense.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.4.attention.dense.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.4.attention.query_key_value.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.4.attention.query_key_value.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.4.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00008.bin", + "transformer.layers.4.input_layernorm.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.4.input_layernorm.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.4.mlp.dense_4h_to_h.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.4.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.4.mlp.dense_h_to_4h.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.4.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.4.post_attention_layernorm.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.4.post_attention_layernorm.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.5.attention.dense.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.5.attention.dense.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.5.attention.query_key_value.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.5.attention.query_key_value.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.5.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00008.bin", + "transformer.layers.5.input_layernorm.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.5.input_layernorm.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.5.mlp.dense_4h_to_h.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.5.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.5.mlp.dense_h_to_4h.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.5.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.5.post_attention_layernorm.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.5.post_attention_layernorm.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.6.attention.dense.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.6.attention.dense.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.6.attention.query_key_value.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.6.attention.query_key_value.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.6.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00008.bin", + "transformer.layers.6.input_layernorm.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.6.input_layernorm.weight": 
"pytorch_model-00002-of-00008.bin", + "transformer.layers.6.mlp.dense_4h_to_h.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.6.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.6.mlp.dense_h_to_4h.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.6.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.6.post_attention_layernorm.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.6.post_attention_layernorm.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.7.attention.dense.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.7.attention.dense.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.7.attention.query_key_value.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.7.attention.query_key_value.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.7.attention.rotary_emb.inv_freq": "pytorch_model-00003-of-00008.bin", + "transformer.layers.7.input_layernorm.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.7.input_layernorm.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.7.mlp.dense_4h_to_h.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.7.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.7.mlp.dense_h_to_4h.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.7.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.7.post_attention_layernorm.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.7.post_attention_layernorm.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.8.attention.dense.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.8.attention.dense.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.8.attention.query_key_value.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.8.attention.query_key_value.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.8.attention.rotary_emb.inv_freq": "pytorch_model-00003-of-00008.bin", + "transformer.layers.8.input_layernorm.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.8.input_layernorm.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.8.mlp.dense_4h_to_h.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.8.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.8.mlp.dense_h_to_4h.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.8.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.8.post_attention_layernorm.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.8.post_attention_layernorm.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.9.attention.dense.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.9.attention.dense.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.9.attention.query_key_value.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.9.attention.query_key_value.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.9.attention.rotary_emb.inv_freq": "pytorch_model-00003-of-00008.bin", + "transformer.layers.9.input_layernorm.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.9.input_layernorm.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.9.mlp.dense_4h_to_h.bias": 
"pytorch_model-00003-of-00008.bin", + "transformer.layers.9.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.9.mlp.dense_h_to_4h.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.9.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.9.post_attention_layernorm.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.9.post_attention_layernorm.weight": "pytorch_model-00003-of-00008.bin", + "transformer.word_embeddings.weight": "pytorch_model-00001-of-00008.bin" + } +} diff --git a/moe/quantization.py b/moe/quantization.py new file mode 100644 index 0000000000000000000000000000000000000000..bf30790cd8cd4adbead4f8cb64afd6db7ff29cf2 --- /dev/null +++ b/moe/quantization.py @@ -0,0 +1,187 @@ +from torch.nn import Linear +from torch.nn.parameter import Parameter + +import bz2 +import torch +import base64 +import ctypes + +from typing import List +from cpm_kernels.kernels.base import LazyKernelCModule, KernelFunction, round_up + + +class W8A16Linear(torch.autograd.Function): + @staticmethod + def forward(ctx, inp: torch.Tensor, quant_w: torch.Tensor, scale_w: torch.Tensor, weight_bit_width): + ctx.inp_shape = inp.size() + ctx.weight_shape = quant_w.size() + ctx.weight_bit_width = weight_bit_width + out_features = quant_w.size(0) + inp = inp.contiguous().view(-1, inp.size(-1)) + weight = extract_weight_to_half(quant_w, scale_w, weight_bit_width) + output = inp.mm(weight.t()) + ctx.save_for_backward(inp, quant_w, scale_w) + return output.view(*(ctx.inp_shape[:-1] + (out_features,))) + + @staticmethod + def backward(ctx, grad_output: torch.Tensor): + inp, quant_w, scale_w = ctx.saved_tensors + weight = extract_weight_to_half(quant_w, scale_w, ctx.weight_bit_width) + grad_output = grad_output.contiguous().view(-1, weight.size(0)) + grad_input = grad_output.mm(weight) + grad_weight = grad_output.t().mm(inp) + return grad_input.view(ctx.inp_shape), grad_weight.view(ctx.weight_shape), None + + +class Kernel: + def __init__(self, code: bytes, function_names: List[str]): + self.code = code + self._function_names = function_names + self._cmodule = LazyKernelCModule(self.code) + + for name in self._function_names: + setattr(self, name, KernelFunction(self._cmodule, name)) + + +quantization_code = 
"$QlpoOTFBWSZTWU9yuJUAQHN//////////f/n/8/n///n//bt4dTidcVx8X3V9FV/92/v4B7/AD5FBQFAAAChSgKpFCFAFVSigUAAAEKhSgUUqgFBKigqVREQAABQBQIANDTTIGI00BkZBkNGE0A0BkBkGQGRkaNAaAGQNBoGgDIAAYIGTI0DQAQAaGmmQMRpoDIyDIaMJoBoDIDIMgMjI0aA0AMgaDQNAGQAAwQMmRoGgAgA0NNMgYjTQGRkGQ0YTQDQGQGQZAZGRo0BoAZA0GgaAMgABggZMjQNABABoaaZAxGmgMjIMhowmgGgMgMgyAyMjRoDQAyBoNA0AZAADBAyZGgaAAmqU1NEgJqnptU/Sn4jRR6J6epk2pqb1Q/SgAPUGgyNNGjQ2SBpoAZAAGg0NB6mgDIAAAAA2oaApSREBNAARhGiYEaEwU8pvImlP0k2aam1GaGqbFNM1MHpTwmkepmyU9R6nqPKekHqNNPUxNGhp6n6p6QaZ6o9TG1GMqcoV9ly6nRanHlq6zPNbnGZNi6HSug+2nPiZ13XcnFYZW+45W11CumhzYhchOJ2GLLV1OBjBjGf4TptOddTSOcVxhqYZMYwZXZZY00zI1paX5X9J+b+f4e+x43RXSxXPOdquiGpduatGyXneN696M9t4HU2eR5XX/kPhP261NTx3JO1Ow7LyuDmeo9a7d351T1ZxnvnrvYnrXv/hXxPCeuYx2XsNmO003eg9J3Z6U7b23meJ4ri01OdzTk9BNO96brz+qT5nuvvH3ds/G+m/JcG/F2XYuhXlvO+jP7U3XgrzPN/lr8Sf1n6j4j7jZs+s/T0tNaNNYzTs12rxjwztHlnire3Nzc3N1wuBwOBwXBvZfoHpD7rFmR99V5vj3aXza3xdBbXMalubTg/jIv5dfAi54Pdc75j4z412n3Npj3Ld/ENm7a3b/Cod6h/ret1/5vn/C+l+gdslMvgPSLJ8d8q+U66fevYn/tW1chleEtNTGlcHCbLRlq0tHzF5tsbbZZfHjjLgZu42XCuC3NrdjTasZGNzgxPIrGqp7r3p7L2p5XjnpPSmTd5XtzqnB6U87zzg1Ol0zd0zsLszxR6lkxp35u6/teL0L0W922cR7Lu1lpL9CsHirzuM2T+BgsyViT6LHcm0/Vr6U/7LGGyJeqTEjt0PHWhF5mCT7R9mtlDwriYv0Tyr/OxYt6qp5r0mPVT0608TqnqMZaarU2nFwrTzzlrs1ed7z1ux60wyr4ydCaTi3enW8x68x0zU7tXSlcmPSW1mGpWJMg4zmPC2lK96tp0OE80y4MfEvnZj8zGluR6b22ki1Ou9V2nCd9xovcPvcYMZYy0lvN60ScZ45vN6yeCeeXFb1lVjnnCar5fwXwE2bzJ4HI1XVPXfXZMm44GUsMpYsmLB65TuVdm0cl0b+i/wGNN66XjeV7zuPpHcnK/juhhjdfId5jMdE5nN0dGmmm2zZs2cexD5n9p/dY352XsvXHaZNWWsmmS1atjR452nYudzvqv2HMRyvNNnlMcDl3R2+yx2uVrBubTW9icHDVtbNXlZm7jma1rM4VurZZd2y6nUau7ZXZ7bVU+mnoOVxZGMrVmvX60605JwmzGZhhhjTWtaaaMaaGTGmNMZasY0iX8VMUl8eepaIrzGSpemWOQyZORk2bNpjUybMmxqYmknCGCFynutfksaZpjTNMaaatM0xsxcGR0sociNqxNSmhhR1ZJPbsn8qyF0t2qH6iYBclclalbtTTcHTDsPaX6rlnElph2Jyumumtynv2Kk8GI7rsvXbIcJgHJOSaSXnnGaI3m87RtVXJOZ/YtgdTE6Wpha6ZlE8ayXkef1fh602r2WwvfMXtMdLlkfnLFdYYwYso+bWqm7yJqHXZGw2nrS5ZanSYnWlxBxMF1V940K2wdrI7R6OYf7DGGamMmTSbRhlS45xmVOumF1EyPCmHrrN8wwZOOrdNtLeMtzFzDlWnfTBxMk2NaXIZHBYxYLD4w8yju0ao65Vz1OIXoS9dLanwCe1PWrYuWMqf1if1z2k2yYfKJ741PDgno1ZQ8DRqvUny3mNoWTzGO6m1DkrJI8JiR5cSd+vZdGOO8nrMoc5+NDUFsMSXaZJeNlMmGLtJsovOsUp7I9S5VojKxF6bTVEelXqlfJobQr3LozSh2Jk7VcrVMfhXqszGWMzNqGhqZY0OadxkyyMssKugZR0KNFXBHlqwmJgTE/BNVMk6ItJXZMR0H47GpXv/DMOvNkmVuaV1PRfEdxuqc7Hcd+ZV/zTLaRxWk0nl9CdCeM6mn5rstHIBcpiuwmUZXeq81DacHI2rmrZ5SuE5mOZd6LQrZg9mx32TprA8BMo5jKN6yLTCi3WzQaZSuhzTtM1fUTGVpG8Tw+KXI0tjEpiWxtLYynOlktSbVlaI5kxP8TDH8kx50xoxi5KcA4pcja8KWLRlO/Ks6q06ergnvm1ca3Tq8Uw7LTUsmWyctXPWmpitl/uvGcWTGXGuAXDfhqazGmjkxcJW5hMMMMpYsXl2TZYtVOddG3XCarUt6Ptq9CZXSNzyuRzqRZOjsxdBbFVz6OA5HI43r1jityVlVpVkxmOsyaYWE1NTGq1sOVh36mHMcxtSvcy70edG0ZGR3I1Go1GRlV7mWWo1G0ZGRqlvH40l7o4m5xMWLLLYyNjnqc8556mdPqLJ31n/1nWOncxzG1tizrHs/Z+d2vP/B/l8wdJ6rHUn2nbbDq4p6htFtYzMMMTaZis1K5GKzGNmxhmUx2DDlZ/qNnIx41xnaMfCZWYaZWtNLTNW8ND4Fw1MyZOCdM428suKG1ehW8TesOydg7J+YYcD4cYR+8dFK6M4E3HM9ZfRNNL+Sn6rsl4DsrDl2HpPCnfxjGXtbZtYys1ttlyJ4T+BvexjGWRjMszK4Jpc77D3GyuVD7q0+G8m9G+2+rGm7cOR2y7FdtY2XUYx/oNlfRYxhMYyYZkyyg55enna9Kt/FFi6GMMwYwdwxWgxGMLKYmUyGExTKMZkMFhkymKuh0NOBNnBu+23LdwDoZYYzGGMxtORaTU1pjTGWTTGGtMrNWUsyyTTLLG1qy2ZjbK2DBllWqxMtBMaYZQmcE7zvvRcTkclUwdkxTaSdyySt/7fpL+T1v516Ji97fwr5JbLu305zMn5+GMTTZ9F+y7ExwmGVfG44yxn3dLv6l5i+Wth1jCrDq21nW9LqvvDzz3Vf3LLH/O/32TJ/erx3bXftO4eF+G956D952K/An4NfvOpjFjExjevP/UmE0fIoZXx6/w6lX/no3D0bLt+ixjieBM6ksRd0yB4Lt2SwYNE+gd1detlZWUnpiZfGfFaK+4PyCa/v18V8X75pe9fLXzp7l3VjF76vWZmHwGz1IZNWT7b8yddJ4q5kyrVdfru6atWc7bVYztL9Jf4GXvT+Y8m9/YsXP6H018a8D4XVOqvfzqeR+6yZOD8dPv0+U7/q5Pl+2dNb0MjzGVH5p6MNQ7cOWvw62U9aHE8DprDek+McLyvDz+te+9Zhq5+YTruufMcWMabqy
sTmZVWjKPfnK0wyVcrsuhjZRdLkHNvD72b9abriOSGIxiLixMOoalNPXzy+wT/tf+U6HHONfsz+xe8ufHBdQWWGWLA9if0rsnmrxK5LvRZQeWsTCsrmOYy8VteVfuRfcVTtDLItLIsMYxZLdU/DbtSemxF6Z6Zo5WBXE4tFdCyVMMXMTEMZXVlS6Xec2T4e0tHsRcEuWshcJ2YsNF5rUx1E8ifCq6Z+ZP7qdCeu/aTwFd53l16/o0NOw6O3dLavP4Hbi4RdmuDk6DoYaninC0+o4uZjbJ7Rxeu0/FbuFg+q7DVS6fQe0rZ6NDGUNNU6DEqOaLTicKnYZMnBWruljQxoaS3dZhocDge0bSTyOvdAbG5hxe2xji7E/L55xX13wWNDi6HCekcFxfCPGxY0MXC+s7afWaMdDyjyr+o8Rudm/NabOZvdl274zH4f5XK9z6On1Pe/K5TdPAslg77BjuO6Y3eO7GqvOPG/stknp1leyvLL0Z7bl9I4noMvLkzytLhWYzrOZzLXCORe028rORzOg4N/L0HlMOQ3Pgmnbb6KczlabORpu980q37TBqRu0/p3PO6234Bl03Ynuz+9W7gnsEcmvYaYY3aMYY0wx3pYd+ujsXauWdaY5Xkbtl23fPzFHiDB/QMo0yFjBllYxTQYYyxkrwn7JufwJ/PfgJ+C83X69ni6zvXcnyXabv0ncbLwsceS+RNlyN2mnneJtX0ngYO0+e+0+UnA+Wch3ji8hj5an4h+i6XBySU4n+R0roVcbw5yvHrmr4Yw8Y7x6c+9POPYHI5HI5HI5HI5HGXGww4nE4nrVyOR8XeqPEO7PLOiukYa3Novk5hV4cdtYZLI93e+uxff2jRo0aNGjRo0aNG1bVtW1dy3m83m8+tQ5ZzHw3nObwOu8La9Rc1dtkdS8A3eTk823tnktXWlxN6Oixe06zrN70Isd9jiOgZFq9yfkPqP/SLhN2Myl8jDM43bl1nbcb4cO57jlh8Jow6pzXZdL4dyODTuuhu77FyO27DdwdRxmvO+O+3N2+BdqyTwLHVczDVY4UPE4O66/ZO2cx1LFzVdSXtF7G4HMbrauOHRw6c8FdZ5m9fHZHYZXfTlZquyynSyTTKke6vcffSD9pzPA/G7n7jxPmuhc1DHMynPMrGL6AdewYmwu5ko+UUyTwrMv27rPH1v1nGqd87+p6N6LU8k3NEng53xXyHS97+44OSg/sy/hn+Se6yfYNjW0/uTgP+PvWYzLMmjhcLB/gGpri6H83/84eUXWT6T9Hsv7785z/7z4icpW+zfXypuR7rx/gMdZb1/wC678pcs8/2a3mDitGHxl9mfPlll5MafWWqxk/eYuTDgcNMzDGWLWvsuglNxs53GtN6uWpktlW1tZZYcuinMMWmnNnJydze3b2Y1McBxrBkXw799izLMZZYyy0TkbsGM4p03S2uVu5s/XXUdSdec6smVxZYYGpVmT8A+8ajuEyV5FatkvVru2x6uxGXXbH4A+jvgP4GMYy3iPLXzq/6z65+E005ey+cwMZD3fZcqc6xpjTFjQ0P3U+e++cPYmTIwj0nrK5NPTfl3WvpfLtXDcb2HQMudYOxFXQBor4L4T6vrOauFctYXJQ++NUWmJe5bmx1jDiZS1dTqWxo4GR8jm3fttpmPHppk9PEyv4/y8/sO07XacOmcqc0x2Vi9BvNJvN5oW8x4mOsydpidRxMYJPx06m1bqPzq9KtK8sxXNXFodD/+MYYaJTLwOhc9brCsV18oOR1i4tXChyTkq4lf4y1Ke+9axjDHqs1mfBbMXuP4Hzi+X7t8vzv7bHerrUPgPCxhjre4fXdfLNtNM+Jd+Zdh8xd8wP87uNPoPgv4W7/5P2BuxfsMabNnMnza+54Pdi5U671GPZY8CehX8Voeoo7FHpkeEc6715FwHZrIrUrHaviPUbPZHND+IhczrP6FcYvhOZ0Di/ETt0OI+YwNWR9r7tpf6WDeZKZDB1+z2IthOl1mPyb5FluvEx9h9d0NnM0Y1XPFkWIsk1WotJ0PBMmkvjvQTd0e71tfeV+8r8lQ/tpzpsmxJ+InrI/dj2UajUajVTUajatRqNRtGo1Go1Go4wjeMpZFMVV9CHbofPraLsJ3JpWV2XOoanCuFky4y3PPNxucK2uKC1Lbdb1eo+m5XomN6HfeZsabHLHRX/K+offtNGGmHWctcVcG44MdSqsOLY9VzX+Zxfxn2HPdWTpzWvkrtJ8M5zorrKcquRytJ5N5DZmcaW02l76nWO+BqPXm1A2Ry/0q71dH/mqrqeFjkYxjEXtsX8qubTk67rGycyqsdm4tZx5D6D5hhi0waaWmiaMP81Yjii5qxPlPuU/GfTL1Y5E6Jyfiq63qTa39A4J0sOGDgO9WF9bOXl0XfPRbsY2bPNKPy1YrFYrFYmRhhlTIyMjJWJYZHXuCXI8OoXsvfljGLFicNifpp2XunoPiG1wtx3p1Tah+/DD66OnVtVXP9rKbVxOnL0tR/rHtqB5UDErUVcl11D4qqvjpOcxX7armUNJB3LpW6bxVvD08e8h3odKKvyCFZBdSh2FVcST9xV3n3T8t1j7Kr9qgrqXg+13Pt5U7JCvFXVIV1YG5lRhkVYZJYYDDD4KOIMoHCp26WS8GB7uBh2zIdgq/PKyInjV2STShuoapUdCpX1yTwqq/z1VvET7Kh5nVPkO8YyxjLt2MaaMmWTLQvx3qnzltnXW0p2jxgbEtSny/Osv8Y9pLMXYoHVPAhkVdWVeODhR6q9/Sxe2liwwZWMVvFXfRkeIDxAePUPIrdJ4ey6yquzH+PD/bUOWAu05qVHtFd8rrKHSoeNIOUqrYr3FXyToqfYJgwmJdKpXXOwYYegNNGMzfZPp/t3t/DVs4zjNTN61rRqaWaa4NYbRjTa0tWwy2Y2tGN8ZO8ofNKq4j9SL7I+cSm4/6ovLV5HNXLI0jJidwrtk6ynCaP6Z++GjRlWS3tLeW129Mi9evxU9mtz6s5J3Z7M2ngTgnKvmpomxpaLCzPfmx0JWE+m3NLDDGOX47RctdYYNK5jakdqLkRlI39n590T5zctGSwwZZDJj6kW8XSi6ot2MmWWJ0DUT3nuvebBudScjZ79g8cWJ8av0k+/bE5WKd5MdbFpbDVMxu1DVMmtNZGJvq1mtRbn6M+g/kP0FwDwr7quZs7xosNGpbscyxhhd9TyJyFwbLcxlTasg75vW7TsV5K7ji44XPMMrdoj+Y3rT0Hie62nlYV/pwczzOmdLqLhYkzGMzCZWGMQzGMSsZYY6Di1t4nlJ+Em63mJxrVLxPbYxNEdgc1dU2iOKyoYYWjNrEeHTYybVk0atSa7ehuwsWMWTqn1TrnS6hYsi71d1+s+k+ic70e20fzE/VaTdxT9ZtU4GIXdeNx3X77guYYfpHeTQjaMX6brOu4OY4K7Y2d9mbHarI5ox3p4GpJ2Vd/Tst60f7j999pppjR+Q/Qf8J/VaORs3cji7FfFuN61+ui9s8hix1OCh5KGVV23BPXvZfz3CLyHpix+exi8z/KnCnosY2euno
r+cxyPO/xJ0vKey9OvE9VjqaYu0x3Z3jd6o2b1T12D+F8l232lwaaacD5LE8LBxu7WTlbWraWpew8Xexjel3E+wWD4APITdNqR8F3R3T0lunCQ4GaE9R37DxeCYfcHi4xci5ovKfxVs55y2hf+65E/Xdp6jR5nrebTmi5incpkyOjs50JvrZwstbbW6kfuuQw+2mykf/EXNFzxfKTrxew929TR6bWnGL//F3JFOFCQT3K4lQ" + +kernels = Kernel( + bz2.decompress(base64.b64decode(quantization_code)), + [ + "int4WeightCompression", + "int4WeightExtractionFloat", + "int4WeightExtractionHalf", + "int8WeightExtractionFloat", + "int8WeightExtractionHalf", + ], +) + + +def compress_int4_weight(weight: torch.Tensor): # (n, m) + with torch.cuda.device(weight.device): + n, m = weight.size(0), weight.size(1) + assert m % 2 == 0 + m = m // 2 + out = torch.empty(n, m, dtype=torch.int8, device="cuda") + stream = torch.cuda.current_stream() + + gridDim = (n, 1, 1) + blockDim = (min(round_up(m, 32), 1024), 1, 1) + + kernels.int4WeightCompression( + gridDim, + blockDim, + 0, + stream, + [ctypes.c_void_p(weight.data_ptr()), ctypes.c_void_p(out.data_ptr()), ctypes.c_int32(n), ctypes.c_int32(m)], + ) + return out + + +def extract_weight_to_half(weight: torch.Tensor, scale_list: torch.Tensor, source_bit_width: int): + if source_bit_width == 8: + func = kernels.int8WeightExtractionHalf + elif source_bit_width == 4: + func = kernels.int4WeightExtractionHalf + else: + assert False, "Unsupported bit-width" + + with torch.cuda.device(weight.device): + n, m = weight.size(0), weight.size(1) + out = torch.empty(n, m * (8 // source_bit_width), dtype=torch.half, device="cuda") + stream = torch.cuda.current_stream() + + gridDim = (n, 1, 1) + blockDim = (min(round_up(m, 32), 1024), 1, 1) + + func( + gridDim, + blockDim, + 0, + stream, + [ + ctypes.c_void_p(weight.data_ptr()), + ctypes.c_void_p(scale_list.data_ptr()), + ctypes.c_void_p(out.data_ptr()), + ctypes.c_int32(n), + ctypes.c_int32(m), + ], + ) + return out + + +class QuantizedLinear(Linear): + def __init__(self, weight_bit_width: int, weight_tensor=None, bias_tensor=None, *args, **kwargs): + super(QuantizedLinear, self).__init__(*args, **kwargs) + self.weight_bit_width = weight_bit_width + + shape = self.weight.shape + del self.weight + + if weight_tensor is None: + self.weight = torch.empty( + shape[0], shape[1] * weight_bit_width // 8, dtype=torch.int8, device=kwargs["device"] + ) + self.weight_scale = torch.empty(shape[0], dtype=kwargs["params_dtype"], device=kwargs["device"]) + else: + self.weight_scale = (weight_tensor.abs().max(dim=-1).values / ((2 ** (weight_bit_width - 1)) - 1)).half() + self.weight = torch.round(weight_tensor / self.weight_scale[:, None]).to(torch.int8) + if weight_bit_width == 4: + self.weight = compress_int4_weight(self.weight) + + self.weight = Parameter(self.weight.to(kwargs["device"]), requires_grad=False) + self.weight_scale = Parameter(self.weight_scale.to(kwargs["device"]), requires_grad=False) + self.bias = Parameter(bias_tensor.to(kwargs["device"]), requires_grad=False) + + def forward(self, input): + output = W8A16Linear.apply(input, self.weight, self.weight_scale, self.weight_bit_width) + if self.bias is not None: + output = output + self.bias + return output + + +def quantize(model, weight_bit_width): + """Replace fp16 linear with quantized linear""" + + for layer in model.layers: + layer.attention.query_key_value = QuantizedLinear( + weight_bit_width=weight_bit_width, + weight_tensor=layer.attention.query_key_value.weight.to(torch.cuda.current_device()), + bias_tensor=layer.attention.query_key_value.bias, + in_features=layer.attention.query_key_value.in_features, + 
out_features=layer.attention.query_key_value.out_features, + bias=True, + dtype=torch.half, + device=layer.attention.query_key_value.weight.device, + ) + layer.attention.dense = QuantizedLinear( + weight_bit_width=weight_bit_width, + weight_tensor=layer.attention.dense.weight.to(torch.cuda.current_device()), + bias_tensor=layer.attention.dense.bias, + in_features=layer.attention.dense.in_features, + out_features=layer.attention.dense.out_features, + bias=True, + dtype=torch.half, + device=layer.attention.dense.weight.device, + ) + layer.mlp.dense_h_to_4h = QuantizedLinear( + weight_bit_width=weight_bit_width, + weight_tensor=layer.mlp.dense_h_to_4h.weight.to(torch.cuda.current_device()), + bias_tensor=layer.mlp.dense_h_to_4h.bias, + in_features=layer.mlp.dense_h_to_4h.in_features, + out_features=layer.mlp.dense_h_to_4h.out_features, + bias=True, + dtype=torch.half, + device=layer.mlp.dense_h_to_4h.weight.device, + ) + layer.mlp.dense_4h_to_h = QuantizedLinear( + weight_bit_width=weight_bit_width, + weight_tensor=layer.mlp.dense_4h_to_h.weight.to(torch.cuda.current_device()), + bias_tensor=layer.mlp.dense_4h_to_h.bias, + in_features=layer.mlp.dense_4h_to_h.in_features, + out_features=layer.mlp.dense_4h_to_h.out_features, + bias=True, + dtype=torch.half, + device=layer.mlp.dense_4h_to_h.weight.device, + ) + return model diff --git a/moe/temp1.wav b/moe/temp1.wav new file mode 100644 index 0000000000000000000000000000000000000000..c8e988bfe54824c966d2238f847cbfc4a651c393 Binary files /dev/null and b/moe/temp1.wav differ diff --git a/moe/temp2.wav b/moe/temp2.wav new file mode 100644 index 0000000000000000000000000000000000000000..5f179652846abbaf12112c754fa42e2783ad8c0b Binary files /dev/null and b/moe/temp2.wav differ diff --git a/moe/tokenization_chatglm.py b/moe/tokenization_chatglm.py new file mode 100644 index 0000000000000000000000000000000000000000..619a8c7a50fcd7369db05d33e0e4661e10c5ebe1 --- /dev/null +++ b/moe/tokenization_chatglm.py @@ -0,0 +1,345 @@ +"""Tokenization classes for ChatGLM.""" +import sys +import unicodedata +from typing import List, Optional, Union +from functools import lru_cache +import os +import collections +import re + +from transformers.tokenization_utils import PreTrainedTokenizer +from icetk.text_tokenizer import TextTokenizer +from icetk.utils import auto_create +import icetk.sentencepiece_model_pb2 as sp_model +from transformers.utils import logging + +logger = logging.get_logger(__name__) + +PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { + "THUDM/chatglm-6b": 2048, +} + + +class SPTokenizer: + def __init__( + self, + vocab_file, + max_blank_length=80, + byte_fallback=True, + ): + assert vocab_file is not None + self.vocab_file = vocab_file + self.special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "", "", "", "", ""] + self.max_blank_length = max_blank_length + self.byte_fallback = byte_fallback + self.text_tokenizer = self._build_text_tokenizer(encode_special_tokens=False) + self.special_text_tokenizer = self._build_text_tokenizer(encode_special_tokens=True) + + @staticmethod + def _configure_tokenizer( + text_tokenizer: TextTokenizer, + special_tokens: List[str], + max_blank_length: int, + byte_fallback: bool, + encode_special_tokens=False, + ): + # special token + special_token_type = 4 if encode_special_tokens else 3 # 3 - CONTROL, 4 - USER_DEFINE + for token in special_tokens: + text_tokenizer.proto.pieces.append( + sp_model.ModelProto.SentencePiece(piece=token, score=0.0, type=special_token_type) + ) + # whitespaces + for token in 
[SPTokenizer.get_tab_token()] + [ + SPTokenizer.get_blank_token(i) for i in range(2, max_blank_length + 1) + ]: + text_tokenizer.proto.pieces.append(sp_model.ModelProto.SentencePiece(piece=token, score=0.0, type=4)) + # byte fallback + if byte_fallback: + text_tokenizer.proto.trainer_spec.byte_fallback = True + for i in range(256): + text_tokenizer.proto.pieces.append( + sp_model.ModelProto.SentencePiece(piece="<0x{:02X}>".format(i), score=0.0, type=6) + ) + text_tokenizer.refresh() + + def _build_text_tokenizer(self, encode_special_tokens=False): + tokenizer = TextTokenizer(self.vocab_file) + self._configure_tokenizer( + tokenizer, self.special_tokens, self.max_blank_length, self.byte_fallback, encode_special_tokens + ) + return tokenizer + + def _get_text_tokenizer(self, encode_special_tokens=False): + if encode_special_tokens: + return self.special_text_tokenizer + else: + return self.text_tokenizer + + @staticmethod + def get_blank_token(length: int): + assert length >= 2 + return f"<|blank_{length}|>" + + @staticmethod + def get_tab_token(): + return f"<|tab|>" + + @property + def num_image_tokens(self): + return 20000 + + @property + def num_text_tokens(self): + return self.text_tokenizer.num_tokens + + @property + def num_tokens(self): + return self.num_image_tokens + self.num_text_tokens + + @staticmethod + def _encode_whitespaces(text: str, max_len: int = 80): + text = text.replace("\t", SPTokenizer.get_tab_token()) + for i in range(max_len, 1, -1): + text = text.replace(" " * i, SPTokenizer.get_blank_token(i)) + return text + + def _preprocess(self, text: str, linebreak=True, whitespaces=True): + if linebreak: + text = text.replace("\n", "") + if whitespaces: + text = self._encode_whitespaces(text, max_len=self.max_blank_length) + return text + + def encode( + self, text: str, linebreak=True, whitespaces=True, special_tokens=False, add_dummy_prefix=True + ) -> List[int]: + """ + @param text: Text to encode. + @param linebreak: Whether to encode newline (\n) in text. + @param whitespaces: Whether to encode multiple whitespaces or tab in text, useful for source code encoding. + @param special_tokens: Whether to encode special token ([MASK], [gMASK], etc.) in text. + @param add_dummy_prefix: Whether to add dummy blank space in the beginning. + """ + text = self._preprocess(text, linebreak, whitespaces) + if not add_dummy_prefix: + text = "" + text + tmp = self._get_text_tokenizer(encode_special_tokens=special_tokens).encode(text) + tokens = [x + self.num_image_tokens for x in tmp] + return tokens if add_dummy_prefix else tokens[2:] + + def decode(self, text_ids: List[int], special_tokens=False) -> str: + ids = [int(_id) - self.num_image_tokens for _id in text_ids] + text = self._get_text_tokenizer(encode_special_tokens=special_tokens).decode(ids) + text = text.replace("", "\n") + text = text.replace(SPTokenizer.get_tab_token(), "\t") + for i in range(2, self.max_blank_length + 1): + text = text.replace(self.get_blank_token(i), " " * i) + return text + + def tokenize( + self, text: str, linebreak=True, whitespaces=True, special_tokens=False, add_dummy_prefix=True + ) -> List[str]: + """ + @param text: Text to encode. + @param linebreak: Whether to encode newline (\n) in text. + @param whitespaces: Whether to encode multiple whitespaces or tab in text, useful for source code encoding. + @param special_tokens: Whether to encode special token ([MASK], [gMASK], etc.) in text. + @param add_dummy_prefix: Whether to add dummy blank space in the beginning. 
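+        @return: A list of sentencepiece token strings (use encode() to obtain integer ids).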
+ """ + text = self._preprocess(text, linebreak, whitespaces) + if not add_dummy_prefix: + text = "" + text + tokens = self._get_text_tokenizer(encode_special_tokens=special_tokens).tokenize(text) + return tokens if add_dummy_prefix else tokens[2:] + + def __getitem__(self, x: Union[int, str]): + if isinstance(x, int): + if x < self.num_image_tokens: + return "".format(x) + else: + return self.text_tokenizer.convert_id_to_token(x - self.num_image_tokens) + elif isinstance(x, str): + if x.startswith("") and x[7:-1].isdigit(): + return int(x[7:-1]) + else: + return self.text_tokenizer.convert_token_to_id(x) + self.num_image_tokens + else: + raise ValueError("The key should be str or int.") + + +class ChatGLMTokenizer(PreTrainedTokenizer): + """ + Construct a ChatGLM tokenizer. Based on byte-level Byte-Pair-Encoding. + + Args: + vocab_file (`str`): + Path to the vocabulary file. + """ + + vocab_files_names = {"vocab_file": "ice_text.model"} + max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES + model_input_names = ["input_ids"] + + def __init__( + self, + vocab_file, + do_lower_case=False, + remove_space=False, + bos_token='sop', + eos_token='eos', + eop_token='eop', + mask_token='[MASK]', + gmask_token='[gMASK]', + padding_side="left", + **kwargs + ) -> None: + super().__init__( + do_lower_case=do_lower_case, + remove_space=remove_space, + padding_side=padding_side, + **kwargs + ) + + self.do_lower_case = do_lower_case + self.remove_space = remove_space + self.vocab_file = vocab_file + + self.bos_token = bos_token + self.eos_token = eos_token + self.eop_token = eop_token + self.mask_token = mask_token + self.gMASK_token = gmask_token + + self.sp_tokenizer = SPTokenizer(vocab_file) + + """ Initialisation """ + + @property + def eop_token_id(self) -> Optional[int]: + """ + `Optional[int]`: Id of the end of sentence token in the vocabulary. Returns `None` if the token has not been + set. + """ + if self.eop_token is None: + return None + return self.convert_tokens_to_ids(self.eop_token) + + @property + def vocab_size(self): + """ Returns vocab size """ + return self.sp_tokenizer.num_tokens + + def get_vocab(self): + """ Returns vocab as a dict """ + vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)} + vocab.update(self.added_tokens_encoder) + return vocab + + def preprocess_text(self, inputs): + if self.remove_space: + outputs = " ".join(inputs.strip().split()) + else: + outputs = inputs + + if self.do_lower_case: + outputs = outputs.lower() + + return outputs + + def _tokenize(self, text, **kwargs): + """ Returns a tokenized string. """ + text = self.preprocess_text(text) + + seq = self.sp_tokenizer.tokenize(text) + + return seq + + def decode( + self, + token_ids: Union[List[int], List[List[int]]], + skip_special_tokens: bool = False, + clean_up_tokenization_spaces: bool = True, + spaces_between_special_tokens: bool = True, + **kwargs + ) -> str: + if isinstance(token_ids[0], list): + tokens = [] + for single_token_ids in token_ids: + if self.pad_token_id in single_token_ids: # remove pad + single_token_ids = list(filter((self.pad_token_id).__ne__, single_token_ids)) + tokens.append(self.sp_tokenizer.decode(single_token_ids)) + return (tokens) + else: + if self.pad_token_id in token_ids: # remove pad + token_ids = list(filter((self.pad_token_id).__ne__, token_ids)) + return self.sp_tokenizer.decode(token_ids) + + def _convert_token_to_id(self, token): + """ Converts a token (str) in an id using the vocab. 
""" + return self.sp_tokenizer[token] + + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + return self.sp_tokenizer[index] + + def save_vocabulary(self, save_directory, filename_prefix=None): + """ + Save the vocabulary and special tokens file to a directory. + + Args: + save_directory (`str`): + The directory in which to save the vocabulary. + filename_prefix (`str`, *optional*): + An optional prefix to add to the named of the saved files. + + Returns: + `Tuple(str)`: Paths to the files saved. + """ + if os.path.isdir(save_directory): + vocab_file = os.path.join( + save_directory, VOCAB_FILES_NAMES["vocab_file"] + ) + else: + vocab_file = save_directory + + with open(self.vocab_file, 'rb') as fin: + proto_str = fin.read() + + with open(vocab_file, "wb") as writer: + writer.write(proto_str) + + return (vocab_file,) + + def build_inputs_with_special_tokens( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and + adding special tokens. A BERT sequence has the following format: + + - single sequence: `[CLS] X [SEP]` + - pair of sequences: `[CLS] A [SEP] B [SEP]` + + Args: + token_ids_0 (`List[int]`): + List of IDs to which the special tokens will be added. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. + """ + if token_ids_1 is not None: + token_ids_0 += token_ids_1 + mask_ids = self.sp_tokenizer[self.mask_token] + gmask_ids = self.sp_tokenizer[self.gMASK_token] + if mask_ids not in token_ids_0 and gmask_ids not in token_ids_0: + token_ids_0 += [gmask_ids] + + if token_ids_0[-1] != mask_ids and token_ids_0[-1] != gmask_ids: + token_ids_0 += [self.sp_tokenizer[self.eos_token]] + + token_ids_0 += [self.sp_tokenizer[self.bos_token]] + + return token_ids_0 diff --git a/moe/tokenizer_config.json b/moe/tokenizer_config.json new file mode 100644 index 0000000000000000000000000000000000000000..21c1748d14ccbb115d065fcf87007c0c0e6c098e --- /dev/null +++ b/moe/tokenizer_config.json @@ -0,0 +1,19 @@ +{ + "name_or_path": "THUDM/chatglm-6b", + "bos_token": "", + "eop_token": "", + "eos_token": "", + "gmask_token": "[gMASK]", + "mask_token": "[MASK]", + "pad_token": "", + "unk_token": "", + "remove_space": false, + "do_lower_case": false, + "tokenizer_class": "ChatGLMTokenizer", + "auto_map": { + "AutoTokenizer": [ + "tokenization_chatglm.ChatGLMTokenizer", + null + ] + } +} diff --git a/output.wav b/output.wav new file mode 100644 index 0000000000000000000000000000000000000000..5f179652846abbaf12112c754fa42e2783ad8c0b Binary files /dev/null and b/output.wav differ diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..2c2f4901eef9b73f7eebbcb737f98aa851f35e1d --- /dev/null +++ b/requirements.txt @@ -0,0 +1,24 @@ +Flask +Cython==0.29.21 +librosa==0.8.0 +matplotlib==3.3.1 +numpy==1.21.6 +phonemizer==2.2.1 +scipy==1.5.2 +tensorboard==2.3.0 +torch +torchvision +Unidecode==1.1.1 +pyopenjtalk==0.2.0 +jamo==0.4.1 +pypinyin==0.44.0 +jieba==0.42.1 +cn2an==0.5.17 +jieba==0.42.1 +ipython==7.34.0 +gradio==3.4.1 +openai +pydub +inflect +eng_to_ipa +onnxruntime \ No newline at end of file diff --git a/text/LICENSE b/text/LICENSE new file mode 100644 index 
0000000000000000000000000000000000000000..4ad4ed1d5e34d95c8380768ec16405d789cc6de4 --- /dev/null +++ b/text/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2017 Keith Ito + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/text/__init__.py b/text/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..48ae82f3e40ecd1bf17a7de78d87790327af3362 --- /dev/null +++ b/text/__init__.py @@ -0,0 +1,56 @@ +""" from https://github.com/keithito/tacotron """ +from text import cleaners +from text.symbols import symbols + + +# Mappings from symbol to numeric ID and vice versa: +_symbol_to_id = {s: i for i, s in enumerate(symbols)} +_id_to_symbol = {i: s for i, s in enumerate(symbols)} + + +def text_to_sequence(text, cleaner_names): + '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. + Args: + text: string to convert to a sequence + cleaner_names: names of the cleaner functions to run the text through + Returns: + List of integers corresponding to the symbols in the text + ''' + sequence = [] + + clean_text = _clean_text(text, cleaner_names) + for symbol in clean_text: + if symbol not in _symbol_to_id.keys(): + continue + symbol_id = _symbol_to_id[symbol] + sequence += [symbol_id] + return sequence + + +def cleaned_text_to_sequence(cleaned_text): + '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. 
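+    Unlike text_to_sequence, no cleaner functions are applied; the input is assumed to be already cleaned.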
+ Args: + text: string to convert to a sequence + Returns: + List of integers corresponding to the symbols in the text + ''' + sequence = [_symbol_to_id[symbol] for symbol in cleaned_text if symbol in _symbol_to_id.keys()] + return sequence + + +def sequence_to_text(sequence): + '''Converts a sequence of IDs back to a string''' + result = '' + for symbol_id in sequence: + s = _id_to_symbol[symbol_id] + result += s + return result + + +def _clean_text(text, cleaner_names): + for name in cleaner_names: + cleaner = getattr(cleaners, name) + if not cleaner: + raise Exception('Unknown cleaner: %s' % name) + text = cleaner(text) + return text diff --git a/text/__pycache__/__init__.cpython-37.pyc b/text/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a791bce07f63f176c38217ad40b57d88ee9cf70 Binary files /dev/null and b/text/__pycache__/__init__.cpython-37.pyc differ diff --git a/text/__pycache__/__init__.cpython-38.pyc b/text/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..73ec0b66e64e9ad68f5efc65689e4cd7ec7d5b41 Binary files /dev/null and b/text/__pycache__/__init__.cpython-38.pyc differ diff --git a/text/__pycache__/__init__.cpython-39.pyc b/text/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a21056f7c260b50726d61c2c1e0c35a1e77b29c7 Binary files /dev/null and b/text/__pycache__/__init__.cpython-39.pyc differ diff --git a/text/__pycache__/cleaners.cpython-37.pyc b/text/__pycache__/cleaners.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0211d13c1a049c10c856951d64e61e361dbc0a79 Binary files /dev/null and b/text/__pycache__/cleaners.cpython-37.pyc differ diff --git a/text/__pycache__/cleaners.cpython-38.pyc b/text/__pycache__/cleaners.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..214b7862bd968990df4463fbee817aa3cf4a8602 Binary files /dev/null and b/text/__pycache__/cleaners.cpython-38.pyc differ diff --git a/text/__pycache__/cleaners.cpython-39.pyc b/text/__pycache__/cleaners.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a9f004ed1396da9b7be7235eddb3331faa9bad61 Binary files /dev/null and b/text/__pycache__/cleaners.cpython-39.pyc differ diff --git a/text/__pycache__/english.cpython-37.pyc b/text/__pycache__/english.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67af12d2984060609249e5b92b0ef96849f14db2 Binary files /dev/null and b/text/__pycache__/english.cpython-37.pyc differ diff --git a/text/__pycache__/english.cpython-38.pyc b/text/__pycache__/english.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b3e6648a01b7ce179f2b56399705b0bf0b6e0f4 Binary files /dev/null and b/text/__pycache__/english.cpython-38.pyc differ diff --git a/text/__pycache__/english.cpython-39.pyc b/text/__pycache__/english.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2cc8a13bcb5ce9f9b2e85d322e848257576a23d8 Binary files /dev/null and b/text/__pycache__/english.cpython-39.pyc differ diff --git a/text/__pycache__/japanese.cpython-37.pyc b/text/__pycache__/japanese.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c5b21af0a9a15311da14c8cde5d7dad8dad6edfe Binary files /dev/null and b/text/__pycache__/japanese.cpython-37.pyc differ diff --git a/text/__pycache__/japanese.cpython-38.pyc 
b/text/__pycache__/japanese.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bbdbb185b77ea81094492f63d1fcf9db62ca8fd2 Binary files /dev/null and b/text/__pycache__/japanese.cpython-38.pyc differ diff --git a/text/__pycache__/japanese.cpython-39.pyc b/text/__pycache__/japanese.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..15b3c19c08a02b640aff770599e469171896f349 Binary files /dev/null and b/text/__pycache__/japanese.cpython-39.pyc differ diff --git a/text/__pycache__/korean.cpython-37.pyc b/text/__pycache__/korean.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc7804b6ffdd617bbc312c02eab8bca33f7a49dc Binary files /dev/null and b/text/__pycache__/korean.cpython-37.pyc differ diff --git a/text/__pycache__/mandarin.cpython-37.pyc b/text/__pycache__/mandarin.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf0505a499ed9cd4fbef381b2bbb7c0c2ddb5b60 Binary files /dev/null and b/text/__pycache__/mandarin.cpython-37.pyc differ diff --git a/text/__pycache__/mandarin.cpython-38.pyc b/text/__pycache__/mandarin.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..351cb45f946679dc1c124e568d995269a57e2154 Binary files /dev/null and b/text/__pycache__/mandarin.cpython-38.pyc differ diff --git a/text/__pycache__/mandarin.cpython-39.pyc b/text/__pycache__/mandarin.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5afc0dbfb99dac5ab34cdfe2a567ed22d322af9a Binary files /dev/null and b/text/__pycache__/mandarin.cpython-39.pyc differ diff --git a/text/__pycache__/sanskrit.cpython-37.pyc b/text/__pycache__/sanskrit.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8fd202a3167dddb5dc444e904430ddddbda8fb21 Binary files /dev/null and b/text/__pycache__/sanskrit.cpython-37.pyc differ diff --git a/text/__pycache__/shanghainese.cpython-37.pyc b/text/__pycache__/shanghainese.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..35832acc5a117ea38e3064bcb8827f5fd88c675e Binary files /dev/null and b/text/__pycache__/shanghainese.cpython-37.pyc differ diff --git a/text/__pycache__/symbols.cpython-37.pyc b/text/__pycache__/symbols.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c3b5ef456a75a0b2a430f466d4cebac086966dc4 Binary files /dev/null and b/text/__pycache__/symbols.cpython-37.pyc differ diff --git a/text/__pycache__/symbols.cpython-38.pyc b/text/__pycache__/symbols.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8479e53ae2435f0adb421546b050730e2b90d1e8 Binary files /dev/null and b/text/__pycache__/symbols.cpython-38.pyc differ diff --git a/text/__pycache__/symbols.cpython-39.pyc b/text/__pycache__/symbols.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..43cf34fa8459dcd073507ebbb0b3d9967708228e Binary files /dev/null and b/text/__pycache__/symbols.cpython-39.pyc differ diff --git a/text/__pycache__/thai.cpython-37.pyc b/text/__pycache__/thai.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba2b0d1fd908b42691c4d0a9a8494ad37a437c28 Binary files /dev/null and b/text/__pycache__/thai.cpython-37.pyc differ diff --git a/text/cantonese.py b/text/cantonese.py new file mode 100644 index 0000000000000000000000000000000000000000..b66d12138b81b70b86f18217d24a08fce76305c0 --- /dev/null +++ b/text/cantonese.py @@ -0,0 +1,59 @@ 
+import re +import cn2an +import opencc + + +converter = opencc.OpenCC('jyutjyu') + +# List of (Latin alphabet, ipa) pairs: +_latin_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ + ('A', 'ei˥'), + ('B', 'biː˥'), + ('C', 'siː˥'), + ('D', 'tiː˥'), + ('E', 'iː˥'), + ('F', 'e˥fuː˨˩'), + ('G', 'tsiː˥'), + ('H', 'ɪk̚˥tsʰyː˨˩'), + ('I', 'ɐi˥'), + ('J', 'tsei˥'), + ('K', 'kʰei˥'), + ('L', 'e˥llou˨˩'), + ('M', 'ɛːm˥'), + ('N', 'ɛːn˥'), + ('O', 'ou˥'), + ('P', 'pʰiː˥'), + ('Q', 'kʰiːu˥'), + ('R', 'aː˥lou˨˩'), + ('S', 'ɛː˥siː˨˩'), + ('T', 'tʰiː˥'), + ('U', 'juː˥'), + ('V', 'wiː˥'), + ('W', 'tʊk̚˥piː˥juː˥'), + ('X', 'ɪk̚˥siː˨˩'), + ('Y', 'waːi˥'), + ('Z', 'iː˨sɛːt̚˥') +]] + + +def number_to_cantonese(text): + return re.sub(r'\d+(?:\.?\d+)?', lambda x: cn2an.an2cn(x.group()), text) + + +def latin_to_ipa(text): + for regex, replacement in _latin_to_ipa: + text = re.sub(regex, replacement, text) + return text + + +def cantonese_to_ipa(text): + text = number_to_cantonese(text.upper()) + text = converter.convert(text).replace('-','').replace('$',' ') + text = re.sub(r'[A-Z]', lambda x: latin_to_ipa(x.group())+' ', text) + text = re.sub(r'[、;:]', ',', text) + text = re.sub(r'\s*,\s*', ', ', text) + text = re.sub(r'\s*。\s*', '. ', text) + text = re.sub(r'\s*?\s*', '? ', text) + text = re.sub(r'\s*!\s*', '! ', text) + text = re.sub(r'\s*$', '', text) + return text diff --git a/text/cleaners.py b/text/cleaners.py new file mode 100644 index 0000000000000000000000000000000000000000..6405dd8a7b3b727b0d8a3cc418b50bd0d066d143 --- /dev/null +++ b/text/cleaners.py @@ -0,0 +1,176 @@ +import re +from text.english import english_to_lazy_ipa, english_to_ipa2, english_to_lazy_ipa2 +from text.japanese import japanese_to_romaji_with_accent, japanese_to_ipa, japanese_to_ipa2, japanese_to_ipa3 +from text.mandarin import number_to_chinese, chinese_to_bopomofo, latin_to_bopomofo, chinese_to_romaji, chinese_to_lazy_ipa, chinese_to_ipa, chinese_to_ipa2 +# from text.sanskrit import devanagari_to_ipa +# from text.english import english_to_lazy_ipa, english_to_ipa2, english_to_lazy_ipa2 +# from text.thai import num_to_thai, latin_to_thai +# from text.shanghainese import shanghainese_to_ipa +# from text.cantonese import cantonese_to_ipa +# from text.ngu_dialect import ngu_dialect_to_ipa + + +def japanese_cleaners(text): + text = japanese_to_romaji_with_accent(text) + if re.match('[A-Za-z]', text[-1]): + text += '.' + return text + + +def japanese_cleaners2(text): + return japanese_cleaners(text).replace('ts', 'ʦ').replace('...', '…') + + +def korean_cleaners(text): + '''Pipeline for Korean text''' + text = latin_to_hangul(text) + text = number_to_hangul(text) + text = divide_hangul(text) + if re.match('[\u3131-\u3163]', text[-1]): + text += '.' 
+ return text + + +def chinese_cleaners(text): + '''Pipeline for Chinese text''' + text = number_to_chinese(text) + text = chinese_to_bopomofo(text) + text = latin_to_bopomofo(text) + if re.match('[ˉˊˇˋ˙]', text[-1]): + text += '。' + return text + + +def zh_ja_mixture_cleaners(text): + chinese_texts = re.findall(r'\[ZH\].*?\[ZH\]', text) + japanese_texts = re.findall(r'\[JA\].*?\[JA\]', text) + for chinese_text in chinese_texts: + cleaned_text = chinese_to_romaji(chinese_text[4:-4]) + text = text.replace(chinese_text, cleaned_text+' ', 1) + for japanese_text in japanese_texts: + cleaned_text = japanese_to_romaji_with_accent( + japanese_text[4:-4]).replace('ts', 'ʦ').replace('u', 'ɯ').replace('...', '…') + text = text.replace(japanese_text, cleaned_text+' ', 1) + text = text[:-1] + if re.match('[A-Za-zɯɹəɥ→↓↑]', text[-1]): + text += '.' + return text + + +def sanskrit_cleaners(text): + text = text.replace('॥', '।').replace('ॐ', 'ओम्') + if text[-1] != '।': + text += ' ।' + return text + + +def cjks_cleaners(text): + chinese_texts = re.findall(r'\[ZH\].*?\[ZH\]', text) + japanese_texts = re.findall(r'\[JA\].*?\[JA\]', text) + korean_texts = re.findall(r'\[KO\].*?\[KO\]', text) + sanskrit_texts = re.findall(r'\[SA\].*?\[SA\]', text) + english_texts = re.findall(r'\[EN\].*?\[EN\]', text) + for chinese_text in chinese_texts: + cleaned_text = chinese_to_lazy_ipa(chinese_text[4:-4]) + text = text.replace(chinese_text, cleaned_text+' ', 1) + for japanese_text in japanese_texts: + cleaned_text = japanese_to_ipa(japanese_text[4:-4]) + text = text.replace(japanese_text, cleaned_text+' ', 1) + for korean_text in korean_texts: + cleaned_text = korean_to_lazy_ipa(korean_text[4:-4]) + text = text.replace(korean_text, cleaned_text+' ', 1) + for sanskrit_text in sanskrit_texts: + cleaned_text = devanagari_to_ipa(sanskrit_text[4:-4]) + text = text.replace(sanskrit_text, cleaned_text+' ', 1) + for english_text in english_texts: + cleaned_text = english_to_lazy_ipa(english_text[4:-4]) + text = text.replace(english_text, cleaned_text+' ', 1) + text = text[:-1] + if re.match(r'[^\.,!\?\-…~]', text[-1]): + text += '.' + return text + + +def cjke_cleaners(text): + chinese_texts = re.findall(r'\[ZH\].*?\[ZH\]', text) + japanese_texts = re.findall(r'\[JA\].*?\[JA\]', text) + korean_texts = re.findall(r'\[KO\].*?\[KO\]', text) + english_texts = re.findall(r'\[EN\].*?\[EN\]', text) + for chinese_text in chinese_texts: + cleaned_text = chinese_to_lazy_ipa(chinese_text[4:-4]) + cleaned_text = cleaned_text.replace( + 'ʧ', 'tʃ').replace('ʦ', 'ts').replace('ɥan', 'ɥæn') + text = text.replace(chinese_text, cleaned_text+' ', 1) + for japanese_text in japanese_texts: + cleaned_text = japanese_to_ipa(japanese_text[4:-4]) + cleaned_text = cleaned_text.replace('ʧ', 'tʃ').replace( + 'ʦ', 'ts').replace('ɥan', 'ɥæn').replace('ʥ', 'dz') + text = text.replace(japanese_text, cleaned_text+' ', 1) + for korean_text in korean_texts: + cleaned_text = korean_to_ipa(korean_text[4:-4]) + text = text.replace(korean_text, cleaned_text+' ', 1) + for english_text in english_texts: + cleaned_text = english_to_ipa2(english_text[4:-4]) + cleaned_text = cleaned_text.replace('ɑ', 'a').replace( + 'ɔ', 'o').replace('ɛ', 'e').replace('ɪ', 'i').replace('ʊ', 'u') + text = text.replace(english_text, cleaned_text+' ', 1) + text = text[:-1] + if re.match(r'[^\.,!\?\-…~]', text[-1]): + text += '.' 
+ return text + + +def cjke_cleaners2(text): + chinese_texts = re.findall(r'\[ZH\].*?\[ZH\]', text) + japanese_texts = re.findall(r'\[JA\].*?\[JA\]', text) + korean_texts = re.findall(r'\[KO\].*?\[KO\]', text) + english_texts = re.findall(r'\[EN\].*?\[EN\]', text) + for chinese_text in chinese_texts: + cleaned_text = chinese_to_ipa(chinese_text[4:-4]) + text = text.replace(chinese_text, cleaned_text+' ', 1) + for japanese_text in japanese_texts: + cleaned_text = japanese_to_ipa2(japanese_text[4:-4]) + text = text.replace(japanese_text, cleaned_text+' ', 1) + for korean_text in korean_texts: + cleaned_text = korean_to_ipa(korean_text[4:-4]) + text = text.replace(korean_text, cleaned_text+' ', 1) + for english_text in english_texts: + cleaned_text = english_to_ipa2(english_text[4:-4]) + text = text.replace(english_text, cleaned_text+' ', 1) + text = text[:-1] + if re.match(r'[^\.,!\?\-…~]', text[-1]): + text += '.' + return text + + +def thai_cleaners(text): + text = num_to_thai(text) + text = latin_to_thai(text) + return text + + +def shanghainese_cleaners(text): + text = shanghainese_to_ipa(text) + if re.match(r'[^\.,!\?\-…~]', text[-1]): + text += '.' + return text + + +def chinese_dialect_cleaners(text): + text = re.sub(r'\[MD\](.*?)\[MD\]', + lambda x: chinese_to_ipa2(x.group(1))+' ', text) + text = re.sub(r'\[TW\](.*?)\[TW\]', + lambda x: chinese_to_ipa2(x.group(1), True)+' ', text) + text = re.sub(r'\[JA\](.*?)\[JA\]', + lambda x: japanese_to_ipa3(x.group(1)).replace('Q', 'ʔ')+' ', text) + text = re.sub(r'\[SH\](.*?)\[SH\]', lambda x: shanghainese_to_ipa(x.group(1)).replace('1', '˥˧').replace('5', + '˧˧˦').replace('6', '˩˩˧').replace('7', '˥').replace('8', '˩˨').replace('ᴀ', 'ɐ').replace('ᴇ', 'e')+' ', text) + text = re.sub(r'\[GD\](.*?)\[GD\]', + lambda x: cantonese_to_ipa(x.group(1))+' ', text) + text = re.sub(r'\[EN\](.*?)\[EN\]', + lambda x: english_to_lazy_ipa2(x.group(1))+' ', text) + text = re.sub(r'\[([A-Z]{2})\](.*?)\[\1\]', lambda x: ngu_dialect_to_ipa(x.group(2), x.group( + 1)).replace('ʣ', 'dz').replace('ʥ', 'dʑ').replace('ʦ', 'ts').replace('ʨ', 'tɕ')+' ', text) + text = re.sub(r'\s+$', '', text) + text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text) + return text diff --git a/text/english.py b/text/english.py new file mode 100644 index 0000000000000000000000000000000000000000..6817392ba8a9eb830351de89fb7afc5ad72f5e42 --- /dev/null +++ b/text/english.py @@ -0,0 +1,188 @@ +""" from https://github.com/keithito/tacotron """ + +''' +Cleaners are transformations that run over the input text at both training and eval time. + +Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners" +hyperparameter. Some cleaners are English-specific. You'll typically want to use: + 1. "english_cleaners" for English text + 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using + the Unidecode library (https://pypi.python.org/pypi/Unidecode) + 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update + the symbols in symbols.py to match your data). 
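+In this repository, text/cleaners.py imports english_to_ipa2, english_to_lazy_ipa and english_to_lazy_ipa2 from this module directly.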
+''' + + +# Regular expression matching whitespace: + + +import re +import inflect +from unidecode import unidecode +import eng_to_ipa as ipa +_inflect = inflect.engine() +_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])') +_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)') +_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)') +_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)') +_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)') +_number_re = re.compile(r'[0-9]+') + +# List of (regular expression, replacement) pairs for abbreviations: +_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [ + ('mrs', 'misess'), + ('mr', 'mister'), + ('dr', 'doctor'), + ('st', 'saint'), + ('co', 'company'), + ('jr', 'junior'), + ('maj', 'major'), + ('gen', 'general'), + ('drs', 'doctors'), + ('rev', 'reverend'), + ('lt', 'lieutenant'), + ('hon', 'honorable'), + ('sgt', 'sergeant'), + ('capt', 'captain'), + ('esq', 'esquire'), + ('ltd', 'limited'), + ('col', 'colonel'), + ('ft', 'fort'), +]] + + +# List of (ipa, lazy ipa) pairs: +_lazy_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ + ('r', 'ɹ'), + ('æ', 'e'), + ('ɑ', 'a'), + ('ɔ', 'o'), + ('ð', 'z'), + ('θ', 's'), + ('ɛ', 'e'), + ('ɪ', 'i'), + ('ʊ', 'u'), + ('ʒ', 'ʥ'), + ('ʤ', 'ʥ'), + ('ˈ', '↓'), +]] + +# List of (ipa, lazy ipa2) pairs: +_lazy_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ + ('r', 'ɹ'), + ('ð', 'z'), + ('θ', 's'), + ('ʒ', 'ʑ'), + ('ʤ', 'dʑ'), + ('ˈ', '↓'), +]] + +# List of (ipa, ipa2) pairs +_ipa_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ + ('r', 'ɹ'), + ('ʤ', 'dʒ'), + ('ʧ', 'tʃ') +]] + + +def expand_abbreviations(text): + for regex, replacement in _abbreviations: + text = re.sub(regex, replacement, text) + return text + + +def collapse_whitespace(text): + return re.sub(r'\s+', ' ', text) + + +def _remove_commas(m): + return m.group(1).replace(',', '') + + +def _expand_decimal_point(m): + return m.group(1).replace('.', ' point ') + + +def _expand_dollars(m): + match = m.group(1) + parts = match.split('.') + if len(parts) > 2: + return match + ' dollars' # Unexpected format + dollars = int(parts[0]) if parts[0] else 0 + cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0 + if dollars and cents: + dollar_unit = 'dollar' if dollars == 1 else 'dollars' + cent_unit = 'cent' if cents == 1 else 'cents' + return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit) + elif dollars: + dollar_unit = 'dollar' if dollars == 1 else 'dollars' + return '%s %s' % (dollars, dollar_unit) + elif cents: + cent_unit = 'cent' if cents == 1 else 'cents' + return '%s %s' % (cents, cent_unit) + else: + return 'zero dollars' + + +def _expand_ordinal(m): + return _inflect.number_to_words(m.group(0)) + + +def _expand_number(m): + num = int(m.group(0)) + if num > 1000 and num < 3000: + if num == 2000: + return 'two thousand' + elif num > 2000 and num < 2010: + return 'two thousand ' + _inflect.number_to_words(num % 100) + elif num % 100 == 0: + return _inflect.number_to_words(num // 100) + ' hundred' + else: + return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ') + else: + return _inflect.number_to_words(num, andword='') + + +def normalize_numbers(text): + text = re.sub(_comma_number_re, _remove_commas, text) + text = re.sub(_pounds_re, r'\1 pounds', text) + text = re.sub(_dollars_re, _expand_dollars, text) + text = re.sub(_decimal_number_re, _expand_decimal_point, text) + text = re.sub(_ordinal_re, _expand_ordinal, text) + text = re.sub(_number_re, _expand_number, text) + return text + 
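+# Illustrative example (not part of the upstream Keith Ito cleaners): roughly what
+# the normalization chain above yields; the exact wording depends on the installed
+# inflect version.
+#   normalize_numbers('paid $2.50 for 16 copies on June 2nd')
+#   -> 'paid two dollars, fifty cents for sixteen copies on June second'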
+ +def mark_dark_l(text): + return re.sub(r'l([^aeiouæɑɔəɛɪʊ ]*(?: |$))', lambda x: 'ɫ'+x.group(1), text) + + +def english_to_ipa(text): + text = unidecode(text).lower() + text = expand_abbreviations(text) + text = normalize_numbers(text) + phonemes = ipa.convert(text) + phonemes = collapse_whitespace(phonemes) + return phonemes + + +def english_to_lazy_ipa(text): + text = english_to_ipa(text) + for regex, replacement in _lazy_ipa: + text = re.sub(regex, replacement, text) + return text + + +def english_to_ipa2(text): + text = english_to_ipa(text) + text = mark_dark_l(text) + for regex, replacement in _ipa_to_ipa2: + text = re.sub(regex, replacement, text) + return text.replace('...', '…') + + +def english_to_lazy_ipa2(text): + text = english_to_ipa(text) + for regex, replacement in _lazy_ipa2: + text = re.sub(regex, replacement, text) + return text diff --git a/text/japanese.py b/text/japanese.py new file mode 100644 index 0000000000000000000000000000000000000000..375e4d50872d5c68ee57ca17470a2ca425425eba --- /dev/null +++ b/text/japanese.py @@ -0,0 +1,153 @@ +import re +from unidecode import unidecode +import pyopenjtalk + + +# Regular expression matching Japanese without punctuation marks: +_japanese_characters = re.compile( + r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') + +# Regular expression matching non-Japanese characters or punctuation marks: +_japanese_marks = re.compile( + r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') + +# List of (symbol, Japanese) pairs for marks: +_symbols_to_japanese = [(re.compile('%s' % x[0]), x[1]) for x in [ + ('%', 'パーセント') +]] + +# List of (romaji, ipa) pairs for marks: +_romaji_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ + ('ts', 'ʦ'), + ('u', 'ɯ'), + ('j', 'ʥ'), + ('y', 'j'), + ('ni', 'n^i'), + ('nj', 'n^'), + ('hi', 'çi'), + ('hj', 'ç'), + ('f', 'ɸ'), + ('I', 'i*'), + ('U', 'ɯ*'), + ('r', 'ɾ') +]] + +# List of (romaji, ipa2) pairs for marks: +_romaji_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ + ('u', 'ɯ'), + ('ʧ', 'tʃ'), + ('j', 'dʑ'), + ('y', 'j'), + ('ni', 'n^i'), + ('nj', 'n^'), + ('hi', 'çi'), + ('hj', 'ç'), + ('f', 'ɸ'), + ('I', 'i*'), + ('U', 'ɯ*'), + ('r', 'ɾ') +]] + +# List of (consonant, sokuon) pairs: +_real_sokuon = [(re.compile('%s' % x[0]), x[1]) for x in [ + (r'Q([↑↓]*[kg])', r'k#\1'), + (r'Q([↑↓]*[tdjʧ])', r't#\1'), + (r'Q([↑↓]*[sʃ])', r's\1'), + (r'Q([↑↓]*[pb])', r'p#\1') +]] + +# List of (consonant, hatsuon) pairs: +_real_hatsuon = [(re.compile('%s' % x[0]), x[1]) for x in [ + (r'N([↑↓]*[pbm])', r'm\1'), + (r'N([↑↓]*[ʧʥj])', r'n^\1'), + (r'N([↑↓]*[tdn])', r'n\1'), + (r'N([↑↓]*[kg])', r'ŋ\1') +]] + + +def symbols_to_japanese(text): + for regex, replacement in _symbols_to_japanese: + text = re.sub(regex, replacement, text) + return text + + +def japanese_to_romaji_with_accent(text): + '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html''' + text = symbols_to_japanese(text) + sentences = re.split(_japanese_marks, text) + marks = re.findall(_japanese_marks, text) + text = '' + for i, sentence in enumerate(sentences): + if re.match(_japanese_characters, sentence): + if text != '': + text += ' ' + labels = pyopenjtalk.extract_fullcontext(sentence) + for n, label in enumerate(labels): + phoneme = re.search(r'\-([^\+]*)\+', label).group(1) + if phoneme not in ['sil', 'pau']: + text += phoneme.replace('ch', 'ʧ').replace('sh', + 'ʃ').replace('cl', 'Q') + else: + continue + # n_moras 
= int(re.search(r'/F:(\d+)_', label).group(1)) + a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1)) + a2 = int(re.search(r"\+(\d+)\+", label).group(1)) + a3 = int(re.search(r"\+(\d+)/", label).group(1)) + if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil', 'pau']: + a2_next = -1 + else: + a2_next = int( + re.search(r"\+(\d+)\+", labels[n + 1]).group(1)) + # Accent phrase boundary + if a3 == 1 and a2_next == 1: + text += ' ' + # Falling + elif a1 == 0 and a2_next == a2 + 1: + text += '↓' + # Rising + elif a2 == 1 and a2_next == 2: + text += '↑' + if i < len(marks): + text += unidecode(marks[i]).replace(' ', '') + return text + + +def get_real_sokuon(text): + for regex, replacement in _real_sokuon: + text = re.sub(regex, replacement, text) + return text + + +def get_real_hatsuon(text): + for regex, replacement in _real_hatsuon: + text = re.sub(regex, replacement, text) + return text + + +def japanese_to_ipa(text): + text = japanese_to_romaji_with_accent(text).replace('...', '…') + text = re.sub( + r'([aiueo])\1+', lambda x: x.group(0)[0]+'ː'*(len(x.group(0))-1), text) + text = get_real_sokuon(text) + text = get_real_hatsuon(text) + for regex, replacement in _romaji_to_ipa: + text = re.sub(regex, replacement, text) + return text + + +def japanese_to_ipa2(text): + text = japanese_to_romaji_with_accent(text).replace('...', '…') + text = get_real_sokuon(text) + text = get_real_hatsuon(text) + for regex, replacement in _romaji_to_ipa2: + text = re.sub(regex, replacement, text) + return text + + +def japanese_to_ipa3(text): + text = japanese_to_ipa2(text).replace('n^', 'ȵ').replace( + 'ʃ', 'ɕ').replace('*', '\u0325').replace('#', '\u031a') + text = re.sub( + r'([aiɯeo])\1+', lambda x: x.group(0)[0]+'ː'*(len(x.group(0))-1), text) + text = re.sub(r'((?:^|\s)(?:ts|tɕ|[kpt]))', r'\1ʰ', text) + return text diff --git a/text/korean.py b/text/korean.py new file mode 100644 index 0000000000000000000000000000000000000000..edee07429a450c55e3d8e246997faaa1e0b89cc9 --- /dev/null +++ b/text/korean.py @@ -0,0 +1,210 @@ +import re +from jamo import h2j, j2hcj +import ko_pron + + +# This is a list of Korean classifiers preceded by pure Korean numerals. 
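+# number_to_hangul() switches to native-Korean numerals (sino=False) when a number is followed by one of these counters, e.g. '2마리' becomes '두마리'.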
+_korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통' + +# List of (hangul, hangul divided) pairs: +_hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [ + ('ㄳ', 'ㄱㅅ'), + ('ㄵ', 'ㄴㅈ'), + ('ㄶ', 'ㄴㅎ'), + ('ㄺ', 'ㄹㄱ'), + ('ㄻ', 'ㄹㅁ'), + ('ㄼ', 'ㄹㅂ'), + ('ㄽ', 'ㄹㅅ'), + ('ㄾ', 'ㄹㅌ'), + ('ㄿ', 'ㄹㅍ'), + ('ㅀ', 'ㄹㅎ'), + ('ㅄ', 'ㅂㅅ'), + ('ㅘ', 'ㅗㅏ'), + ('ㅙ', 'ㅗㅐ'), + ('ㅚ', 'ㅗㅣ'), + ('ㅝ', 'ㅜㅓ'), + ('ㅞ', 'ㅜㅔ'), + ('ㅟ', 'ㅜㅣ'), + ('ㅢ', 'ㅡㅣ'), + ('ㅑ', 'ㅣㅏ'), + ('ㅒ', 'ㅣㅐ'), + ('ㅕ', 'ㅣㅓ'), + ('ㅖ', 'ㅣㅔ'), + ('ㅛ', 'ㅣㅗ'), + ('ㅠ', 'ㅣㅜ') +]] + +# List of (Latin alphabet, hangul) pairs: +_latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ + ('a', '에이'), + ('b', '비'), + ('c', '시'), + ('d', '디'), + ('e', '이'), + ('f', '에프'), + ('g', '지'), + ('h', '에이치'), + ('i', '아이'), + ('j', '제이'), + ('k', '케이'), + ('l', '엘'), + ('m', '엠'), + ('n', '엔'), + ('o', '오'), + ('p', '피'), + ('q', '큐'), + ('r', '아르'), + ('s', '에스'), + ('t', '티'), + ('u', '유'), + ('v', '브이'), + ('w', '더블유'), + ('x', '엑스'), + ('y', '와이'), + ('z', '제트') +]] + +# List of (ipa, lazy ipa) pairs: +_ipa_to_lazy_ipa = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ + ('t͡ɕ','ʧ'), + ('d͡ʑ','ʥ'), + ('ɲ','n^'), + ('ɕ','ʃ'), + ('ʷ','w'), + ('ɭ','l`'), + ('ʎ','ɾ'), + ('ɣ','ŋ'), + ('ɰ','ɯ'), + ('ʝ','j'), + ('ʌ','ə'), + ('ɡ','g'), + ('\u031a','#'), + ('\u0348','='), + ('\u031e',''), + ('\u0320',''), + ('\u0339','') +]] + + +def latin_to_hangul(text): + for regex, replacement in _latin_to_hangul: + text = re.sub(regex, replacement, text) + return text + + +def divide_hangul(text): + text = j2hcj(h2j(text)) + for regex, replacement in _hangul_divided: + text = re.sub(regex, replacement, text) + return text + + +def hangul_number(num, sino=True): + '''Reference https://github.com/Kyubyong/g2pK''' + num = re.sub(',', '', num) + + if num == '0': + return '영' + if not sino and num == '20': + return '스무' + + digits = '123456789' + names = '일이삼사오육칠팔구' + digit2name = {d: n for d, n in zip(digits, names)} + + modifiers = '한 두 세 네 다섯 여섯 일곱 여덟 아홉' + decimals = '열 스물 서른 마흔 쉰 예순 일흔 여든 아흔' + digit2mod = {d: mod for d, mod in zip(digits, modifiers.split())} + digit2dec = {d: dec for d, dec in zip(digits, decimals.split())} + + spelledout = [] + for i, digit in enumerate(num): + i = len(num) - i - 1 + if sino: + if i == 0: + name = digit2name.get(digit, '') + elif i == 1: + name = digit2name.get(digit, '') + '십' + name = name.replace('일십', '십') + else: + if i == 0: + name = digit2mod.get(digit, '') + elif i == 1: + name = digit2dec.get(digit, '') + if digit == '0': + if i % 4 == 0: + last_three = spelledout[-min(3, len(spelledout)):] + if ''.join(last_three) == '': + spelledout.append('') + continue + else: + spelledout.append('') + continue + if i == 2: + name = digit2name.get(digit, '') + '백' + name = name.replace('일백', '백') + elif i == 3: + name = digit2name.get(digit, '') + '천' + name = name.replace('일천', '천') + elif i == 4: + name = digit2name.get(digit, '') + '만' + name = name.replace('일만', '만') + elif i == 5: + name = digit2name.get(digit, '') + '십' + name = name.replace('일십', '십') + elif i == 6: + name = digit2name.get(digit, '') + '백' + name = name.replace('일백', '백') + elif i == 7: + name = digit2name.get(digit, '') + '천' + name = name.replace('일천', '천') + elif i == 8: + name = digit2name.get(digit, '') + '억' + elif i == 9: + name = digit2name.get(digit, '') + '십' + elif i == 10: + name = digit2name.get(digit, '') + '백' + elif i == 11: + name = digit2name.get(digit, '') + '천' + elif i == 12: + name 
= digit2name.get(digit, '') + '조' + elif i == 13: + name = digit2name.get(digit, '') + '십' + elif i == 14: + name = digit2name.get(digit, '') + '백' + elif i == 15: + name = digit2name.get(digit, '') + '천' + spelledout.append(name) + return ''.join(elem for elem in spelledout) + + +def number_to_hangul(text): + '''Reference https://github.com/Kyubyong/g2pK''' + tokens = set(re.findall(r'(\d[\d,]*)([\uac00-\ud71f]+)', text)) + for token in tokens: + num, classifier = token + if classifier[:2] in _korean_classifiers or classifier[0] in _korean_classifiers: + spelledout = hangul_number(num, sino=False) + else: + spelledout = hangul_number(num, sino=True) + text = text.replace(f'{num}{classifier}', f'{spelledout}{classifier}') + # digit by digit for remaining digits + digits = '0123456789' + names = '영일이삼사오육칠팔구' + for d, n in zip(digits, names): + text = text.replace(d, n) + return text + + +def korean_to_lazy_ipa(text): + text = latin_to_hangul(text) + text = number_to_hangul(text) + text=re.sub('[\uac00-\ud7af]+',lambda x:ko_pron.romanise(x.group(0),'ipa').split('] ~ [')[0],text) + for regex, replacement in _ipa_to_lazy_ipa: + text = re.sub(regex, replacement, text) + return text + + +def korean_to_ipa(text): + text = korean_to_lazy_ipa(text) + return text.replace('ʧ','tʃ').replace('ʥ','dʑ') diff --git a/text/mandarin.py b/text/mandarin.py new file mode 100644 index 0000000000000000000000000000000000000000..a9ce0c4b223cd7fbb00e8332d2dd53de4c7cea09 --- /dev/null +++ b/text/mandarin.py @@ -0,0 +1,328 @@ +import os +import sys +import re +from pypinyin import lazy_pinyin, BOPOMOFO +import jieba +import cn2an + + +# List of (Latin alphabet, bopomofo) pairs: +_latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ + ('a', 'ㄟˉ'), + ('b', 'ㄅㄧˋ'), + ('c', 'ㄙㄧˉ'), + ('d', 'ㄉㄧˋ'), + ('e', 'ㄧˋ'), + ('f', 'ㄝˊㄈㄨˋ'), + ('g', 'ㄐㄧˋ'), + ('h', 'ㄝˇㄑㄩˋ'), + ('i', 'ㄞˋ'), + ('j', 'ㄐㄟˋ'), + ('k', 'ㄎㄟˋ'), + ('l', 'ㄝˊㄛˋ'), + ('m', 'ㄝˊㄇㄨˋ'), + ('n', 'ㄣˉ'), + ('o', 'ㄡˉ'), + ('p', 'ㄆㄧˉ'), + ('q', 'ㄎㄧㄡˉ'), + ('r', 'ㄚˋ'), + ('s', 'ㄝˊㄙˋ'), + ('t', 'ㄊㄧˋ'), + ('u', 'ㄧㄡˉ'), + ('v', 'ㄨㄧˉ'), + ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'), + ('x', 'ㄝˉㄎㄨˋㄙˋ'), + ('y', 'ㄨㄞˋ'), + ('z', 'ㄗㄟˋ') +]] + +# List of (bopomofo, romaji) pairs: +_bopomofo_to_romaji = [(re.compile('%s' % x[0]), x[1]) for x in [ + ('ㄅㄛ', 'p⁼wo'), + ('ㄆㄛ', 'pʰwo'), + ('ㄇㄛ', 'mwo'), + ('ㄈㄛ', 'fwo'), + ('ㄅ', 'p⁼'), + ('ㄆ', 'pʰ'), + ('ㄇ', 'm'), + ('ㄈ', 'f'), + ('ㄉ', 't⁼'), + ('ㄊ', 'tʰ'), + ('ㄋ', 'n'), + ('ㄌ', 'l'), + ('ㄍ', 'k⁼'), + ('ㄎ', 'kʰ'), + ('ㄏ', 'h'), + ('ㄐ', 'ʧ⁼'), + ('ㄑ', 'ʧʰ'), + ('ㄒ', 'ʃ'), + ('ㄓ', 'ʦ`⁼'), + ('ㄔ', 'ʦ`ʰ'), + ('ㄕ', 's`'), + ('ㄖ', 'ɹ`'), + ('ㄗ', 'ʦ⁼'), + ('ㄘ', 'ʦʰ'), + ('ㄙ', 's'), + ('ㄚ', 'a'), + ('ㄛ', 'o'), + ('ㄜ', 'ə'), + ('ㄝ', 'e'), + ('ㄞ', 'ai'), + ('ㄟ', 'ei'), + ('ㄠ', 'au'), + ('ㄡ', 'ou'), + ('ㄧㄢ', 'yeNN'), + ('ㄢ', 'aNN'), + ('ㄧㄣ', 'iNN'), + ('ㄣ', 'əNN'), + ('ㄤ', 'aNg'), + ('ㄧㄥ', 'iNg'), + ('ㄨㄥ', 'uNg'), + ('ㄩㄥ', 'yuNg'), + ('ㄥ', 'əNg'), + ('ㄦ', 'əɻ'), + ('ㄧ', 'i'), + ('ㄨ', 'u'), + ('ㄩ', 'ɥ'), + ('ˉ', '→'), + ('ˊ', '↑'), + ('ˇ', '↓↑'), + ('ˋ', '↓'), + ('˙', ''), + (',', ','), + ('。', '.'), + ('!', '!'), + ('?', '?'), + ('—', '-') +]] + +# List of (romaji, ipa) pairs: +_romaji_to_ipa = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ + ('ʃy', 'ʃ'), + ('ʧʰy', 'ʧʰ'), + ('ʧ⁼y', 'ʧ⁼'), + ('NN', 'n'), + ('Ng', 'ŋ'), + ('y', 'j'), + ('h', 'x') +]] + +# List of (bopomofo, ipa) pairs: +_bopomofo_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ + ('ㄅㄛ', 'p⁼wo'), + ('ㄆㄛ', 'pʰwo'), + ('ㄇㄛ', 'mwo'), + ('ㄈㄛ', 'fwo'), + ('ㄅ', 'p⁼'), + 
('ㄆ', 'pʰ'), + ('ㄇ', 'm'), + ('ㄈ', 'f'), + ('ㄉ', 't⁼'), + ('ㄊ', 'tʰ'), + ('ㄋ', 'n'), + ('ㄌ', 'l'), + ('ㄍ', 'k⁼'), + ('ㄎ', 'kʰ'), + ('ㄏ', 'x'), + ('ㄐ', 'tʃ⁼'), + ('ㄑ', 'tʃʰ'), + ('ㄒ', 'ʃ'), + ('ㄓ', 'ts`⁼'), + ('ㄔ', 'ts`ʰ'), + ('ㄕ', 's`'), + ('ㄖ', 'ɹ`'), + ('ㄗ', 'ts⁼'), + ('ㄘ', 'tsʰ'), + ('ㄙ', 's'), + ('ㄚ', 'a'), + ('ㄛ', 'o'), + ('ㄜ', 'ə'), + ('ㄝ', 'ɛ'), + ('ㄞ', 'aɪ'), + ('ㄟ', 'eɪ'), + ('ㄠ', 'ɑʊ'), + ('ㄡ', 'oʊ'), + ('ㄧㄢ', 'jɛn'), + ('ㄩㄢ', 'ɥæn'), + ('ㄢ', 'an'), + ('ㄧㄣ', 'in'), + ('ㄩㄣ', 'ɥn'), + ('ㄣ', 'ən'), + ('ㄤ', 'ɑŋ'), + ('ㄧㄥ', 'iŋ'), + ('ㄨㄥ', 'ʊŋ'), + ('ㄩㄥ', 'jʊŋ'), + ('ㄥ', 'əŋ'), + ('ㄦ', 'əɻ'), + ('ㄧ', 'i'), + ('ㄨ', 'u'), + ('ㄩ', 'ɥ'), + ('ˉ', '→'), + ('ˊ', '↑'), + ('ˇ', '↓↑'), + ('ˋ', '↓'), + ('˙', ''), + (',', ','), + ('。', '.'), + ('!', '!'), + ('?', '?'), + ('—', '-') +]] + +# List of (bopomofo, ipa2) pairs: +_bopomofo_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ + ('ㄅㄛ', 'pwo'), + ('ㄆㄛ', 'pʰwo'), + ('ㄇㄛ', 'mwo'), + ('ㄈㄛ', 'fwo'), + ('ㄅ', 'p'), + ('ㄆ', 'pʰ'), + ('ㄇ', 'm'), + ('ㄈ', 'f'), + ('ㄉ', 't'), + ('ㄊ', 'tʰ'), + ('ㄋ', 'n'), + ('ㄌ', 'l'), + ('ㄍ', 'k'), + ('ㄎ', 'kʰ'), + ('ㄏ', 'h'), + ('ㄐ', 'tɕ'), + ('ㄑ', 'tɕʰ'), + ('ㄒ', 'ɕ'), + ('ㄓ', 'tʂ'), + ('ㄔ', 'tʂʰ'), + ('ㄕ', 'ʂ'), + ('ㄖ', 'ɻ'), + ('ㄗ', 'ts'), + ('ㄘ', 'tsʰ'), + ('ㄙ', 's'), + ('ㄚ', 'a'), + ('ㄛ', 'o'), + ('ㄜ', 'ɤ'), + ('ㄝ', 'ɛ'), + ('ㄞ', 'aɪ'), + ('ㄟ', 'eɪ'), + ('ㄠ', 'ɑʊ'), + ('ㄡ', 'oʊ'), + ('ㄧㄢ', 'jɛn'), + ('ㄩㄢ', 'yæn'), + ('ㄢ', 'an'), + ('ㄧㄣ', 'in'), + ('ㄩㄣ', 'yn'), + ('ㄣ', 'ən'), + ('ㄤ', 'ɑŋ'), + ('ㄧㄥ', 'iŋ'), + ('ㄨㄥ', 'ʊŋ'), + ('ㄩㄥ', 'jʊŋ'), + ('ㄥ', 'ɤŋ'), + ('ㄦ', 'əɻ'), + ('ㄧ', 'i'), + ('ㄨ', 'u'), + ('ㄩ', 'y'), + ('ˉ', '˥'), + ('ˊ', '˧˥'), + ('ˇ', '˨˩˦'), + ('ˋ', '˥˩'), + ('˙', ''), + (',', ','), + ('。', '.'), + ('!', '!'), + ('?', '?'), + ('—', '-') +]] + + +def number_to_chinese(text): + numbers = re.findall(r'\d+(?:\.?\d+)?', text) + for number in numbers: + text = text.replace(number, cn2an.an2cn(number), 1) + return text + + +def chinese_to_bopomofo(text, taiwanese=False): + text = text.replace('、', ',').replace(';', ',').replace(':', ',') + words = jieba.lcut(text, cut_all=False) + text = '' + for word in words: + bopomofos = lazy_pinyin(word, BOPOMOFO) + if not re.search('[\u4e00-\u9fff]', word): + text += word + continue + for i in range(len(bopomofos)): + bopomofos[i] = re.sub(r'([\u3105-\u3129])$', r'\1ˉ', bopomofos[i]) + if text != '': + text += ' ' + if taiwanese: + text += '#'+'#'.join(bopomofos) + else: + text += ''.join(bopomofos) + return text + + +def latin_to_bopomofo(text): + for regex, replacement in _latin_to_bopomofo: + text = re.sub(regex, replacement, text) + return text + + +def bopomofo_to_romaji(text): + for regex, replacement in _bopomofo_to_romaji: + text = re.sub(regex, replacement, text) + return text + + +def bopomofo_to_ipa(text): + for regex, replacement in _bopomofo_to_ipa: + text = re.sub(regex, replacement, text) + return text + + +def bopomofo_to_ipa2(text): + for regex, replacement in _bopomofo_to_ipa2: + text = re.sub(regex, replacement, text) + return text + + +def chinese_to_romaji(text): + text = number_to_chinese(text) + text = chinese_to_bopomofo(text) + text = latin_to_bopomofo(text) + text = bopomofo_to_romaji(text) + text = re.sub('i([aoe])', r'y\1', text) + text = re.sub('u([aoəe])', r'w\1', text) + text = re.sub('([ʦsɹ]`[⁼ʰ]?)([→↓↑ ]+|$)', + r'\1ɹ`\2', text).replace('ɻ', 'ɹ`') + text = re.sub('([ʦs][⁼ʰ]?)([→↓↑ ]+|$)', r'\1ɹ\2', text) + return text + + +def chinese_to_lazy_ipa(text): + text = chinese_to_romaji(text) + for regex, replacement in _romaji_to_ipa: + 
text = re.sub(regex, replacement, text) + return text + + +def chinese_to_ipa(text): + text = number_to_chinese(text) + text = chinese_to_bopomofo(text) + text = latin_to_bopomofo(text) + text = bopomofo_to_ipa(text) + text = re.sub('i([aoe])', r'j\1', text) + text = re.sub('u([aoəe])', r'w\1', text) + text = re.sub('([sɹ]`[⁼ʰ]?)([→↓↑ ]+|$)', + r'\1ɹ`\2', text).replace('ɻ', 'ɹ`') + text = re.sub('([s][⁼ʰ]?)([→↓↑ ]+|$)', r'\1ɹ\2', text) + return text + + +def chinese_to_ipa2(text, taiwanese=False): + text = number_to_chinese(text) + text = chinese_to_bopomofo(text, taiwanese) + text = latin_to_bopomofo(text) + text = bopomofo_to_ipa2(text) + text = re.sub(r'i([aoe])', r'j\1', text) + text = re.sub(r'u([aoəe])', r'w\1', text) + text = re.sub(r'([ʂɹ]ʰ?)([˩˨˧˦˥ ]+|$)', r'\1ʅ\2', text) + text = re.sub(r'(sʰ?)([˩˨˧˦˥ ]+|$)', r'\1ɿ\2', text) + return text diff --git a/text/ngu_dialect.py b/text/ngu_dialect.py new file mode 100644 index 0000000000000000000000000000000000000000..ce3e12bbf0469426872eed5f681985d3e1be9b26 --- /dev/null +++ b/text/ngu_dialect.py @@ -0,0 +1,30 @@ +import re +import opencc + + +dialects = {'SZ': 'suzhou', 'WX': 'wuxi', 'CZ': 'changzhou', 'HZ': 'hangzhou', + 'SX': 'shaoxing', 'NB': 'ningbo', 'JJ': 'jingjiang', 'YX': 'yixing', + 'JD': 'jiading', 'ZR': 'zhenru', 'PH': 'pinghu', 'TX': 'tongxiang', + 'JS': 'jiashan', 'HN': 'xiashi', 'LP': 'linping', 'XS': 'xiaoshan', + 'FY': 'fuyang', 'RA': 'ruao', 'CX': 'cixi', 'SM': 'sanmen', + 'TT': 'tiantai', 'WZ': 'wenzhou', 'SC': 'suichang', 'YB': 'youbu'} + +converters = {} + +for dialect in dialects.values(): + try: + converters[dialect] = opencc.OpenCC(dialect) + except: + pass + + +def ngu_dialect_to_ipa(text, dialect): + dialect = dialects[dialect] + text = converters[dialect].convert(text).replace('-','').replace('$',' ') + text = re.sub(r'[、;:]', ',', text) + text = re.sub(r'\s*,\s*', ', ', text) + text = re.sub(r'\s*。\s*', '. ', text) + text = re.sub(r'\s*?\s*', '? ', text) + text = re.sub(r'\s*!\s*', '! 
', text) + text = re.sub(r'\s*$', '', text) + return text diff --git a/text/sanskrit.py b/text/sanskrit.py new file mode 100644 index 0000000000000000000000000000000000000000..0223aaac384a2f850f5bc20651fc18eb964607d0 --- /dev/null +++ b/text/sanskrit.py @@ -0,0 +1,62 @@ +import re +from indic_transliteration import sanscript + + +# List of (iast, ipa) pairs: +_iast_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ + ('a', 'ə'), + ('ā', 'aː'), + ('ī', 'iː'), + ('ū', 'uː'), + ('ṛ', 'ɹ`'), + ('ṝ', 'ɹ`ː'), + ('ḷ', 'l`'), + ('ḹ', 'l`ː'), + ('e', 'eː'), + ('o', 'oː'), + ('k', 'k⁼'), + ('k⁼h', 'kʰ'), + ('g', 'g⁼'), + ('g⁼h', 'gʰ'), + ('ṅ', 'ŋ'), + ('c', 'ʧ⁼'), + ('ʧ⁼h', 'ʧʰ'), + ('j', 'ʥ⁼'), + ('ʥ⁼h', 'ʥʰ'), + ('ñ', 'n^'), + ('ṭ', 't`⁼'), + ('t`⁼h', 't`ʰ'), + ('ḍ', 'd`⁼'), + ('d`⁼h', 'd`ʰ'), + ('ṇ', 'n`'), + ('t', 't⁼'), + ('t⁼h', 'tʰ'), + ('d', 'd⁼'), + ('d⁼h', 'dʰ'), + ('p', 'p⁼'), + ('p⁼h', 'pʰ'), + ('b', 'b⁼'), + ('b⁼h', 'bʰ'), + ('y', 'j'), + ('ś', 'ʃ'), + ('ṣ', 's`'), + ('r', 'ɾ'), + ('l̤', 'l`'), + ('h', 'ɦ'), + ("'", ''), + ('~', '^'), + ('ṃ', '^') +]] + + +def devanagari_to_ipa(text): + text = text.replace('ॐ', 'ओम्') + text = re.sub(r'\s*।\s*$', '.', text) + text = re.sub(r'\s*।\s*', ', ', text) + text = re.sub(r'\s*॥', '.', text) + text = sanscript.transliterate(text, sanscript.DEVANAGARI, sanscript.IAST) + for regex, replacement in _iast_to_ipa: + text = re.sub(regex, replacement, text) + text = re.sub('(.)[`ː]*ḥ', lambda x: x.group(0)[:-1]+'h'+x.group(1)+'*', text) + return text diff --git a/text/shanghainese.py b/text/shanghainese.py new file mode 100644 index 0000000000000000000000000000000000000000..cb29c24a08d2e406e8399cf7bc9fe5cb43cb9c61 --- /dev/null +++ b/text/shanghainese.py @@ -0,0 +1,64 @@ +import re +import cn2an +import opencc + + +converter = opencc.OpenCC('zaonhe') + +# List of (Latin alphabet, ipa) pairs: +_latin_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ + ('A', 'ᴇ'), + ('B', 'bi'), + ('C', 'si'), + ('D', 'di'), + ('E', 'i'), + ('F', 'ᴇf'), + ('G', 'dʑi'), + ('H', 'ᴇtɕʰ'), + ('I', 'ᴀi'), + ('J', 'dʑᴇ'), + ('K', 'kʰᴇ'), + ('L', 'ᴇl'), + ('M', 'ᴇm'), + ('N', 'ᴇn'), + ('O', 'o'), + ('P', 'pʰi'), + ('Q', 'kʰiu'), + ('R', 'ᴀl'), + ('S', 'ᴇs'), + ('T', 'tʰi'), + ('U', 'ɦiu'), + ('V', 'vi'), + ('W', 'dᴀbɤliu'), + ('X', 'ᴇks'), + ('Y', 'uᴀi'), + ('Z', 'zᴇ') +]] + + +def _number_to_shanghainese(num): + num = cn2an.an2cn(num).replace('一十','十').replace('二十', '廿').replace('二', '两') + return re.sub(r'((?:^|[^三四五六七八九])十|廿)两', r'\1二', num) + + +def number_to_shanghainese(text): + return re.sub(r'\d+(?:\.?\d+)?', lambda x: _number_to_shanghainese(x.group()), text) + + +def latin_to_ipa(text): + for regex, replacement in _latin_to_ipa: + text = re.sub(regex, replacement, text) + return text + + +def shanghainese_to_ipa(text): + text = number_to_shanghainese(text.upper()) + text = converter.convert(text).replace('-','').replace('$',' ') + text = re.sub(r'[A-Z]', lambda x: latin_to_ipa(x.group())+' ', text) + text = re.sub(r'[、;:]', ',', text) + text = re.sub(r'\s*,\s*', ', ', text) + text = re.sub(r'\s*。\s*', '. ', text) + text = re.sub(r'\s*?\s*', '? ', text) + text = re.sub(r'\s*!\s*', '! ', text) + text = re.sub(r'\s*$', '', text) + return text diff --git a/text/symbols.py b/text/symbols.py new file mode 100644 index 0000000000000000000000000000000000000000..3705de1c96d52d5643eab9bc80671fe9cb7e4363 --- /dev/null +++ b/text/symbols.py @@ -0,0 +1,67 @@ +''' +Defines the set of symbols used in text input to the model.
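+Only one cleaner's symbol set is active at a time; the quoted blocks below are the alternative inventories kept for the other cleaners.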
+''' +_pad = '_' +_punctuation = ',.!?-~…' +_letters = 'AEINOQUabdefghijklmnoprstuvwyzʃʧʦɯɹəɥ⁼ʰ`→↓↑ ' +''' +# japanese_cleaners2 +_pad = '_' +_punctuation = ',.!?-~…' +_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧʦ↓↑ ' +''' + +'''# korean_cleaners +_pad = '_' +_punctuation = ',.!?…~' +_letters = 'ㄱㄴㄷㄹㅁㅂㅅㅇㅈㅊㅋㅌㅍㅎㄲㄸㅃㅆㅉㅏㅓㅗㅜㅡㅣㅐㅔ ' +''' + +'''# chinese_cleaners +_pad = '_' +_punctuation = ',。!?—…' +_letters = 'ㄅㄆㄇㄈㄉㄊㄋㄌㄍㄎㄏㄐㄑㄒㄓㄔㄕㄖㄗㄘㄙㄚㄛㄜㄝㄞㄟㄠㄡㄢㄣㄤㄥㄦㄧㄨㄩˉˊˇˋ˙ ' +''' + + +'''# sanskrit_cleaners +_pad = '_' +_punctuation = '।' +_letters = 'ँंःअआइईउऊऋएऐओऔकखगघङचछजझञटठडढणतथदधनपफबभमयरलळवशषसहऽािीुूृॄेैोौ्ॠॢ ' +''' + +'''# cjks_cleaners +_pad = '_' +_punctuation = ',.!?-~…' +_letters = 'NQabdefghijklmnopstuvwxyzʃʧʥʦɯɹəɥçɸɾβŋɦː⁼ʰ`^#*=→↓↑ ' +''' + +'''# thai_cleaners +_pad = '_' +_punctuation = '.!? ' +_letters = 'กขฃคฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรฤลวศษสหฬอฮฯะัาำิีึืุูเแโใไๅๆ็่้๊๋์' +''' + +'''# cjke_cleaners2 +_pad = '_' +_punctuation = ',.!?-~…' +_letters = 'NQabdefghijklmnopstuvwxyzɑæʃʑçɯɪɔɛɹðəɫɥɸʊɾʒθβŋɦ⁼ʰ`^#*=ˈˌ→↓↑ ' +''' + +'''# shanghainese_cleaners +_pad = '_' +_punctuation = ',.!?…' +_letters = 'abdfghiklmnopstuvyzøŋȵɑɔɕəɤɦɪɿʑʔʰ̩̃ᴀᴇ15678 ' +''' + +'''# chinese_dialect_cleaners +_pad = '_' +_punctuation = ',.!?~…─' +_letters = '#Nabdefghijklmnoprstuvwxyzæçøŋœȵɐɑɒɓɔɕɗɘəɚɛɜɣɤɦɪɭɯɵɷɸɻɾɿʂʅʊʋʌʏʑʔʦʮʰʷˀː˥˦˧˨˩̥̩̃̚αᴀᴇ↑↓∅ⱼ ' +''' + +# Export all symbols: +symbols = [_pad] + list(_punctuation) + list(_letters) + +# Special symbol ids +SPACE_ID = symbols.index(" ") diff --git a/text/thai.py b/text/thai.py new file mode 100644 index 0000000000000000000000000000000000000000..998207c01a85c710a46db1ec8b62c39c2d94bc84 --- /dev/null +++ b/text/thai.py @@ -0,0 +1,44 @@ +import re +from num_thai.thainumbers import NumThai + + +num = NumThai() + +# List of (Latin alphabet, Thai) pairs: +_latin_to_thai = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ + ('a', 'เอ'), + ('b','บี'), + ('c','ซี'), + ('d','ดี'), + ('e','อี'), + ('f','เอฟ'), + ('g','จี'), + ('h','เอช'), + ('i','ไอ'), + ('j','เจ'), + ('k','เค'), + ('l','แอล'), + ('m','เอ็ม'), + ('n','เอ็น'), + ('o','โอ'), + ('p','พี'), + ('q','คิว'), + ('r','แอร์'), + ('s','เอส'), + ('t','ที'), + ('u','ยู'), + ('v','วี'), + ('w','ดับเบิลยู'), + ('x','เอ็กซ์'), + ('y','วาย'), + ('z','ซี') +]] + + +def num_to_thai(text): + return re.sub(r'(?:\d+(?:,?\d+)?)+(?:\.\d+(?:,?\d+)?)?', lambda x: ''.join(num.NumberToTextThai(float(x.group(0).replace(',', '')))), text) + +def latin_to_thai(text): + for regex, replacement in _latin_to_thai: + text = re.sub(regex, replacement, text) + return text diff --git a/transforms.py b/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..30e857692442329a569fdd53e391b3ec4044a628 --- /dev/null +++ b/transforms.py @@ -0,0 +1,200 @@ +import numpy as np +import torch +from torch.nn import functional as F + +DEFAULT_MIN_BIN_WIDTH = 1e-3 +DEFAULT_MIN_BIN_HEIGHT = 1e-3 +DEFAULT_MIN_DERIVATIVE = 1e-3 + + +def piecewise_rational_quadratic_transform( + inputs, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=False, + tails=None, + tail_bound=1., + min_bin_width=DEFAULT_MIN_BIN_WIDTH, + min_bin_height=DEFAULT_MIN_BIN_HEIGHT, + min_derivative=DEFAULT_MIN_DERIVATIVE): + + if tails is None: + spline_fn = rational_quadratic_spline + spline_kwargs = {} + else: + spline_fn = unconstrained_rational_quadratic_spline + spline_kwargs = {'tails': tails, 'tail_bound': tail_bound} + + outputs, logabsdet = spline_fn( + inputs=inputs, + unnormalized_widths=unnormalized_widths, + 
unnormalized_heights=unnormalized_heights, + unnormalized_derivatives=unnormalized_derivatives, + inverse=inverse, + min_bin_width=min_bin_width, + min_bin_height=min_bin_height, + min_derivative=min_derivative, + **spline_kwargs) + return outputs, logabsdet + + +def searchsorted(bin_locations, inputs, eps=1e-6): + bin_locations[..., bin_locations.size(-1) - 1] += eps + return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1 + + +def unconstrained_rational_quadratic_spline( + inputs, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=False, + tails='linear', + tail_bound=1., + min_bin_width=DEFAULT_MIN_BIN_WIDTH, + min_bin_height=DEFAULT_MIN_BIN_HEIGHT, + min_derivative=DEFAULT_MIN_DERIVATIVE): + inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) + outside_interval_mask = ~inside_interval_mask + + outputs = torch.zeros_like(inputs) + logabsdet = torch.zeros_like(inputs) + + if tails == 'linear': + unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) + constant = np.log(np.exp(1 - min_derivative) - 1) + unnormalized_derivatives[..., 0] = constant + unnormalized_derivatives[..., unnormalized_derivatives.size(-1) - 1] = constant + + outputs[outside_interval_mask] = inputs[outside_interval_mask] + logabsdet[outside_interval_mask] = 0 + else: + raise RuntimeError('{} tails are not implemented.'.format(tails)) + + outputs[inside_interval_mask], logabsdet[ + inside_interval_mask] = rational_quadratic_spline( + inputs=inputs[inside_interval_mask], + unnormalized_widths=unnormalized_widths[inside_interval_mask, :], + unnormalized_heights=unnormalized_heights[inside_interval_mask, :], + unnormalized_derivatives=unnormalized_derivatives[ + inside_interval_mask, :], + inverse=inverse, + left=-tail_bound, + right=tail_bound, + bottom=-tail_bound, + top=tail_bound, + min_bin_width=min_bin_width, + min_bin_height=min_bin_height, + min_derivative=min_derivative) + + return outputs, logabsdet + + +def rational_quadratic_spline(inputs, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=False, + left=0., + right=1., + bottom=0., + top=1., + min_bin_width=DEFAULT_MIN_BIN_WIDTH, + min_bin_height=DEFAULT_MIN_BIN_HEIGHT, + min_derivative=DEFAULT_MIN_DERIVATIVE): + if torch.min(inputs) < left or torch.max(inputs) > right: + raise ValueError('Input to a transform is not within its domain') + + num_bins = unnormalized_widths.shape[-1] + + if min_bin_width * num_bins > 1.0: + raise ValueError('Minimal bin width too large for the number of bins') + if min_bin_height * num_bins > 1.0: + raise ValueError('Minimal bin height too large for the number of bins') + + widths = F.softmax(unnormalized_widths, dim=-1) + widths = min_bin_width + (1 - min_bin_width * num_bins) * widths + cumwidths = torch.cumsum(widths, dim=-1) + cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) + cumwidths = (right - left) * cumwidths + left + cumwidths[..., 0] = left + cumwidths[..., cumwidths.size(-1) - 1] = right + widths = cumwidths[..., 1:] - cumwidths[..., :-1] + + derivatives = min_derivative + F.softplus(unnormalized_derivatives) + + heights = F.softmax(unnormalized_heights, dim=-1) + heights = min_bin_height + (1 - min_bin_height * num_bins) * heights + cumheights = torch.cumsum(heights, dim=-1) + cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) + cumheights = (top - bottom) * cumheights + bottom + cumheights[..., 0] = bottom + cumheights[..., cumheights.size(-1) - 1] = top + 
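# recover per-bin heights from the padded cumulative heights, mirroring the width computation above +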
heights = cumheights[..., 1:] - cumheights[..., :-1] + + if inverse: + bin_idx = searchsorted(cumheights, inputs)[..., None] + else: + bin_idx = searchsorted(cumwidths, inputs)[..., None] + + input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] + input_bin_widths = widths.gather(-1, bin_idx)[..., 0] + + input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] + delta = heights / widths + input_delta = delta.gather(-1, bin_idx)[..., 0] + + input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] + input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., + 0] + + input_heights = heights.gather(-1, bin_idx)[..., 0] + + if inverse: + a = ( + ((inputs - input_cumheights) * + (input_derivatives + input_derivatives_plus_one - 2 * input_delta) + + input_heights * (input_delta - input_derivatives))) + b = ( + input_heights * input_derivatives - (inputs - input_cumheights) * + (input_derivatives + input_derivatives_plus_one - 2 * input_delta)) + c = -input_delta * (inputs - input_cumheights) + + discriminant = b.pow(2) - 4 * a * c + assert (discriminant >= 0).all() + + root = (2 * c) / (-b - torch.sqrt(discriminant)) + outputs = root * input_bin_widths + input_cumwidths + + theta_one_minus_theta = root * (1 - root) + denominator = input_delta + ( + (input_derivatives + input_derivatives_plus_one - 2 * input_delta) + * theta_one_minus_theta) + derivative_numerator = input_delta.pow(2) * ( + input_derivatives_plus_one * root.pow(2) + + 2 * input_delta * theta_one_minus_theta + input_derivatives * + (1 - root).pow(2)) + logabsdet = torch.log( + derivative_numerator) - 2 * torch.log(denominator) + + return outputs, -logabsdet + else: + theta = (inputs - input_cumwidths) / input_bin_widths + theta_one_minus_theta = theta * (1 - theta) + + numerator = input_heights * (input_delta * theta.pow(2) + + input_derivatives * theta_one_minus_theta) + denominator = input_delta + ( + (input_derivatives + input_derivatives_plus_one - 2 * input_delta) + * theta_one_minus_theta) + outputs = input_cumheights + numerator / denominator + + derivative_numerator = input_delta.pow(2) * ( + input_derivatives_plus_one * theta.pow(2) + + 2 * input_delta * theta_one_minus_theta + input_derivatives * + (1 - theta).pow(2)) + logabsdet = torch.log( + derivative_numerator) - 2 * torch.log(denominator) + + return outputs, logabsdet diff --git a/utils.py b/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a870556e805c8cba7dc0540c686710a6a62819c4 --- /dev/null +++ b/utils.py @@ -0,0 +1,307 @@ +import argparse +import glob +import json +import logging +import os +import subprocess +import sys + +import numpy as np +from scipy.io.wavfile import read +import torch + +MATPLOTLIB_FLAG = False + +logging.basicConfig(stream=sys.stdout, level=logging.INFO) +logger = logging + + +def load_checkpoint(checkpoint_path, model, optimizer=None): + assert os.path.isfile(checkpoint_path) + checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') + iteration = checkpoint_dict['iteration'] + learning_rate = checkpoint_dict['learning_rate'] + if optimizer is not None: + optimizer.load_state_dict(checkpoint_dict['optimizer']) + saved_state_dict = checkpoint_dict['model'] + if hasattr(model, 'module'): + state_dict = model.module.state_dict() + else: + state_dict = model.state_dict() + new_state_dict = {} + for k, v in state_dict.items(): + try: + new_state_dict[k] = saved_state_dict[k] + except Exception as e: + logger.info("%s is not in the checkpoint" % k) + new_state_dict[k] = v + if 
hasattr(model, 'module'): + model.module.load_state_dict(new_state_dict) + else: + model.load_state_dict(new_state_dict) + logger.info("Loaded checkpoint '{}' (iteration {})".format( + checkpoint_path, iteration)) + return model, optimizer, learning_rate, iteration + + +def save_checkpoint(model, optimizer, learning_rate, iteration, + checkpoint_path): + logger.info( + "Saving model and optimizer state at iteration {} to {}".format( + iteration, checkpoint_path)) + if hasattr(model, 'module'): + state_dict = model.module.state_dict() + else: + state_dict = model.state_dict() + torch.save( + { + 'model': state_dict, + 'iteration': iteration, + 'optimizer': optimizer.state_dict(), + 'learning_rate': learning_rate + }, checkpoint_path) + + +def summarize( + writer, + global_step, + scalars={}, # noqa + histograms={}, # noqa + images={}, # noqa + audios={}, # noqa + audio_sampling_rate=22050): + for k, v in scalars.items(): + writer.add_scalar(k, v, global_step) + for k, v in histograms.items(): + writer.add_histogram(k, v, global_step) + for k, v in images.items(): + writer.add_image(k, v, global_step, dataformats='HWC') + for k, v in audios.items(): + writer.add_audio(k, v, global_step, audio_sampling_rate) + + +def latest_checkpoint_path(dir_path, regex="G_*.pth"): + f_list = glob.glob(os.path.join(dir_path, regex)) + f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f)))) + x = f_list[-1] + print(x) + return x + + +def plot_spectrogram_to_numpy(spectrogram): + global MATPLOTLIB_FLAG + if not MATPLOTLIB_FLAG: + import matplotlib + matplotlib.use("Agg") + MATPLOTLIB_FLAG = True + mpl_logger = logging.getLogger('matplotlib') + mpl_logger.setLevel(logging.WARNING) + import matplotlib.pylab as plt + import numpy as np + + fig, ax = plt.subplots(figsize=(10, 2)) + im = ax.imshow(spectrogram, + aspect="auto", + origin="lower", + interpolation='none') + plt.colorbar(im, ax=ax) + plt.xlabel("Frames") + plt.ylabel("Channels") + plt.tight_layout() + + fig.canvas.draw() + data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') + data = data.reshape(fig.canvas.get_width_height()[::-1] + (3, )) + plt.close() + return data + + +def plot_alignment_to_numpy(alignment, info=None): + global MATPLOTLIB_FLAG + if not MATPLOTLIB_FLAG: + import matplotlib + matplotlib.use("Agg") + MATPLOTLIB_FLAG = True + mpl_logger = logging.getLogger('matplotlib') + mpl_logger.setLevel(logging.WARNING) + import matplotlib.pylab as plt + import numpy as np + + fig, ax = plt.subplots(figsize=(6, 4)) + im = ax.imshow(alignment.transpose(), + aspect='auto', + origin='lower', + interpolation='none') + fig.colorbar(im, ax=ax) + xlabel = 'Decoder timestep' + if info is not None: + xlabel += '\n\n' + info + plt.xlabel(xlabel) + plt.ylabel('Encoder timestep') + plt.tight_layout() + + fig.canvas.draw() + data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') + data = data.reshape(fig.canvas.get_width_height()[::-1] + (3, )) + plt.close() + return data + + +def load_wav_to_torch(full_path): + sampling_rate, data = read(full_path) + return torch.FloatTensor(data.astype(np.float32)), sampling_rate + + +def load_filepaths_and_text(filename, split="|"): + with open(filename, encoding='utf-8') as f: + filepaths_and_text = [line.strip().split(split) for line in f] + return filepaths_and_text + + +def get_hparams(init=True): + parser = argparse.ArgumentParser() + parser.add_argument('-c', + '--config', + type=str, + default="./configs/base.json", + help='JSON file for configuration') + 
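# --model names the output directory; the data and table paths parsed below are written into the loaded config later in this function +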
parser.add_argument('-m', + '--model', + type=str, + required=True, + help='Model name') + parser.add_argument('--train_data', + type=str, + required=True, + help='train data') + parser.add_argument('--val_data', type=str, required=True, help='val data') + parser.add_argument('--phone_table', + type=str, + required=True, + help='phone table') + parser.add_argument('--speaker_table', + type=str, + default=None, + help='speaker table, required for multiple speakers') + + args = parser.parse_args() + model_dir = args.model + + if not os.path.exists(model_dir): + os.makedirs(model_dir) + + config_path = args.config + config_save_path = os.path.join(model_dir, "config.json") + if init: + with open(config_path, "r", encoding='utf8') as f: + data = f.read() + with open(config_save_path, "w", encoding='utf8') as f: + f.write(data) + else: + with open(config_save_path, "r", encoding='utf8') as f: + data = f.read() + config = json.loads(data) + config['data']['training_files'] = args.train_data + config['data']['validation_files'] = args.val_data + config['data']['phone_table'] = args.phone_table + # 0 is kept for blank + config['data']['num_phones'] = len(open(args.phone_table).readlines()) + 1 + if args.speaker_table is not None: + config['data']['speaker_table'] = args.speaker_table + # 0 is kept for unknown speaker + config['data']['n_speakers'] = len( + open(args.speaker_table).readlines()) + 1 + else: + config['data']['n_speakers'] = 0 + + hparams = HParams(**config) + hparams.model_dir = model_dir + return hparams + + +def get_hparams_from_dir(model_dir): + config_save_path = os.path.join(model_dir, "config.json") + with open(config_save_path, "r") as f: + data = f.read() + config = json.loads(data) + + hparams = HParams(**config) + hparams.model_dir = model_dir + return hparams + + +def get_hparams_from_file(config_path): + with open(config_path, "r") as f: + data = f.read() + config = json.loads(data) + + hparams = HParams(**config) + return hparams + + +def check_git_hash(model_dir): + source_dir = os.path.dirname(os.path.realpath(__file__)) + if not os.path.exists(os.path.join(source_dir, ".git")): + logger.warn('''{} is not a git repository, therefore hash value + comparison will be ignored.'''.format(source_dir)) + return + + cur_hash = subprocess.getoutput("git rev-parse HEAD") + + path = os.path.join(model_dir, "githash") + if os.path.exists(path): + saved_hash = open(path).read() + if saved_hash != cur_hash: + logger.warn( + "git hash values are different. {}(saved) != {}(current)". 
+ format(saved_hash[:8], cur_hash[:8])) + else: + open(path, "w").write(cur_hash) + + +def get_logger(model_dir, filename="train.log"): + global logger + logger = logging.getLogger(os.path.basename(model_dir)) + logger.setLevel(logging.INFO) + + formatter = logging.Formatter( + "%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") + if not os.path.exists(model_dir): + os.makedirs(model_dir) + h = logging.FileHandler(os.path.join(model_dir, filename)) + h.setLevel(logging.INFO) + h.setFormatter(formatter) + logger.addHandler(h) + return logger + + +class HParams(): + def __init__(self, **kwargs): + for k, v in kwargs.items(): + if type(v) == dict: + v = HParams(**v) + self[k] = v + + def keys(self): + return self.__dict__.keys() + + def items(self): + return self.__dict__.items() + + def values(self): + return self.__dict__.values() + + def __len__(self): + return len(self.__dict__) + + def __getitem__(self, key): + return getattr(self, key) + + def __setitem__(self, key, value): + return setattr(self, key, value) + + def __contains__(self, key): + return key in self.__dict__ + + def __repr__(self): + return self.__dict__.__repr__()
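+
+# Usage sketch (illustrative only; the config path and key names below are placeholders):
+#   hps = get_hparams_from_file('./configs/base.json')
+#   nested dicts are wrapped recursively, so a config containing {"data": {"sampling_rate": 22050}}
+#   can be read as hps.data.sampling_rate or hps['data']['sampling_rate'], and 'data' in hps checks for the key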