Commit: Upload folder using huggingface_hub

Note: this view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.
- .dockerignore +2 -1
- .github/workflows/update_space.yml +28 -0
- .gitignore +4 -0
- .gradio/certificate.pem +31 -0
- App.bat +10 -10
- Dataset.bat +11 -0
- Dockerfile.deploy +1 -1
- Editor.bat +10 -10
- Inference.bat +11 -0
- Initialize.bat +11 -0
- Merge.bat +11 -0
- README.md +27 -20
- Server.bat +10 -10
- StyleVectors.bat +11 -0
- Train.bat +11 -0
- app.py +7 -8
- bert_gen.py +6 -9
- colab.ipynb +445 -374
- config.py +98 -83
- configs/config.json +1 -1
- configs/config_jp_extra.json +1 -1
- configs/default_paths.yml +8 -0
- data_utils.py +13 -14
- default_style.py +75 -11
- docs/CHANGELOG.md +71 -0
- docs/CLI.md +5 -5
- docs/FAQ.md +57 -0
- docs/TERMS_OF_USE.md +54 -0
- gen_yaml.py +1 -1
- gradio_tabs/dataset.py +34 -17
- gradio_tabs/inference.py +76 -5
- gradio_tabs/merge.py +1245 -215
- gradio_tabs/style_vectors.py +277 -161
- gradio_tabs/train.py +116 -53
- initialize.py +34 -8
- library.ipynb +132 -135
- preprocess_text.py +21 -11
- pyproject.toml +29 -49
- requirements-colab.txt +20 -0
- requirements-infer.txt +24 -0
- requirements.txt +7 -12
- resample.py +3 -1
- scripts/Install-Style-Bert-VITS2-CPU.bat +134 -123
- scripts/Install-Style-Bert-VITS2.bat +141 -130
- scripts/Setup-Python.bat +101 -115
- scripts/Update-Style-Bert-VITS2.bat +69 -62
- server_editor.py +12 -14
- server_fastapi.py +33 -1
- slice.py +7 -7
- speech_mos.py +7 -6
.dockerignore
CHANGED
@@ -6,12 +6,13 @@
 !/style_bert_vits2/

 !/bert/deberta-v2-large-japanese-char-wwm/
-!/common/
 !/configs/
 !/dict_data/default.csv
 !/model_assets/
+!/static/

 !/config.py
 !/default_config.yml
+!/initialize.py
 !/requirements.txt
 !/server_editor.py
.github/workflows/update_space.yml
ADDED
@@ -0,0 +1,28 @@
+name: Run Python script
+
+on:
+  push:
+    branches:
+      - main
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+
+      - name: Set up Python
+        uses: actions/setup-python@v2
+        with:
+          python-version: '3.9'
+
+      - name: Install Gradio
+        run: python -m pip install gradio
+
+      - name: Log in to Hugging Face
+        run: python -c 'import huggingface_hub; huggingface_hub.login(token="${{ secrets.hf_token }}")'
+
+      - name: Deploy to Spaces
+        run: gradio deploy
.gitignore
CHANGED
@@ -14,6 +14,8 @@ dist/
 /bert/*/*.safetensors
 /bert/*/*.msgpack

+/configs/paths.yml
+
 /pretrained/*.safetensors
 /pretrained/*.pth

@@ -37,3 +39,5 @@ safetensors.ipynb

 # pyopenjtalk's dictionary
 *.dic
+
+playground.ipynb
.gradio/certificate.pem
ADDED
@@ -0,0 +1,31 @@
+-----BEGIN CERTIFICATE-----
+MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
+TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
+cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
+WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
+ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
+MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
+h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
+0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
+A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
+T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
+B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
+B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
+KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
+OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
+jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
+qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
+rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
+hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
+ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
+3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
+NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
+ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
+TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
+jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
+oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
+4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
+mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
+emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
+-----END CERTIFICATE-----
App.bat
CHANGED
@@ -1,11 +1,11 @@
-chcp 65001 > NUL
-@echo off
-
-pushd %~dp0
-echo Running app.py...
-venv\Scripts\python app.py
-
-if %errorlevel% neq 0 ( pause & popd & exit /b %errorlevel% )
-
-popd
+chcp 65001 > NUL
+@echo off
+
+pushd %~dp0
+echo Running app.py...
+venv\Scripts\python app.py
+
+if %errorlevel% neq 0 ( pause & popd & exit /b %errorlevel% )
+
+popd
 pause
Dataset.bat
ADDED
@@ -0,0 +1,11 @@
+chcp 65001 > NUL
+@echo off
+
+pushd %~dp0
+echo Running gradio_tabs/dataset.py...
+venv\Scripts\python -m gradio_tabs.dataset
+
+if %errorlevel% neq 0 ( pause & popd & exit /b %errorlevel% )
+
+popd
+pause
Dockerfile.deploy
CHANGED
@@ -20,4 +20,4 @@ COPY --chown=user . $HOME/app
 RUN pip install --no-cache-dir -r $HOME/app/requirements.txt

 # 必要に応じて制限を変更してください
-CMD ["python", "
+CMD ["python", "server_editor.py", "--line_length", "50", "--line_count", "3", "--skip_static_files"]
Editor.bat
CHANGED
@@ -1,11 +1,11 @@
-chcp 65001 > NUL
-@echo off
-
-pushd %~dp0
-echo Running server_editor.py --inbrowser
-venv\Scripts\python server_editor.py --inbrowser
-
-if %errorlevel% neq 0 ( pause & popd & exit /b %errorlevel% )
-
-popd
+chcp 65001 > NUL
+@echo off
+
+pushd %~dp0
+echo Running server_editor.py --inbrowser
+venv\Scripts\python server_editor.py --inbrowser
+
+if %errorlevel% neq 0 ( pause & popd & exit /b %errorlevel% )
+
+popd
 pause
Inference.bat
ADDED
@@ -0,0 +1,11 @@
+chcp 65001 > NUL
+@echo off
+
+pushd %~dp0
+echo Running gradio_tabs/inference.py...
+venv\Scripts\python -m gradio_tabs.inference
+
+if %errorlevel% neq 0 ( pause & popd & exit /b %errorlevel% )
+
+popd
+pause
Initialize.bat
ADDED
@@ -0,0 +1,11 @@
+chcp 65001 > NUL
+@echo off
+
+pushd %~dp0
+echo Running initialize.py...
+venv\Scripts\python initialize.py
+
+if %errorlevel% neq 0 ( pause & popd & exit /b %errorlevel% )
+
+popd
+pause
Merge.bat
ADDED
@@ -0,0 +1,11 @@
+chcp 65001 > NUL
+@echo off
+
+pushd %~dp0
+echo Running gradio_tabs/merge.py...
+venv\Scripts\python -m gradio_tabs.merge
+
+if %errorlevel% neq 0 ( pause & popd & exit /b %errorlevel% )
+
+popd
+pause
README.md
CHANGED
@@ -2,10 +2,12 @@
 title: Style-Bert-VITS2
 app_file: app.py
 sdk: gradio
-sdk_version:
+sdk_version: 5.16.0
 ---
 # Style-Bert-VITS2

+**利用の際は必ず[お願いとデフォルトモデルの利用規約](/docs/TERMS_OF_USE.md)をお読みください。**
+
 Bert-VITS2 with more controllable voice styles.

 https://github.com/litagin02/Style-Bert-VITS2/assets/139731664/e853f9a2-db4a-4202-a1dd-56ded3c562a0
@@ -13,13 +15,16 @@ https://github.com/litagin02/Style-Bert-VITS2/assets/139731664/e853f9a2-db4a-420
 You can install via `pip install style-bert-vits2` (inference only), see [library.ipynb](/library.ipynb) for example usage.

 - **解説チュートリアル動画** [YouTube](https://youtu.be/aTUSzgDl1iY) [ニコニコ動画](https://www.nicovideo.jp/watch/sm43391524)
-- [English README](docs/README_en.md)
 - [](http://colab.research.google.com/github/litagin02/Style-Bert-VITS2/blob/master/colab.ipynb)
+- [**よくある質問** (FAQ)](/docs/FAQ.md)
 - [🤗 オンラインデモはこちらから](https://huggingface.co/spaces/litagin/Style-Bert-VITS2-Editor-Demo)
 - [Zennの解説記事](https://zenn.dev/litagin/articles/034819a5256ff4)

 - [**リリースページ**](https://github.com/litagin02/Style-Bert-VITS2/releases/)、[更新履歴](/docs/CHANGELOG.md)
-
+  - 2024-09-09: Ver 2.6.1: Google colabでうまく学習できない等のバグ修正のみ
+  - 2024-06-16: Ver 2.6.0 (モデルの差分マージ・加重マージ・ヌルモデルマージの追加、使い道については[この記事](https://zenn.dev/litagin/articles/1297b1dc7bdc79)参照)
+  - 2024-06-14: Ver 2.5.1 (利用規約をお願いへ変更したのみ)
+  - 2024-06-02: Ver 2.5.0 (**[利用規約](/docs/TERMS_OF_USE.md)の追加**、フォルダ分けからのスタイル生成、小春音アミ・あみたろモデルの追加、インストールの高速化等)
 - 2024-03-16: ver 2.4.1 (**batファイルによるインストール方法の変更**)
 - 2024-03-15: ver 2.4.0 (大規模リファクタリングや種々の改良、ライブラリ化)
 - 2024-02-26: ver 2.3 (辞書機能とエディター機能)
@@ -38,13 +43,15 @@ This repository is based on [Bert-VITS2](https://github.com/fishaudio/Bert-VITS2
 - 入力されたテキストの内容をもとに感情豊かな音声を生成する[Bert-VITS2](https://github.com/fishaudio/Bert-VITS2)のv2.1とJapanese-Extraを元に、感情や発話スタイルを強弱込みで自由に制御できるようにしたものです。
 - GitやPythonがない人でも(Windowsユーザーなら)簡単にインストールでき、学習もできます (多くを[EasyBertVits2](https://github.com/Zuntan03/EasyBertVits2/)からお借りしました)。またGoogle Colabでの学習もサポートしています: [](http://colab.research.google.com/github/litagin02/Style-Bert-VITS2/blob/master/colab.ipynb)
 - 音声合成のみに使う場合は、グラボがなくてもCPUで動作します。
+- 音声合成のみに使う場合、Pythonライブラリとして`pip install style-bert-vits2`でインストールできます。例は[library.ipynb](/library.ipynb)を参照してください。
 - 他との連携に使えるAPIサーバーも同梱しています ([@darai0512](https://github.com/darai0512) 様によるPRです、ありがとうございます)。
 - 元々「楽しそうな文章は楽しそうに、悲しそうな文章は悲しそうに」読むのがBert-VITS2の強みですので、スタイル指定がデフォルトでも感情豊かな音声を生成することができます。


 ## 使い方

-CLIでの使い方は[こちら](/docs/CLI.md)を参照してください。
+- CLIでの使い方は[こちら](/docs/CLI.md)を参照してください。
+- [よくある質問](/docs/FAQ.md)も参照してください。

 ### 動作環境

@@ -58,7 +65,7 @@ Pythonライブラリとしてのpipでのインストールや使用例は[libr

 Windowsを前提としています。

-1. [このzipファイル](https://github.com/litagin02/Style-Bert-VITS2/releases/download/2.
+1. [このzipファイル](https://github.com/litagin02/Style-Bert-VITS2/releases/download/2.6.0/sbv2.zip)を**パスに日本語や空白が含まれない場所に**ダウンロードして展開します。
 - グラボがある方は、`Install-Style-Bert-VITS2.bat`をダブルクリックします。
 - グラボがない方は、`Install-Style-Bert-VITS2-CPU.bat`をダブルクリックします。CPU版では学習はできませんが、音声合成とマージは可能です。
 2. 待つと自動で必要な環境がインストールされます。
@@ -70,13 +77,17 @@ Windowsを前提としています。

 #### GitやPython使える人

+Pythonの仮想環境・パッケージ管理ツールである[uv](https://github.com/astral-sh/uv)がpipより高速なので、それを使ってインストールすることをお勧めします。
+(使いたくない場合は通常のpipでも大丈夫です。)
+
 ```bash
+powershell -c "irm https://astral.sh/uv/install.ps1 | iex"
 git clone https://github.com/litagin02/Style-Bert-VITS2.git
 cd Style-Bert-VITS2
-
+uv venv venv
 venv\Scripts\activate
-pip install torch
-pip install -r requirements.txt
+uv pip install "torch<2.4" "torchaudio<2.4" --index-url https://download.pytorch.org/whl/cu118
+uv pip install -r requirements.txt
 python initialize.py # 必要なモデルとデフォルトTTSモデルをダウンロード
 ```
 最後を忘れずに。
@@ -88,7 +99,7 @@ python initialize.py # 必要なモデルとデフォルトTTSモデルをダ

 エディター部分は[別リポジトリ](https://github.com/litagin02/Style-Bert-VITS2-Editor)に分かれています。

-バージョン2.2以前での音声合成WebUIは、`App.bat`をダブルクリックか、`python app.py`するとWebUI
+バージョン2.2以前での音声合成WebUIは、`App.bat`をダブルクリックか、`python app.py`するとWebUIが起動します。または`Inference.bat`でも音声合成単独タブが開きます。

 音声合成に必要なモデルファイルたちの構造は以下の通りです(手動で配置する必要はありません)。
 ```
@@ -119,23 +130,19 @@ model_assets

 #### データセット作り

-- `App.bat`をダブルクリックか`python app.py
+- `App.bat`をダブルクリックか`python app.py`したところの「データセット作成」タブから、音声ファイルを適切な長さにスライスし、その後に文字の書き起こしを自動で行えます。または`Dataset.bat`をダブルクリックでもその単独タブが開きます。
 - 指示に従った後、下の「学習」タブでそのまま学習を行うことができます。

-注意: データセットの手動修正やノイズ除去等、細かい修正を行いたい場合は[Aivis](https://github.com/tsukumijima/Aivis)や、そのデータセット部分のWindows対応版 [Aivis Dataset](https://github.com/litagin02/Aivis-Dataset) を使うといいかもしれません。ですがファイル数が多い場合などは、このツールで簡易的に切り出してデータセットを作るだけでも十分という気もしています。
-
-データセットがどのようなものがいいかは各自試行錯誤中してください。
-
 #### 学習WebUI

-- `App.bat`をダブルクリックか`python app.py`して開くWebUI
+- `App.bat`をダブルクリックか`python app.py`して開くWebUIの「学習」タブから指示に従ってください。または`Train.bat`をダブルクリックでもその単独タブが開きます。

 ### スタイルの生成

--
--
+- デフォルトでは、デフォルトスタイル「Neutral」の他、学習フォルダのフォルダ分けに応じたスタイルが生成されます。
+- それ以外の方法で手動でスタイルを作成したい人向けです。
+- `App.bat`をダブルクリックか`python app.py`して開くWebUIの「スタイル作成」タブから、音声ファイルを使ってスタイルを生成できます。または`StyleVectors.bat`をダブルクリックでもその単独タブが開きます。
 - 学習とは独立しているので、学習中でもできるし、学習が終わっても何度もやりなおせます(前処理は終わらせている必要があります)。
-- スタイルについての仕様の詳細は[clustering.ipynb](clustering.ipynb)を参照してください。

 ### API Server

@@ -151,8 +158,8 @@ API仕様は起動後に`/docs`にて確認ください。

 ### マージ

-2つのモデルを、「声質」「声の高さ」「感情表現」「テンポ」の4
-`App.bat
+2つのモデルを、「声質」「声の高さ」「感情表現」「テンポ」の4点で混ぜ合わせて、新しいモデルを作ったり、また「あるモデルに、別の2つのモデルの差分を足す」等の操作ができます。
+`App.bat`をダブルクリックか`python app.py`して開くWebUIの「マージ」タブから、2つのモデルを選択してマージすることができます。または`Merge.bat`をダブルクリックでもその単独タブが開きます。

 ### 自然性評価

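The README above points readers to `pip install style-bert-vits2` for inference-only use, with library.ipynb as the reference. As a rough, hedged illustration only (this code is not part of the commit; the class and method names reflect my reading of the library and the asset file names under `model_assets/` are placeholders, so check library.ipynb for the authoritative usage):

```python
# Minimal inference sketch, assuming the pip package exposes TTSModel and
# bert_models as used in library.ipynb; all paths below are placeholders.
from style_bert_vits2.constants import Languages
from style_bert_vits2.nlp import bert_models
from style_bert_vits2.tts_model import TTSModel

# Load the Japanese BERT used for prosody features (model id is an assumption).
bert_models.load_model(Languages.JP, "ku-nlp/deberta-v2-large-japanese-char-wwm")
bert_models.load_tokenizer(Languages.JP, "ku-nlp/deberta-v2-large-japanese-char-wwm")

# A trained model is expected under model_assets/{model_name}/.
model = TTSModel(
    model_path="model_assets/your_model/your_model.safetensors",
    config_path="model_assets/your_model/config.json",
    style_vec_path="model_assets/your_model/style_vectors.npy",
    device="cpu",
)
sr, audio = model.infer(text="こんにちは、元気ですか?")
```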
Server.bat
CHANGED
@@ -1,11 +1,11 @@
-chcp 65001 > NUL
-@echo off
-
-pushd %~dp0
-echo Running server_fastapi.py
-venv\Scripts\python server_fastapi.py
-
-if %errorlevel% neq 0 ( pause & popd & exit /b %errorlevel% )
-
-popd
+chcp 65001 > NUL
+@echo off
+
+pushd %~dp0
+echo Running server_fastapi.py
+venv\Scripts\python server_fastapi.py
+
+if %errorlevel% neq 0 ( pause & popd & exit /b %errorlevel% )
+
+popd
 pause
StyleVectors.bat
ADDED
@@ -0,0 +1,11 @@
+chcp 65001 > NUL
+@echo off
+
+pushd %~dp0
+echo Running gradio_tabs/style_vectors.py...
+venv\Scripts\python -m gradio_tabs.style_vectors
+
+if %errorlevel% neq 0 ( pause & popd & exit /b %errorlevel% )
+
+popd
+pause
Train.bat
ADDED
@@ -0,0 +1,11 @@
+chcp 65001 > NUL
+@echo off
+
+pushd %~dp0
+echo Running gradio_tabs/train.py...
+venv\Scripts\python -m gradio_tabs.train
+
+if %errorlevel% neq 0 ( pause & popd & exit /b %errorlevel% )
+
+popd
+pause
app.py
CHANGED
@@ -3,8 +3,8 @@ from pathlib import Path

 import gradio as gr
 import torch
-import yaml

+from config import get_path_config
 from gradio_tabs.dataset import create_dataset_app
 from gradio_tabs.inference import create_inference_app
 from gradio_tabs.merge import create_merge_app
@@ -22,11 +22,6 @@ pyopenjtalk_worker.initialize_worker()
 # dict_data/ 以下の辞書データを pyopenjtalk に適用
 update_dict()

-# Get path settings
-with Path("configs/paths.yml").open("r", encoding="utf-8") as f:
-    path_config: dict[str, str] = yaml.safe_load(f.read())
-# dataset_root = path_config["dataset_root"]
-assets_root = path_config["assets_root"]

 parser = argparse.ArgumentParser()
 parser.add_argument("--device", type=str, default="cuda")
@@ -34,13 +29,18 @@ parser.add_argument("--host", type=str, default="127.0.0.1")
 parser.add_argument("--port", type=int, default=None)
 parser.add_argument("--no_autolaunch", action="store_true")
 parser.add_argument("--share", action="store_true")
+# parser.add_argument("--skip_default_models", action="store_true")

 args = parser.parse_args()
 device = args.device
 if device == "cuda" and not torch.cuda.is_available():
     device = "cpu"

-
+# if not args.skip_default_models:
+#     download_default_models()
+
+path_config = get_path_config()
+model_holder = TTSModelHolder(Path(path_config.assets_root), device)

 with gr.Blocks(theme=GRADIO_THEME) as app:
     gr.Markdown(f"# Style-Bert-VITS2 WebUI (version {VERSION})")
@@ -56,7 +56,6 @@ with gr.Blocks(theme=GRADIO_THEME) as app:
     with gr.Tab("マージ"):
         create_merge_app(model_holder=model_holder)

-
 app.launch(
     server_name=args.host,
     server_port=args.port,
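For orientation (this is not code from the commit): app.py now obtains its asset root via `get_path_config()` from config.py instead of parsing `configs/paths.yml` by hand. A minimal sketch of what that helper plausibly does, assuming it simply maps the `dataset_root`/`assets_root` keys of `configs/paths.yml` onto the new `PathConfig` class shown later in this diff (the real implementation may differ, e.g. by falling back to the newly added `configs/default_paths.yml`):

```python
# Hypothetical sketch of the new path handling; names other than PathConfig,
# dataset_root and assets_root are assumptions, not the repo's actual code.
from pathlib import Path
from typing import Any

import yaml


class PathConfig:
    def __init__(self, dataset_root: str, assets_root: str):
        self.dataset_root = Path(dataset_root)
        self.assets_root = Path(assets_root)


def get_path_config(path_yml: str = "configs/paths.yml") -> PathConfig:
    # configs/paths.yml is written, for example, by the Colab notebook:
    #   yaml.dump({"dataset_root": ..., "assets_root": ...}, f)
    with open(path_yml, encoding="utf-8") as f:
        data: dict[str, Any] = yaml.safe_load(f)
    return PathConfig(**data)
```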
bert_gen.py
CHANGED
@@ -5,21 +5,18 @@ import torch
 import torch.multiprocessing as mp
 from tqdm import tqdm

-from config import
+from config import get_config
 from style_bert_vits2.constants import Languages
 from style_bert_vits2.logging import logger
 from style_bert_vits2.models import commons
 from style_bert_vits2.models.hyper_parameters import HyperParameters
-from style_bert_vits2.nlp import (
-    bert_models,
-    cleaned_text_to_sequence,
-    extract_bert_feature,
-)
+from style_bert_vits2.nlp import cleaned_text_to_sequence, extract_bert_feature
 from style_bert_vits2.nlp.japanese import pyopenjtalk_worker
 from style_bert_vits2.nlp.japanese.user_dict import update_dict
 from style_bert_vits2.utils.stdout_wrapper import SAFE_STDOUT


+config = get_config()
 # このプロセスからはワーカーを起動して辞書を使いたいので、ここで初期化
 pyopenjtalk_worker.initialize_worker()

@@ -61,7 +58,7 @@ def process_line(x: tuple[str, bool]):
         bert = torch.load(bert_path)
         assert bert.shape[-1] == len(phone)
     except Exception:
-        bert = extract_bert_feature(text, word2ph, language_str, device)
+        bert = extract_bert_feature(text, word2ph, Languages(language_str), device)
         assert bert.shape[-1] == len(phone)
         torch.save(bert, bert_path)

@@ -77,10 +74,10 @@ if __name__ == "__main__":
    config_path = args.config
    hps = HyperParameters.load_from_json(config_path)
    lines: list[str] = []
-    with open(hps.data.training_files,
+    with open(hps.data.training_files, encoding="utf-8") as f:
        lines.extend(f.readlines())

-    with open(hps.data.validation_files,
+    with open(hps.data.validation_files, encoding="utf-8") as f:
        lines.extend(f.readlines())
    add_blank = [hps.data.add_blank] * len(lines)

colab.ipynb
CHANGED
@@ -1,384 +1,455 @@
+# Style-Bert-VITS2 (ver 2.6.1) のGoogle Colabでの学習
+
+Google Colab上でStyle-Bert-VITS2の学習を行うことができます。
+このnotebookでは、通常使用ではあなたのGoogle Driveにフォルダ`Style-Bert-VITS2`を作り、その内部での作業を行います。
+学習を途中から再開したいときは、0と1を行い、3の前処理は飛ばして、4から始めてください。
+
+## 0. 環境構築
+import os
+os.environ["PATH"] += ":/root/.cargo/bin"
+!curl -LsSf https://astral.sh/uv/install.sh | sh
+!git clone https://github.com/litagin02/Style-Bert-VITS2.git
+%cd Style-Bert-VITS2/
+!uv pip install --system -r requirements-colab.txt
+!python initialize.py --skip_default_models
+
+# Google driveを使う方はこちらを実行してください。
+from google.colab import drive
+drive.mount("/content/drive")
+
+## 1. 初期設定
+# 学習に必要なファイルや途中経過が保存されるディレクトリ
+dataset_root = "/content/drive/MyDrive/Style-Bert-VITS2/Data"
+# 学習結果(音声合成に必要なファイルたち)が保存されるディレクトリ
+assets_root = "/content/drive/MyDrive/Style-Bert-VITS2/model_assets"
+import yaml
+with open("configs/paths.yml", "w", encoding="utf-8") as f:
+    yaml.dump({"dataset_root": dataset_root, "assets_root": assets_root}, f)
+
+## 2. 学習に使うデータ準備
+
+### 2.1 音声ファイルからのデータセットの作成(ある人はスキップ可)
+Google drive上の`Style-Bert-VITS2/inputs/`フォルダに音声ファイル(wavやmp3等の通常の音声ファイル形式、1ファイルでも複数ファイルでも可)を置いて、下を実行すると、データセットが作られ、自動的に正しい場所へ配置されます。
+**2024-06-02のVer 2.5以降**、`inputs/`フォルダにサブフォルダを2個以上作ってそこへ音声ファイルをスタイルに応じて振り分けて置くと、学習の際にサブディレクトリに応じたスタイルが自動的に作成されます。
+input_dir = "/content/drive/MyDrive/Style-Bert-VITS2/inputs"
+model_name = "your_model_name"
+initial_prompt = "こんにちは。元気、ですかー?ふふっ、私は……ちゃんと元気だよ!"
+!python slice.py -i {input_dir} --model_name {model_name}
+!python transcribe.py --model_name {model_name} --initial_prompt {initial_prompt} --use_hf_whisper
+
+### 2.2 音声ファイルと書き起こしデータがすでにある場合
+import os
+os.makedirs(dataset_root, exist_ok=True)
+音声データと書き起こしテキストを次のように配置します。
+├── Data/
+│   ├── {モデルの名前}
+│   │   ├── esd.list
+│   │   ├── raw/
+│   │   │   ├── foo.wav
+│   │   │   ├── bar.mp3
+│   │   │   ├── style1/
+│   │   │   │   ├── baz.wav
+│   │   │   │   ├── qux.wav
+│   │   │   ├── style2/
+│   │   │   │   ├── corge.wav
+│   │   │   │   ├── grault.wav
+`style1/`と`style2/`フォルダの内部に入っている音声ファイルたちから、自動的にデフォルトスタイルに加えて`style1`と`style2`というスタイルが作成されます。特にスタイルを作る必要がない場合は、`raw/`フォルダ直下に全てを配置してください。
+`Data/{モデルの名前}/esd.list`には、以下のフォーマットで各音声ファイルの情報を記述してください(パスは`raw/`からの相対パス、拡張子がwavでない場合でも`wav`と書いてください)。
+path/to/audio.wav|{話者名}|{言語ID、ZHかJPかEN}|{書き起こしテキスト}
+例:
+foo.wav|hanako|JP|こんにちは、元気ですか?
+style1/baz.wav|hanako|JP|今日はいい天気ですね。
+english_teacher.wav|Mary|EN|How are you? I'm fine, thank you, and you?
+
+## 3. 学習の前処理
+model_name = "your_model_name"
+# JP-Extra (日本語特化版)を使うかどうか。日本語の能力が向上する代わりに英語と中国語は使えなくなります。
+use_jp_extra = True
+# 学習のバッチサイズ。VRAMのはみ出具合に応じて調整してください。
+batch_size = 4
+# 学習のエポック数(データセットを合計何周するか)。
+epochs = 100
+# 保存頻度。何ステップごとにモデルを保存するか。
+save_every_steps = 1000
+# 音声ファイルの音量を正規化するかどうか
+normalize = False
+# 音声ファイルの開始・終了にある無音区間を削除するかどうか
+trim = False
+# 読みのエラーが出た場合にどうするか。"raise"なら中断、"skip"なら読めない行は学習に使わない、"use"なら無理やり使う
+yomi_error = "skip"
+
+from gradio_tabs.train import preprocess_all
+from style_bert_vits2.nlp.japanese import pyopenjtalk_worker
+pyopenjtalk_worker.initialize_worker()
+preprocess_all(
+    model_name=model_name,
+    batch_size=batch_size,
+    epochs=epochs,
+    save_every_steps=save_every_steps,
+    num_processes=2,
+    normalize=normalize,
+    trim=trim,
+    freeze_EN_bert=False,
+    freeze_JP_bert=False,
+    freeze_ZH_bert=False,
+    freeze_style=False,
+    freeze_decoder=False,
+    use_jp_extra=use_jp_extra,
+    val_per_lang=0,
+    log_interval=200,
+    yomi_error=yomi_error,
+)
+
+## 4. 学習
+学習の結果は、`save_every_steps`の間隔で、Google Driveの中の`Style-Bert-VITS2/Data/{モデルの名前}/model_assets/`フォルダに保存されます。
+# 上でつけたモデル名を入力。学習を途中からする場合はきちんとモデルが保存されているフォルダ名を入力。
+model_name = "your_model_name"
+import yaml
+from gradio_tabs.train import get_path
+paths = get_path(model_name)
+dataset_path = str(paths.dataset_path)
+config_path = str(paths.config_path)
+with open("default_config.yml", "r", encoding="utf-8") as f:
+    yml_data = yaml.safe_load(f)
+yml_data["model_name"] = model_name
+with open("config.yml", "w", encoding="utf-8") as f:
+    yaml.dump(yml_data, f, allow_unicode=True)
+
+# 日本語特化版を「使う」場合
+!python train_ms_jp_extra.py --config {config_path} --model {dataset_path} --assets_root {assets_root}
+# 日本語特化版を「使わない」場合
+!python train_ms.py --config {config_path} --model {dataset_path} --assets_root {assets_root}
+
+# 学習結果を試す・マージ・スタイル分けはこちらから
+!python app.py --share
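The notebook above documents the `esd.list` transcript format: one line per clip, `path|speaker|language|text`, with the path given relative to `raw/`. A tiny illustrative parser (not part of the repository) that follows that description:

```python
# Minimal sketch for reading one esd.list line as described in the notebook;
# the function name and return shape are mine, not the project's API.
def parse_esd_line(line: str) -> dict[str, str]:
    wav_path, speaker, language, text = line.rstrip("\n").split("|", 3)
    return {"wav": wav_path, "speaker": speaker, "language": language, "text": text}


if __name__ == "__main__":
    example = "style1/baz.wav|hanako|JP|今日はいい天気ですね。"
    print(parse_esd_line(example))
```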
config.py
CHANGED
@@ -2,9 +2,9 @@
 @Desc: 全局配置文件读取
 """

-import os
 import shutil
-from
+from pathlib import Path
+from typing import Any

 import torch
 import yaml
@@ -12,6 +12,12 @@ import yaml
 from style_bert_vits2.logging import logger


+class PathConfig:
+    def __init__(self, dataset_root: str, assets_root: str):
+        self.dataset_root = Path(dataset_root)
+        self.assets_root = Path(assets_root)
+
+
 # If not cuda available, set possible devices to cpu
 cuda_available = torch.cuda.is_available()

@@ -20,17 +26,17 @@
     """重采样配置"""

     def __init__(self, in_dir: str, out_dir: str, sampling_rate: int = 44100):
-        self.sampling_rate
-        self.in_dir
-        self.out_dir
+        self.sampling_rate = sampling_rate  # 目标采样率
+        self.in_dir = Path(in_dir)  # 待处理音频目录路径
+        self.out_dir = Path(out_dir)  # 重采样输出路径

     @classmethod
-    def from_dict(cls, dataset_path:
+    def from_dict(cls, dataset_path: Path, data: dict[str, Any]):
         """从字典中生成实例"""

         # 不检查路径是否有效,此逻辑在resample.py中处理
-        data["in_dir"] =
-        data["out_dir"] =
+        data["in_dir"] = dataset_path / data["in_dir"]
+        data["out_dir"] = dataset_path / data["out_dir"]

         return cls(**data)

@@ -49,39 +55,32 @@
         max_val_total: int = 10000,
         clean: bool = True,
     ):
-        self.transcription_path
-
-
-
-
-
-
-
-        )
-        self.
-
-
-        self.
-        self.val_per_lang: int = val_per_lang  # 每个speaker的验证集条数
-        self.max_val_total: int = (
-            max_val_total  # 验证集最大条数,多于的会被截断并放到训练集中
-        )
-        self.clean: bool = clean  # 是否进行数据清洗
+        self.transcription_path = Path(transcription_path)
+        self.train_path = Path(train_path)
+        if cleaned_path == "" or cleaned_path is None:
+            self.cleaned_path = self.transcription_path.with_name(
+                self.transcription_path.name + ".cleaned"
+            )
+        else:
+            self.cleaned_path = Path(cleaned_path)
+        self.val_path = Path(val_path)
+        self.config_path = Path(config_path)
+        self.val_per_lang = val_per_lang
+        self.max_val_total = max_val_total
+        self.clean = clean

     @classmethod
-    def from_dict(cls, dataset_path:
+    def from_dict(cls, dataset_path: Path, data: dict[str, Any]):
         """从字典中生成实例"""

-        data["transcription_path"] =
-            dataset_path, data["transcription_path"]
-        )
+        data["transcription_path"] = dataset_path / data["transcription_path"]
         if data["cleaned_path"] == "" or data["cleaned_path"] is None:
-            data["cleaned_path"] =
         else:
-            data["cleaned_path"] =
-        data["train_path"] =
-        data["val_path"] =
-        data["config_path"] =

         return cls(**data)

@@ -96,7 +95,7 @@ class Bert_gen_config:
         device: str = "cuda",
         use_multi_device: bool = False,
     ):
-        self.config_path = config_path
         self.num_processes = num_processes
         if not cuda_available:
             device = "cpu"
@@ -104,8 +103,8 @@
         self.use_multi_device = use_multi_device

     @classmethod
-    def from_dict(cls, dataset_path:
-        data["config_path"] =

         return cls(**data)

@@ -119,15 +118,15 @@ class Style_gen_config:
         num_processes: int = 4,
         device: str = "cuda",
     ):
-        self.config_path = config_path
         self.num_processes = num_processes
         if not cuda_available:
             device = "cpu"
         self.device = device

     @classmethod
-    def from_dict(cls, dataset_path:
-        data["config_path"] =

         return cls(**data)

@@ -138,7 +137,7 @@ class Train_ms_config:
     def __init__(
         self,
         config_path: str,
-        env:
         # base: Dict[str, any],
         model_dir: str,
         num_workers: int,
@@ -147,16 +146,18 @@
     ):
         self.env = env  # 需要加载的环境变量
         # self.base = base  # 底模配置
-        self.model_dir =
-
         self.num_workers = num_workers  # worker数量
         self.spec_cache = spec_cache  # 是否启用spec缓存
         self.keep_ckpts = keep_ckpts  # ckpt数量

     @classmethod
-    def from_dict(cls, dataset_path:
         # data["model"] = os.path.join(dataset_path, data["model"])
-        data["config_path"] =

         return cls(**data)

@@ -176,20 +177,18 @@ class Webui_config:
     ):
         if not cuda_available:
             device = "cpu"
-        self.device
-        self.model
-        self.config_path
-        self.port: int = port
-        self.share: bool = share
-        self.debug: bool = debug
-        self.language_identification_library: str =
-            language_identification_library  # 语种识别库
-        )

     @classmethod
-    def from_dict(cls, dataset_path:
-        data["config_path"] =
-        data["model"] =
         return cls(**data)


@@ -200,7 +199,7 @@ class Server_config:
         device: str = "cuda",
         limit: int = 100,
         language: str = "JP",
-        origins:
     ):
         self.port: int = port
         if not cuda_available:
@@ -208,10 +207,10 @@
         self.device: str = device
         self.language: str = language
         self.limit: int = limit
-        self.origins:

     @classmethod
-    def from_dict(cls, data:
         return cls(**data)


@@ -223,32 +222,33 @@ class Translate_config:
         self.secret_key = secret_key

     @classmethod
-    def from_dict(cls, data:
         return cls(**data)


 class Config:
-    def __init__(self, config_path: str, path_config:
-        if not
             shutil.copy(src="default_config.yml", dst=config_path)
             logger.info(
                 f"A configuration file {config_path} has been generated based on the default configuration file default_config.yml."
             )
             logger.info(
-                "
             )
             # sys.exit(0)
-        with open(config_path,
-            yaml_config:
         model_name: str = yaml_config["model_name"]
         self.model_name: str = model_name
         if "dataset_path" in yaml_config:
-            dataset_path = yaml_config["dataset_path"]
         else:
-            dataset_path =
-        self.dataset_path
-        self.
-        self.
         self.resample_config: Resample_config = Resample_config.from_dict(
             dataset_path, yaml_config["resample"]
         )
@@ -277,16 +277,31 @@
 # )


-
-
-
-
-


-
-    config = Config("config.yml", path_config)
-except (TypeError, KeyError):
-    logger.warning("Old config.yml found. Replace it with default_config.yml.")
-    shutil.copy(src="default_config.yml", dst="config.yml")
-    config = Config("config.yml", path_config)
|
|
|
|
|
77 |
if data["cleaned_path"] == "" or data["cleaned_path"] is None:
|
78 |
+
data["cleaned_path"] = ""
|
79 |
else:
|
80 |
+
data["cleaned_path"] = dataset_path / data["cleaned_path"]
|
81 |
+
data["train_path"] = dataset_path / data["train_path"]
|
82 |
+
data["val_path"] = dataset_path / data["val_path"]
|
83 |
+
data["config_path"] = dataset_path / data["config_path"]
|
84 |
|
85 |
return cls(**data)
|
86 |
|
|
|
95 |
device: str = "cuda",
|
96 |
use_multi_device: bool = False,
|
97 |
):
|
98 |
+
self.config_path = Path(config_path)
|
99 |
self.num_processes = num_processes
|
100 |
if not cuda_available:
|
101 |
device = "cpu"
|
|
|
103 |
self.use_multi_device = use_multi_device
|
104 |
|
105 |
@classmethod
|
106 |
+
def from_dict(cls, dataset_path: Path, data: dict[str, Any]):
|
107 |
+
data["config_path"] = dataset_path / data["config_path"]
|
108 |
|
109 |
return cls(**data)
|
110 |
|
|
|
118 |
num_processes: int = 4,
|
119 |
device: str = "cuda",
|
120 |
):
|
121 |
+
self.config_path = Path(config_path)
|
122 |
self.num_processes = num_processes
|
123 |
if not cuda_available:
|
124 |
device = "cpu"
|
125 |
self.device = device
|
126 |
|
127 |
@classmethod
|
128 |
+
def from_dict(cls, dataset_path: Path, data: dict[str, Any]):
|
129 |
+
data["config_path"] = dataset_path / data["config_path"]
|
130 |
|
131 |
return cls(**data)
|
132 |
|
|
|
137 |
def __init__(
|
138 |
self,
|
139 |
config_path: str,
|
140 |
+
env: dict[str, Any],
|
141 |
# base: Dict[str, any],
|
142 |
model_dir: str,
|
143 |
num_workers: int,
|
|
|
146 |
):
|
147 |
self.env = env # 需要加载的环境变量
|
148 |
# self.base = base # 底模配置
|
149 |
+
self.model_dir = Path(
|
150 |
+
model_dir
|
151 |
+
) # 训练模型存储目录,该路径为相对于dataset_path的路径,而非项目根目录
|
152 |
+
self.config_path = Path(config_path) # 配置文件路径
|
153 |
self.num_workers = num_workers # worker数量
|
154 |
self.spec_cache = spec_cache # 是否启用spec缓存
|
155 |
self.keep_ckpts = keep_ckpts # ckpt数量
|
156 |
|
157 |
@classmethod
|
158 |
+
def from_dict(cls, dataset_path: Path, data: dict[str, Any]):
|
159 |
# data["model"] = os.path.join(dataset_path, data["model"])
|
160 |
+
data["config_path"] = dataset_path / data["config_path"]
|
161 |
|
162 |
return cls(**data)
|
163 |
|
|
|
177 |
):
|
178 |
if not cuda_available:
|
179 |
device = "cpu"
|
180 |
+
self.device = device
|
181 |
+
self.model = Path(model)
|
182 |
+
self.config_path = Path(config_path)
|
183 |
+
self.port: int = port
|
184 |
+
self.share: bool = share
|
185 |
+
self.debug: bool = debug
|
186 |
+
self.language_identification_library: str = language_identification_library
|
|
|
|
|
187 |
|
188 |
@classmethod
|
189 |
+
def from_dict(cls, dataset_path: Path, data: dict[str, Any]):
|
190 |
+
data["config_path"] = dataset_path / data["config_path"]
|
191 |
+
data["model"] = dataset_path / data["model"]
|
192 |
return cls(**data)
|
193 |
|
194 |
|
|
|
199 |
device: str = "cuda",
|
200 |
limit: int = 100,
|
201 |
language: str = "JP",
|
202 |
+
origins: list[str] = ["*"],
|
203 |
):
|
204 |
self.port: int = port
|
205 |
if not cuda_available:
|
|
|
207 |
self.device: str = device
|
208 |
self.language: str = language
|
209 |
self.limit: int = limit
|
210 |
+
self.origins: list[str] = origins
|
211 |
|
212 |
@classmethod
|
213 |
+
def from_dict(cls, data: dict[str, Any]):
|
214 |
return cls(**data)
|
215 |
|
216 |
|
|
|
222 |
self.secret_key = secret_key
|
223 |
|
224 |
@classmethod
|
225 |
+
def from_dict(cls, data: dict[str, Any]):
|
226 |
return cls(**data)
|
227 |
|
228 |
|
229 |
class Config:
|
230 |
+
def __init__(self, config_path: str, path_config: PathConfig):
|
231 |
+
if not Path(config_path).exists():
|
232 |
shutil.copy(src="default_config.yml", dst=config_path)
|
233 |
logger.info(
|
234 |
f"A configuration file {config_path} has been generated based on the default configuration file default_config.yml."
|
235 |
)
|
236 |
logger.info(
|
237 |
+
"Please do not modify default_config.yml. Instead, modify config.yml."
|
238 |
)
|
239 |
# sys.exit(0)
|
240 |
+
with open(config_path, encoding="utf-8") as file:
|
241 |
+
yaml_config: dict[str, Any] = yaml.safe_load(file.read())
|
242 |
model_name: str = yaml_config["model_name"]
|
243 |
self.model_name: str = model_name
|
244 |
if "dataset_path" in yaml_config:
|
245 |
+
dataset_path = Path(yaml_config["dataset_path"])
|
246 |
else:
|
247 |
+
dataset_path = path_config.dataset_root / model_name
|
248 |
+
self.dataset_path = dataset_path
|
249 |
+
self.dataset_root = path_config.dataset_root
|
250 |
+
self.assets_root = path_config.assets_root
|
251 |
+
self.out_dir = self.assets_root / model_name
|
252 |
self.resample_config: Resample_config = Resample_config.from_dict(
|
253 |
dataset_path, yaml_config["resample"]
|
254 |
)
|
|
|
277 |
# )
|
278 |
|
279 |
|
280 |
+
# Load and initialize the configuration
|
281 |
+
|
282 |
+
|
283 |
+
def get_path_config() -> PathConfig:
|
284 |
+
path_config_path = Path("configs/paths.yml")
|
285 |
+
if not path_config_path.exists():
|
286 |
+
shutil.copy(src="configs/default_paths.yml", dst=path_config_path)
|
287 |
+
logger.info(
|
288 |
+
f"A configuration file {path_config_path} has been generated based on the default configuration file default_paths.yml."
|
289 |
+
)
|
290 |
+
logger.info(
|
291 |
+
"Please do not modify configs/default_paths.yml. Instead, modify configs/paths.yml."
|
292 |
+
)
|
293 |
+
with open(path_config_path, encoding="utf-8") as file:
|
294 |
+
path_config_dict: dict[str, str] = yaml.safe_load(file.read())
|
295 |
+
return PathConfig(**path_config_dict)
|
296 |
+
|
297 |
|
298 |
+
def get_config() -> Config:
|
299 |
+
path_config = get_path_config()
|
300 |
+
try:
|
301 |
+
config = Config("config.yml", path_config)
|
302 |
+
except (TypeError, KeyError):
|
303 |
+
logger.warning("Old config.yml found. Replace it with default_config.yml.")
|
304 |
+
shutil.copy(src="default_config.yml", dst="config.yml")
|
305 |
+
config = Config("config.yml", path_config)
|
306 |
|
307 |
+
return config
|
|
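The rewritten config.py above replaces the import-time global `config` with two accessors, get_path_config() and get_config(). A minimal usage sketch (not part of the diff; it assumes the project root, where default_config.yml and configs/default_paths.yml live, is the working directory):

```python
from config import get_config, get_path_config

# Reads configs/paths.yml, copying it from configs/default_paths.yml on first run.
path_config = get_path_config()
print(path_config.dataset_root)  # Path("Data") with the default settings
print(path_config.assets_root)   # Path("model_assets") with the default settings

# Reads config.yml (copied from default_config.yml if missing) and resolves
# the per-model paths from it.
config = get_config()
print(config.dataset_path)       # {dataset_root}/{model_name}
print(config.out_dir)            # {assets_root}/{model_name}
```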
|
configs/config.json
CHANGED
@@ -69,5 +69,5 @@
|
|
69 |
"use_spectral_norm": false,
|
70 |
"gin_channels": 256
|
71 |
},
|
72 |
-
"version": "2.
|
73 |
}
|
|
|
69 |
"use_spectral_norm": false,
|
70 |
"gin_channels": 256
|
71 |
},
|
72 |
+
"version": "2.6.1"
|
73 |
}
|
configs/config_jp_extra.json
CHANGED
@@ -76,5 +76,5 @@
|
|
76 |
"initial_channel": 64
|
77 |
}
|
78 |
},
|
79 |
-
"version": "2.
|
80 |
}
|
|
|
76 |
"initial_channel": 64
|
77 |
}
|
78 |
},
|
79 |
+
"version": "2.6.1-JP-Extra"
|
80 |
}
|
configs/default_paths.yml
ADDED
@@ -0,0 +1,8 @@
|
|
|
|
1 |
+
# Root directory of the training dataset.
|
2 |
+
# The training dataset of {model_name} should be placed in {dataset_root}/{model_name}.
|
3 |
+
dataset_root: Data
|
4 |
+
|
5 |
+
# Root directory of the model assets (for inference).
|
6 |
+
# In training, the model assets will be saved to {assets_root}/{model_name},
|
7 |
+
# and in inference, we load all the models from {assets_root}.
|
8 |
+
assets_root: model_assets
|
data_utils.py
CHANGED
@@ -7,7 +7,7 @@ import torch
|
|
7 |
import torch.utils.data
|
8 |
from tqdm import tqdm
|
9 |
|
10 |
-
from config import
|
11 |
from mel_processing import mel_spectrogram_torch, spectrogram_torch
|
12 |
from style_bert_vits2.logging import logger
|
13 |
from style_bert_vits2.models import commons
|
@@ -16,6 +16,7 @@ from style_bert_vits2.models.utils import load_filepaths_and_text, load_wav_to_t
|
|
16 |
from style_bert_vits2.nlp import cleaned_text_to_sequence
|
17 |
|
18 |
|
|
|
19 |
"""Multi speaker version"""
|
20 |
|
21 |
|
@@ -70,16 +71,16 @@ class TextAudioSpeakerLoader(torch.utils.data.Dataset):
|
|
70 |
self.audiopaths_sid_text, file=sys.stdout
|
71 |
):
|
72 |
audiopath = f"{_id}"
|
73 |
-
if self.min_text_len <= len(phones) and len(phones) <= self.max_text_len:
|
74 |
-
|
75 |
-
|
76 |
-
|
77 |
-
|
78 |
-
|
79 |
-
|
80 |
-
|
81 |
-
else:
|
82 |
-
|
83 |
logger.info(
|
84 |
"skipped: "
|
85 |
+ str(skipped)
|
@@ -120,9 +121,7 @@ class TextAudioSpeakerLoader(torch.utils.data.Dataset):
|
|
120 |
audio, sampling_rate = load_wav_to_torch(filename)
|
121 |
if sampling_rate != self.sampling_rate:
|
122 |
raise ValueError(
|
123 |
-
"{} {} SR doesn't match target {} SR"
|
124 |
-
filename, sampling_rate, self.sampling_rate
|
125 |
-
)
|
126 |
)
|
127 |
audio_norm = audio / self.max_wav_value
|
128 |
audio_norm = audio_norm.unsqueeze(0)
|
|
|
7 |
import torch.utils.data
|
8 |
from tqdm import tqdm
|
9 |
|
10 |
+
from config import get_config
|
11 |
from mel_processing import mel_spectrogram_torch, spectrogram_torch
|
12 |
from style_bert_vits2.logging import logger
|
13 |
from style_bert_vits2.models import commons
|
|
|
16 |
from style_bert_vits2.nlp import cleaned_text_to_sequence
|
17 |
|
18 |
|
19 |
+
config = get_config()
|
20 |
"""Multi speaker version"""
|
21 |
|
22 |
|
|
|
71 |
self.audiopaths_sid_text, file=sys.stdout
|
72 |
):
|
73 |
audiopath = f"{_id}"
|
74 |
+
# if self.min_text_len <= len(phones) and len(phones) <= self.max_text_len:
|
75 |
+
phones = phones.split(" ")
|
76 |
+
tone = [int(i) for i in tone.split(" ")]
|
77 |
+
word2ph = [int(i) for i in word2ph.split(" ")]
|
78 |
+
audiopaths_sid_text_new.append(
|
79 |
+
[audiopath, spk, language, text, phones, tone, word2ph]
|
80 |
+
)
|
81 |
+
lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))
|
82 |
+
# else:
|
83 |
+
# skipped += 1
|
84 |
logger.info(
|
85 |
"skipped: "
|
86 |
+ str(skipped)
|
|
|
121 |
audio, sampling_rate = load_wav_to_torch(filename)
|
122 |
if sampling_rate != self.sampling_rate:
|
123 |
raise ValueError(
|
124 |
+
f"{filename} {sampling_rate} SR doesn't match target {self.sampling_rate} SR"
|
|
|
|
|
125 |
)
|
126 |
audio_norm = audio / self.max_wav_value
|
127 |
audio_norm = audio_norm.unsqueeze(0)
|
default_style.py
CHANGED
Removed side of the diff (old implementation, summarized): the file imported `os` and defined two helpers whose signatures are cut off here. One rewrote an existing config.json down to a single default style (num_styles = 1, style2id = {DEFAULT_STYLE: 0}) and logged the result; the other collected every *.npy embedding under wav_dir, computed their mean (a (256,) vector from the (N, 256) stack) and saved only that mean vector, using os-based output paths.

Added side (the rewritten file, from line 1):
|
|
|
|
|
|
|
|
|
|
|
1 |
import json
|
|
|
2 |
from pathlib import Path
|
3 |
from typing import Union
|
4 |
|
|
|
8 |
from style_bert_vits2.logging import logger
|
9 |
|
10 |
|
11 |
+
def save_neutral_vector(
|
12 |
+
wav_dir: Union[Path, str],
|
13 |
+
output_dir: Union[Path, str],
|
14 |
+
config_path: Union[Path, str],
|
15 |
+
config_output_path: Union[Path, str],
|
16 |
+
):
|
17 |
+
wav_dir = Path(wav_dir)
|
18 |
+
output_dir = Path(output_dir)
|
19 |
+
embs = []
|
20 |
+
for file in wav_dir.rglob("*.npy"):
|
21 |
+
xvec = np.load(file)
|
22 |
+
embs.append(np.expand_dims(xvec, axis=0))
|
23 |
+
|
24 |
+
x = np.concatenate(embs, axis=0) # (N, 256)
|
25 |
+
mean = np.mean(x, axis=0) # (256,)
|
26 |
+
only_mean = np.stack([mean]) # (1, 256)
|
27 |
+
np.save(output_dir / "style_vectors.npy", only_mean)
|
28 |
+
logger.info(f"Saved mean style vector to {output_dir}")
|
29 |
+
|
30 |
+
with open(config_path, encoding="utf-8") as f:
|
31 |
json_dict = json.load(f)
|
32 |
json_dict["data"]["num_styles"] = 1
|
33 |
json_dict["data"]["style2id"] = {DEFAULT_STYLE: 0}
|
34 |
+
with open(config_output_path, "w", encoding="utf-8") as f:
|
35 |
json.dump(json_dict, f, indent=2, ensure_ascii=False)
|
36 |
+
logger.info(f"Saved style config to {config_output_path}")
|
37 |
|
38 |
|
39 |
+
def save_styles_by_dirs(
|
40 |
+
wav_dir: Union[Path, str],
|
41 |
+
output_dir: Union[Path, str],
|
42 |
+
config_path: Union[Path, str],
|
43 |
+
config_output_path: Union[Path, str],
|
44 |
+
):
|
45 |
wav_dir = Path(wav_dir)
|
46 |
+
output_dir = Path(output_dir)
|
47 |
+
output_dir.mkdir(parents=True, exist_ok=True)
|
48 |
+
config_path = Path(config_path)
|
49 |
+
config_output_path = Path(config_output_path)
|
50 |
+
|
51 |
+
subdirs = [d for d in wav_dir.iterdir() if d.is_dir()]
|
52 |
+
subdirs.sort()
|
53 |
+
if len(subdirs) in (0, 1):
|
54 |
+
logger.info(
|
55 |
+
f"At least 2 subdirectories are required for generating style vectors with respect to them, found {len(subdirs)}."
|
56 |
+
)
|
57 |
+
logger.info("Generating only neutral style vector instead.")
|
58 |
+
save_neutral_vector(wav_dir, output_dir, config_path, config_output_path)
|
59 |
+
return
|
60 |
+
|
61 |
+
# First get mean of all for Neutral
|
62 |
embs = []
|
63 |
for file in wav_dir.rglob("*.npy"):
|
64 |
xvec = np.load(file)
|
65 |
embs.append(np.expand_dims(xvec, axis=0))
|
|
|
66 |
x = np.concatenate(embs, axis=0) # (N, 256)
|
67 |
mean = np.mean(x, axis=0) # (256,)
|
68 |
+
style_vectors = [mean]
|
69 |
+
|
70 |
+
names = [DEFAULT_STYLE]
|
71 |
+
for style_dir in subdirs:
|
72 |
+
npy_files = list(style_dir.rglob("*.npy"))
|
73 |
+
if not npy_files:
|
74 |
+
continue
|
75 |
+
embs = []
|
76 |
+
for file in npy_files:
|
77 |
+
xvec = np.load(file)
|
78 |
+
embs.append(np.expand_dims(xvec, axis=0))
|
79 |
+
|
80 |
+
x = np.concatenate(embs, axis=0) # (N, 256)
|
81 |
+
mean = np.mean(x, axis=0) # (256,)
|
82 |
+
style_vectors.append(mean)
|
83 |
+
names.append(style_dir.name)
|
84 |
+
|
85 |
+
# Stack them to make (num_styles, 256)
|
86 |
+
style_vectors_npy = np.stack(style_vectors, axis=0)
|
87 |
+
np.save(output_dir / "style_vectors.npy", style_vectors_npy)
|
88 |
+
logger.info(f"Saved style vectors to {output_dir / 'style_vectors.npy'}")
|
89 |
+
|
90 |
+
# Save style2id config to json
|
91 |
+
style2id = {name: i for i, name in enumerate(names)}
|
92 |
+
with open(config_path, encoding="utf-8") as f:
|
93 |
+
json_dict = json.load(f)
|
94 |
+
json_dict["data"]["num_styles"] = len(names)
|
95 |
+
json_dict["data"]["style2id"] = style2id
|
96 |
+
with open(config_output_path, "w", encoding="utf-8") as f:
|
97 |
+
json.dump(json_dict, f, indent=2, ensure_ascii=False)
|
98 |
+
logger.info(f"Saved style config to {config_output_path}")
|
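A sketch of how the new save_styles_by_dirs() helper above is meant to be invoked (not part of the diff; the paths are hypothetical and follow the Data/{model_name} layout used elsewhere in this repository):

```python
from default_style import save_styles_by_dirs

# wav_dir is expected to contain one subdirectory per style, each holding the
# per-utterance *.npy style embeddings produced during preprocessing.
save_styles_by_dirs(
    wav_dir="Data/MyModel/wavs",                          # hypothetical paths
    output_dir="model_assets/MyModel",
    config_path="Data/MyModel/config.json",
    config_output_path="model_assets/MyModel/config.json",
)
# Writes style_vectors.npy (the Neutral mean first, then one mean vector per
# subfolder) and a config.json whose data.style2id maps each subfolder name to
# its index; with 0 or 1 subfolders it falls back to save_neutral_vector().
```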
docs/CHANGELOG.md
CHANGED
@@ -1,5 +1,76 @@
|
|
1 |
# Changelog
|
2 |
|
|
|
|
|
|
|
|
|
|
|
3 |
## v2.4.1 (2024-03-16)
|
4 |
|
5 |
**batファイルでのインストール・アップデート方法の変更**(それ以外の変更はありません)
|
|
|
1 |
# Changelog
|
2 |
|
3 |
+
## v2.6.1 (2024-09-09)
|
4 |
+
|
5 |
+
- Google colabで、torchのバージョン由来でエラーが発生する不具合の修正(たぶん)
|
6 |
+
- WebUIからのスタイル作成での、サブフォルダによるスタイル分けでエラーが発生していた点の修正
|
7 |
+
|
8 |
+
## v2.6.0 (2024-06-16)
|
9 |
+
|
10 |
+
### 新機能
|
11 |
+
モデルのマージ時に、今までの `new = (1 - weight) * A + weight * B` の他に、次を追加
|
12 |
+
|
13 |
+
- `new = A + weight * (B - C)`: 差分マージ
|
14 |
+
- `new = a * A + b * B + c * C`: 加重和マージ
|
15 |
+
- `new = A + weight * B`: ヌルモデルのマージ
|
16 |
+
|
17 |
+
差分マージは、例えばBを「Cと同じ話者だけど囁いているモデル」とすると、`B - C`が囁きベクトル的なものだと思えるので、それをAに足すことで、Aの話者が囁いているような音声を生成できるようになります。
|
18 |
+
|
19 |
+
また、加重和で`new = A - B`を作って、それをヌルモデルマージで別のモデルに足せば、実質差分マージを実現できます。また謎に`new = -A`や`new = 41 * A`等のモデルも作ることができます。
|
20 |
+
|
21 |
+
これらのマージの活用法については各自いろいろ考えて実験してみて、面白い使い方があればぜひ共有してください。
|
22 |
+
|
23 |
+
囁きについて実験的に作ったヌルモデルを[こちら](https://huggingface.co/litagin/sbv2_null_models)に置いています。これをヌルモデルマージで使うことで、任意のモデルを囁きモデルにある程度は変換できます。
|
24 |
+
|
25 |
+
### 改善
|
26 |
+
|
27 |
+
- スタイルベクトルのマージ部分のUIの改善
|
28 |
+
- WebUIの`App.bat`の起動が少し重いので、それぞれの機能を分割した`Dataset.bat`, `Inference.bat`, `Merge.bat`, `StyleVectors.bat`, `Train.bat`を追加 (今までの`App.bat`もこれまで通り使えます)
|
29 |
+
|
30 |
+
## v2.5.1 (2024-06-14)
|
31 |
+
|
32 |
+
ライセンスとのコンフリクトから、[利用規約](/docs/TERMS_OF_USE.md)を[開発陣からのお願いとデフォルトモデルの利用規約](/docs/TERMS_OF_USE.md)に変更しました。
|
33 |
+
|
34 |
+
## v2.5.0 (2024-06-02)
|
35 |
+
|
36 |
+
このバージョンから[利用規約](/docs/TERMS_OF_USE.md)が追加されました。ご利用の際は必ずお読みください。
|
37 |
+
|
38 |
+
### 新機能等
|
39 |
+
|
40 |
+
- デフォルトモデルに [あみたろの声素材工房](https://amitaro.net/) のあみたろ様が公開しているコーパスとライブ配信音声を利用して学習した[**小春音アミ**](https://huggingface.co/litagin/sbv2_koharune_ami)と[**あみたろ**](https://huggingface.co/litagin/sbv2_amitaro)モデルを追加(あみたろ様には事前に連絡して許諾を得ています)
|
41 |
+
- アプデの場合は`Initialize.bat`をダブルクリックすればモデルをダウンロードできます(手動でダウンロードして`model_assets`フォルダに入れることも可能)
|
42 |
+
- 学習時に音声データをスタイルごとにフォルダ分けしておくことで、そのフォルダごとのスタイルを学習時に自動的に作成するように
|
43 |
+
- `inputs`からスライスして使う場合は`inputs`直下に作りたいスタイルだけサブフォルダを作りそこに音声ファイルを配置
|
44 |
+
- `Data/モデル名/raw`から使う場合も`raw`直下に同様に配置
|
45 |
+
- サブフォルダの個数が0または1の場合は、今まで通りのNeutralスタイルのみが作成されます
|
46 |
+
- batファイルでのインストールの大幅な高速化(Pythonのライブラリインストールに[uv](https://github.com/astral-sh/uv)を使用)
|
47 |
+
- 学習時に「カスタムバッチサンプラーを無効化」オプションを追加。これにより、長い音声ファイルも学習に使われるようになりますが、使用VRAMがかなり増えたり学習が不安定になる可能性があります。
|
48 |
+
- [よくある質問](/docs/FAQ.md)を追加
|
49 |
+
- 英語の音声合成の速度向上([gordon0414](https://github.com/gordon0414)さんによる[PR](https://github.com/litagin02/Style-Bert-VITS2/pull/124)です、ありがとうございます!)
|
50 |
+
- エディターの各種機能改善(多くが[kamexy](https://github.com/kamexy)様による[エディターリポジトリ](https://github.com/litagin02/Style-Bert-VITS2-Editor)へのプルリク群です、ありがとうございます!)
|
51 |
+
- 選択した行の下に新規の行を作成できるように
|
52 |
+
- Mac使用時に日本語変換のエンターで音声合成が走るバグの修正
|
53 |
+
- ペースト時に改行を含まない場合は通常のペーストの振る舞いになるように修正
|
54 |
+
|
55 |
+
|
56 |
+
### その他の改善
|
57 |
+
|
58 |
+
- 上のスタイル自動作成機能を既存モデルでも使えるような機能追加。具体的には、スタイル作成タブにて、フォルダ分けされた音声ファイルのディレクトリを任意に指定し、そのフォルダ分けを使って既存のモデルのスタイルの作成が可能に
|
59 |
+
- 音声書き起こしに[kotoba-whisper](https://huggingface.co/kotoba-tech/kotoba-whisper-v1.1)を追加
|
60 |
+
- 音声書き起こし時にHugging FaceのWhisperモデルを使う際に、書き起こしを順次保存するように改善
|
61 |
+
- 音声書き起こしのデフォルトをfaster-whiperからHugging FaceのWhisperモデルへ変更
|
62 |
+
- (**ライブラリとしてのみ**)依存関係の軽量化、音声合成時に読み上げテキストの読みを表す音素列を指定する機能を追加 + 様々な改善 ([tsukumijimaさん](https://github.com/tsukumijima)による[プルリク](https://github.com/litagin02/Style-Bert-VITS2/pull/118)です、ありがとうございます!)
|
63 |
+
|
64 |
+
### 内部変更
|
65 |
+
|
66 |
+
- これまでpath管理に`configs/paths.yml`を使っていたが、`configs/default_paths.yml`にリネームし、`configs/paths.yml`はgitの管理対象外に変更
|
67 |
+
|
68 |
+
### バグ修正
|
69 |
+
|
70 |
+
- Gradioのアップデートにより、モデル選択時やスタイルのDBSCAN作成時等に`TypeError: Type is not JSON serializable: WindowsPath`のようなエラーが出る問題を修正
|
71 |
+
- TensorboardをWebUIから立ち上げた際にエラーが出る問題の修正 ([#129](https://github.com/litagin02/Style-Bert-VITS2/issues/129))
|
72 |
+
|
73 |
+
|
74 |
## v2.4.1 (2024-03-16)
|
75 |
|
76 |
**batファイルでのインストール・アップデート方法の変更**(それ以外の変更はありません)
|
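For reference, the three merge formulas listed in the v2.6.0 entry above, written out as a standalone sketch (illustrative only, not the WebUI implementation, which applies separate weights to the voice, pitch, speech-style and tempo parts of the model):

```python
from typing import Dict

import torch

StateDict = Dict[str, torch.Tensor]


def difference_merge(a: StateDict, b: StateDict, c: StateDict, weight: float) -> StateDict:
    # new = A + weight * (B - C)
    return {k: a[k] + weight * (b[k] - c[k]) for k in a}


def weighted_sum_merge(
    a: StateDict, b: StateDict, c: StateDict, wa: float, wb: float, wc: float
) -> StateDict:
    # new = wa * A + wb * B + wc * C
    return {k: wa * a[k] + wb * b[k] + wc * c[k] for k in a}


def add_null_model(a: StateDict, b: StateDict, weight: float) -> StateDict:
    # new = A + weight * B
    return {k: a[k] + weight * b[k] for k in a}
```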
docs/CLI.md
CHANGED
@@ -7,17 +7,17 @@ git clone https://github.com/litagin02/Style-Bert-VITS2.git
|
|
7 |
cd Style-Bert-VITS2
|
8 |
python -m venv venv
|
9 |
venv\Scripts\activate
|
10 |
-
pip install torch
|
11 |
pip install -r requirements.txt
|
12 |
```
|
13 |
|
14 |
Then download the necessary models and the default TTS model, and set the global paths.
|
15 |
```bash
|
16 |
-
python initialize.py [--
|
17 |
```
|
18 |
|
19 |
Optional:
|
20 |
-
- `--
|
21 |
- `--dataset_root`: Default: `Data`. Root directory of the training dataset. The training dataset of `{model_name}` should be placed in `{dataset_root}/{model_name}`.
|
22 |
- `--assets_root`: Default: `model_assets`. Root directory of the model assets (for inference). In training, the model assets will be saved to `{assets_root}/{model_name}`, and in inference, we load all the models from `{assets_root}`.
|
23 |
|
@@ -26,7 +26,7 @@ Optional:
|
|
26 |
|
27 |
### 1.1. Slice audio files
|
28 |
|
29 |
-
The following audio formats are supported: ".wav", ".flac", ".mp3", ".ogg", ".opus".
|
30 |
```bash
|
31 |
python slice.py --model_name <model_name> [-i <input_dir>] [-m <min_sec>] [-M <max_sec>] [--time_suffix]
|
32 |
```
|
@@ -101,4 +101,4 @@ python train_ms_jp_extra.py [--repo_id <username>/<repo_name>] [--skip_default_s
|
|
101 |
|
102 |
Optional:
|
103 |
- `--repo_id`: Hugging Face repository ID to upload the trained model to. You should have logged in using `huggingface-cli login` before running this command.
|
104 |
-
- `--skip_default_style`: Skip making the default style vector. Use this if you want to resume training (since the default style vector
|
|
|
7 |
cd Style-Bert-VITS2
|
8 |
python -m venv venv
|
9 |
venv\Scripts\activate
|
10 |
+
pip install torch torchaudio --index-url https://download.pytorch.org/whl/cu118
|
11 |
pip install -r requirements.txt
|
12 |
```
|
13 |
|
14 |
Then download the necessary models and the default TTS model, and set the global paths.
|
15 |
```bash
|
16 |
+
python initialize.py [--skip_default_models] [--dataset_root <path>] [--assets_root <path>]
|
17 |
```
|
18 |
|
19 |
Optional:
|
20 |
+
- `--skip_default_models`: Skip downloading the default voice models (use this if you only have to train your own models).
|
21 |
- `--dataset_root`: Default: `Data`. Root directory of the training dataset. The training dataset of `{model_name}` should be placed in `{dataset_root}/{model_name}`.
|
22 |
- `--assets_root`: Default: `model_assets`. Root directory of the model assets (for inference). In training, the model assets will be saved to `{assets_root}/{model_name}`, and in inference, we load all the models from `{assets_root}`.
|
23 |
|
|
|
26 |
|
27 |
### 1.1. Slice audio files
|
28 |
|
29 |
+
The following audio formats are supported: ".wav", ".flac", ".mp3", ".ogg", ".opus", ".m4a".
|
30 |
```bash
|
31 |
python slice.py --model_name <model_name> [-i <input_dir>] [-m <min_sec>] [-M <max_sec>] [--time_suffix]
|
32 |
```
|
|
|
101 |
|
102 |
Optional:
|
103 |
- `--repo_id`: Hugging Face repository ID to upload the trained model to. You should have logged in using `huggingface-cli login` before running this command.
|
104 |
+
- `--skip_default_style`: Skip making the default style vector. Use this if you want to resume training (since the default style vector has been already made).
|
docs/FAQ.md
ADDED
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# よくある質問
|
2 |
+
|
3 |
+
## 書き起こしにエラーが出たりして失敗する
|
4 |
+
|
5 |
+
ffmpegが入っていないことが問題のようです。
|
6 |
+
ググるか、おそらくWindowsなら
|
7 |
+
```
|
8 |
+
winget install ffmpeg
|
9 |
+
```
|
10 |
+
によりffmpegをインストールできます。その後で試してみてください。
|
11 |
+
|
12 |
+
## Google Colabでの学習がエラーが何か出て動かない
|
13 |
+
|
14 |
+
Google Colabのノートブックは以前のバージョンのノートブックのコピーを使っていませんか?
|
15 |
+
Colabノートブックは最新のバージョンに合ったノートブックで動かすことを前提としています。ノートブック記載のバージョンを確認して、[最新のcolabノートブック](http://colab.research.google.com/github/litagin02/Style-Bert-VITS2/blob/master/colab.ipynb)(を必要ならコピーして)から使うようにしてください。
|
16 |
+
|
17 |
+
## `ModuleNotFoundError: No module named '_socket'`と出る
|
18 |
+
|
19 |
+
フォルダ名をインストールした時から変えていませんか?フォルダ名を変えるとパスが変わってしまい、インストール時に指定したパスと異なるためにエラーが出ます。フォルダ名を元に戻してください。
|
20 |
+
|
21 |
+
## 学習に時間がかかりすぎる
|
22 |
+
|
23 |
+
デフォルトの100エポックは音声データ量によっては過剰な場合があります。デフォルトでは1000ステップごとにモデルが保存されるはずなので、途中で学習を中断してみて途中のもので試してみてもいいでしょう。
|
24 |
+
|
25 |
+
またバッチサイズが大き過ぎてメモリがVRAMから溢れると非常に遅くなることがあります。VRAM使用量がギリギリだったり物理メモリに溢れている場合はバッチサイズを小さくしてみてください。
|
26 |
+
|
27 |
+
## どのくらいの音声データが必要なの?
|
28 |
+
|
29 |
+
分かりません。試行錯誤してください。
|
30 |
+
|
31 |
+
参考として、数分程度でも学習はできるらしく、またRVCでよく言われているのは多くても45分くらいで十分説があります。ただ多ければ多いほど精度が上がる可能性もありますが、分かりません。
|
32 |
+
<!-- OpenJTalkの間違ったトーンで大量のデータを学習すると間違ったトーンの入力でなければ正しい出力ができなくなるが、学習データの範囲内ならば高い性能を発揮する -->
|
33 |
+
|
34 |
+
## どのくらいのステップ・エポックがいいの?
|
35 |
+
|
36 |
+
分かりません。試行錯誤してください。`python speech_mos.py -m <モデル名>`によって自然性の一つの評価ができるので、それが少し参考になります(ただあくまで一つの指標です)。
|
37 |
+
|
38 |
+
参考として、最初の2k-3kで声音はかなり似始めて、5k-10k-15kステップほどで感情含めてよい感じになりやすく、そこからどんどん回して20kなり30kなり50kなり100kなりでどんどん微妙に変わっていきます。が、微妙に変わるので、どこがいいとかは分かりません。
|
39 |
+
|
40 |
+
## APIサーバーで長い文章が合成できない
|
41 |
+
|
42 |
+
デフォルトで`server_fastapi.py`の入力文字上限は100文字に設定されています。
|
43 |
+
`config.yml`の`server.limit`の100を好きな数字に変更してください。
|
44 |
+
上限をなくしたい方は`server.limit`を-1に設定してください。
|
45 |
+
|
46 |
+
## 学習を中断・再開するには
|
47 |
+
|
48 |
+
- 学習を中断するには、学習の進捗が表示されている画面(bat使用ならコマンドプロンプト)を好きなタイミングで閉じてください。
|
49 |
+
- 学習を再開するには、WebUIでモデル名を再開したいモデルと同じ名前に設定して、前処理等はせずに一番下の「学習を開始する」ボタンを押してください(「スタイルファイルの生成をスキップする」にチェックを入れるのをおすすめします)。
|
50 |
+
|
51 |
+
## 途中でバッチサイズやエポック数を変更したい
|
52 |
+
|
53 |
+
`Data/{モデル名}/config.json`を手動で変更してから、学習を再開してください。
|
54 |
+
|
55 |
+
## その他
|
56 |
+
|
57 |
+
ググったり調べたりChatGPTに聞くか、それでも分からない場合・または手順通りやってもエラーが出る等明らかに不具合やバグと思われる場合は、GitHubの[Issue](https://github.com/litagin02/Style-Bert-VITS2/issues)に投稿してください。
|
docs/TERMS_OF_USE.md
ADDED
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# 開発陣からのお願いとデフォルトモデルの利用規約
|
2 |
+
|
3 |
+
- 2024-06-14: ライセンスとの整合性から「利用規約」を「お願い」に変更
|
4 |
+
- 2024-06-01: 初版
|
5 |
+
|
6 |
+
Style-Bert-VITS2を用いる際は、以下のお願いを守っていただけると幸いです。ただしモデルの利用規約以前の箇所はあくまで「お願い」であり、何の強制力はなく、Style-Bert-VITS2の利用規約ではありません。よって[リポジトリのライセンス](https://github.com/litagin02/Style-Bert-VITS2#license)とは矛盾せず、リポジトリの利用にあたっては常にリポジトリのライセンスのみが拘束力を持ちます。
|
7 |
+
|
8 |
+
## やってほしくないこと
|
9 |
+
|
10 |
+
以下の目的での利用はStyle-Bert-VITS2を使ってほしくありません。
|
11 |
+
|
12 |
+
- 法律に違反する目的
|
13 |
+
- 政治的な目的(本家Bert-VITS2で禁止されています)
|
14 |
+
- 他者を傷つける目的
|
15 |
+
- なりすまし・ディープフェイク作成目的
|
16 |
+
|
17 |
+
## 守ってほしいこと
|
18 |
+
|
19 |
+
- Style-Bert-VITS2を利用する際は、使用するモデルの利用規約・ライセンス必ず確認し、存在する場合はそれに従ってほしいです。
|
20 |
+
- またソースコードを利用する際は、[リポジトリのライセンス](https://github.com/litagin02/Style-Bert-VITS2#license)に従ってほしいです。
|
21 |
+
|
22 |
+
# モデルの利用規約・ライセンス
|
23 |
+
|
24 |
+
以下はデフォルトで付随しているモデルの利用規約・ライセンスです。このリポジトリ自体にはモデルは付随していないので、[リポジトリのライセンス](https://github.com/litagin02/Style-Bert-VITS2#license)とは関係がありません(なのでリポジトリライセンスとの矛盾は発生しません)。
|
25 |
+
|
26 |
+
## JVNVコーパス (jvnv-F1-jp, jvnv-F2-jp, jvnv-M1-jp, jvnv-M2-jp)
|
27 |
+
|
28 |
+
- [JVNVコーパス](https://sites.google.com/site/shinnosuketakamichi/research-topics/jvnv_corpus) のライセンスは[CC BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/deed.ja)ですので、これを継承します。
|
29 |
+
|
30 |
+
## 小春音アミ (koharune-ami) / あみたろ (amitaro)
|
31 |
+
|
32 |
+
[あみたろの声素材工房様の規約](https://amitaro.net/voice/voice_rule/) と [あみたろのライブ配信音声・利用規約](https://amitaro.net/voice/livevoice/#index_id6) を全て守らなければなりません。特に、以下の事項を遵守してください(規約を守れば商用非商用問わず利用できます)。
|
33 |
+
|
34 |
+
### 禁止事項
|
35 |
+
|
36 |
+
- 年齢制限のある作品・用途への使用
|
37 |
+
- 新興宗教・政治・マルチ購などに深く関係する作品・用途
|
38 |
+
- 特定の団体や個人や国家を誹謗中傷する作品・用途
|
39 |
+
- 生成された音声を、あみたろ本人の声として扱うこと
|
40 |
+
- 生成された音声を、あみたろ以外の人の声として扱うこと
|
41 |
+
|
42 |
+
### クレジット表記
|
43 |
+
|
44 |
+
生成音声を公開する際は(媒体は問わない)、必ず分かりやすい場所に `あみたろの声素材工房 (https://amitaro.net/)` の声を元にした音声モデルを使用していることが分かるようなクレジット表記を記載してください。
|
45 |
+
|
46 |
+
クレジット表記例:
|
47 |
+
- `Style-BertVITS2モデル: 小春音アミ、あみたろの声素材工房 (https://amitaro.net/)`
|
48 |
+
- `Style-BertVITS2モデル: あみたろ、あみたろの声素材工房 (https://amitaro.net/)`
|
49 |
+
|
50 |
+
### モデルマージ
|
51 |
+
|
52 |
+
モデルマージに関しては、[あみたろの声素材工房のよくある質問への回答](https://amitaro.net/voice/faq/#index_id17)を遵守してください:
|
53 |
+
- 本モデルを別モデルとマージできるのは、その別モデル作成の際に学習に使われた声の権利者が許諾している場合に限る
|
54 |
+
- あみたろの声の特徴が残っている場合(マージの割合が25%以上の場合)は、その利用は[あみたろの声素材工房様の規約](https://amitaro.net/voice/voice_rule/)の範囲内に限定され、そのモデルに関してもこの規約が適応される
|
gen_yaml.py
CHANGED
@@ -22,7 +22,7 @@ args = parser.parse_args()
|
|
22 |
def gen_yaml(model_name, dataset_path):
|
23 |
if not os.path.exists("config.yml"):
|
24 |
shutil.copy(src="default_config.yml", dst="config.yml")
|
25 |
-
with open("config.yml",
|
26 |
yml_data = yaml.safe_load(f)
|
27 |
yml_data["model_name"] = model_name
|
28 |
yml_data["dataset_path"] = dataset_path
|
|
|
22 |
def gen_yaml(model_name, dataset_path):
|
23 |
if not os.path.exists("config.yml"):
|
24 |
shutil.copy(src="default_config.yml", dst="config.yml")
|
25 |
+
with open("config.yml", encoding="utf-8") as f:
|
26 |
yml_data = yaml.safe_load(f)
|
27 |
yml_data["model_name"] = model_name
|
28 |
yml_data["dataset_path"] = dataset_path
|
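gen_yaml() is shown above only up to the point where it mutates the loaded YAML. A self-contained sketch of the whole flow (the final write-back step is not visible in the hunk and is an assumption):

```python
import os
import shutil

import yaml


def gen_yaml_sketch(model_name: str, dataset_path: str) -> None:
    """Sketch of gen_yaml(); the final dump back to config.yml is assumed."""
    if not os.path.exists("config.yml"):
        shutil.copy(src="default_config.yml", dst="config.yml")
    with open("config.yml", encoding="utf-8") as f:
        yml_data = yaml.safe_load(f)
    yml_data["model_name"] = model_name
    yml_data["dataset_path"] = dataset_path
    with open("config.yml", "w", encoding="utf-8") as f:  # assumed write-back
        yaml.safe_dump(yml_data, f, allow_unicode=True)
```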
gradio_tabs/dataset.py
CHANGED
Removed side of the diff (old implementation, summarized):

@@ -1,5 +1,6 @@ no GRADIO_THEME import.
@@ -43,10 +44,10 @@ and @@ -59,8 +60,6 @@ def do_transcribe: took a `device` argument and passed "--device" to the transcription script; there was no hf_repo_id parameter.
@@ -71,9 +70,12 @@ ran the script without an --hf_repo_id option and returned no completion message.
@@ -82,46 +84,51 @@ the how-to text lacked the notes about skipping the slice step, the accepted input formats and the per-style subfolders; create_dataset_app() used a plain gr.Blocks() without the theme, and the input_dir hint mentioned only wav files.
@@ -167,6 +174,12 @@, @@ -181,6 +194,7 @@ and @@ -189,9 +203,7 @@ use_hf_whisper started unchecked, there was no HuggingFace Whisper model dropdown, compute_type was always visible, batch_size started hidden (visible=False), and a "デバイス" (cuda/cpu) radio button existed.
@@ -228,21 +240,26 @@ the transcribe button passed `device` instead of `hf_repo_id`, the use_hf_whisper.change handler toggled only batch_size and compute_type, and there was no standalone __main__ launcher.

Added side (the rewritten file, from line 1):
|
|
|
|
|
|
1 |
import gradio as gr
|
2 |
|
3 |
+
from style_bert_vits2.constants import GRADIO_THEME
|
4 |
from style_bert_vits2.logging import logger
|
5 |
from style_bert_vits2.utils.subprocess import run_script_with_log
|
6 |
|
|
|
44 |
compute_type,
|
45 |
language,
|
46 |
initial_prompt,
|
|
|
47 |
use_hf_whisper,
|
48 |
batch_size,
|
49 |
num_beams,
|
50 |
+
hf_repo_id,
|
51 |
):
|
52 |
if model_name == "":
|
53 |
return "Error: モデル名を入力してください。"
|
|
|
60 |
whisper_model,
|
61 |
"--compute_type",
|
62 |
compute_type,
|
|
|
|
|
63 |
"--language",
|
64 |
language,
|
65 |
"--initial_prompt",
|
|
|
70 |
if use_hf_whisper:
|
71 |
cmd.append("--use_hf_whisper")
|
72 |
cmd.extend(["--batch_size", str(batch_size)])
|
73 |
+
if hf_repo_id != "openai/whisper":
|
74 |
+
cmd.extend(["--hf_repo_id", hf_repo_id])
|
75 |
+
success, message = run_script_with_log(cmd, ignore_warning=True)
|
76 |
if not success:
|
77 |
return f"Error: {message}. エラーメッセージが空の場合、何も問題がない可能性があるので、書き起こしファイルをチェックして問題なければ無視してください。"
|
78 |
+
return "音声の文字起こしが完了しました。"
|
79 |
|
80 |
|
81 |
how_to_md = """
|
|
|
84 |
- 与えられた音声からちょうどいい長さの発話区間を切り取りスライス
|
85 |
- 音声に対して文字起こし
|
86 |
|
87 |
+
このうち両方を使ってもよいし、スライスする必要がない場合は後者のみを使ってもよいです。**コーパス音源などすでに適度な長さの音声ファイルがある場合はスライスは不要**です。
|
88 |
|
89 |
## 必要なもの
|
90 |
|
91 |
+
学習したい音声が入った音声ファイルいくつか(形式はwav以外でもmp3等通常の音声ファイル形式なら可能)。
|
92 |
合計時間がある程度はあったほうがいいかも、10分とかでも大丈夫だったとの報告あり。単一ファイルでも良いし複数ファイルでもよい。
|
93 |
|
94 |
## スライス使い方
|
95 |
+
1. `inputs`フォルダに音声ファイルをすべて入れる(スタイル分けをしたい場合は、サブフォルダにスタイルごとに音声を分けて入れる)
|
96 |
2. `モデル名`を入力して、設定を必要なら調整して`音声のスライス`ボタンを押す
|
97 |
3. 出来上がった音声ファイルたちは`Data/{モデル名}/raw`に保存される
|
98 |
|
99 |
## 書き起こし使い方
|
100 |
|
101 |
+
1. `Data/{モデル名}/raw`に音声ファイルが入っていることを確認(直下でなくてもよい)
|
102 |
2. 設定を必要なら調整してボタンを押す
|
103 |
3. 書き起こしファイルは`Data/{モデル名}/esd.list`に保存される
|
104 |
|
105 |
## 注意
|
106 |
|
107 |
+
- ~~長すぎる秒数(12-15秒くらいより長い?)のwavファイルは学習に用いられないようです。また短すぎてもあまりよくない可能性もあります。~~ この制限はVer 2.5では学習時に「カスタムバッチサンプラーを使わない」を選択すればなくなりました。が、長すぎる音声があるとVRAM消費量が増えたり安定しなかったりするので、適度な長さにスライスすることをおすすめします。
|
108 |
- 書き起こしの結果をどれだけ修正すればいいかはデータセットに依存しそうです。
|
|
|
109 |
"""
|
110 |
|
111 |
|
112 |
def create_dataset_app() -> gr.Blocks:
|
113 |
+
with gr.Blocks(theme=GRADIO_THEME) as app:
|
114 |
+
gr.Markdown(
|
115 |
+
"**既に1ファイル2-12秒程度の音声ファイル集とその書き起こしデータがある場合は、このタブは使用せずに学習できます。**"
|
116 |
+
)
|
117 |
with gr.Accordion("使い方", open=False):
|
118 |
gr.Markdown(how_to_md)
|
119 |
model_name = gr.Textbox(
|
120 |
label="モデル名を入力してください(話者名としても使われます)。"
|
121 |
)
|
122 |
with gr.Accordion("音声のスライス"):
|
123 |
+
gr.Markdown(
|
124 |
+
"**すでに適度な長さの音声ファイルからなるデータがある場合は、その音声をData/{モデル名}/rawに入れれば、このステップは不要です。**"
|
125 |
+
)
|
126 |
with gr.Row():
|
127 |
with gr.Column():
|
128 |
input_dir = gr.Textbox(
|
129 |
label="元音声の入っているフォルダパス",
|
130 |
value="inputs",
|
131 |
+
info="下記フォルダにwavやmp3等のファイルを入れておいてください",
|
132 |
)
|
133 |
min_sec = gr.Slider(
|
134 |
minimum=0,
|
|
|
174 |
)
|
175 |
use_hf_whisper = gr.Checkbox(
|
176 |
label="HuggingFaceのWhisperを使う(速度が速いがVRAMを多く使う)",
|
177 |
+
value=True,
|
178 |
+
)
|
179 |
+
hf_repo_id = gr.Dropdown(
|
180 |
+
["openai/whisper", "kotoba-tech/kotoba-whisper-v1.1"],
|
181 |
+
label="HuggingFaceのWhisperモデル",
|
182 |
+
value="openai/whisper",
|
183 |
)
|
184 |
compute_type = gr.Dropdown(
|
185 |
[
|
|
|
194 |
],
|
195 |
label="計算精度",
|
196 |
value="bfloat16",
|
197 |
+
visible=False,
|
198 |
)
|
199 |
batch_size = gr.Slider(
|
200 |
minimum=1,
|
|
|
203 |
step=1,
|
204 |
label="バッチサイズ",
|
205 |
info="大きくすると速度が速くなるがVRAMを多く使う",
|
|
|
206 |
)
|
|
|
207 |
language = gr.Dropdown(["ja", "en", "zh"], value="ja", label="言語")
|
208 |
initial_prompt = gr.Textbox(
|
209 |
label="初期プロンプト",
|
|
|
240 |
compute_type,
|
241 |
language,
|
242 |
initial_prompt,
|
|
|
243 |
use_hf_whisper,
|
244 |
batch_size,
|
245 |
num_beams,
|
246 |
+
hf_repo_id,
|
247 |
],
|
248 |
outputs=[result2],
|
249 |
)
|
250 |
use_hf_whisper.change(
|
251 |
lambda x: (
|
252 |
gr.update(visible=x),
|
253 |
+
gr.update(visible=x),
|
254 |
gr.update(visible=not x),
|
255 |
),
|
256 |
inputs=[use_hf_whisper],
|
257 |
+
outputs=[hf_repo_id, batch_size, compute_type],
|
258 |
)
|
259 |
|
260 |
return app
|
261 |
+
|
262 |
+
|
263 |
+
if __name__ == "__main__":
|
264 |
+
app = create_dataset_app()
|
265 |
+
app.launch(inbrowser=True)
|
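The use_hf_whisper.change handler above follows the usual Gradio show/hide pattern. A minimal standalone sketch of that pattern (illustrative component names, not this repository's actual tab):

```python
import gradio as gr

with gr.Blocks() as demo:
    use_hf = gr.Checkbox(label="Use HF Whisper", value=True)
    repo_id = gr.Dropdown(["openai/whisper"], label="HF repo", visible=True)
    compute_type = gr.Dropdown(["bfloat16", "float16"], label="compute_type", visible=False)
    # Show repo_id while the box is checked, show compute_type while it is not.
    use_hf.change(
        lambda checked: (gr.update(visible=checked), gr.update(visible=not checked)),
        inputs=[use_hf],
        outputs=[repo_id, compute_type],
    )

if __name__ == "__main__":
    demo.launch()
```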
gradio_tabs/inference.py
CHANGED
Removed side of the diff (old implementation, summarized):

@@ -96,9 +96,65 @@ initial_md was a short version note without the terms-of-use section added in the new version.
@@ -260,10 +316,13 @@ initial_pth_files was built directly from model_holder.model_files_dict (the line is cut off here), and no terms-of-use markdown was rendered.
@@ -392,10 +451,10 @@ the style_weight slider had a different maximum and label (both cut off here).
@@ -464,3 +523,15 @@ the file ended after `return app`, with no standalone __main__ launcher.

Added side (the rewritten file, from line 96):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
96 |
]
|
97 |
|
98 |
initial_md = """
|
99 |
+
- Ver 2.5で追加されたデフォルトの [`koharune-ami`(小春音アミ)モデル](https://huggingface.co/litagin/sbv2_koharune_ami) と[`amitaro`(あみたろ)モデル](https://huggingface.co/litagin/sbv2_amitaro) は、[あみたろの声素材工房](https://amitaro.net/)で公開されているコーパス音源・ライブ配信音声を利用して事前に許可を得て学習したモデルです。下記の**利用規約を必ず読んで**からご利用ください。
|
100 |
|
101 |
+
- Ver 2.5のアップデート後に上記モデルをダウンロードするには、`Initialize.bat`をダブルクリックするか、手動でダウンロードして`model_assets`ディレクトリに配置してください。
|
102 |
+
|
103 |
+
- Ver 2.3で追加された**エディター版**のほうが実際に読み上げさせるには使いやすいかもしれません。`Editor.bat`か`python server_editor.py --inbrowser`で起動できます。
|
104 |
+
"""
|
105 |
+
|
106 |
+
terms_of_use_md = """
|
107 |
+
## お願いとデフォルトモデルのライセンス
|
108 |
+
|
109 |
+
最新のお願い・利用規約は [こちら](https://github.com/litagin02/Style-Bert-VITS2/blob/master/docs/TERMS_OF_USE.md) を参照してください。常に最新のものが適用されます。
|
110 |
+
|
111 |
+
Style-Bert-VITS2を用いる際は、以下のお願いを守っていただけると幸いです。ただしモデルの利用規約以前の箇所はあくまで「お願い」であり、何の強制力はなく、Style-Bert-VITS2の利用規約ではありません。よって[リポジトリのライセンス](https://github.com/litagin02/Style-Bert-VITS2#license)とは矛盾せず、リポジトリの利用にあたっては常にリポジトリのライセンスのみが拘束力を持ちます。
|
112 |
+
|
113 |
+
### やってほしくないこと
|
114 |
+
|
115 |
+
以下の目的での利用はStyle-Bert-VITS2を使ってほしくありません。
|
116 |
+
|
117 |
+
- 法律に違反する目的
|
118 |
+
- 政治的な目的(本家Bert-VITS2で禁止されています)
|
119 |
+
- 他者を傷つける目的
|
120 |
+
- なりすまし・ディープフェイク作成目的
|
121 |
+
|
122 |
+
### 守ってほしいこと
|
123 |
+
|
124 |
+
- Style-Bert-VITS2を利用する際は、使用するモデルの利用規約・ライセンス必ず確認し、存在する場合はそれに従ってほしいです。
|
125 |
+
- またソースコードを利用する際は、[リポジトリのライセンス](https://github.com/litagin02/Style-Bert-VITS2#license)に従ってほしいです。
|
126 |
+
|
127 |
+
以下はデフォルトで付随しているモデルのライセンスです。
|
128 |
+
|
129 |
+
### JVNVコーパス (jvnv-F1-jp, jvnv-F2-jp, jvnv-M1-jp, jvnv-M2-jp)
|
130 |
+
|
131 |
+
- [JVNVコーパス](https://sites.google.com/site/shinnosuketakamichi/research-topics/jvnv_corpus) のライセンスは[CC BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/deed.ja)ですので、これを継承します。
|
132 |
+
|
133 |
+
### 小春音アミ (koharune-ami) / あみたろ (amitaro)
|
134 |
+
|
135 |
+
[あみたろの声素材工房様の規約](https://amitaro.net/voice/voice_rule/) と [あみたろのライブ配信音声・利用規約](https://amitaro.net/voice/livevoice/#index_id6) を全て守らなければなりません。特に、以下の事項を遵守してください(規約を守れば商用非商用問わず利用できます):
|
136 |
+
|
137 |
+
#### 禁止事項
|
138 |
+
|
139 |
+
- 年齢制限のある作品・用途への使用
|
140 |
+
- 新興宗教・政治・マルチ購などに深く関係する作品・用途
|
141 |
+
- 特定の団体や個人や国家を誹謗中傷する作品・用途
|
142 |
+
- 生成された音声を、あみたろ本人の声として扱うこと
|
143 |
+
- 生成された音声を、あみたろ以外の人の声として扱うこと
|
144 |
+
|
145 |
+
#### クレジット表記
|
146 |
+
|
147 |
+
生成音声を公開する際は(媒体は問わない)、必ず分かりやすい場所に `あみたろの声素材工房 (https://amitaro.net/)` の声を元にした音声モデルを使用していることが分かるようなクレジット表記を記載してください。
|
148 |
+
|
149 |
+
クレジット表記例:
|
150 |
+
- `Style-BertVITS2モデル: 小春音アミ、あみたろの声素材工房 (https://amitaro.net/)`
|
151 |
+
- `Style-BertVITS2モデル: あみたろ、あみたろの声素材工房 (https://amitaro.net/)`
|
152 |
+
|
153 |
+
#### モデルマージ
|
154 |
+
|
155 |
+
モデルマージに関しては、[あみたろの声素材工房のよくある質問への回答](https://amitaro.net/voice/faq/#index_id17)を遵守してください:
|
156 |
+
- 本モデルを別モデルとマージできるのは、その別モデル作成の際に学習に使われた声の権利者が許諾している場合に限る
|
157 |
+
- あみたろの声の特徴が残っている場合(マージの割合が25%以上の場合)は、その利用は[あみたろの声素材工房様の規約](https://amitaro.net/voice/voice_rule/)の範囲内に限定され、そのモデルに関してもこの規約が適応される
|
158 |
"""
|
159 |
|
160 |
how_to_md = """
|
|
|
316 |
)
|
317 |
return app
|
318 |
initial_id = 0
|
319 |
+
initial_pth_files = [
|
320 |
+
str(f) for f in model_holder.model_files_dict[model_names[initial_id]]
|
321 |
+
]
|
322 |
|
323 |
with gr.Blocks(theme=GRADIO_THEME) as app:
|
324 |
gr.Markdown(initial_md)
|
325 |
+
gr.Markdown(terms_of_use_md)
|
326 |
with gr.Accordion(label="使い方", open=False):
|
327 |
gr.Markdown(how_to_md)
|
328 |
with gr.Row():
|
|
|
451 |
)
|
452 |
style_weight = gr.Slider(
|
453 |
minimum=0,
|
454 |
+
maximum=20,
|
455 |
value=DEFAULT_STYLE_WEIGHT,
|
456 |
step=0.1,
|
457 |
+
label="スタイルの強さ(声が崩壊したら小さくしてください)",
|
458 |
)
|
459 |
ref_audio_path = gr.Audio(
|
460 |
label="参照音声", type="filepath", visible=False
|
|
|
523 |
)
|
524 |
|
525 |
return app
|
526 |
+
|
527 |
+
|
528 |
+
if __name__ == "__main__":
|
529 |
+
from config import get_path_config
|
530 |
+
import torch
|
531 |
+
|
532 |
+
path_config = get_path_config()
|
533 |
+
assets_root = path_config.assets_root
|
534 |
+
device = "cuda" if torch.cuda.is_available() else "cpu"
|
535 |
+
model_holder = TTSModelHolder(assets_root, device)
|
536 |
+
app = create_inference_app(model_holder)
|
537 |
+
app.launch(inbrowser=True)
|
gradio_tabs/merge.py
CHANGED
@@ -1,14 +1,14 @@
|
|
1 |
import json
|
2 |
-
import os
|
3 |
from pathlib import Path
|
|
|
4 |
|
5 |
import gradio as gr
|
6 |
import numpy as np
|
7 |
import torch
|
8 |
-
import yaml
|
9 |
from safetensors import safe_open
|
10 |
from safetensors.torch import save_file
|
11 |
|
|
|
12 |
from style_bert_vits2.constants import DEFAULT_STYLE, GRADIO_THEME
|
13 |
from style_bert_vits2.logging import logger
|
14 |
from style_bert_vits2.tts_model import TTSModel, TTSModelHolder
|
@@ -20,45 +20,72 @@ speech_style_keys = ["enc_p"]
|
|
20 |
tempo_keys = ["sdp", "dp"]
|
21 |
|
22 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
|
|
|
|
23 |
|
24 |
-
# Get path settings
|
25 |
-
with open(os.path.join("configs", "paths.yml"), "r", encoding="utf-8") as f:
|
26 |
-
path_config: dict[str, str] = yaml.safe_load(f.read())
|
27 |
-
# dataset_root = path_config["dataset_root"]
|
28 |
-
assets_root = path_config["assets_root"]
|
29 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
30 |
|
31 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
32 |
"""
|
|
|
33 |
style_triple_list: list[(model_aでのスタイル名, model_bでのスタイル名, 出力するスタイル名)]
|
34 |
"""
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
else:
|
40 |
-
# 存在しない場合、エラーを発生
|
41 |
-
raise ValueError(f"No element with {DEFAULT_STYLE} output style name found.")
|
42 |
-
|
43 |
-
style_vectors_a = np.load(
|
44 |
-
os.path.join(assets_root, model_name_a, "style_vectors.npy")
|
45 |
-
) # (style_num_a, 256)
|
46 |
-
style_vectors_b = np.load(
|
47 |
-
os.path.join(assets_root, model_name_b, "style_vectors.npy")
|
48 |
-
) # (style_num_b, 256)
|
49 |
-
with open(
|
50 |
-
os.path.join(assets_root, model_name_a, "config.json"), "r", encoding="utf-8"
|
51 |
-
) as f:
|
52 |
-
config_a = json.load(f)
|
53 |
-
with open(
|
54 |
-
os.path.join(assets_root, model_name_b, "config.json"), "r", encoding="utf-8"
|
55 |
-
) as f:
|
56 |
-
config_b = json.load(f)
|
57 |
style2id_a = config_a["data"]["style2id"]
|
58 |
style2id_b = config_b["data"]["style2id"]
|
59 |
new_style_vecs = []
|
60 |
new_style2id = {}
|
61 |
-
for style_a, style_b, style_out in
|
62 |
if style_a not in style2id_a:
|
63 |
logger.error(f"{style_a} is not in {model_name_a}.")
|
64 |
raise ValueError(f"{style_a} は {model_name_a} にありません。")
|
@@ -72,38 +99,191 @@ def merge_style(model_name_a, model_name_b, weight, output_name, style_triple_li
|
|
72 |
new_style_vecs.append(new_style)
|
73 |
new_style2id[style_out] = len(new_style_vecs) - 1
|
74 |
new_style_vecs = np.array(new_style_vecs)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
75 |
|
76 |
-
|
77 |
-
np.save(output_style_path, new_style_vecs)
|
78 |
|
79 |
new_config = config_a.copy()
|
80 |
new_config["data"]["num_styles"] = len(new_style2id)
|
81 |
new_config["data"]["style2id"] = new_style2id
|
82 |
new_config["model_name"] = output_name
|
83 |
-
|
84 |
-
|
85 |
-
|
86 |
-
|
|
|
|
|
|
|
87 |
|
88 |
-
# recipe.jsonを読み込んで、style_triple_listを追記
|
89 |
-
info_path = os.path.join(assets_root, output_name, "recipe.json")
|
90 |
-
if os.path.exists(info_path):
|
91 |
-
with open(info_path, "r", encoding="utf-8") as f:
|
92 |
-
info = json.load(f)
|
93 |
-
else:
|
94 |
-
info = {}
|
95 |
-
info["style_triple_list"] = style_triple_list
|
96 |
-
with open(info_path, "w", encoding="utf-8") as f:
|
97 |
-
json.dump(info, f, indent=2, ensure_ascii=False)
|
98 |
|
99 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
100 |
|
|
|
101 |
|
102 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
103 |
return v0 * (1 - t) + v1 * t
|
104 |
|
105 |
|
106 |
-
def slerp_tensors(
|
|
|
|
|
107 |
device = v0.device
|
108 |
v0c = v0.cpu().numpy()
|
109 |
v1c = v1.cpu().numpy()
|
@@ -122,31 +302,25 @@ def slerp_tensors(t, v0, v1, dot_thres=0.998):
|
|
122 |
).to(device)
|
123 |
|
124 |
|
125 |
-
def
|
126 |
-
model_path_a,
|
127 |
-
model_path_b,
|
128 |
-
voice_weight,
|
129 |
-
voice_pitch_weight,
|
130 |
-
speech_style_weight,
|
131 |
-
tempo_weight,
|
132 |
-
output_name,
|
133 |
-
use_slerp_instead_of_lerp,
|
134 |
):
|
135 |
-
"""
|
136 |
-
|
137 |
-
|
138 |
-
|
139 |
-
|
140 |
-
model_a_weight[k] = f.get_tensor(k)
|
141 |
-
|
142 |
-
model_b_weight = {}
|
143 |
-
with safe_open(model_path_b, framework="pt", device="cpu") as f:
|
144 |
-
for k in f.keys():
|
145 |
-
model_b_weight[k] = f.get_tensor(k)
|
146 |
|
147 |
merged_model_weight = model_a_weight.copy()
|
148 |
|
149 |
-
for key in model_a_weight
|
150 |
if any([key.startswith(prefix) for prefix in voice_keys]):
|
151 |
weight = voice_weight
|
152 |
elif any([key.startswith(prefix) for prefix in voice_pitch_keys]):
|
@@ -161,13 +335,239 @@ def merge_models(
|
|
161 |
slerp_tensors if use_slerp_instead_of_lerp else lerp_tensors
|
162 |
)(weight, model_a_weight[key], model_b_weight[key])
|
163 |
|
164 |
-
merged_model_path =
|
165 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
166 |
)
|
167 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
168 |
save_file(merged_model_weight, merged_model_path)
|
169 |
|
170 |
info = {
|
|
|
171 |
"model_a": model_path_a,
|
172 |
"model_b": model_path_b,
|
173 |
"voice_weight": voice_weight,
|
@@ -175,98 +575,253 @@ def merge_models(
|
|
175 |
"speech_style_weight": speech_style_weight,
|
176 |
"tempo_weight": tempo_weight,
|
177 |
}
|
178 |
-
with open(
|
179 |
-
os.path.join(assets_root, output_name, "recipe.json"), "w", encoding="utf-8"
|
180 |
-
) as f:
|
181 |
json.dump(info, f, indent=2, ensure_ascii=False)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
182 |
return merged_model_path
|
183 |
|
184 |
|
185 |
def merge_models_gr(
|
186 |
-
|
187 |
-
|
188 |
-
|
189 |
-
|
190 |
-
|
191 |
-
|
192 |
-
|
193 |
-
|
194 |
-
|
195 |
-
|
|
|
|
|
|
|
196 |
):
|
197 |
if output_name == "":
|
198 |
return "Error: 新しいモデル名を入力してください。"
|
199 |
-
|
200 |
-
|
201 |
-
|
202 |
-
|
203 |
-
|
204 |
-
|
205 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
206 |
output_name,
|
207 |
-
|
|
|
|
|
|
|
208 |
)
|
209 |
-
return f"Success: モデルを{merged_model_path}に保存しました。"
|
210 |
|
211 |
|
212 |
-
def
|
213 |
-
model_name_a,
|
214 |
-
model_name_b,
|
215 |
-
|
216 |
-
|
217 |
-
|
|
|
|
|
|
|
218 |
):
|
219 |
if output_name == "":
|
220 |
return "Error: 新しいモデル名を入力してください。", None
|
221 |
-
|
222 |
-
|
223 |
-
|
224 |
-
|
225 |
-
|
226 |
-
|
227 |
-
|
228 |
-
|
229 |
-
|
230 |
-
|
231 |
-
|
232 |
-
style_a, style_b, style_out = style_triple
|
233 |
-
style_a = style_a.strip()
|
234 |
-
style_b = style_b.strip()
|
235 |
-
style_out = style_out.strip()
|
236 |
-
style_triple_list.append((style_a, style_b, style_out))
|
237 |
-
try:
|
238 |
-
new_style_path, new_styles = merge_style(
|
239 |
-
model_name_a, model_name_b, weight, output_name, style_triple_list
|
240 |
-
)
|
241 |
-
except ValueError as e:
|
242 |
-
return f"Error: {e}"
|
243 |
-
return f"Success: スタイルを{new_style_path}に保存しました。", gr.Dropdown(
|
244 |
choices=new_styles, value=new_styles[0]
|
245 |
)
|
246 |
|
247 |
|
248 |
-
def
|
249 |
-
|
250 |
-
|
251 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
252 |
|
253 |
-
|
254 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
255 |
|
|
|
256 |
|
257 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
258 |
new_names, new_files, _ = model_holder.update_model_names_for_gradio()
|
259 |
-
return new_names, new_files, new_names, new_files
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
260 |
|
261 |
|
262 |
-
def
|
263 |
-
|
264 |
-
|
|
|
|
|
|
|
|
|
265 |
config_a = json.load(f)
|
266 |
styles_a = list(config_a["data"]["style2id"].keys())
|
267 |
|
268 |
-
config_path_b =
|
269 |
-
with open(config_path_b,
|
270 |
config_b = json.load(f)
|
271 |
styles_b = list(config_b["data"]["style2id"].keys())
|
272 |
|
@@ -288,11 +843,30 @@ def load_styles_gr(model_name_a, model_name_b):
|
|
288 |
initial_md = """
|
289 |
## 使い方
|
290 |
|
291 |
-
|
292 |
-
|
293 |
-
|
294 |
-
|
295 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
296 |
|
297 |
以上でマージは完了で、`model_assets/マージ後のモデル名`にマージ後のモデルが保存され、音声合成のときに使えます。
|
298 |
|
@@ -301,28 +875,131 @@ initial_md = """
|
|
301 |
一番下にマージしたモデルによる簡易的な音声合成機能もつけています。
|
302 |
|
303 |
## 注意
|
304 |
-
|
|
|
|
|
305 |
"""
|
306 |
|
307 |
style_merge_md = f"""
|
308 |
-
## スタイルベクトルのマージ
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
309 |
|
310 |
-
|
311 |
-
|
|
|
|
|
|
|
|
|
312 |
```
|
313 |
-
|
314 |
-
Happy, Surprise, HappySurprise
|
315 |
```
|
316 |
-
|
317 |
-
|
318 |
-
|
319 |
-
の2つになります。
|
320 |
-
|
321 |
-
### 注意
|
322 |
-
- 必ず「{DEFAULT_STYLE}」という名前のスタイルを作ってください。これは、マージ後のモデルの平均スタイルになります。
|
323 |
-
- 構造上の相性の関係で、スタイルベクトルを混ぜる重みは、上の「話し方」と同じ比率で混ぜられます。例えば「話し方」が0のときはモデルAのみしか使われません。
|
324 |
"""
|
325 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
326 |
|
327 |
def create_merge_app(model_holder: TTSModelHolder) -> gr.Blocks:
    model_names = model_holder.model_names

@@ -336,14 +1013,26 @@ def create_merge_app(model_holder: TTSModelHolder) -> gr.Blocks:
        )
        return app
    initial_id = 0
-    initial_model_files =

    with gr.Blocks(theme=GRADIO_THEME) as app:
        gr.Markdown(
-            "
        )
        with gr.Accordion(label="使い方", open=False):
            gr.Markdown(initial_md)
        with gr.Row():
            with gr.Column(scale=3):
                model_name_a = gr.Dropdown(

@@ -356,6 +1045,12 @@ def create_merge_app(model_holder: TTSModelHolder) -> gr.Blocks:
                    choices=initial_model_files,
                    value=initial_model_files[0],
                )
            with gr.Column(scale=3):
                model_name_b = gr.Dropdown(
                    label="モデルB",

@@ -367,10 +1062,34 @@ def create_merge_app(model_holder: TTSModelHolder) -> gr.Blocks:
                    choices=initial_model_files,
                    value=initial_model_files[0],
                )
            refresh_button = gr.Button("更新", scale=1, visible=True)
        with gr.Column(variant="panel"):
            new_name = gr.Textbox(label="新しいモデル名", placeholder="new_model")
-            with gr.Row():
                voice_slider = gr.Slider(
                    label="声質",
                    value=0,

@@ -402,45 +1121,337 @@ def create_merge_app(model_holder: TTSModelHolder) -> gr.Blocks:
            use_slerp_instead_of_lerp = gr.Checkbox(
                label="線形補完のかわりに球面線形補完を使う",
                value=False,
            )
-            ...
            model_merge_button = gr.Button(
                "モデルファイルのマージ", variant="primary"
            )
            info_model_merge = gr.Textbox(label="情報")
-            ...
        model_name_a.change(
            model_holder.update_model_files_for_gradio,
            inputs=[model_name_a],

@@ -451,25 +1462,34 @@ def create_merge_app(model_holder: TTSModelHolder) -> gr.Blocks:
            inputs=[model_name_b],
            outputs=[model_path_b],
        )
-        ...
-            outputs=[
        )
-        ...
        )

        model_merge_button.click(
            merge_models_gr,
            inputs=[
-                model_name_a,
                model_path_a,
-                model_name_b,
                model_path_b,
                new_name,
                voice_slider,
                voice_pitch_slider,

@@ -477,25 +1497,35 @@ def create_merge_app(model_holder: TTSModelHolder) -> gr.Blocks:
                tempo_slider,
                use_slerp_instead_of_lerp,
            ],
-            outputs=[info_model_merge],
        )

-        style_merge_button.click(
-            ...

        tts_button.click(
            simple_tts,
            inputs=[new_name, text_input, style, emotion_weight],
-            outputs=[audio_output],
        )

        return app
import json
|
|
|
2 |
from pathlib import Path
|
3 |
+
from typing import Any, Union
|
4 |
|
5 |
import gradio as gr
|
6 |
import numpy as np
|
7 |
import torch
|
|
|
8 |
from safetensors import safe_open
|
9 |
from safetensors.torch import save_file
|
10 |
|
11 |
+
from config import get_path_config
|
12 |
from style_bert_vits2.constants import DEFAULT_STYLE, GRADIO_THEME
|
13 |
from style_bert_vits2.logging import logger
|
14 |
from style_bert_vits2.tts_model import TTSModel, TTSModelHolder
|
|
|
20 |
tempo_keys = ["sdp", "dp"]
|
21 |
|
22 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
23 |
+
path_config = get_path_config()
|
24 |
+
assets_root = path_config.assets_root
|
25 |
|
|
|
|
|
|
|
|
|
|
|
26 |
|
27 |
+
def load_safetensors(model_path: Union[str, Path]) -> dict[str, torch.Tensor]:
|
28 |
+
result: dict[str, torch.Tensor] = {}
|
29 |
+
with safe_open(model_path, framework="pt", device="cpu") as f:
|
30 |
+
for k in f.keys():
|
31 |
+
result[k] = f.get_tensor(k)
|
32 |
+
return result
|
33 |
+
|
34 |
+
|
35 |
+
def load_config(model_name: str) -> dict[str, Any]:
|
36 |
+
with open(assets_root / model_name / "config.json", encoding="utf-8") as f:
|
37 |
+
config = json.load(f)
|
38 |
+
return config
|
39 |
+
|
40 |
+
|
41 |
+
def save_config(config: dict[str, Any], model_name: str):
|
42 |
+
with open(assets_root / model_name / "config.json", "w", encoding="utf-8") as f:
|
43 |
+
json.dump(config, f, indent=2, ensure_ascii=False)
|
44 |
+
|
45 |
+
|
46 |
+
def load_recipe(model_name: str) -> dict[str, Any]:
|
47 |
+
receipe_path = assets_root / model_name / "recipe.json"
|
48 |
+
if receipe_path.exists():
|
49 |
+
with open(receipe_path, encoding="utf-8") as f:
|
50 |
+
recipe = json.load(f)
|
51 |
+
else:
|
52 |
+
recipe = {}
|
53 |
+
return recipe
|
54 |
+
|
55 |
+
|
56 |
+
def save_recipe(recipe: dict[str, Any], model_name: str):
|
57 |
+
with open(assets_root / model_name / "recipe.json", "w", encoding="utf-8") as f:
|
58 |
+
json.dump(recipe, f, indent=2, ensure_ascii=False)
|
59 |
+
|
60 |
+
|
61 |
+
def load_style_vectors(model_name: str) -> np.ndarray:
|
62 |
+
return np.load(assets_root / model_name / "style_vectors.npy")
|
63 |
|
64 |
+
|
65 |
+
def save_style_vectors(style_vectors: np.ndarray, model_name: str):
|
66 |
+
np.save(assets_root / model_name / "style_vectors.npy", style_vectors)
|
67 |
+
|
68 |
+
|
69 |
+
def merge_style_usual(
|
70 |
+
model_name_a: str,
|
71 |
+
model_name_b: str,
|
72 |
+
weight: float,
|
73 |
+
output_name: str,
|
74 |
+
style_tuple_list: list[tuple[str, ...]],
|
75 |
+
) -> list[str]:
|
76 |
"""
|
77 |
+
new = (1 - weight) * A + weight * B
|
78 |
style_triple_list: list[(model_aでのスタイル名, model_bでのスタイル名, 出力するスタイル名)]
|
79 |
"""
|
80 |
+
style_vectors_a = load_style_vectors(model_name_a)
|
81 |
+
style_vectors_b = load_style_vectors(model_name_b)
|
82 |
+
config_a = load_config(model_name_a)
|
83 |
+
config_b = load_config(model_name_b)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
84 |
style2id_a = config_a["data"]["style2id"]
|
85 |
style2id_b = config_b["data"]["style2id"]
|
86 |
new_style_vecs = []
|
87 |
new_style2id = {}
|
88 |
+
for style_a, style_b, style_out in style_tuple_list:
|
89 |
if style_a not in style2id_a:
|
90 |
logger.error(f"{style_a} is not in {model_name_a}.")
|
91 |
raise ValueError(f"{style_a} は {model_name_a} にありません。")
|
|
|
99 |
new_style_vecs.append(new_style)
|
100 |
new_style2id[style_out] = len(new_style_vecs) - 1
|
101 |
new_style_vecs = np.array(new_style_vecs)
|
102 |
+
save_style_vectors(new_style_vecs, output_name)
|
103 |
+
|
104 |
+
new_config = config_a.copy()
|
105 |
+
new_config["data"]["num_styles"] = len(new_style2id)
|
106 |
+
new_config["data"]["style2id"] = new_style2id
|
107 |
+
new_config["model_name"] = output_name
|
108 |
+
save_config(new_config, output_name)
|
109 |
+
|
110 |
+
receipe = load_recipe(output_name)
|
111 |
+
receipe["style_tuple_list"] = style_tuple_list
|
112 |
+
save_recipe(receipe, output_name)
|
113 |
+
|
114 |
+
return list(new_style2id.keys())
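
For reference, `style_vectors.npy` holds one 256-dimensional vector per style, and `merge_style_usual` above simply interpolates matching rows. A minimal standalone sketch of the same arithmetic (the style names, shapes, and the `weight` value here are illustrative stand-ins, not the tab's actual data):

```
import numpy as np

# Toy stand-ins for two models' style_vectors.npy (shape: (num_styles, 256))
# and the style -> row maps taken from each model's config.json.
rng = np.random.default_rng(0)
style_vectors_a = rng.normal(size=(2, 256))
style_vectors_b = rng.normal(size=(2, 256))
style2id_a = {"Neutral": 0, "Happy": 1}
style2id_b = {"Neutral": 0, "Surprise": 1}

weight = 0.4  # plays the same role as the 話し方 slider
style_tuple_list = [("Neutral", "Neutral", "Neutral"), ("Happy", "Surprise", "HappySurprise")]

new_vectors = []
new_style2id = {}
for name_a, name_b, name_out in style_tuple_list:
    vec_a = style_vectors_a[style2id_a[name_a]]
    vec_b = style_vectors_b[style2id_b[name_b]]
    new_vectors.append((1 - weight) * vec_a + weight * vec_b)  # new = (1 - w) * A + w * B
    new_style2id[name_out] = len(new_vectors) - 1

new_style_vectors = np.stack(new_vectors)  # would be written as the merged model's style_vectors.npy
```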
|
115 |
+
|
116 |
+
|
117 |
+
def merge_style_add_diff(
|
118 |
+
model_name_a: str,
|
119 |
+
model_name_b: str,
|
120 |
+
model_name_c: str,
|
121 |
+
weight: float,
|
122 |
+
output_name: str,
|
123 |
+
style_tuple_list: list[tuple[str, ...]],
|
124 |
+
) -> list[str]:
|
125 |
+
"""
|
126 |
+
new = A + weight * (B - C)
|
127 |
+
style_tuple_list: list[(model_aでのスタイル名, model_bでのスタイル名, model_cでのスタイル名, 出力するスタイル名)]
|
128 |
+
"""
|
129 |
+
style_vectors_a = load_style_vectors(model_name_a)
|
130 |
+
style_vectors_b = load_style_vectors(model_name_b)
|
131 |
+
style_vectors_c = load_style_vectors(model_name_c)
|
132 |
+
config_a = load_config(model_name_a)
|
133 |
+
config_b = load_config(model_name_b)
|
134 |
+
config_c = load_config(model_name_c)
|
135 |
+
style2id_a = config_a["data"]["style2id"]
|
136 |
+
style2id_b = config_b["data"]["style2id"]
|
137 |
+
style2id_c = config_c["data"]["style2id"]
|
138 |
+
new_style_vecs = []
|
139 |
+
new_style2id = {}
|
140 |
+
for style_a, style_b, style_c, style_out in style_tuple_list:
|
141 |
+
if style_a not in style2id_a:
|
142 |
+
logger.error(f"{style_a} is not in {model_name_a}.")
|
143 |
+
raise ValueError(f"{style_a} は {model_name_a} にありません。")
|
144 |
+
if style_b not in style2id_b:
|
145 |
+
logger.error(f"{style_b} is not in {model_name_b}.")
|
146 |
+
raise ValueError(f"{style_b} は {model_name_b} にありません。")
|
147 |
+
if style_c not in style2id_c:
|
148 |
+
logger.error(f"{style_c} is not in {model_name_c}.")
|
149 |
+
raise ValueError(f"{style_c} は {model_name_c} にありません。")
|
150 |
+
new_style = style_vectors_a[style2id_a[style_a]] + weight * (
|
151 |
+
style_vectors_b[style2id_b[style_b]] - style_vectors_c[style2id_c[style_c]]
|
152 |
+
)
|
153 |
+
new_style_vecs.append(new_style)
|
154 |
+
new_style2id[style_out] = len(new_style_vecs) - 1
|
155 |
+
new_style_vecs = np.array(new_style_vecs)
|
156 |
|
157 |
+
save_style_vectors(new_style_vecs, output_name)
|
|
|
158 |
|
159 |
new_config = config_a.copy()
|
160 |
new_config["data"]["num_styles"] = len(new_style2id)
|
161 |
new_config["data"]["style2id"] = new_style2id
|
162 |
new_config["model_name"] = output_name
|
163 |
+
save_config(new_config, output_name)
|
164 |
+
|
165 |
+
receipe = load_recipe(output_name)
|
166 |
+
receipe["style_tuple_list"] = style_tuple_list
|
167 |
+
save_recipe(receipe, output_name)
|
168 |
+
|
169 |
+
return list(new_style2id.keys())
|
170 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
171 |
|
172 |
+
def merge_style_weighted_sum(
|
173 |
+
model_name_a: str,
|
174 |
+
model_name_b: str,
|
175 |
+
model_name_c: str,
|
176 |
+
model_a_coeff: float,
|
177 |
+
model_b_coeff: float,
|
178 |
+
model_c_coeff: float,
|
179 |
+
output_name: str,
|
180 |
+
style_tuple_list: list[tuple[str, ...]],
|
181 |
+
) -> list[str]:
|
182 |
+
"""
|
183 |
+
new = A * model_a_coeff + B * model_b_coeff + C * model_c_coeff
|
184 |
+
style_tuple_list: list[(model_aでのスタイル名, model_bでのスタイル名, model_cでのスタイル名, 出力するスタイル名)]
|
185 |
+
"""
|
186 |
+
style_vectors_a = load_style_vectors(model_name_a)
|
187 |
+
style_vectors_b = load_style_vectors(model_name_b)
|
188 |
+
style_vectors_c = load_style_vectors(model_name_c)
|
189 |
+
config_a = load_config(model_name_a)
|
190 |
+
config_b = load_config(model_name_b)
|
191 |
+
config_c = load_config(model_name_c)
|
192 |
+
style2id_a = config_a["data"]["style2id"]
|
193 |
+
style2id_b = config_b["data"]["style2id"]
|
194 |
+
style2id_c = config_c["data"]["style2id"]
|
195 |
+
new_style_vecs = []
|
196 |
+
new_style2id = {}
|
197 |
+
for style_a, style_b, style_c, style_out in style_tuple_list:
|
198 |
+
if style_a not in style2id_a:
|
199 |
+
logger.error(f"{style_a} is not in {model_name_a}.")
|
200 |
+
raise ValueError(f"{style_a} は {model_name_a} にありません。")
|
201 |
+
if style_b not in style2id_b:
|
202 |
+
logger.error(f"{style_b} is not in {model_name_b}.")
|
203 |
+
raise ValueError(f"{style_b} は {model_name_b} にありません。")
|
204 |
+
if style_c not in style2id_c:
|
205 |
+
logger.error(f"{style_c} is not in {model_name_c}.")
|
206 |
+
raise ValueError(f"{style_c} は {model_name_c} にありません。")
|
207 |
+
new_style = (
|
208 |
+
style_vectors_a[style2id_a[style_a]] * model_a_coeff
|
209 |
+
+ style_vectors_b[style2id_b[style_b]] * model_b_coeff
|
210 |
+
+ style_vectors_c[style2id_c[style_c]] * model_c_coeff
|
211 |
+
)
|
212 |
+
new_style_vecs.append(new_style)
|
213 |
+
new_style2id[style_out] = len(new_style_vecs) - 1
|
214 |
+
new_style_vecs = np.array(new_style_vecs)
|
215 |
|
216 |
+
save_style_vectors(new_style_vecs, output_name)
|
217 |
|
218 |
+
new_config = config_a.copy()
|
219 |
+
new_config["data"]["num_styles"] = len(new_style2id)
|
220 |
+
new_config["data"]["style2id"] = new_style2id
|
221 |
+
new_config["model_name"] = output_name
|
222 |
+
save_config(new_config, output_name)
|
223 |
+
|
224 |
+
receipe = load_recipe(output_name)
|
225 |
+
receipe["style_tuple_list"] = style_tuple_list
|
226 |
+
save_recipe(receipe, output_name)
|
227 |
+
|
228 |
+
return list(new_style2id.keys())
|
229 |
+
|
230 |
+
|
231 |
+
def merge_style_add_null(
|
232 |
+
model_name_a: str,
|
233 |
+
model_name_b: str,
|
234 |
+
weight: float,
|
235 |
+
output_name: str,
|
236 |
+
style_tuple_list: list[tuple[str, ...]],
|
237 |
+
) -> list[str]:
|
238 |
+
"""
|
239 |
+
new = A + weight * B
|
240 |
+
style_tuple_list: list[(model_aでのスタイル名, model_bでのスタイル名, 出力するスタイル名)]
|
241 |
+
"""
|
242 |
+
style_vectors_a = load_style_vectors(model_name_a)
|
243 |
+
style_vectors_b = load_style_vectors(model_name_b)
|
244 |
+
config_a = load_config(model_name_a)
|
245 |
+
config_b = load_config(model_name_b)
|
246 |
+
style2id_a = config_a["data"]["style2id"]
|
247 |
+
style2id_b = config_b["data"]["style2id"]
|
248 |
+
new_style_vecs = []
|
249 |
+
new_style2id = {}
|
250 |
+
for style_a, style_b, style_out in style_tuple_list:
|
251 |
+
if style_a not in style2id_a:
|
252 |
+
logger.error(f"{style_a} is not in {model_name_a}.")
|
253 |
+
raise ValueError(f"{style_a} は {model_name_a} にありません。")
|
254 |
+
if style_b not in style2id_b:
|
255 |
+
logger.error(f"{style_b} is not in {model_name_b}.")
|
256 |
+
raise ValueError(f"{style_b} は {model_name_b} にありません。")
|
257 |
+
new_style = (
|
258 |
+
style_vectors_a[style2id_a[style_a]]
|
259 |
+
+ weight * style_vectors_b[style2id_b[style_b]]
|
260 |
+
)
|
261 |
+
new_style_vecs.append(new_style)
|
262 |
+
new_style2id[style_out] = len(new_style_vecs) - 1
|
263 |
+
new_style_vecs = np.array(new_style_vecs)
|
264 |
+
|
265 |
+
save_style_vectors(new_style_vecs, output_name)
|
266 |
+
|
267 |
+
new_config = config_a.copy()
|
268 |
+
new_config["data"]["num_styles"] = len(new_style2id)
|
269 |
+
new_config["data"]["style2id"] = new_style2id
|
270 |
+
new_config["model_name"] = output_name
|
271 |
+
save_config(new_config, output_name)
|
272 |
+
|
273 |
+
receipe = load_recipe(output_name)
|
274 |
+
receipe["style_tuple_list"] = style_tuple_list
|
275 |
+
save_recipe(receipe, output_name)
|
276 |
+
|
277 |
+
return list(new_style2id.keys())
|
278 |
+
|
279 |
+
|
280 |
+
def lerp_tensors(t: float, v0: torch.Tensor, v1: torch.Tensor):
|
281 |
return v0 * (1 - t) + v1 * t
|
282 |
|
283 |
|
284 |
+
def slerp_tensors(
|
285 |
+
t: float, v0: torch.Tensor, v1: torch.Tensor, dot_thres: float = 0.998
|
286 |
+
):
|
287 |
device = v0.device
|
288 |
v0c = v0.cpu().numpy()
|
289 |
v1c = v1.cpu().numpy()
|
|
|
302 |
).to(device)
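
The body of `slerp_tensors` is truncated by the hunk above; as a rough reference, spherical linear interpolation over two flattened weight tensors typically looks like the sketch below (a simplified NumPy version under the assumption that nearly parallel vectors fall back to plain lerp, mirroring the `dot_thres` parameter):

```
import numpy as np

def slerp_np(t: float, v0: np.ndarray, v1: np.ndarray, dot_thres: float = 0.998) -> np.ndarray:
    # Cosine of the angle between the two flattened tensors.
    dot = np.sum(v0 * v1) / (np.linalg.norm(v0) * np.linalg.norm(v1))
    if np.abs(dot) > dot_thres:
        # Nearly parallel: ordinary linear interpolation is numerically safer.
        return (1 - t) * v0 + t * v1
    theta = np.arccos(dot)
    sin_theta = np.sin(theta)
    # Interpolate along the great circle between v0 and v1.
    return (np.sin((1 - t) * theta) / sin_theta) * v0 + (np.sin(t * theta) / sin_theta) * v1
```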
|
303 |
|
304 |
|
305 |
+
def merge_models_usual(
|
306 |
+
model_path_a: str,
|
307 |
+
model_path_b: str,
|
308 |
+
voice_weight: float,
|
309 |
+
voice_pitch_weight: float,
|
310 |
+
speech_style_weight: float,
|
311 |
+
tempo_weight: float,
|
312 |
+
output_name: str,
|
313 |
+
use_slerp_instead_of_lerp: bool,
|
314 |
):
|
315 |
+
"""
|
316 |
+
new = (1 - weight) * A + weight * B
|
317 |
+
"""
|
318 |
+
model_a_weight = load_safetensors(model_path_a)
|
319 |
+
model_b_weight = load_safetensors(model_path_b)
|
|
|
|
|
|
|
|
|
|
|
|
|
320 |
|
321 |
merged_model_weight = model_a_weight.copy()
|
322 |
|
323 |
+
for key in model_a_weight:
|
324 |
if any([key.startswith(prefix) for prefix in voice_keys]):
|
325 |
weight = voice_weight
|
326 |
elif any([key.startswith(prefix) for prefix in voice_pitch_keys]):
|
|
|
335 |
slerp_tensors if use_slerp_instead_of_lerp else lerp_tensors
|
336 |
)(weight, model_a_weight[key], model_b_weight[key])
|
337 |
|
338 |
+
merged_model_path = assets_root / output_name / f"{output_name}.safetensors"
|
339 |
+
merged_model_path.parent.mkdir(parents=True, exist_ok=True)
|
340 |
+
save_file(merged_model_weight, merged_model_path)
|
341 |
+
|
342 |
+
receipe = {
|
343 |
+
"method": "usual",
|
344 |
+
"model_a": model_path_a,
|
345 |
+
"model_b": model_path_b,
|
346 |
+
"voice_weight": voice_weight,
|
347 |
+
"voice_pitch_weight": voice_pitch_weight,
|
348 |
+
"speech_style_weight": speech_style_weight,
|
349 |
+
"tempo_weight": tempo_weight,
|
350 |
+
"use_slerp_instead_of_lerp": use_slerp_instead_of_lerp,
|
351 |
+
}
|
352 |
+
save_recipe(receipe, output_name)
|
353 |
+
|
354 |
+
# Merge default Neutral style vectors and save
|
355 |
+
model_name_a = Path(model_path_a).parent.name
|
356 |
+
model_name_b = Path(model_path_b).parent.name
|
357 |
+
style_vectors_a = load_style_vectors(model_name_a)
|
358 |
+
style_vectors_b = load_style_vectors(model_name_b)
|
359 |
+
|
360 |
+
new_config = load_config(model_name_a)
|
361 |
+
new_config["model_name"] = output_name
|
362 |
+
new_config["data"]["num_styles"] = 1
|
363 |
+
new_config["data"]["style2id"] = {DEFAULT_STYLE: 0}
|
364 |
+
save_config(new_config, output_name)
|
365 |
+
|
366 |
+
neutral_vector_a = style_vectors_a[0]
|
367 |
+
neutral_vector_b = style_vectors_b[0]
|
368 |
+
weight = speech_style_weight
|
369 |
+
new_neutral_vector = (1 - weight) * neutral_vector_a + weight * neutral_vector_b
|
370 |
+
new_style_vectors = np.array([new_neutral_vector])
|
371 |
+
save_style_vectors(new_style_vectors, output_name)
|
372 |
+
return merged_model_path
|
373 |
+
|
374 |
+
|
375 |
+
def merge_models_add_diff(
|
376 |
+
model_path_a: str,
|
377 |
+
model_path_b: str,
|
378 |
+
model_path_c: str,
|
379 |
+
voice_weight: float,
|
380 |
+
voice_pitch_weight: float,
|
381 |
+
speech_style_weight: float,
|
382 |
+
tempo_weight: float,
|
383 |
+
output_name: str,
|
384 |
+
):
|
385 |
+
"""
|
386 |
+
new = A + weight * (B - C)
|
387 |
+
"""
|
388 |
+
model_a_weight = load_safetensors(model_path_a)
|
389 |
+
model_b_weight = load_safetensors(model_path_b)
|
390 |
+
model_c_weight = load_safetensors(model_path_c)
|
391 |
+
|
392 |
+
merged_model_weight = model_a_weight.copy()
|
393 |
+
|
394 |
+
for key in model_a_weight:
|
395 |
+
if any([key.startswith(prefix) for prefix in voice_keys]):
|
396 |
+
weight = voice_weight
|
397 |
+
elif any([key.startswith(prefix) for prefix in voice_pitch_keys]):
|
398 |
+
weight = voice_pitch_weight
|
399 |
+
elif any([key.startswith(prefix) for prefix in speech_style_keys]):
|
400 |
+
weight = speech_style_weight
|
401 |
+
elif any([key.startswith(prefix) for prefix in tempo_keys]):
|
402 |
+
weight = tempo_weight
|
403 |
+
else:
|
404 |
+
continue
|
405 |
+
merged_model_weight[key] = model_a_weight[key] + weight * (
|
406 |
+
model_b_weight[key] - model_c_weight[key]
|
407 |
+
)
|
408 |
+
|
409 |
+
merged_model_path = assets_root / output_name / f"{output_name}.safetensors"
|
410 |
+
merged_model_path.parent.mkdir(parents=True, exist_ok=True)
|
411 |
+
save_file(merged_model_weight, merged_model_path)
|
412 |
+
|
413 |
+
info = {
|
414 |
+
"method": "add_diff",
|
415 |
+
"model_a": model_path_a,
|
416 |
+
"model_b": model_path_b,
|
417 |
+
"model_c": model_path_c,
|
418 |
+
"voice_weight": voice_weight,
|
419 |
+
"voice_pitch_weight": voice_pitch_weight,
|
420 |
+
"speech_style_weight": speech_style_weight,
|
421 |
+
"tempo_weight": tempo_weight,
|
422 |
+
}
|
423 |
+
with open(assets_root / output_name / "recipe.json", "w", encoding="utf-8") as f:
|
424 |
+
json.dump(info, f, indent=2, ensure_ascii=False)
|
425 |
+
|
426 |
+
# Default style merge only using Neutral style
|
427 |
+
model_name_a = Path(model_path_a).parent.name
|
428 |
+
model_name_b = Path(model_path_b).parent.name
|
429 |
+
model_name_c = Path(model_path_c).parent.name
|
430 |
+
|
431 |
+
style_vectors_a = np.load(
|
432 |
+
assets_root / model_name_a / "style_vectors.npy"
|
433 |
+
) # (style_num_a, 256)
|
434 |
+
style_vectors_b = np.load(
|
435 |
+
assets_root / model_name_b / "style_vectors.npy"
|
436 |
+
) # (style_num_b, 256)
|
437 |
+
style_vectors_c = np.load(
|
438 |
+
assets_root / model_name_c / "style_vectors.npy"
|
439 |
+
) # (style_num_c, 256)
|
440 |
+
with open(assets_root / model_name_a / "config.json", encoding="utf-8") as f:
|
441 |
+
new_config = json.load(f)
|
442 |
+
|
443 |
+
new_config["model_name"] = output_name
|
444 |
+
new_config["data"]["num_styles"] = 1
|
445 |
+
new_config["data"]["style2id"] = {DEFAULT_STYLE: 0}
|
446 |
+
with open(assets_root / output_name / "config.json", "w", encoding="utf-8") as f:
|
447 |
+
json.dump(new_config, f, indent=2, ensure_ascii=False)
|
448 |
+
|
449 |
+
neutral_vector_a = style_vectors_a[0]
|
450 |
+
neutral_vector_b = style_vectors_b[0]
|
451 |
+
neutral_vector_c = style_vectors_c[0]
|
452 |
+
weight = speech_style_weight
|
453 |
+
new_neutral_vector = neutral_vector_a + weight * (
|
454 |
+
neutral_vector_b - neutral_vector_c
|
455 |
+
)
|
456 |
+
new_style_vectors = np.array([new_neutral_vector])
|
457 |
+
new_style_path = assets_root / output_name / "style_vectors.npy"
|
458 |
+
np.save(new_style_path, new_style_vectors)
|
459 |
+
return merged_model_path
|
460 |
+
|
461 |
+
|
462 |
+
def merge_models_weighted_sum(
|
463 |
+
model_path_a: str,
|
464 |
+
model_path_b: str,
|
465 |
+
model_path_c: str,
|
466 |
+
model_a_coeff: float,
|
467 |
+
model_b_coeff: float,
|
468 |
+
model_c_coeff: float,
|
469 |
+
output_name: str,
|
470 |
+
):
|
471 |
+
model_a_weight = load_safetensors(model_path_a)
|
472 |
+
model_b_weight = load_safetensors(model_path_b)
|
473 |
+
model_c_weight = load_safetensors(model_path_c)
|
474 |
+
|
475 |
+
merged_model_weight = model_a_weight.copy()
|
476 |
+
|
477 |
+
for key in model_a_weight:
|
478 |
+
merged_model_weight[key] = (
|
479 |
+
model_a_coeff * model_a_weight[key]
|
480 |
+
+ model_b_coeff * model_b_weight[key]
|
481 |
+
+ model_c_coeff * model_c_weight[key]
|
482 |
+
)
|
483 |
+
|
484 |
+
merged_model_path = assets_root / output_name / f"{output_name}.safetensors"
|
485 |
+
merged_model_path.parent.mkdir(parents=True, exist_ok=True)
|
486 |
+
save_file(merged_model_weight, merged_model_path)
|
487 |
+
|
488 |
+
info = {
|
489 |
+
"method": "weighted_sum",
|
490 |
+
"model_a": model_path_a,
|
491 |
+
"model_b": model_path_b,
|
492 |
+
"model_c": model_path_c,
|
493 |
+
"model_a_coeff": model_a_coeff,
|
494 |
+
"model_b_coeff": model_b_coeff,
|
495 |
+
"model_c_coeff": model_c_coeff,
|
496 |
+
}
|
497 |
+
with open(assets_root / output_name / "recipe.json", "w", encoding="utf-8") as f:
|
498 |
+
json.dump(info, f, indent=2, ensure_ascii=False)
|
499 |
+
|
500 |
+
# Default style merge only using Neutral style
|
501 |
+
model_name_a = Path(model_path_a).parent.name
|
502 |
+
model_name_b = Path(model_path_b).parent.name
|
503 |
+
model_name_c = Path(model_path_c).parent.name
|
504 |
+
|
505 |
+
style_vectors_a = np.load(
|
506 |
+
assets_root / model_name_a / "style_vectors.npy"
|
507 |
+
) # (style_num_a, 256)
|
508 |
+
style_vectors_b = np.load(
|
509 |
+
assets_root / model_name_b / "style_vectors.npy"
|
510 |
+
) # (style_num_b, 256)
|
511 |
+
style_vectors_c = np.load(
|
512 |
+
assets_root / model_name_c / "style_vectors.npy"
|
513 |
+
) # (style_num_c, 256)
|
514 |
+
|
515 |
+
with open(assets_root / model_name_a / "config.json", encoding="utf-8") as f:
|
516 |
+
new_config = json.load(f)
|
517 |
+
|
518 |
+
new_config["model_name"] = output_name
|
519 |
+
new_config["data"]["num_styles"] = 1
|
520 |
+
new_config["data"]["style2id"] = {DEFAULT_STYLE: 0}
|
521 |
+
with open(assets_root / output_name / "config.json", "w", encoding="utf-8") as f:
|
522 |
+
json.dump(new_config, f, indent=2, ensure_ascii=False)
|
523 |
+
|
524 |
+
neutral_vector_a = style_vectors_a[0]
|
525 |
+
neutral_vector_b = style_vectors_b[0]
|
526 |
+
neutral_vector_c = style_vectors_c[0]
|
527 |
+
new_neutral_vector = (
|
528 |
+
model_a_coeff * neutral_vector_a
|
529 |
+
+ model_b_coeff * neutral_vector_b
|
530 |
+
+ model_c_coeff * neutral_vector_c
|
531 |
)
|
532 |
+
new_style_vectors = np.array([new_neutral_vector])
|
533 |
+
new_style_path = assets_root / output_name / "style_vectors.npy"
|
534 |
+
np.save(new_style_path, new_style_vectors)
|
535 |
+
return merged_model_path
|
536 |
+
|
537 |
+
|
538 |
+
def merge_models_add_null(
|
539 |
+
model_path_a: str,
|
540 |
+
model_path_b: str,
|
541 |
+
voice_weight: float,
|
542 |
+
voice_pitch_weight: float,
|
543 |
+
speech_style_weight: float,
|
544 |
+
tempo_weight: float,
|
545 |
+
output_name: str,
|
546 |
+
):
|
547 |
+
model_a_weight = load_safetensors(model_path_a)
|
548 |
+
model_b_weight = load_safetensors(model_path_b)
|
549 |
+
|
550 |
+
merged_model_weight = model_a_weight.copy()
|
551 |
+
|
552 |
+
for key in model_a_weight:
|
553 |
+
if any([key.startswith(prefix) for prefix in voice_keys]):
|
554 |
+
weight = voice_weight
|
555 |
+
elif any([key.startswith(prefix) for prefix in voice_pitch_keys]):
|
556 |
+
weight = voice_pitch_weight
|
557 |
+
elif any([key.startswith(prefix) for prefix in speech_style_keys]):
|
558 |
+
weight = speech_style_weight
|
559 |
+
elif any([key.startswith(prefix) for prefix in tempo_keys]):
|
560 |
+
weight = tempo_weight
|
561 |
+
else:
|
562 |
+
continue
|
563 |
+
merged_model_weight[key] = model_a_weight[key] + weight * model_b_weight[key]
|
564 |
+
|
565 |
+
merged_model_path = assets_root / output_name / f"{output_name}.safetensors"
|
566 |
+
merged_model_path.parent.mkdir(parents=True, exist_ok=True)
|
567 |
save_file(merged_model_weight, merged_model_path)
|
568 |
|
569 |
info = {
|
570 |
+
"method": "add_null",
|
571 |
"model_a": model_path_a,
|
572 |
"model_b": model_path_b,
|
573 |
"voice_weight": voice_weight,
|
|
|
575 |
"speech_style_weight": speech_style_weight,
|
576 |
"tempo_weight": tempo_weight,
|
577 |
}
|
578 |
+
with open(assets_root / output_name / "recipe.json", "w", encoding="utf-8") as f:
|
|
|
|
|
579 |
json.dump(info, f, indent=2, ensure_ascii=False)
|
580 |
+
|
581 |
+
# Default style merge only using Neutral style
|
582 |
+
model_name_a = Path(model_path_a).parent.name
|
583 |
+
model_name_b = Path(model_path_b).parent.name
|
584 |
+
|
585 |
+
style_vectors_a = np.load(
|
586 |
+
assets_root / model_name_a / "style_vectors.npy"
|
587 |
+
) # (style_num_a, 256)
|
588 |
+
style_vectors_b = np.load(
|
589 |
+
assets_root / model_name_b / "style_vectors.npy"
|
590 |
+
) # (style_num_b, 256)
|
591 |
+
with open(assets_root / model_name_a / "config.json", encoding="utf-8") as f:
|
592 |
+
new_config = json.load(f)
|
593 |
+
|
594 |
+
new_config["model_name"] = output_name
|
595 |
+
new_config["data"]["num_styles"] = 1
|
596 |
+
new_config["data"]["style2id"] = {DEFAULT_STYLE: 0}
|
597 |
+
with open(assets_root / output_name / "config.json", "w", encoding="utf-8") as f:
|
598 |
+
json.dump(new_config, f, indent=2, ensure_ascii=False)
|
599 |
+
|
600 |
+
neutral_vector_a = style_vectors_a[0]
|
601 |
+
neutral_vector_b = style_vectors_b[0]
|
602 |
+
weight = speech_style_weight
|
603 |
+
new_neutral_vector = neutral_vector_a + weight * neutral_vector_b
|
604 |
+
new_style_vectors = np.array([new_neutral_vector])
|
605 |
+
new_style_path = assets_root / output_name / "style_vectors.npy"
|
606 |
+
np.save(new_style_path, new_style_vectors)
|
607 |
return merged_model_path
|
608 |
|
609 |
|
610 |
def merge_models_gr(
|
611 |
+
model_path_a: str,
|
612 |
+
model_path_b: str,
|
613 |
+
model_path_c: str,
|
614 |
+
model_a_coeff: float,
|
615 |
+
model_b_coeff: float,
|
616 |
+
model_c_coeff: float,
|
617 |
+
method: str,
|
618 |
+
output_name: str,
|
619 |
+
voice_weight: float,
|
620 |
+
voice_pitch_weight: float,
|
621 |
+
speech_style_weight: float,
|
622 |
+
tempo_weight: float,
|
623 |
+
use_slerp_instead_of_lerp: bool,
|
624 |
):
|
625 |
if output_name == "":
|
626 |
return "Error: 新しいモデル名を入力してください。"
|
627 |
+
assert method in [
|
628 |
+
"usual",
|
629 |
+
"add_diff",
|
630 |
+
"weighted_sum",
|
631 |
+
"add_null",
|
632 |
+
], f"Invalid method: {method}"
|
633 |
+
model_a_name = Path(model_path_a).parent.name
|
634 |
+
model_b_name = Path(model_path_b).parent.name
|
635 |
+
model_c_name = Path(model_path_c).parent.name
|
636 |
+
if method == "usual":
|
637 |
+
if output_name in [model_a_name, model_b_name]:
|
638 |
+
return "Error: マージ元のモデル名と同じ名前は使用できません。", None
|
639 |
+
merged_model_path = merge_models_usual(
|
640 |
+
model_path_a,
|
641 |
+
model_path_b,
|
642 |
+
voice_weight,
|
643 |
+
voice_pitch_weight,
|
644 |
+
speech_style_weight,
|
645 |
+
tempo_weight,
|
646 |
+
output_name,
|
647 |
+
use_slerp_instead_of_lerp,
|
648 |
+
)
|
649 |
+
elif method == "add_diff":
|
650 |
+
if output_name in [model_a_name, model_b_name, model_c_name]:
|
651 |
+
return "Error: マージ元のモデル名と同じ名前は使用できません。", None
|
652 |
+
merged_model_path = merge_models_add_diff(
|
653 |
+
model_path_a,
|
654 |
+
model_path_b,
|
655 |
+
model_path_c,
|
656 |
+
voice_weight,
|
657 |
+
voice_pitch_weight,
|
658 |
+
speech_style_weight,
|
659 |
+
tempo_weight,
|
660 |
+
output_name,
|
661 |
+
)
|
662 |
+
elif method == "weighted_sum":
|
663 |
+
if output_name in [model_a_name, model_b_name, model_c_name]:
|
664 |
+
return "Error: マージ元のモデル名と同じ名前は使用できません。", None
|
665 |
+
merged_model_path = merge_models_weighted_sum(
|
666 |
+
model_path_a,
|
667 |
+
model_path_b,
|
668 |
+
model_path_c,
|
669 |
+
model_a_coeff,
|
670 |
+
model_b_coeff,
|
671 |
+
model_c_coeff,
|
672 |
+
output_name,
|
673 |
+
)
|
674 |
+
else: # add_null
|
675 |
+
if output_name in [model_a_name, model_b_name]:
|
676 |
+
return "Error: マージ元のモデル名と同じ名前は使用できません。", None
|
677 |
+
merged_model_path = merge_models_add_null(
|
678 |
+
model_path_a,
|
679 |
+
model_path_b,
|
680 |
+
voice_weight,
|
681 |
+
voice_pitch_weight,
|
682 |
+
speech_style_weight,
|
683 |
+
tempo_weight,
|
684 |
+
output_name,
|
685 |
+
)
|
686 |
+
return f"Success: モデルを{merged_model_path}に保存しました。", gr.Dropdown(
|
687 |
+
choices=[DEFAULT_STYLE], value=DEFAULT_STYLE
|
688 |
+
)
|
689 |
+
|
690 |
+
|
691 |
+
def merge_style_usual_gr(
|
692 |
+
model_name_a: str,
|
693 |
+
model_name_b: str,
|
694 |
+
weight: float,
|
695 |
+
output_name: str,
|
696 |
+
style_tuple_list: list[tuple[str, ...]],
|
697 |
+
):
|
698 |
+
if output_name == "":
|
699 |
+
return "Error: 新しいモデル名を入力してください。", None
|
700 |
+
new_styles = merge_style_usual(
|
701 |
+
model_name_a,
|
702 |
+
model_name_b,
|
703 |
+
weight,
|
704 |
+
output_name,
|
705 |
+
style_tuple_list,
|
706 |
+
)
|
707 |
+
return f"Success: {output_name}のスタイルを保存しました。", gr.Dropdown(
|
708 |
+
choices=new_styles, value=new_styles[0]
|
709 |
+
)
|
710 |
+
|
711 |
+
|
712 |
+
def merge_style_add_diff_gr(
|
713 |
+
model_name_a: str,
|
714 |
+
model_name_b: str,
|
715 |
+
model_name_c: str,
|
716 |
+
weight: float,
|
717 |
+
output_name: str,
|
718 |
+
style_tuple_list: list[tuple[str, ...]],
|
719 |
+
):
|
720 |
+
if output_name == "":
|
721 |
+
return "Error: 新しいモデル名を入力してください。", None
|
722 |
+
new_styles = merge_style_add_diff(
|
723 |
+
model_name_a,
|
724 |
+
model_name_b,
|
725 |
+
model_name_c,
|
726 |
+
weight,
|
727 |
output_name,
|
728 |
+
style_tuple_list,
|
729 |
+
)
|
730 |
+
return f"Success: {output_name}のスタイルを保存しました。", gr.Dropdown(
|
731 |
+
choices=new_styles, value=new_styles[0]
|
732 |
)
|
|
|
733 |
|
734 |
|
735 |
+
def merge_style_weighted_sum_gr(
|
736 |
+
model_name_a: str,
|
737 |
+
model_name_b: str,
|
738 |
+
model_name_c: str,
|
739 |
+
model_a_coeff: float,
|
740 |
+
model_b_coeff: float,
|
741 |
+
model_c_coeff: float,
|
742 |
+
output_name: str,
|
743 |
+
style_tuple_list: list[tuple[str, ...]],
|
744 |
):
|
745 |
if output_name == "":
|
746 |
return "Error: 新しいモデル名を入力してください。", None
|
747 |
+
new_styles = merge_style_weighted_sum(
|
748 |
+
model_name_a,
|
749 |
+
model_name_b,
|
750 |
+
model_name_c,
|
751 |
+
model_a_coeff,
|
752 |
+
model_b_coeff,
|
753 |
+
model_c_coeff,
|
754 |
+
output_name,
|
755 |
+
style_tuple_list,
|
756 |
+
)
|
757 |
+
return f"Success: {output_name}のスタイルを保存しました。", gr.Dropdown(
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
758 |
choices=new_styles, value=new_styles[0]
|
759 |
)
|
760 |
|
761 |
|
762 |
+
def merge_style_add_null_gr(
|
763 |
+
model_name_a: str,
|
764 |
+
model_name_b: str,
|
765 |
+
weight: float,
|
766 |
+
output_name: str,
|
767 |
+
style_tuple_list: list[tuple[str, ...]],
|
768 |
+
):
|
769 |
+
if output_name == "":
|
770 |
+
return "Error: 新しいモデル名を入力してください。", None
|
771 |
+
new_styles = merge_style_add_null(
|
772 |
+
model_name_a,
|
773 |
+
model_name_b,
|
774 |
+
weight,
|
775 |
+
output_name,
|
776 |
+
style_tuple_list,
|
777 |
+
)
|
778 |
+
return f"Success: {output_name}のスタイルを保存しました。", gr.Dropdown(
|
779 |
+
choices=new_styles, value=new_styles[0]
|
780 |
+
)
|
781 |
+
|
782 |
|
783 |
+
def simple_tts(
|
784 |
+
model_name: str, text: str, style: str = DEFAULT_STYLE, style_weight: float = 1.0
|
785 |
+
):
|
786 |
+
if model_name == "":
|
787 |
+
return "Error: モデル名を入力してください。", None
|
788 |
+
model_path = assets_root / model_name / f"{model_name}.safetensors"
|
789 |
+
config_path = assets_root / model_name / "config.json"
|
790 |
+
style_vec_path = assets_root / model_name / "style_vectors.npy"
|
791 |
|
792 |
+
model = TTSModel(model_path, config_path, style_vec_path, device)
|
793 |
|
794 |
+
return (
|
795 |
+
"Success: 音声を生成しました。",
|
796 |
+
model.infer(text, style=style, style_weight=style_weight),
|
797 |
+
)
|
798 |
+
|
799 |
+
|
800 |
+
def update_three_model_names_dropdown(model_holder: TTSModelHolder):
|
801 |
new_names, new_files, _ = model_holder.update_model_names_for_gradio()
|
802 |
+
return new_names, new_files, new_names, new_files, new_names, new_files
|
803 |
+
|
804 |
+
|
805 |
+
def get_styles(model_name: str):
|
806 |
+
config_path = assets_root / model_name / "config.json"
|
807 |
+
with open(config_path, encoding="utf-8") as f:
|
808 |
+
config = json.load(f)
|
809 |
+
styles = list(config["data"]["style2id"].keys())
|
810 |
+
return styles
|
811 |
|
812 |
|
813 |
+
def get_triple_styles(model_name_a: str, model_name_b: str, model_name_c: str):
|
814 |
+
return get_styles(model_name_a), get_styles(model_name_b), get_styles(model_name_c)
|
815 |
+
|
816 |
+
|
817 |
+
def load_styles_gr(model_name_a: str, model_name_b: str):
|
818 |
+
config_path_a = assets_root / model_name_a / "config.json"
|
819 |
+
with open(config_path_a, encoding="utf-8") as f:
|
820 |
config_a = json.load(f)
|
821 |
styles_a = list(config_a["data"]["style2id"].keys())
|
822 |
|
823 |
+
config_path_b = assets_root / model_name_b / "config.json"
|
824 |
+
with open(config_path_b, encoding="utf-8") as f:
|
825 |
config_b = json.load(f)
|
826 |
styles_b = list(config_b["data"]["style2id"].keys())
|
827 |
|
|
|
843 |
initial_md = """
|
844 |
## 使い方
|
845 |
|
846 |
+
### マージ方法の選択
|
847 |
+
|
848 |
+
マージの方法には4つの方法があります。
|
849 |
+
- 通常のマージ `new = (1 - weight) * A + weight * B`: AとBのモデルを指定して、要素ごとに比率を指定して混ぜる
|
850 |
+
- 単純にAとBの二人の話し方や声音を混ぜたいとき
|
851 |
+
- 差分マージ `new = A + weight * (B - C)`: AとBとCのモデルを指定して、「Bの要素からCの要素を引いたもの」をAに足す
|
852 |
+
- 例えば、Bが「Cと同じ人だけど囁いているモデル」とすると、`B - C`は「囁きを表すベクトル」だと思えるので、それをAに足すことで、Aの声のままで囁き声を出すモデルができたりする
|
853 |
+
- 他にも活用例はいろいろありそう
|
854 |
+
- 重み付き和 `new = a * A + b * B + c * C`: AとBとCのモデルを指定して、各モデルの係数を指定して混ぜる
|
855 |
+
- 例えば`new = A - B` としておくと、結果としてできたモデルを別のモデルと「ヌルモデルの加算」で使うことで、差分マージが実現できる
|
856 |
+
- 他にも何らかの活用法があるかもしれない
|
857 |
+
- ヌルモデルの加算 `new = A + weight * B`: AとBのモデルを指定して、Bのモデルに要素ごとに比率をかけたものをAに足す
|
858 |
+
- Bのモデルは重み付き和などで `C - D` などとして作っている場合を想定している
|
859 |
+
- 他にも何らかの活用法があるかもしれない
|
860 |
+
|
861 |
+
|
862 |
+
### マージの手順
|
863 |
+
|
864 |
+
1. マージ元のモデルたちを選択(`model_assets`フォルダの中から選ばれます)
|
865 |
+
2. マージ後のモデルの名前を入力
|
866 |
+
3. 指示に従って重みや係数を入力
|
867 |
+
4. 「モデルファイルのマージ」ボタンを押す (safetensorsファイルがマージされる)
|
868 |
+
5. 結果を簡易音声合成で確認
|
869 |
+
6. 必要に応じてスタイルベクトルのマージを行う
|
870 |
|
871 |
以上でマージは完了で、`model_assets/マージ後のモデル名`にマージ後のモデルが保存され、音声合成のときに使えます。
|
872 |
|
|
|
875 |
一番下にマージしたモデルによる簡易的な音声合成機能もつけています。
|
876 |
|
877 |
## 注意
|
878 |
+
|
879 |
+
- 1.x系と2.x-JP-Extraのモデルマージは失敗するようです。
|
880 |
+
- 話者数が違うモデル同士はおそらくマージできません。
|
881 |
"""
|
882 |
|
883 |
style_merge_md = f"""
|
884 |
+
## 3. スタイルベクトルのマージ
|
885 |
+
|
886 |
+
1. マージ後のモデルにいくつスタイルを追加したいかを「作りたいスタイル数」で指定
|
887 |
+
2. マージ前のモデルのスタイルを「各モデルのスタイルを取得」ボタンで取得
|
888 |
+
3. どのスタイルたちから新しいスタイルを作るかを下の欄で入力
|
889 |
+
4. 「スタイルのマージ」をクリック
|
890 |
+
|
891 |
+
### スタイルベクトルの混ぜられ方
|
892 |
|
893 |
+
- 構造上の相性の関係で、スタイルベクトルを混ぜる重みは、加重和以外の場合は、上の「話し方」と同じ比率で混ぜられます。例えば「話し方」が0のときはモデルAのみしか使われません。
|
894 |
+
- 加重和の場合は、AとBとCの係数によって混ぜられます。
|
895 |
+
"""
|
896 |
+
|
897 |
+
usual_md = """
|
898 |
+
`weight` を下の各スライダーで定める数値とすると、各要素ごとに、
|
899 |
```
|
900 |
+
new_model = (1 - weight) * A + weight * B
|
|
|
901 |
```
|
902 |
+
としてマージされます。
|
903 |
+
|
904 |
+
つまり、`weight = 0` のときはモデルA、`weight = 1` のときはモデルBになります。
|
|
|
|
|
|
|
|
|
|
|
905 |
"""
|
906 |
|
907 |
+
add_diff_md = """
|
908 |
+
`weight` を下の各スライダーで定める数値とすると、各要素ごとに、
|
909 |
+
```
|
910 |
+
new_model = A + weight * (B - C)
|
911 |
+
```
|
912 |
+
としてマージされます。
|
913 |
+
|
914 |
+
通常のマージと違い、**重みを1にしてもAの要素はそのまま保たれます**。
|
915 |
+
"""
|
916 |
+
|
917 |
+
weighted_sum_md = """
|
918 |
+
モデルの係数をそれぞれ `a`, `b`, `c` とすると、 **全要素に対して**、
|
919 |
+
```
|
920 |
+
new_model = a * A + b * B + c * C
|
921 |
+
```
|
922 |
+
としてマージされます。
|
923 |
+
|
924 |
+
## TIPS
|
925 |
+
|
926 |
+
- A, B, C が全て通常モデルで、通常モデルを作りたい場合は、`a + b + c = 1`となるようにするのがよいと思います。
|
927 |
+
- `a + b + c = 0` とすると(たとえば `A - B`)、話者性を持たないヌルモデルを作ることができ、「ヌルモデルとの和」で結果を使うことが出来ます(差分マージの材料などに)
|
928 |
+
- 他にも、`a = 0.5, b = c = 0`などでモデルAを謎に小さくしたり大きくしたり負にしたりできるので、実験に使ってください。
|
929 |
+
"""
|
930 |
+
|
931 |
+
add_null_md = """
|
932 |
+
「ヌルモデル」を、いくつかのモデルの加重和であってその係数の和が0であるようなものとします(例えば `C - D` など)。
|
933 |
+
|
934 |
+
そうして作ったヌルモデルBと通常モデルAに対して、`weight` を下の各スライダーで定める数値とすると、各要素ごとに、
|
935 |
+
```
|
936 |
+
new_model = A + weight * B
|
937 |
+
```
|
938 |
+
としてマージされます。
|
939 |
+
|
940 |
+
通常のマージと違い、**重みを1にしてもAの要素はそのまま保たれます**。
|
941 |
+
|
942 |
+
実際にはヌルモデルでないBに対しても使えますが、その場合はおそらく音声が正常に生成されないモデルができる気がします。が、もしかしたら何かに使えるかもしれません。
|
943 |
+
|
944 |
+
囁きについて実験的に作ったヌルモデルを[こちら](https://huggingface.co/litagin/sbv2_null_models)に置いています。これを `B` に使うことで、任意のモデルを囁きモデルにある程度は変換できます。
|
945 |
+
"""
|
946 |
+
|
947 |
+
tts_md = f"""
|
948 |
+
## 2. 結果のテスト
|
949 |
+
|
950 |
+
マージ後のモデルで音声合成を行います。ただし、デフォルトではスタイルは`{DEFAULT_STYLE}`しか使えないので、他のスタイルを使いたい場合は、下の「スタイルベクトルのマージ」を行ってください。
|
951 |
+
"""
|
952 |
+
|
953 |
+
|
954 |
+
def method_change(x: str):
|
955 |
+
assert x in [
|
956 |
+
"usual",
|
957 |
+
"add_diff",
|
958 |
+
"weighted_sum",
|
959 |
+
"add_null",
|
960 |
+
], f"Invalid method: {x}"
|
961 |
+
# model_desc, c_col, model_a_coeff, model_b_coeff, model_c_coeff, weight_row, use_slerp_instead_of_lerp
|
962 |
+
if x == "usual":
|
963 |
+
return (
|
964 |
+
gr.Markdown(usual_md),
|
965 |
+
gr.Column(visible=False),
|
966 |
+
gr.Number(visible=False),
|
967 |
+
gr.Number(visible=False),
|
968 |
+
gr.Number(visible=False),
|
969 |
+
gr.Row(visible=True),
|
970 |
+
gr.Checkbox(visible=True),
|
971 |
+
)
|
972 |
+
elif x == "add_diff":
|
973 |
+
return (
|
974 |
+
gr.Markdown(add_diff_md),
|
975 |
+
gr.Column(visible=True),
|
976 |
+
gr.Number(visible=False),
|
977 |
+
gr.Number(visible=False),
|
978 |
+
gr.Number(visible=False),
|
979 |
+
gr.Row(visible=True),
|
980 |
+
gr.Checkbox(visible=False),
|
981 |
+
)
|
982 |
+
elif x == "add_null":
|
983 |
+
return (
|
984 |
+
gr.Markdown(add_null_md),
|
985 |
+
gr.Column(visible=False),
|
986 |
+
gr.Number(visible=False),
|
987 |
+
gr.Number(visible=False),
|
988 |
+
gr.Number(visible=False),
|
989 |
+
gr.Row(visible=True),
|
990 |
+
gr.Checkbox(visible=False),
|
991 |
+
)
|
992 |
+
else: # weighted_sum
|
993 |
+
return (
|
994 |
+
gr.Markdown(weighted_sum_md),
|
995 |
+
gr.Column(visible=True),
|
996 |
+
gr.Number(visible=True),
|
997 |
+
gr.Number(visible=True),
|
998 |
+
gr.Number(visible=True),
|
999 |
+
gr.Row(visible=False),
|
1000 |
+
gr.Checkbox(visible=False),
|
1001 |
+
)
|
1002 |
+
|
1003 |
|
1004 |
def create_merge_app(model_holder: TTSModelHolder) -> gr.Blocks:
|
1005 |
model_names = model_holder.model_names
|
|
|
1013 |
)
|
1014 |
return app
|
1015 |
initial_id = 0
|
1016 |
+
initial_model_files = [
|
1017 |
+
str(f) for f in model_holder.model_files_dict[model_names[initial_id]]
|
1018 |
+
]
|
1019 |
|
1020 |
with gr.Blocks(theme=GRADIO_THEME) as app:
|
1021 |
gr.Markdown(
|
1022 |
+
"複数のStyle-Bert-VITS2モデルから、声質・話し方・話す速さを取り替えたり混ぜたり引いたりして新しいモデルを作成できます。"
|
1023 |
)
|
1024 |
with gr.Accordion(label="使い方", open=False):
|
1025 |
gr.Markdown(initial_md)
|
1026 |
+
method = gr.Radio(
|
1027 |
+
label="マージ方法",
|
1028 |
+
choices=[
|
1029 |
+
("通常マージ", "usual"),
|
1030 |
+
("差分マージ", "add_diff"),
|
1031 |
+
("加重和", "weighted_sum"),
|
1032 |
+
("ヌルモデルマージ", "add_null"),
|
1033 |
+
],
|
1034 |
+
value="usual",
|
1035 |
+
)
|
1036 |
with gr.Row():
|
1037 |
with gr.Column(scale=3):
|
1038 |
model_name_a = gr.Dropdown(
|
|
|
1045 |
choices=initial_model_files,
|
1046 |
value=initial_model_files[0],
|
1047 |
)
|
1048 |
+
model_a_coeff = gr.Number(
|
1049 |
+
label="モデルAの係数",
|
1050 |
+
value=1.0,
|
1051 |
+
step=0.1,
|
1052 |
+
visible=False,
|
1053 |
+
)
|
1054 |
with gr.Column(scale=3):
|
1055 |
model_name_b = gr.Dropdown(
|
1056 |
label="モデルB",
|
|
|
1062 |
choices=initial_model_files,
|
1063 |
value=initial_model_files[0],
|
1064 |
)
|
1065 |
+
model_b_coeff = gr.Number(
|
1066 |
+
label="モデルBの係数",
|
1067 |
+
value=-1.0,
|
1068 |
+
step=0.1,
|
1069 |
+
visible=False,
|
1070 |
+
)
|
1071 |
+
with gr.Column(scale=3, visible=False) as c_col:
|
1072 |
+
model_name_c = gr.Dropdown(
|
1073 |
+
label="モデルC",
|
1074 |
+
choices=model_names,
|
1075 |
+
value=model_names[initial_id],
|
1076 |
+
)
|
1077 |
+
model_path_c = gr.Dropdown(
|
1078 |
+
label="モデルファイル",
|
1079 |
+
choices=initial_model_files,
|
1080 |
+
value=initial_model_files[0],
|
1081 |
+
)
|
1082 |
+
model_c_coeff = gr.Number(
|
1083 |
+
label="モデルCの係数",
|
1084 |
+
value=0.0,
|
1085 |
+
step=0.1,
|
1086 |
+
visible=False,
|
1087 |
+
)
|
1088 |
refresh_button = gr.Button("更新", scale=1, visible=True)
|
1089 |
+
method_desc = gr.Markdown(usual_md)
|
1090 |
with gr.Column(variant="panel"):
|
1091 |
new_name = gr.Textbox(label="新しいモデル名", placeholder="new_model")
|
1092 |
+
with gr.Row() as weight_row:
|
1093 |
voice_slider = gr.Slider(
|
1094 |
label="声質",
|
1095 |
value=0,
|
|
|
1121 |
use_slerp_instead_of_lerp = gr.Checkbox(
|
1122 |
label="線形補完のかわりに球面線形補完を使う",
|
1123 |
value=False,
|
1124 |
+
visible=True,
|
1125 |
)
|
1126 |
+
with gr.Column(variant="panel"):
|
1127 |
+
gr.Markdown("## 1. モデルファイル (safetensors) のマージ")
|
1128 |
+
with gr.Row():
|
1129 |
model_merge_button = gr.Button(
|
1130 |
"モデルファイルのマージ", variant="primary"
|
1131 |
)
|
1132 |
info_model_merge = gr.Textbox(label="情報")
|
1133 |
+
with gr.Column(variant="panel"):
|
1134 |
+
gr.Markdown(tts_md)
|
1135 |
+
text_input = gr.TextArea(
|
1136 |
+
label="テキスト", value="これはテストです。聞こえていますか?"
|
1137 |
+
)
|
1138 |
+
with gr.Row():
|
1139 |
+
with gr.Column():
|
1140 |
+
style = gr.Dropdown(
|
1141 |
+
label="スタイル",
|
1142 |
+
choices=[DEFAULT_STYLE],
|
1143 |
+
value=DEFAULT_STYLE,
|
1144 |
+
)
|
1145 |
+
emotion_weight = gr.Slider(
|
1146 |
+
minimum=0,
|
1147 |
+
maximum=50,
|
1148 |
+
value=1,
|
1149 |
+
step=0.1,
|
1150 |
+
label="スタイルの強さ",
|
1151 |
+
)
|
1152 |
+
tts_button = gr.Button("音声合成", variant="primary")
|
1153 |
+
tts_info = gr.Textbox(label="情報")
|
1154 |
+
audio_output = gr.Audio(label="結果")
|
1155 |
+
with gr.Column(variant="panel"):
|
1156 |
+
gr.Markdown(style_merge_md)
|
1157 |
+
style_a_list = gr.State([DEFAULT_STYLE])
|
1158 |
+
style_b_list = gr.State([DEFAULT_STYLE])
|
1159 |
+
style_c_list = gr.State([DEFAULT_STYLE])
|
1160 |
+
gr.Markdown("Hello world!")
|
1161 |
+
with gr.Row():
|
1162 |
+
style_count = gr.Number(label="作るスタイルの数", value=1, step=1)
|
1163 |
|
1164 |
+
get_style_btn = gr.Button("各モデルのスタイルを取得", variant="primary")
|
1165 |
+
get_style_btn.click(
|
1166 |
+
get_triple_styles,
|
1167 |
+
inputs=[model_name_a, model_name_b, model_name_c],
|
1168 |
+
outputs=[style_a_list, style_b_list, style_c_list],
|
1169 |
+
)
|
1170 |
+
|
1171 |
+
def join_names(*args):
|
1172 |
+
if all(arg == DEFAULT_STYLE for arg in args):
|
1173 |
+
return DEFAULT_STYLE
|
1174 |
+
return "_".join(args)
|
1175 |
+
|
1176 |
+
@gr.render(
|
1177 |
+
inputs=[
|
1178 |
+
style_count,
|
1179 |
+
style_a_list,
|
1180 |
+
style_b_list,
|
1181 |
+
style_c_list,
|
1182 |
+
method,
|
1183 |
+
]
|
1184 |
+
)
|
1185 |
+
def render_style(
|
1186 |
+
style_count, style_a_list, style_b_list, style_c_list, method
|
1187 |
+
):
|
1188 |
+
a_components = []
|
1189 |
+
b_components = []
|
1190 |
+
c_components = []
|
1191 |
+
out_components = []
|
1192 |
+
if method in ["usual", "add_null"]:
|
1193 |
+
for i in range(style_count):
|
1194 |
+
with gr.Row():
|
1195 |
+
style_a = gr.Dropdown(
|
1196 |
+
label="モデルAのスタイル名",
|
1197 |
+
key=f"style_a_{i}",
|
1198 |
+
choices=style_a_list,
|
1199 |
+
value=DEFAULT_STYLE,
|
1200 |
+
interactive=i != 0,
|
1201 |
+
)
|
1202 |
+
style_b = gr.Dropdown(
|
1203 |
+
label="モデルBのスタイル名",
|
1204 |
+
key=f"style_b_{i}",
|
1205 |
+
choices=style_b_list,
|
1206 |
+
value=DEFAULT_STYLE,
|
1207 |
+
interactive=i != 0,
|
1208 |
+
)
|
1209 |
+
style_out = gr.Textbox(
|
1210 |
+
label="出力スタイル名",
|
1211 |
+
key=f"style_out_{i}",
|
1212 |
+
value=DEFAULT_STYLE,
|
1213 |
+
interactive=i != 0,
|
1214 |
+
)
|
1215 |
+
style_a.change(
|
1216 |
+
join_names,
|
1217 |
+
inputs=[style_a, style_b],
|
1218 |
+
outputs=[style_out],
|
1219 |
+
)
|
1220 |
+
style_b.change(
|
1221 |
+
join_names,
|
1222 |
+
inputs=[style_a, style_b],
|
1223 |
+
outputs=[style_out],
|
1224 |
+
)
|
1225 |
+
a_components.append(style_a)
|
1226 |
+
b_components.append(style_b)
|
1227 |
+
out_components.append(style_out)
|
1228 |
+
if method == "usual":
|
1229 |
+
|
1230 |
+
def _merge_usual(data):
|
1231 |
+
style_tuple_list = [
|
1232 |
+
(data[a], data[b], data[out])
|
1233 |
+
for a, b, out in zip(
|
1234 |
+
a_components, b_components, out_components
|
1235 |
+
)
|
1236 |
+
]
|
1237 |
+
return merge_style_usual_gr(
|
1238 |
+
data[model_name_a],
|
1239 |
+
data[model_name_b],
|
1240 |
+
data[speech_style_slider],
|
1241 |
+
data[new_name],
|
1242 |
+
style_tuple_list,
|
1243 |
+
)
|
1244 |
+
|
1245 |
+
style_merge_btn.click(
|
1246 |
+
_merge_usual,
|
1247 |
+
inputs=set(
|
1248 |
+
a_components
|
1249 |
+
+ b_components
|
1250 |
+
+ out_components
|
1251 |
+
+ [
|
1252 |
+
model_name_a,
|
1253 |
+
model_name_b,
|
1254 |
+
speech_style_slider,
|
1255 |
+
new_name,
|
1256 |
+
]
|
1257 |
+
),
|
1258 |
+
outputs=[info_style_merge, style],
|
1259 |
+
)
|
1260 |
+
else: # add_null
|
1261 |
+
|
1262 |
+
def _merge_add_null(data):
|
1263 |
+
print("Method is add_null")
|
1264 |
+
style_tuple_list = [
|
1265 |
+
(data[a], data[b], data[out])
|
1266 |
+
for a, b, out in zip(
|
1267 |
+
a_components, b_components, out_components
|
1268 |
+
)
|
1269 |
+
]
|
1270 |
+
return merge_style_add_null_gr(
|
1271 |
+
data[model_name_a],
|
1272 |
+
data[model_name_b],
|
1273 |
+
data[speech_style_slider],
|
1274 |
+
data[new_name],
|
1275 |
+
style_tuple_list,
|
1276 |
+
)
|
1277 |
+
|
1278 |
+
style_merge_btn.click(
|
1279 |
+
_merge_add_null,
|
1280 |
+
inputs=set(
|
1281 |
+
a_components
|
1282 |
+
+ b_components
|
1283 |
+
+ out_components
|
1284 |
+
+ [
|
1285 |
+
model_name_a,
|
1286 |
+
model_name_b,
|
1287 |
+
speech_style_slider,
|
1288 |
+
new_name,
|
1289 |
+
]
|
1290 |
+
),
|
1291 |
+
outputs=[info_style_merge, style],
|
1292 |
+
)
|
1293 |
+
|
1294 |
+
elif method in ["add_diff", "weighted_sum"]:
|
1295 |
+
for i in range(style_count):
|
1296 |
+
with gr.Row():
|
1297 |
+
style_a = gr.Dropdown(
|
1298 |
+
label="モデルAのスタイル名",
|
1299 |
+
key=f"style_a_{i}",
|
1300 |
+
choices=style_a_list,
|
1301 |
+
value=DEFAULT_STYLE,
|
1302 |
+
interactive=i != 0,
|
1303 |
+
)
|
1304 |
+
style_b = gr.Dropdown(
|
1305 |
+
label="モデルBのスタイル名",
|
1306 |
+
key=f"style_b_{i}",
|
1307 |
+
choices=style_b_list,
|
1308 |
+
value=DEFAULT_STYLE,
|
1309 |
+
interactive=i != 0,
|
1310 |
+
)
|
1311 |
+
style_c = gr.Dropdown(
|
1312 |
+
label="モデルCのスタイル名",
|
1313 |
+
key=f"style_c_{i}",
|
1314 |
+
choices=style_c_list,
|
1315 |
+
value=DEFAULT_STYLE,
|
1316 |
+
interactive=i != 0,
|
1317 |
+
)
|
1318 |
+
style_out = gr.Textbox(
|
1319 |
+
label="出力スタイル名",
|
1320 |
+
key=f"style_out_{i}",
|
1321 |
+
value=DEFAULT_STYLE,
|
1322 |
+
interactive=i != 0,
|
1323 |
+
)
|
1324 |
+
style_a.change(
|
1325 |
+
join_names,
|
1326 |
+
inputs=[style_a, style_b, style_c],
|
1327 |
+
outputs=[style_out],
|
1328 |
+
)
|
1329 |
+
style_b.change(
|
1330 |
+
join_names,
|
1331 |
+
inputs=[style_a, style_b, style_c],
|
1332 |
+
outputs=[style_out],
|
1333 |
+
)
|
1334 |
+
style_c.change(
|
1335 |
+
join_names,
|
1336 |
+
inputs=[style_a, style_b, style_c],
|
1337 |
+
outputs=[style_out],
|
1338 |
+
)
|
1339 |
|
1340 |
+
a_components.append(style_a)
|
1341 |
+
b_components.append(style_b)
|
1342 |
+
c_components.append(style_c)
|
1343 |
+
out_components.append(style_out)
|
1344 |
+
if method == "add_diff":
|
1345 |
+
|
1346 |
+
def _merge_add_diff(data):
|
1347 |
+
style_tuple_list = [
|
1348 |
+
(data[a], data[b], data[c], data[out])
|
1349 |
+
for a, b, c, out in zip(
|
1350 |
+
a_components,
|
1351 |
+
b_components,
|
1352 |
+
c_components,
|
1353 |
+
out_components,
|
1354 |
+
)
|
1355 |
+
]
|
1356 |
+
return merge_style_add_diff_gr(
|
1357 |
+
data[model_name_a],
|
1358 |
+
data[model_name_b],
|
1359 |
+
data[model_name_c],
|
1360 |
+
data[speech_style_slider],
|
1361 |
+
data[new_name],
|
1362 |
+
style_tuple_list,
|
1363 |
+
)
|
1364 |
+
|
1365 |
+
style_merge_btn.click(
|
1366 |
+
_merge_add_diff,
|
1367 |
+
inputs=set(
|
1368 |
+
a_components
|
1369 |
+
+ b_components
|
1370 |
+
+ c_components
|
1371 |
+
+ out_components
|
1372 |
+
+ [
|
1373 |
+
model_name_a,
|
1374 |
+
model_name_b,
|
1375 |
+
model_name_c,
|
1376 |
+
speech_style_slider,
|
1377 |
+
new_name,
|
1378 |
+
]
|
1379 |
+
),
|
1380 |
+
outputs=[info_style_merge, style],
|
1381 |
+
)
|
1382 |
+
else: # weighted_sum
|
1383 |
+
|
1384 |
+
def _merge_weighted_sum(data):
|
1385 |
+
style_tuple_list = [
|
1386 |
+
(data[a], data[b], data[c], data[out])
|
1387 |
+
for a, b, c, out in zip(
|
1388 |
+
a_components,
|
1389 |
+
b_components,
|
1390 |
+
c_components,
|
1391 |
+
out_components,
|
1392 |
+
)
|
1393 |
+
]
|
1394 |
+
return merge_style_weighted_sum_gr(
|
1395 |
+
data[model_name_a],
|
1396 |
+
data[model_name_b],
|
1397 |
+
data[model_name_c],
|
1398 |
+
data[model_a_coeff],
|
1399 |
+
data[model_b_coeff],
|
1400 |
+
data[model_c_coeff],
|
1401 |
+
data[new_name],
|
1402 |
+
style_tuple_list,
|
1403 |
+
)
|
1404 |
+
|
1405 |
+
style_merge_btn.click(
|
1406 |
+
_merge_weighted_sum,
|
1407 |
+
inputs=set(
|
1408 |
+
a_components
|
1409 |
+
+ b_components
|
1410 |
+
+ c_components
|
1411 |
+
+ out_components
|
1412 |
+
+ [
|
1413 |
+
model_name_a,
|
1414 |
+
model_name_b,
|
1415 |
+
model_name_c,
|
1416 |
+
model_a_coeff,
|
1417 |
+
model_b_coeff,
|
1418 |
+
model_c_coeff,
|
1419 |
+
new_name,
|
1420 |
+
]
|
1421 |
+
),
|
1422 |
+
outputs=[info_style_merge, style],
|
1423 |
+
)
|
1424 |
+
|
1425 |
+
with gr.Row():
|
1426 |
+
add_btn = gr.Button("スタイルを増やす")
|
1427 |
+
del_btn = gr.Button("スタイルを減らす")
|
1428 |
+
add_btn.click(
|
1429 |
+
lambda x: x + 1,
|
1430 |
+
inputs=[style_count],
|
1431 |
+
outputs=[style_count],
|
1432 |
+
)
|
1433 |
+
del_btn.click(
|
1434 |
+
lambda x: x - 1 if x > 1 else 1,
|
1435 |
+
inputs=[style_count],
|
1436 |
+
outputs=[style_count],
|
1437 |
+
)
|
1438 |
+
style_merge_btn = gr.Button("スタイルのマージ", variant="primary")
|
1439 |
+
|
1440 |
+
info_style_merge = gr.Textbox(label="情報")
|
1441 |
+
|
1442 |
+
method.change(
|
1443 |
+
method_change,
|
1444 |
+
inputs=[method],
|
1445 |
+
outputs=[
|
1446 |
+
method_desc,
|
1447 |
+
c_col,
|
1448 |
+
model_a_coeff,
|
1449 |
+
model_b_coeff,
|
1450 |
+
model_c_coeff,
|
1451 |
+
weight_row,
|
1452 |
+
use_slerp_instead_of_lerp,
|
1453 |
+
],
|
1454 |
+
)
|
1455 |
model_name_a.change(
|
1456 |
model_holder.update_model_files_for_gradio,
|
1457 |
inputs=[model_name_a],
|
|
|
1462 |
inputs=[model_name_b],
|
1463 |
outputs=[model_path_b],
|
1464 |
)
|
1465 |
+
model_name_c.change(
|
1466 |
+
model_holder.update_model_files_for_gradio,
|
1467 |
+
inputs=[model_name_c],
|
1468 |
+
outputs=[model_path_c],
|
1469 |
)
|
1470 |
|
1471 |
+
refresh_button.click(
|
1472 |
+
lambda: update_three_model_names_dropdown(model_holder),
|
1473 |
+
outputs=[
|
1474 |
+
model_name_a,
|
1475 |
+
model_path_a,
|
1476 |
+
model_name_b,
|
1477 |
+
model_path_b,
|
1478 |
+
model_name_c,
|
1479 |
+
model_path_c,
|
1480 |
+
],
|
1481 |
)
|
1482 |
|
1483 |
model_merge_button.click(
|
1484 |
merge_models_gr,
|
1485 |
inputs=[
|
|
|
1486 |
model_path_a,
|
|
|
1487 |
model_path_b,
|
1488 |
+
model_path_c,
|
1489 |
+
model_a_coeff,
|
1490 |
+
model_b_coeff,
|
1491 |
+
model_c_coeff,
|
1492 |
+
method,
|
1493 |
new_name,
|
1494 |
voice_slider,
|
1495 |
voice_pitch_slider,
|
|
|
1497 |
tempo_slider,
|
1498 |
use_slerp_instead_of_lerp,
|
1499 |
],
|
1500 |
+
outputs=[info_model_merge, style],
|
1501 |
)
|
1502 |
|
1503 |
+
# style_merge_button.click(
|
1504 |
+
# merge_style_gr,
|
1505 |
+
# inputs=[
|
1506 |
+
# model_name_a,
|
1507 |
+
# model_name_b,
|
1508 |
+
# model_name_c,
|
1509 |
+
# method,
|
1510 |
+
# speech_style_slider,
|
1511 |
+
# new_name,
|
1512 |
+
# style_triple_list,
|
1513 |
+
# ],
|
1514 |
+
# outputs=[info_style_merge, style],
|
1515 |
+
# )
|
1516 |
|
1517 |
tts_button.click(
|
1518 |
simple_tts,
|
1519 |
inputs=[new_name, text_input, style, emotion_weight],
|
1520 |
+
outputs=[tts_info, audio_output],
|
1521 |
)
|
1522 |
|
1523 |
return app
|
1524 |
+
|
1525 |
+
|
1526 |
+
if __name__ == "__main__":
|
1527 |
+
model_holder = TTSModelHolder(
|
1528 |
+
assets_root, device="cuda" if torch.cuda.is_available() else "cpu"
|
1529 |
+
)
|
1530 |
+
app = create_merge_app(model_holder)
|
1531 |
+
app.launch(inbrowser=True)
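
As the `__main__` block shows, this tab can be launched standalone; the merged assets can also be loaded directly with `TTSModel`, following the pattern of `simple_tts` above. A minimal usage sketch (the model name and device choice are illustrative):

```
from pathlib import Path

import torch

from style_bert_vits2.constants import DEFAULT_STYLE
from style_bert_vits2.tts_model import TTSModel

assets_root = Path("model_assets")
model_name = "new_model"  # the name entered as 新しいモデル名 in the tab

model = TTSModel(
    assets_root / model_name / f"{model_name}.safetensors",
    assets_root / model_name / "config.json",
    assets_root / model_name / "style_vectors.npy",
    "cuda" if torch.cuda.is_available() else "cpu",
)
# Same call as simple_tts: returns the audio that gr.Audio displays in the tab.
result = model.infer("これはテストです。聞こえていますか?", style=DEFAULT_STYLE, style_weight=1.0)
```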
|
gradio_tabs/style_vectors.py
CHANGED
@@ -1,26 +1,29 @@
+"""
+TODO:
+importが重いので、WebUI全般が重くなっている。どうにかしたい。
+"""
+
 import json
-import os
 import shutil
 from pathlib import Path

 import gradio as gr
 import matplotlib.pyplot as plt
 import numpy as np
-import yaml
 from scipy.spatial.distance import pdist, squareform
 from sklearn.cluster import DBSCAN, AgglomerativeClustering, KMeans
 from sklearn.manifold import TSNE
 from umap import UMAP

+from config import get_path_config
+from default_style import save_styles_by_dirs
 from style_bert_vits2.constants import DEFAULT_STYLE, GRADIO_THEME
 from style_bert_vits2.logging import logger

+
+path_config = get_path_config()
+dataset_root = path_config.dataset_root
+assets_root = path_config.assets_root
-# assets_root = path_config["assets_root"]

 MAX_CLUSTER_NUM = 10
 MAX_AUDIO_NUM = 10
@@ -38,11 +41,7 @@

 def load(model_name: str, reduction_method: str):
     global wav_files, x, x_reduced, mean
-    # wavs_dir = os.path.join(dataset_root, model_name, "wavs")
     wavs_dir = dataset_root / model_name / "wavs"
-    # style_vector_files = [
-    #     os.path.join(wavs_dir, f) for f in os.listdir(wavs_dir) if f.endswith(".npy")
-    # ]
     style_vector_files = [f for f in wavs_dir.rglob("*.npy") if f.is_file()]
     # foo.wav.npy -> foo.wav
     wav_files = [f.with_suffix("") for f in style_vector_files]
@@ -142,7 +141,7 @@ def do_dbscan_gradio(eps=2.5, min_samples=15):
     )
     plt.legend()

-    n_clusters = max(y_pred) + 1
+    n_clusters = int(max(y_pred) + 1)

     if n_clusters > MAX_CLUSTER_NUM:
         # raise ValueError(f"The number of clusters is too large: {n_clusters}")
@@ -169,7 +168,7 @@ def representative_wav_files_gradio(cluster_id, num_files=1):
     closest_indices = representative_wav_files(cluster_id, num_files)
     actual_num_files = len(closest_indices)  # ファイル数が少ないときのため
     return [
-        gr.Audio(wav_files[i], visible=True, label=wav_files[i])
+        gr.Audio(wav_files[i], visible=True, label=str(wav_files[i]))
         for i in closest_indices
     ] + [gr.update(visible=False)] * (MAX_AUDIO_NUM - actual_num_files)

@@ -195,21 +194,21 @@ def do_clustering_gradio(n_clusters=4, method="KMeans"):
 ] * MAX_AUDIO_NUM


-def save_style_vectors_from_clustering(model_name, style_names_str: str):
+def save_style_vectors_from_clustering(model_name: str, style_names_str: str):
     """centerとcentroidsを保存する"""
+    result_dir = assets_root / model_name
+    result_dir.mkdir(parents=True, exist_ok=True)
     style_vectors = np.stack([mean] + centroids)
+    style_vector_path = result_dir / "style_vectors.npy"
+    if style_vector_path.exists():
         logger.info(f"Backup {style_vector_path} to {style_vector_path}.bak")
         shutil.copy(style_vector_path, f"{style_vector_path}.bak")
     np.save(style_vector_path, style_vectors)
     logger.success(f"Saved style vectors to {style_vector_path}")

     # config.jsonの更新
+    config_path = result_dir / "config.json"
+    if not config_path.exists():
         return f"{config_path}が存在しません。"
     style_names = [name.strip() for name in style_names_str.split(",")]
     style_name_list = [DEFAULT_STYLE] + style_names
@@ -220,7 +219,7 @@ def save_style_vectors_from_clustering(model_name, style_names_str: str):

     logger.info(f"Backup {config_path} to {config_path}.bak")
     shutil.copy(config_path, f"{config_path}.bak")
+    with open(config_path, encoding="utf-8") as f:
         json_dict = json.load(f)
     json_dict["data"]["num_styles"] = len(style_name_list)
     style_dict = {name: i for i, name in enumerate(style_name_list)}
@@ -232,7 +231,7 @@ def save_style_vectors_from_clustering(model_name, style_names_str: str):


 def save_style_vectors_from_files(
-    model_name, audio_files_str: str, style_names_str: str
+    model_name: str, audio_files_str: str, style_names_str: str
 ):
     """音声ファイルからスタイルベクトルを作成して保存する"""
     global mean
@@ -240,8 +239,8 @@ def save_style_vectors_from_files(
         return "Error: スタイルベクトルを読み込んでください。"
     mean = np.mean(x, axis=0)

+    result_dir = assets_root / model_name
+    result_dir.mkdir(parents=True, exist_ok=True)
     audio_files = [name.strip() for name in audio_files_str.split(",")]
     style_names = [name.strip() for name in style_names_str.split(",")]
     if len(audio_files) != len(style_names):
@@ -251,28 +250,28 @@ def save_style_vectors_from_files(
         return "スタイル名が重複しています。"
     style_vectors = [mean]

+    wavs_dir = dataset_root / model_name / "wavs"
     for audio_file in audio_files:
+        path = wavs_dir / audio_file
+        if not path.exists():
             return f"{path}が存在しません。"
         style_vectors.append(np.load(f"{path}.npy"))
     style_vectors = np.stack(style_vectors)
     assert len(style_name_list) == len(style_vectors)
+    style_vector_path = result_dir / "style_vectors.npy"
+    if style_vector_path.exists():
         logger.info(f"Backup {style_vector_path} to {style_vector_path}.bak")
         shutil.copy(style_vector_path, f"{style_vector_path}.bak")
     np.save(style_vector_path, style_vectors)

     # config.jsonの更新
+    config_path = result_dir / "config.json"
+    if not config_path.exists():
         return f"{config_path}が存在しません。"
     logger.info(f"Backup {config_path} to {config_path}.bak")
     shutil.copy(config_path, f"{config_path}.bak")

+    with open(config_path, encoding="utf-8") as f:
         json_dict = json.load(f)
     json_dict["data"]["num_styles"] = len(style_name_list)
     style_dict = {name: i for i, name in enumerate(style_name_list)}
@@ -283,20 +282,107 @@ def save_style_vectors_from_files(
     return f"成功!\n{style_vector_path}に保存し{config_path}を更新しました。"


+def save_style_vectors_by_dirs(model_name: str, audio_dir_str: str):
+    if model_name == "":
+        return "モデル名を入力してください。"
+    if audio_dir_str == "":
+        return "音声ファイルが入っているディレクトリを入力してください。"
+
+    from concurrent.futures import ThreadPoolExecutor
+    from multiprocessing import cpu_count
+
+    from tqdm import tqdm
+
+    from style_bert_vits2.utils.stdout_wrapper import SAFE_STDOUT
+    from style_gen import save_style_vector
+
+    # First generate style vectors for each audio file
+
+    audio_dir = Path(audio_dir_str)
+    audio_suffixes = [".wav", ".flac", ".mp3", ".ogg", ".opus", ".m4a"]
+    audio_files = [f for f in audio_dir.rglob("*") if f.suffix in audio_suffixes]
+
+    def process(file: Path):
+        # f: `test.wav` -> search `test.wav.npy`
+        if (file.with_name(file.name + ".npy")).exists():
+            return file, None
+        try:
+            save_style_vector(str(file))
+        except Exception as e:
+            return file, e
+        return file, None
+
+    with ThreadPoolExecutor(max_workers=cpu_count() // 2) as executor:
+        _ = list(
+            tqdm(
+                executor.map(
+                    process,
+                    audio_files,
+                ),
+                total=len(audio_files),
+                file=SAFE_STDOUT,
+                desc="Generating style vectors",
+            )
+        )

+    result_dir = assets_root / model_name
+    config_path = result_dir / "config.json"
+    if not config_path.exists():
+        return f"{config_path}が存在しません。"
+    logger.info(f"Backup {config_path} to {config_path}.bak")
+    shutil.copy(config_path, f"{config_path}.bak")

+    style_vector_path = result_dir / "style_vectors.npy"
+    if style_vector_path.exists():
+        logger.info(f"Backup {style_vector_path} to {style_vector_path}.bak")
+        shutil.copy(style_vector_path, f"{style_vector_path}.bak")
+    save_styles_by_dirs(
+        wav_dir=audio_dir,
+        output_dir=result_dir,
+        config_path=config_path,
+        config_output_path=config_path,
+    )
+    return f"成功!\n{result_dir}にスタイルベクトルを保存しました。"
+
+
+how_to_md = f"""
+Style-Bert-VITS2でこまかくスタイルを指定して音声合成するには、モデルごとにスタイルベクトルのファイル`style_vectors.npy`を作成する必要があります。
+
+ただし、学習の過程では自動的に、平均スタイル「{DEFAULT_STYLE}」と、(**Ver 2.5.0以降からは**)音声をサブフォルダに分けていた場合はそのサブフォルダごとのスタイルが保存されています。

 ## 方法

+- 方法0: 音声を作りたいスタイルごとのサブフォルダに分け、そのフォルダごとにスタイルベクトルを作成
 - 方法1: 音声ファイルを自動でスタイル別に分け、その各スタイルの平均を取って保存
 - 方法2: スタイルを代表する音声ファイルを手動で選んで、その音声のスタイルベクトルを保存
 - 方法3: 自分でもっと頑張ってこだわって作る(JVNVコーパスなど、もともとスタイルラベル等が利用可能な場合はこれがよいかも)
 """

+method0 = """
+音声をスタイルごとにサブフォルダを作り、その中に音声ファイルを入れてください。
+
+**注意**
+
+- Ver 2.5.0以降では、`inputs/`フォルダや`raw/`フォルダにサブディレクトリに分けて音声ファイルを入れるだけで、スタイルベクトルが自動で作成されるので、この手順は不要です。
+- それ未満のバージョンで学習したモデルに新しくスタイルベクトルをつけたい場合や、学習に使ったのとは別の音声でスタイルベクトルを作成したい場合に使います。
+- 学習との整合性のため、もし**現在学習中や、今後学習する予定がある場合は**、音声ファイルは、`Data/{モデル名}/wavs`フォルダではなく**新しい別のディレクトリに保存してください**。
+
+例:
+
+```bash
+audio_dir
+├── style1
+│   ├── audio1.wav
+│   ├── audio2.wav
+│   └── ...
+├── style2
+│   ├── audio1.wav
+│   ├── audio2.wav
+│   └── ...
+└── ...
+```
+"""
+
 method1 = f"""
 学習の時に取り出したスタイルベクトルを読み込んで、可視化を見ながらスタイルを分けていきます。

@@ -332,138 +418,168 @@ def create_style_vectors_app():
     with gr.Blocks(theme=GRADIO_THEME) as app:
         with gr.Accordion("使い方", open=False):
             gr.Markdown(how_to_md)
+        model_name = gr.Textbox(placeholder="your_model_name", label="モデル名")
+        with gr.Tab("方法0: サブフォルダごとにスタイルベクトルを作成"):
+            gr.Markdown(method0)
+            audio_dir = gr.Textbox(
+                placeholder="path/to/audio_dir",
+                label="音声が入っているフォルダ",
+                info="音声ファイルをスタイルごとにサブフォルダに分けて保存してください。",
             )
+            method0_btn = gr.Button("スタイルベクトルを作成", variant="primary")
+            method0_info = gr.Textbox(label="結果")
+            method0_btn.click(
+                save_style_vectors_by_dirs,
+                inputs=[model_name, audio_dir],
+                outputs=[method0_info],
             )
+        with gr.Tab("その他の方法"):
             with gr.Row():
+                reduction_method = gr.Radio(
+                    choices=["UMAP", "t-SNE"],
+                    label="次元削減方法",
+                    info="v 1.3以前はt-SNEでしたがUMAPのほうがよい可能性もあります。",
+                    value="UMAP",
                 )
+                load_button = gr.Button("スタイルベクトルを読み込む", variant="primary")
+            output = gr.Plot(label="音声スタイルの可視化")
+            load_button.click(
+                load, inputs=[model_name, reduction_method], outputs=[output]
             )
+            with gr.Tab("方法1: スタイル分けを自動で行う"):
+                with gr.Tab("スタイル分け1"):
+                    n_clusters = gr.Slider(
+                        minimum=2,
+                        maximum=10,
+                        step=1,
+                        value=4,
+                        label="作るスタイルの数(平均スタイルを除く)",
+                        info="上の図を見ながらスタイルの数を試行錯誤してください。",
+                    )
+                    c_method = gr.Radio(
+                        choices=[
+                            "Agglomerative after reduction",
+                            "KMeans after reduction",
+                            "Agglomerative",
+                            "KMeans",
+                        ],
+                        label="アルゴリズム",
+                        info="分類する(クラスタリング)アルゴリズムを選択します。いろいろ試してみてください。",
+                        value="Agglomerative after reduction",
+                    )
+                    c_button = gr.Button("スタイル分けを実行")
+                with gr.Tab("スタイル分け2: DBSCAN"):
+                    gr.Markdown(dbscan_md)
+                    eps = gr.Slider(
+                        minimum=0.1,
+                        maximum=10,
+                        step=0.01,
+                        value=0.3,
+                        label="eps",
+                    )
+                    min_samples = gr.Slider(
+                        minimum=1,
+                        maximum=50,
+                        step=1,
+                        value=15,
+                        label="min_samples",
+                    )
+                    with gr.Row():
+                        dbscan_button = gr.Button("スタイル分けを実行")
+                        num_styles_result = gr.Textbox(label="スタイル数")
+                gr.Markdown("スタイル分けの結果")
+                gr.Markdown(
+                    "注意: もともと256次元なものをを2次元に落としているので、正確なベクトルの位置関係ではありません。"
                 )
+                with gr.Row():
+                    gr_plot = gr.Plot()
+                    with gr.Column():
+                        with gr.Row():
+                            cluster_index = gr.Slider(
+                                minimum=1,
+                                maximum=MAX_CLUSTER_NUM,
+                                step=1,
+                                value=1,
+                                label="スタイル番号",
+                                info="選択したスタイルの代表音声を表示します。",
+                            )
+                            num_files = gr.Slider(
+                                minimum=1,
+                                maximum=MAX_AUDIO_NUM,
+                                step=1,
+                                value=5,
+                                label="代表音声の数をいくつ表示するか",
+                            )
+                            get_audios_button = gr.Button("代表音声を取得")
+                        with gr.Row():
+                            audio_list = []
+                            for i in range(MAX_AUDIO_NUM):
+                                audio_list.append(
+                                    gr.Audio(visible=False, show_label=True)
+                                )
+                    c_button.click(
+                        do_clustering_gradio,
+                        inputs=[n_clusters, c_method],
+                        outputs=[gr_plot, cluster_index] + audio_list,
+                    )
+                    dbscan_button.click(
+                        do_dbscan_gradio,
+                        inputs=[eps, min_samples],
+                        outputs=[gr_plot, cluster_index, num_styles_result]
+                        + audio_list,
+                    )
+                get_audios_button.click(
+                    representative_wav_files_gradio,
+                    inputs=[cluster_index, num_files],
+                    outputs=audio_list,
+                )
+                gr.Markdown("結果が良さそうなら、これを保存します。")
+                style_names = gr.Textbox(
+                    "Angry, Sad, Happy",
+                    label="スタイルの名前",
+                    info=f"スタイルの名前を`,`で区切って入力してください(日本語可)。例: `Angry, Sad, Happy`や`怒り, 悲しみ, 喜び`など。平均音声は{DEFAULT_STYLE}として自動的に保存されます。",
                 )
+                with gr.Row():
+                    save_button1 = gr.Button(
+                        "スタイルベクトルを保存", variant="primary"
+                    )
+                    info2 = gr.Textbox(label="保存結果")
+
+                save_button1.click(
+                    save_style_vectors_from_clustering,
+                    inputs=[model_name, style_names],
                    outputs=[info2],
                )
+            with gr.Tab("方法2: 手動でスタイルを選ぶ"):
+                gr.Markdown(
+                    "下のテキスト欄に、各スタイルの代表音声のファイル名を`,`区切りで、その横に対応するスタイル名を`,`区切りで入力してください。"
+                )
+                gr.Markdown("例: `angry.wav, sad.wav, happy.wav`と`Angry, Sad, Happy`")
+                gr.Markdown(
+                    f"注意: {DEFAULT_STYLE}スタイルは自動的に保存されます、手動では{DEFAULT_STYLE}という名前のスタイルは指定しないでください。"
+                )
+                with gr.Row():
+                    audio_files_text = gr.Textbox(
+                        label="音声ファイル名",
+                        placeholder="angry.wav, sad.wav, happy.wav",
+                    )
+                    style_names_text = gr.Textbox(
+                        label="スタイル名", placeholder="Angry, Sad, Happy"
+                    )
+                with gr.Row():
+                    save_button2 = gr.Button(
+                        "スタイルベクトルを保存", variant="primary"
+                    )
+                    info2 = gr.Textbox(label="保存結果")
+                save_button2.click(
+                    save_style_vectors_from_files,
+                    inputs=[model_name, audio_files_text, style_names_text],
+                    outputs=[info2],
+                )

     return app
+
+
+if __name__ == "__main__":
+    app = create_style_vectors_app()
+    app.launch(inbrowser=True)
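As a reference for the new "方法0" flow, a minimal sketch of building per-subfolder style vectors without the WebUI: `save_style_vector`, `save_styles_by_dirs`, and their parameters are taken from the diff above, while the model name and directory paths are placeholders and assumed to already exist.

```python
# Sketch: first create a <clip>.wav.npy style vector for every clip,
# then aggregate them per subfolder (style1/, style2/, ...) into
# style_vectors.npy, mirroring save_style_vectors_by_dirs() above.
from pathlib import Path

from config import get_path_config
from default_style import save_styles_by_dirs
from style_gen import save_style_vector

audio_dir = Path("path/to/audio_dir")        # placeholder; one subfolder per style
for wav in audio_dir.rglob("*.wav"):
    save_style_vector(str(wav))              # writes <name>.wav.npy next to the clip

result_dir = get_path_config().assets_root / "your_model_name"  # placeholder
config_path = result_dir / "config.json"     # must already exist for this model
save_styles_by_dirs(
    wav_dir=audio_dir,
    output_dir=result_dir,                   # style_vectors.npy is written here
    config_path=config_path,
    config_output_path=config_path,
)
```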
gradio_tabs/train.py
CHANGED
@@ -1,11 +1,11 @@
 import json
-import os
 import shutil
 import socket
 import subprocess
 import sys
 import time
 import webbrowser
+from dataclasses import dataclass
 from datetime import datetime
 from multiprocessing import cpu_count
 from pathlib import Path
@@ -13,6 +13,8 @@ from pathlib import Path
 import gradio as gr
 import yaml

+from config import get_path_config
+from style_bert_vits2.constants import GRADIO_THEME
 from style_bert_vits2.logging import logger
 from style_bert_vits2.utils.stdout_wrapper import SAFE_STDOUT
 from style_bert_vits2.utils.subprocess import run_script_with_log, second_elem_of
@@ -21,20 +23,27 @@
 logger_handler = None
 tensorboard_executed = False

-    path_config: dict[str, str] = yaml.safe_load(f.read())
-dataset_root = Path(path_config["dataset_root"])
+path_config = get_path_config()
+dataset_root = path_config.dataset_root


+@dataclass
+class PathsForPreprocess:
+    dataset_path: Path
+    esd_path: Path
+    train_path: Path
+    val_path: Path
+    config_path: Path
+
+
+def get_path(model_name: str) -> PathsForPreprocess:
     assert model_name != "", "モデル名は空にできません"
     dataset_path = dataset_root / model_name
+    esd_path = dataset_path / "esd.list"
     train_path = dataset_path / "train.list"
     val_path = dataset_path / "val.list"
     config_path = dataset_path / "config.json"
+    return PathsForPreprocess(dataset_path, esd_path, train_path, val_path, config_path)


 def initialize(
@@ -51,14 +60,14 @@ def initialize(
     log_interval: int,
 ):
     global logger_handler
+    paths = get_path(model_name)

     # 前処理のログをファイルに保存する
     timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
     file_name = f"preprocess_{timestamp}.log"
     if logger_handler is not None:
         logger.remove(logger_handler)
+    logger_handler = logger.add(paths.dataset_path / file_name)

     logger.info(
         f"Step 1: start initialization...\nmodel_name: {model_name}, batch_size: {batch_size}, epochs: {epochs}, save_every_steps: {save_every_steps}, freeze_ZH_bert: {freeze_ZH_bert}, freeze_JP_bert: {freeze_JP_bert}, freeze_EN_bert: {freeze_EN_bert}, freeze_style: {freeze_style}, freeze_decoder: {freeze_decoder}, use_jp_extra: {use_jp_extra}"
@@ -68,11 +77,11 @@ def initialize(
         "configs/config.json" if not use_jp_extra else "configs/config_jp_extra.json"
     )

+    with open(default_config_path, encoding="utf-8") as f:
         config = json.load(f)
     config["model_name"] = model_name
-    config["data"]["training_files"] = str(train_path)
-    config["data"]["validation_files"] = str(val_path)
+    config["data"]["training_files"] = str(paths.train_path)
+    config["data"]["validation_files"] = str(paths.val_path)
     config["train"]["batch_size"] = batch_size
     config["train"]["epochs"] = epochs
     config["train"]["eval_interval"] = save_every_steps
@@ -89,14 +98,14 @@ def initialize(
     # 今はデフォルトであるが、以前は非JP-Extra版になくバグの原因になるので念のため
     config["data"]["use_jp_extra"] = use_jp_extra

-    model_path = dataset_path / "models"
+    model_path = paths.dataset_path / "models"
     if model_path.exists():
         logger.warning(
             f"Step 1: {model_path} already exists, so copy it to backup to {model_path}_backup"
         )
         shutil.copytree(
             src=model_path,
-            dst=dataset_path / "models_backup",
+            dst=paths.dataset_path / "models_backup",
             dirs_exist_ok=True,
         )
         shutil.rmtree(model_path)
@@ -110,14 +119,14 @@ def initialize(
         logger.error(f"Step 1: {pretrained_dir} folder not found.")
         return False, f"Step 1, Error: {pretrained_dir}フォルダが見つかりません。"

-    with open(config_path, "w", encoding="utf-8") as f:
+    with open(paths.config_path, "w", encoding="utf-8") as f:
         json.dump(config, f, indent=2, ensure_ascii=False)
     if not Path("config.yml").exists():
         shutil.copy(src="default_config.yml", dst="config.yml")
+    with open("config.yml", encoding="utf-8") as f:
         yml_data = yaml.safe_load(f)
     yml_data["model_name"] = model_name
-    yml_data["dataset_path"] = str(dataset_path)
+    yml_data["dataset_path"] = str(paths.dataset_path)
     with open("config.yml", "w", encoding="utf-8") as f:
         yaml.dump(yml_data, f, allow_unicode=True)
     logger.success("Step 1: initialization finished.")
@@ -126,7 +135,7 @@ def initialize(

 def resample(model_name: str, normalize: bool, trim: bool, num_processes: int):
     logger.info("Step 2: start resampling...")
+    dataset_path = get_path(model_name).dataset_path
     input_dir = dataset_path / "raw"
     output_dir = dataset_path / "wavs"
     cmd = [
@@ -159,21 +168,24 @@ def preprocess_text(
     model_name: str, use_jp_extra: bool, val_per_lang: int, yomi_error: str
 ):
     logger.info("Step 3: start preprocessing text...")
+    paths = get_path(model_name)
+    if not paths.esd_path.exists():
+        logger.error(f"Step 3: {paths.esd_path} not found.")
+        return (
+            False,
+            f"Step 3, Error: 書き起こしファイル {paths.esd_path} が見つかりません。",
+        )

     cmd = [
         "preprocess_text.py",
         "--config-path",
-        str(config_path),
+        str(paths.config_path),
         "--transcription-path",
+        str(paths.esd_path),
         "--train-path",
-        str(train_path),
+        str(paths.train_path),
         "--val-path",
-        str(val_path),
+        str(paths.val_path),
         "--val-per-lang",
         str(val_per_lang),
         "--yomi_error",
@@ -201,7 +213,7 @@ def preprocess_text(

 def bert_gen(model_name: str):
     logger.info("Step 4: start bert_gen...")
+    config_path = get_path(model_name).config_path
     success, message = run_script_with_log(
         ["bert_gen.py", "--config", str(config_path)]
     )
@@ -220,7 +232,7 @@ def bert_gen(model_name: str):

 def style_gen(model_name: str, num_processes: int):
     logger.info("Step 5: start style_gen...")
+    config_path = get_path(model_name).config_path
     success, message = run_script_with_log(
         [
             "style_gen.py",
@@ -318,22 +330,31 @@ def train(
     skip_style: bool = False,
     use_jp_extra: bool = True,
     speedup: bool = False,
+    not_use_custom_batch_sampler: bool = False,
 ):
+    paths = get_path(model_name)
     # 学習再開の場合を考えて念のためconfig.ymlの名前等を更新
+    with open("config.yml", encoding="utf-8") as f:
         yml_data = yaml.safe_load(f)
     yml_data["model_name"] = model_name
-    yml_data["dataset_path"] = str(dataset_path)
+    yml_data["dataset_path"] = str(paths.dataset_path)
     with open("config.yml", "w", encoding="utf-8") as f:
         yaml.dump(yml_data, f, allow_unicode=True)

     train_py = "train_ms.py" if not use_jp_extra else "train_ms_jp_extra.py"
+    cmd = [
+        train_py,
+        "--config",
+        str(paths.config_path),
+        "--model",
+        str(paths.dataset_path),
+    ]
     if skip_style:
         cmd.append("--skip_default_style")
     if speedup:
         cmd.append("--speedup")
+    if not_use_custom_batch_sampler:
+        cmd.append("--not_use_custom_batch_sampler")
     success, message = run_script_with_log(cmd, ignore_warning=True)
     if not success:
         logger.error("Train failed.")
@@ -385,6 +406,15 @@ def run_tensorboard(model_name: str):
     yield gr.Button("Tensorboardを開く")


+change_log_md = """
+**Ver 2.5以降の変更点**
+
+- `raw/`フォルダの中で音声をサブディレクトリに分けて配置することで、自動的にスタイルが作成されるようになりました。詳細は下の「使い方/データの前準備」を参照してください。
+- これまでは1ファイルあたり14秒程度を超えた音声ファイルは学習には用いられていませんでしたが、Ver 2.5以降では「カスタムバッチサンプラーを無効化」にチェックを入れることでその制限が無しに学習できるようになりました(デフォルトはオフ)。ただし:
+    - 音声ファイルが長い場合の学習効率は悪いかもしれず、挙動も確認していません
+    - チェックを入れると要求VRAMがかなり増えるようので、学習に失敗したりVRAM不足になる場合は、バッチサイズを小さくするか、チェックを外してください
+"""
+
 how_to_md = """
 ## 使い方

@@ -396,9 +426,6 @@ how_to_md = """

 - 途中から学習を再開する場合は、モデル名を入力してから「学習を開始する」を押せばよいです。

-注意: 標準スタイル以外のスタイルを音声合成で使うには、スタイルベクトルファイル`style_vectors.npy`を作る必要があります。これは、`Style.bat`を実行してそこで作成してください。
-動作は軽いはずなので、学習中でも実行でき、何度でも繰り返して試せます。
-
 ## JP-Extra版について

 元とするモデル構造として [Bert-VITS2 Japanese-Extra](https://github.com/fishaudio/Bert-VITS2/releases/tag/JP-Exta) を使うことができます。
@@ -406,40 +433,60 @@ how_to_md = """
 """

 prepare_md = """
+まず音声データと、書き起こしテキストを用意してください。

 それを次のように配置します。
 ```
-├── Data
+├── Data/
 │   ├── {モデルの名前}
 │   │   ├── esd.list
-│   │   ├── raw
+│   │   ├── raw/
+│   │   │   ├── foo.wav
+│   │   │   ├── bar.mp3
+│   │   │   ├── style1/
+│   │   │   │   ├── baz.wav
+│   │   │   │   ├── qux.wav
+│   │   │   ├── style2/
+│   │   │   │   ├── corge.wav
+│   │   │   │   ├── grault.wav
+...
 ```

+### 配置の仕方
+- 上のように配置すると、`style1/`と`style2/`フォルダの内部(直下以外も含む)に入っている音声ファイルたちから、自動的にデフォルトスタイルに加えて`style1`と`style2`というスタイルが作成されます
+- 特にスタイルを作る必要がない場合や、スタイル分類機能等でスタイルを作る場合は、`raw/`フォルダ直下に全てを配置してください。このように`raw/`のサブディレクトリの個数が0または1の場合は、スタイルはデフォルトスタイルのみが作成されます。
+- 音声ファイルのフォーマットはwav形式以外にもmp3等の多くの音声ファイルに対応しています
+
+### 書き起こしファイル`esd.list`
+
+`Data/{モデルの名前}/esd.list` ファイルには、以下のフォーマットで各音声ファイルの情報を記述してください。
+
+
 ```
+path/to/audio.wav(wavファイル以外でもこう書く)|{話者名}|{言語ID、ZHかJPかEN}|{書き起こしテキスト}
 ```

+- ここで、最初の`path/to/audio.wav`は、`raw/`からの相対パスです。つまり、`raw/foo.wav`の場合は`foo.wav`、`raw/style1/bar.wav`の場合は`style1/bar.wav`となります。
+- 拡張子がwavでない場合でも、`esd.list`には`wav`と書いてください、つまり、`raw/bar.mp3`の場合でも`bar.wav`と書いてください。
+
+
 例:
 ```
+foo.wav|hanako|JP|こんにちは、元気ですか?
+bar.wav|taro|JP|はい、聞こえています……。何か用ですか?
+style1/baz.wav|hanako|JP|今日はいい天気ですね。
+style1/qux.wav|taro|JP|はい、そうですね。
+...
 english_teacher.wav|Mary|EN|How are you? I'm fine, thank you, and you?
 ...
 ```
+もちろん日本語話者の単一話者データセットでも構いません。
-- 音声ファイルはrawフォルダの直下でなくてもサブフォルダに入れても構いません。その場合は、`esd.list`の最初には`raw`からの相対パスを記述してください。
 """


 def create_train_app():
-    with gr.Blocks().queue() as app:
+    with gr.Blocks(theme=GRADIO_THEME).queue() as app:
+        gr.Markdown(change_log_md)
         with gr.Accordion("使い方", open=False):
             gr.Markdown(how_to_md)
         with gr.Accordion(label="データの前準備", open=False):
@@ -491,7 +538,7 @@ def create_train_app():
                 ("読めないファイルは使わず続行", "skip"),
                 ("読めないファイルも無理やり読んで学習に使う", "use"),
             ],
+            value="skip",
         )
         with gr.Accordion("詳細設定", open=False):
             num_processes = gr.Slider(
@@ -677,6 +724,11 @@ def create_train_app():
                 label="JP-Extra版を使う",
                 value=True,
             )
+            not_use_custom_batch_sampler = gr.Checkbox(
+                label="カスタムバッチサンプラーを無効化",
+                info="VRAMに余裕がある場合にチェックすると、長い音声ファイルも学習に使われるようになります",
+                value=False,
+            )
             speedup = gr.Checkbox(
                 label="ログ等をスキップして学習を高速化する",
                 value=False,
@@ -764,7 +816,13 @@ def create_train_app():
         # Train
         train_btn.click(
             second_elem_of(train),
+            inputs=[
+                model_name,
+                skip_style,
+                use_jp_extra_train,
+                speedup,
+                not_use_custom_batch_sampler,
+            ],
             outputs=[info_train],
         )
         tensorboard_btn.click(
@@ -783,3 +841,8 @@ def create_train_app():
         )

     return app
+
+
+if __name__ == "__main__":
+    app = create_train_app()
+    app.launch(inbrowser=True)
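For orientation, a minimal sketch of the new `PathsForPreprocess` helper introduced above: every preprocessing step now resolves its file locations through one dataclass instead of an ad-hoc tuple. The field names come from the diff; the model name is a placeholder and the `Data/` prefix assumes the default `dataset_root`.

```python
# Sketch: resolve all per-model preprocessing paths at once.
from gradio_tabs.train import get_path

paths = get_path("my_model")          # placeholder model name under dataset_root
print(paths.dataset_path)             # e.g. Data/my_model
print(paths.esd_path)                 # e.g. Data/my_model/esd.list (transcriptions)
print(paths.train_path)               # e.g. Data/my_model/train.list
print(paths.val_path)                 # e.g. Data/my_model/val.list
print(paths.config_path)              # e.g. Data/my_model/config.json
```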
initialize.py
CHANGED
@@ -1,5 +1,6 @@
 import argparse
 import json
+import shutil
 from pathlib import Path

 import yaml
@@ -9,7 +10,7 @@ from style_bert_vits2.logging import logger


 def download_bert_models():
+    with open("bert/bert_models.json", encoding="utf-8") as fp:
         models = json.load(fp)
     for k, v in models.items():
         local_path = Path("bert").joinpath(k)
@@ -49,7 +50,7 @@ def download_jp_extra_pretrained_models():
     )


+def download_default_models():
     files = [
         "jvnv-F1-jp/config.json",
         "jvnv-F1-jp/jvnv-F1-jp_e160_s14000.safetensors",
@@ -71,13 +72,33 @@ def download_jvnv_models():
             "litagin/style_bert_vits2_jvnv",
             file,
             local_dir="model_assets",
-            local_dir_use_symlinks=False,
         )
+    additional_files = {
+        "litagin/sbv2_koharune_ami": [
+            "koharune-ami/config.json",
+            "koharune-ami/style_vectors.npy",
+            "koharune-ami/koharune-ami.safetensors",
+        ],
+        "litagin/sbv2_amitaro": [
+            "amitaro/config.json",
+            "amitaro/style_vectors.npy",
+            "amitaro/amitaro.safetensors",
+        ],
+    }
+    for repo_id, files in additional_files.items():
+        for file in files:
+            if not Path(f"model_assets/{file}").exists():
+                logger.info(f"Downloading {file}")
+                hf_hub_download(
+                    repo_id,
+                    file,
+                    local_dir="model_assets",
+                )


 def main():
     parser = argparse.ArgumentParser()
+    parser.add_argument("--skip_default_models", action="store_true")
     parser.add_argument("--only_infer", action="store_true")
     parser.add_argument(
         "--dataset_root",
@@ -95,19 +116,24 @@ def main():

     download_bert_models()

+    if not args.skip_default_models:
+        download_default_models()
     if not args.only_infer:
         download_slm_model()
         download_pretrained_models()
         download_jp_extra_pretrained_models()

+    # If configs/paths.yml not exists, create it
+    default_paths_yml = Path("configs/default_paths.yml")
+    paths_yml = Path("configs/paths.yml")
+    if not paths_yml.exists():
+        shutil.copy(default_paths_yml, paths_yml)
+
     if args.dataset_root is None and args.assets_root is None:
         return

     # Change default paths if necessary
-    with open(paths_yml, "r", encoding="utf-8") as f:
+    with open(paths_yml, encoding="utf-8") as f:
         yml_data = yaml.safe_load(f)
     if args.assets_root is not None:
         yml_data["assets_root"] = args.assets_root
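The download helpers above can also be called directly, for example from a notebook, instead of running `python initialize.py` (optionally with `--skip_default_models` or `--only_infer`). A minimal sketch, assuming the repository root as the working directory; the function names are taken from the diff.

```python
# Sketch: perform the same downloads initialize.py does, without the CLI.
from initialize import download_bert_models, download_default_models

download_bert_models()      # BERT encoders required for inference
download_default_models()   # jvnv-*, koharune-ami, amitaro sample voices into model_assets/
```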
library.ipynb
CHANGED
@@ -1,138 +1,135 @@
 {
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Style-Bert-VITS2ライブラリの使用例\n",
+    "\n",
+    "`pip install style-bert-vits2`を使った、jupyter notebookでの使用例です。Google colab等でも動きます。"
+   ]
  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# PyTorch環境の構築(ない場合)\n",
+    "# 参照: https://pytorch.org/get-started/locally/\n",
+    "\n",
+    "!pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "LLrngKcQEAyP"
+   },
+   "outputs": [],
+   "source": [
+    "# style-bert-vits2のインストール\n",
+    "\n",
+    "!pip install style-bert-vits2"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "9xRtfUg5EZkx"
+   },
+   "outputs": [],
+   "source": [
+    "# BERTモデルをロード(ローカルに手動でダウンロードする必要はありません)\n",
+    "\n",
+    "from style_bert_vits2.nlp import bert_models\n",
+    "from style_bert_vits2.constants import Languages\n",
+    "\n",
+    "\n",
+    "bert_models.load_model(Languages.JP, \"ku-nlp/deberta-v2-large-japanese-char-wwm\")\n",
+    "bert_models.load_tokenizer(Languages.JP, \"ku-nlp/deberta-v2-large-japanese-char-wwm\")\n",
+    "# bert_models.load_model(Languages.EN, \"microsoft/deberta-v3-large\")\n",
+    "# bert_models.load_tokenizer(Languages.EN, \"microsoft/deberta-v3-large\")\n",
+    "# bert_models.load_model(Languages.ZH, \"hfl/chinese-roberta-wwm-ext-large\")\n",
+    "# bert_models.load_tokenizer(Languages.ZH, \"hfl/chinese-roberta-wwm-ext-large\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "q2V9d3HyFAr_"
+   },
+   "outputs": [],
+   "source": [
+    "# Hugging Faceから試しにデフォルトモデルをダウンロードしてみて、それを音声合成に使ってみる\n",
+    "# model_assetsディレクトリにダウンロードされます\n",
+    "\n",
+    "from pathlib import Path\n",
+    "from huggingface_hub import hf_hub_download\n",
+    "\n",
+    "\n",
+    "model_file = \"jvnv-F1-jp/jvnv-F1-jp_e160_s14000.safetensors\"\n",
+    "config_file = \"jvnv-F1-jp/config.json\"\n",
+    "style_file = \"jvnv-F1-jp/style_vectors.npy\"\n",
+    "\n",
+    "for file in [model_file, config_file, style_file]:\n",
+    "    print(file)\n",
+    "    hf_hub_download(\"litagin/style_bert_vits2_jvnv\", file, local_dir=\"model_assets\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "hJa31MEUFhe4"
+   },
+   "outputs": [],
+   "source": [
+    "# 上でダウンロードしたモデルファイルを指定して音声合成のテスト\n",
+    "\n",
+    "from style_bert_vits2.tts_model import TTSModel\n",
+    "\n",
+    "assets_root = Path(\"model_assets\")\n",
+    "\n",
+    "model = TTSModel(\n",
+    "    model_path=assets_root / model_file,\n",
+    "    config_path=assets_root / config_file,\n",
+    "    style_vec_path=assets_root / style_file,\n",
+    "    device=\"cpu\",\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "Gal0tqrtGXZx"
+   },
+   "outputs": [],
+   "source": [
+    "from IPython.display import Audio, display\n",
+    "\n",
+    "sr, audio = model.infer(text=\"こんにちは\")\n",
+    "display(Audio(audio, rate=sr))"
+   ]
+  }
+ ],
+ "metadata": {
+  "colab": {
+   "provenance": []
+  },
+  "kernelspec": {
+   "display_name": "Python 3",
+   "name": "python3"
+  },
+  "language_info": {
+   "name": "python",
+   "version": "3.10.11"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
 }
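As a follow-up to the notebook's last cell, a minimal sketch of writing the synthesized audio to a WAV file instead of playing it inline. It reuses the `model` object built in the notebook and `model.infer()` returning `(sampling_rate, waveform)` as shown there; writing via `scipy.io.wavfile` (scipy is already a project dependency) is an assumption, not part of the notebook.

```python
# Sketch: save the result of model.infer() to disk.
from scipy.io import wavfile

sr, audio = model.infer(text="こんにちは")  # (sampling_rate, waveform ndarray)
wavfile.write("output.wav", sr, audio)
```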
preprocess_text.py
CHANGED
@@ -2,12 +2,12 @@ import argparse
 import json
 from collections import defaultdict
 from pathlib import Path
-from random import shuffle
+from random import sample, shuffle
 from typing import Optional
 
 from tqdm import tqdm
 
-from config import
+from config import get_config
 from style_bert_vits2.logging import logger
 from style_bert_vits2.nlp import clean_text
 from style_bert_vits2.nlp.japanese import pyopenjtalk_worker
@@ -22,7 +22,7 @@ pyopenjtalk_worker.initialize_worker()
 update_dict()
 
 
-preprocess_text_config
+preprocess_text_config = get_config().preprocess_text_config
 
 
 # Count lines for tqdm
@@ -145,7 +145,7 @@ def preprocess(
         spk_utt_map[spk].append(line)
 
         # 新しい話者が出てきたら話者IDを割り当て、current_sidを1増やす
-        if spk not in spk_id_map
+        if spk not in spk_id_map:
            spk_id_map[spk] = current_sid
            current_sid += 1
     if count_same > 0 or count_not_found > 0:
@@ -156,16 +156,26 @@
     train_list: list[str] = []
     val_list: list[str] = []
 
+    # 各話者ごとに発話リストを処理
     for spk, utts in spk_utt_map.items():
+        if val_per_lang == 0:
+            train_list.extend(utts)
+            continue
+        # ランダムにval_per_lang個のインデックスを選択
+        val_indices = set(sample(range(len(utts)), val_per_lang))
+        # 元の順序を保ちながらリストを分割
+        for index, utt in enumerate(utts):
+            if index in val_indices:
+                val_list.append(utt)
+            else:
+                train_list.append(utt)
+
+    # バリデーションリストのサイズ調整
     if len(val_list) > max_val_total:
+        extra_val = val_list[max_val_total:]
        val_list = val_list[:max_val_total]
+        # 余剰のバリデーション発話をトレーニングリストに追加(元の順序を保持)
+        train_list.extend(extra_val)
 
     with train_path.open("w", encoding="utf-8") as f:
         for line in train_list:
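The rewritten split in the last hunk draws val_per_lang random indices per speaker while keeping both output lists in the original order. A small standalone sketch of the same idea on toy data (the names here are illustrative only):

from random import sample

utts = [f"utt_{i}" for i in range(10)]  # toy utterances for one speaker
val_per_lang = 2

val_indices = set(sample(range(len(utts)), val_per_lang))  # random validation picks
val_list = [u for i, u in enumerate(utts) if i in val_indices]
train_list = [u for i, u in enumerate(utts) if i not in val_indices]
# both lists preserve the original ordering of `utts`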
pyproject.toml
CHANGED
@@ -5,7 +5,7 @@ build-backend = "hatchling.build"
 [project]
 name = "style-bert-vits2"
 dynamic = ["version"]
-description =
+description = "Style-Bert-VITS2: Bert-VITS2 with more controllable voice styles."
 readme = "README.md"
 requires-python = ">=3.9"
 license = "AGPL-3.0"
@@ -22,25 +22,21 @@ classifiers = [
     "Programming Language :: Python :: Implementation :: CPython",
 ]
 dependencies = [
-    'safetensors',
-    'scipy',
-    'torch>=2.1',
-    'transformers',
+    "cmudict",
+    "cn2an",
+    "g2p_en",
+    "jieba",
+    "loguru",
+    "num2words",
+    "numba",
+    "numpy",
+    "pydantic>=2.0",
+    "pyopenjtalk-dict",
+    "pypinyin",
+    "pyworld-prebuilt",
+    "safetensors",
+    "torch>=2.1",
+    "transformers",
 ]
 
 [project.urls]
@@ -63,42 +59,26 @@ only-include = [
     "pyproject.toml",
     "README.md",
 ]
-exclude = [
-    ".git",
-    ".gitignore",
-    ".gitattributes",
-]
+exclude = [".git", ".gitignore", ".gitattributes"]
 
 [tool.hatch.build.targets.wheel]
 packages = ["style_bert_vits2"]
 
 [tool.hatch.envs.test]
-dependencies = [
-    "coverage[toml]>=6.5",
-    "pytest",
-]
+dependencies = ["coverage[toml]>=6.5", "pytest"]
 [tool.hatch.envs.test.scripts]
 # Usage: `hatch run test:test`
 test = "pytest {args:tests}"
 # Usage: `hatch run test:coverage`
 test-cov = "coverage run -m pytest {args:tests}"
 # Usage: `hatch run test:cov-report`
-cov-report = [
-    "- coverage combine",
-    "coverage report",
-]
+cov-report = ["- coverage combine", "coverage report"]
 # Usage: `hatch run test:cov`
-cov = [
-    "test-cov",
-    "cov-report",
-]
+cov = ["test-cov", "cov-report"]
 
 [tool.hatch.envs.style]
 detached = true
-dependencies = [
-    "black",
-    "isort",
-]
+dependencies = ["black[jupyter]", "isort"]
 [tool.hatch.envs.style.scripts]
 check = [
     "black --check --diff .",
@@ -117,17 +97,17 @@ python = ["3.9", "3.10", "3.11"]
 source_pkgs = ["style_bert_vits2", "tests"]
 branch = true
 parallel = true
-omit = [
-    "style_bert_vits2/constants.py",
-]
+omit = ["style_bert_vits2/constants.py"]
 
 [tool.coverage.paths]
 style_bert_vits2 = ["style_bert_vits2", "*/style-bert-vits2/style_bert_vits2"]
 tests = ["tests", "*/style-bert-vits2/tests"]
 
 [tool.coverage.report]
-exclude_lines = [
+exclude_lines = ["no cov", "if __name__ == .__main__.:", "if TYPE_CHECKING:"]
+
+[tool.ruff]
+extend-select = ["I"]
+
+[tool.ruff.lint.isort]
+lines-after-imports = 2
requirements-colab.txt
ADDED
@@ -0,0 +1,20 @@
+cmudict
+cn2an
+g2p_en
+gradio>=4.32
+jieba
+librosa==0.9.2
+loguru
+num2words
+numpy<2
+onnxruntime
+pyannote.audio>=3.1.0
+pyloudnorm
+pyopenjtalk-dict
+pypinyin
+pyworld-prebuilt
+torch
+torchaudio
+torchvision
+transformers
+umap-learn
requirements-infer.txt
ADDED
@@ -0,0 +1,24 @@
+cmudict
+cn2an
+# faster-whisper==0.10.1
+g2p_en
+GPUtil
+gradio
+jieba
+# librosa==0.9.2
+loguru
+num2words
+numpy<2
+# protobuf==4.25
+psutil
+# punctuators
+pyannote.audio>=3.1.0
+# pyloudnorm
+pyopenjtalk-dict
+pypinyin
+pyworld-prebuilt
+# stable_ts
+# tensorboard
+torch<2.4
+transformers
+umap-learn
requirements.txt
CHANGED
@@ -3,28 +3,23 @@ cn2an
 faster-whisper==0.10.1
 g2p_en
 GPUtil
-gradio
+gradio>=4.32
 jieba
-langid
 librosa==0.9.2
 loguru
-matplotlib
 num2words
+numpy<2
+protobuf==4.25
 psutil
+punctuators
 pyannote.audio>=3.1.0
-pydantic>=2.0
 pyloudnorm
-# pyopenjtalk-prebuilt # Should be manually uninstalled
 pyopenjtalk-dict
 pypinyin
 pyworld-prebuilt
-requests
-safetensors
-scipy
+stable_ts
 tensorboard
-torch
+torch<2.4
+torchaudio<2.4
 transformers
 umap-learn
resample.py
CHANGED
@@ -10,7 +10,7 @@ import soundfile
 from numpy.typing import NDArray
 from tqdm import tqdm
 
-from config import
+from config import get_config
 from style_bert_vits2.logging import logger
 from style_bert_vits2.utils.stdout_wrapper import SAFE_STDOUT
 
@@ -62,6 +62,7 @@ def resample(
     if trim:
         wav, _ = librosa.effects.trim(wav, top_db=30)
     relative_path = file.relative_to(input_dir)
+    # ここで拡張子が.wav以外でも.wavに置き換えられる
     output_path = output_dir / relative_path.with_suffix(".wav")
     output_path.parent.mkdir(parents=True, exist_ok=True)
     soundfile.write(output_path, wav, sr)
@@ -70,6 +71,7 @@
 
 
 if __name__ == "__main__":
+    config = get_config()
     parser = argparse.ArgumentParser()
     parser.add_argument(
         "--sr",
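The comment added to resample.py notes that with_suffix(".wav") silently rewrites whatever extension the source file had. A quick pathlib illustration (the paths are made up):

from pathlib import Path

relative_path = Path("speaker1/take_003.mp3")
print(relative_path.with_suffix(".wav"))         # speaker1/take_003.wav (extension replaced)
print(Path("take_004.wav").with_suffix(".wav"))  # already .wav, so unchanged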
scripts/Install-Style-Bert-VITS2-CPU.bat
CHANGED
@@ -1,123 +1,134 @@
 chcp 65001 > NUL
 @echo off
 
 @REM エラーコードを遅延評価するために設定
 setlocal enabledelayedexpansion
 
 @REM PowerShellのコマンド
 set PS_CMD=PowerShell -Version 5.1 -ExecutionPolicy Bypass
 
 @REM PortableGitのURLと保存先
 set DL_URL=https://github.com/git-for-windows/git/releases/download/v2.44.0.windows.1/PortableGit-2.44.0-64-bit.7z.exe
 set DL_DST=%~dp0lib\PortableGit-2.44.0-64-bit.7z.exe
 
 @REM Style-Bert-VITS2のリポジトリURL
 set REPO_URL=https://github.com/litagin02/Style-Bert-VITS2
 
 @REM カレントディレクトリをbatファイルのディレクトリに変更
 pushd %~dp0
 
 @REM lib フォルダがなければ作成
 if not exist lib\ ( mkdir lib )
 
 echo --------------------------------------------------
 echo PS_CMD: %PS_CMD%
 echo DL_URL: %DL_URL%
 echo DL_DST: %DL_DST%
 echo REPO_URL: %REPO_URL%
 echo --------------------------------------------------
 echo.
 echo --------------------------------------------------
 echo Checking Git Installation...
 echo --------------------------------------------------
 echo Executing: git --version
 git --version
 if !errorlevel! neq 0 (
 echo --------------------------------------------------
 echo Git is not installed, so download and use PortableGit.
 echo Downloading PortableGit...
 echo --------------------------------------------------
 echo Executing: curl -L %DL_URL% -o "%DL_DST%"
 curl -L %DL_URL% -o "%DL_DST%"
 if !errorlevel! neq 0 ( pause & popd & exit /b !errorlevel! )
 
 echo --------------------------------------------------
 echo Extracting PortableGit...
 echo --------------------------------------------------
 echo Executing: "%DL_DST%" -y
 "%DL_DST%" -y
 if !errorlevel! neq 0 ( pause & popd & exit /b !errorlevel! )
 
 echo --------------------------------------------------
 echo Removing %DL_DST%...
 echo --------------------------------------------------
 echo Executing: del "%DL_DST%"
 del "%DL_DST%"
 if !errorlevel! neq 0 ( pause & popd & exit /b !errorlevel! )
 
 @REM Gitコマンドのパスを設定
 echo --------------------------------------------------
 echo Setting up PATH...
 echo --------------------------------------------------
 echo Executing: set "PATH=%~dp0lib\PortableGit\bin;%PATH%"
 set "PATH=%~dp0lib\PortableGit\bin;%PATH%"
 if !errorlevel! neq 0 ( pause & popd & exit /b !errorlevel! )
 
 echo --------------------------------------------------
 echo Checking Git Installation...
 echo --------------------------------------------------
 echo Executing: git --version
 git --version
 if !errorlevel! neq 0 ( pause & popd & exit /b !errorlevel! )
 )
 
 echo --------------------------------------------------
 echo Cloning repository...
 echo --------------------------------------------------
 echo Executing: git clone %REPO_URL%
 git clone %REPO_URL%
 if !errorlevel! neq 0 ( pause & popd & exit /b !errorlevel! )
 
 @REM Pythonのセットアップ、仮想環境が有効化されて戻って来る
 echo --------------------------------------------------
 echo Setting up Python environment...
 echo --------------------------------------------------
 echo Executing: call Setup-Python.bat ".\lib\python" ".\Style-Bert-VITS2\venv"
 call Setup-Python.bat ".\lib\python" ".\Style-Bert-VITS2\venv"
 if !errorlevel! neq 0 ( popd & exit /b !errorlevel! )
 
 @REM Style-Bert-VITS2フォルダに移動
 pushd Style-Bert-VITS2
 
+@REM 後で消す!!!!!!!!!!
+@REM git checkout dev
+@REM 後で消す!!!!!!!!!!
+
+echo --------------------------------------------------
+echo Activating the virtual environment...
+echo --------------------------------------------------
+echo Executing: call ".\venv\Scripts\activate.bat"
+call ".\venv\Scripts\activate.bat"
+if !errorlevel! neq 0 ( popd & exit /b !errorlevel! )
+
+echo --------------------------------------------------
+echo Installing package manager uv...
+echo --------------------------------------------------
+echo Executing: pip install uv
+pip install uv
+if !errorlevel! neq 0 ( pause & popd & exit /b !errorlevel! )
+
+echo --------------------------------------------------
+echo Installing dependencies...
+echo --------------------------------------------------
+echo Executing: uv pip install -r requirements-infer.txt
+uv pip install -r requirements-infer.txt
+if !errorlevel! neq 0 ( pause & popd & exit /b !errorlevel! )
+
+echo ----------------------------------------
+echo Environment setup is complete. Start downloading the model.
+echo ----------------------------------------
+echo Executing: python initialize.py
+python initialize.py --only_infer
+
+echo ----------------------------------------
+echo Model download is complete. Start Style-Bert-VITS2 Editor.
+echo ----------------------------------------
+echo Executing: python server_editor.py --inbrowser
+python server_editor.py --inbrowser
+pause
+
+popd
+
+popd
+
+endlocal
scripts/Install-Style-Bert-VITS2.bat
CHANGED
@@ -1,130 +1,141 @@
 chcp 65001 > NUL
 @echo off
 
 @REM エラーコードを遅延評価するために設定
 setlocal enabledelayedexpansion
 
 @REM PowerShellのコマンド
 set PS_CMD=PowerShell -Version 5.1 -ExecutionPolicy Bypass
 
 @REM PortableGitのURLと保存先
 set DL_URL=https://github.com/git-for-windows/git/releases/download/v2.44.0.windows.1/PortableGit-2.44.0-64-bit.7z.exe
 set DL_DST=%~dp0lib\PortableGit-2.44.0-64-bit.7z.exe
 
 @REM Style-Bert-VITS2のリポジトリURL
 set REPO_URL=https://github.com/litagin02/Style-Bert-VITS2
 
 @REM カレントディレクトリをbatファイルのディレクトリに変更
 pushd %~dp0
 
 @REM lib フォルダがなければ作成
 if not exist lib\ ( mkdir lib )
 
 echo --------------------------------------------------
 echo PS_CMD: %PS_CMD%
 echo DL_URL: %DL_URL%
 echo DL_DST: %DL_DST%
 echo REPO_URL: %REPO_URL%
 echo --------------------------------------------------
 echo.
 echo --------------------------------------------------
 echo Checking Git Installation...
 echo --------------------------------------------------
 echo Executing: git --version
 git --version
 if !errorlevel! neq 0 (
 echo --------------------------------------------------
 echo Git is not installed, so download and use PortableGit.
 echo Downloading PortableGit...
 echo --------------------------------------------------
 echo Executing: curl -L %DL_URL% -o "%DL_DST%"
 curl -L %DL_URL% -o "%DL_DST%"
 if !errorlevel! neq 0 ( pause & popd & exit /b !errorlevel! )
 
 echo --------------------------------------------------
 echo Extracting PortableGit...
 echo --------------------------------------------------
 echo Executing: "%DL_DST%" -y
 "%DL_DST%" -y
 if !errorlevel! neq 0 ( pause & popd & exit /b !errorlevel! )
 
 echo --------------------------------------------------
 echo Removing %DL_DST%...
 echo --------------------------------------------------
 echo Executing: del "%DL_DST%"
 del "%DL_DST%"
 if !errorlevel! neq 0 ( pause & popd & exit /b !errorlevel! )
 
 @REM Gitコマンドのパスを設定
 echo --------------------------------------------------
 echo Setting up PATH...
 echo --------------------------------------------------
 echo Executing: set "PATH=%~dp0lib\PortableGit\bin;%PATH%"
 set "PATH=%~dp0lib\PortableGit\bin;%PATH%"
 if !errorlevel! neq 0 ( pause & popd & exit /b !errorlevel! )
 
 echo --------------------------------------------------
 echo Checking Git Installation...
 echo --------------------------------------------------
 echo Executing: git --version
 git --version
 if !errorlevel! neq 0 ( pause & popd & exit /b !errorlevel! )
 )
 
 echo --------------------------------------------------
 echo Cloning repository...
 echo --------------------------------------------------
 echo Executing: git clone %REPO_URL%
 git clone %REPO_URL%
 if !errorlevel! neq 0 ( pause & popd & exit /b !errorlevel! )
 
 @REM Pythonのセットアップ
 echo --------------------------------------------------
 echo Setting up Python environment...
 echo --------------------------------------------------
 echo Executing: call Setup-Python.bat ".\lib\python" ".\Style-Bert-VITS2\venv"
 call Setup-Python.bat ".\lib\python" ".\Style-Bert-VITS2\venv"
 if !errorlevel! neq 0 ( popd & exit /b !errorlevel! )
 
 @REM Style-Bert-VITS2フォルダに移動
 pushd Style-Bert-VITS2
 
+@REM 後で消す!!!!!!!!!!
+@REM git checkout dev
+@REM 後で消す!!!!!!!!!!
+
+echo --------------------------------------------------
+echo Activating the virtual environment...
+echo --------------------------------------------------
+echo Executing: call ".\venv\Scripts\activate.bat"
+call ".\venv\Scripts\activate.bat"
+if !errorlevel! neq 0 ( popd & exit /b !errorlevel! )
+
+echo --------------------------------------------------
+echo Installing package manager uv...
+echo --------------------------------------------------
+echo Executing: pip install uv
+pip install uv
+if !errorlevel! neq 0 ( pause & popd & exit /b !errorlevel! )
+
+echo --------------------------------------------------
+echo Installing PyTorch...
+echo --------------------------------------------------
+echo Executing: uv pip install "torch<2.4" "torchaudio<2.4" --index-url https://download.pytorch.org/whl/cu118
+uv pip install "torch<2.4" "torchaudio<2.4" --index-url https://download.pytorch.org/whl/cu118
+if !errorlevel! neq 0 ( pause & popd & exit /b !errorlevel! )
+
+echo --------------------------------------------------
+echo Installing other dependencies...
+echo --------------------------------------------------
+echo Executing: uv pip install -r requirements.txt
+uv pip install -r requirements.txt
+if !errorlevel! neq 0 ( pause & popd & exit /b !errorlevel! )
+
+echo ----------------------------------------
+echo Environment setup is complete. Start downloading the model.
+echo ----------------------------------------
+echo Executing: python initialize.py
+python initialize.py
+
+echo ----------------------------------------
+echo Model download is complete. Start Style-Bert-VITS2 Editor.
+echo ----------------------------------------
+echo Executing: python server_editor.py --inbrowser
+python server_editor.py --inbrowser
+pause
+
+popd
+
+popd
+
+endlocal
scripts/Setup-Python.bat
CHANGED
@@ -1,115 +1,101 @@
 chcp 65001 > NUL
 
 @REM https://github.com/Zuntan03/EasyBertVits2 より引用・改変
 
 @REM エラーコードを遅延評価するために設定
 setlocal enabledelayedexpansion
 
 @echo off
 set PS_CMD=PowerShell -Version 5.1 -ExecutionPolicy Bypass
 set CURL_CMD=C:\Windows\System32\curl.exe
 
 if not exist %CURL_CMD% (
 echo [ERROR] %CURL_CMD% が見つかりません。
 pause & exit /b 1
 )
 
 if "%1" neq "" (
 set PYTHON_DIR=%~dp0%~1
 ) else (
 set PYTHON_DIR=%~dp0python
 )
 set PYTHON_CMD=%PYTHON_DIR%\python.exe
 
 if "%2" neq "" (
 set VENV_DIR=%~dp0%~2
 ) else (
 set VENV_DIR=%~dp0venv
 )
 
 echo --------------------------------------------------
 echo PS_CMD: %PS_CMD%
 echo CURL_CMD: %CURL_CMD%
 echo PYTHON_CMD: %PYTHON_CMD%
 echo PYTHON_DIR: %PYTHON_DIR%
 echo VENV_DIR: %VENV_DIR%
 echo --------------------------------------------------
 echo.
 
 if not exist "%PYTHON_DIR%"\ (
 echo --------------------------------------------------
 echo Downloading Python...
 echo --------------------------------------------------
 echo Executing: %CURL_CMD% -o python.zip https://www.python.org/ftp/python/3.10.11/python-3.10.11-embed-amd64.zip
 %CURL_CMD% -o python.zip https://www.python.org/ftp/python/3.10.11/python-3.10.11-embed-amd64.zip
 if !errorlevel! neq 0 ( pause & exit /b !errorlevel! )
 
 echo --------------------------------------------------
 echo Extracting zip...
 echo --------------------------------------------------
 echo Executing: %PS_CMD% Expand-Archive -Path python.zip -DestinationPath \"%PYTHON_DIR%\"
 %PS_CMD% Expand-Archive -Path python.zip -DestinationPath \"%PYTHON_DIR%\"
 if !errorlevel! neq 0 ( pause & exit /b !errorlevel! )
 
 echo --------------------------------------------------
 echo Removing python.zip...
 echo --------------------------------------------------
 echo Executing: del python.zip
 del python.zip
 if !errorlevel! neq 0 ( pause & exit /b !errorlevel! )
 
 echo --------------------------------------------------
 echo Enabling 'site' module in the embedded Python environment...
 echo --------------------------------------------------
 echo Executing: %PS_CMD% "&{(Get-Content '%PYTHON_DIR%/python310._pth') -creplace '#import site', 'import site' | Set-Content '%PYTHON_DIR%/python310._pth' }"
 %PS_CMD% "&{(Get-Content '%PYTHON_DIR%/python310._pth') -creplace '#import site', 'import site' | Set-Content '%PYTHON_DIR%/python310._pth' }"
 if !errorlevel! neq 0 ( pause & exit /b !errorlevel! )
 
 echo --------------------------------------------------
 echo Downloading get-pip.py...
 echo --------------------------------------------------
 echo Executing: %CURL_CMD% -o "%PYTHON_DIR%\get-pip.py" https://bootstrap.pypa.io/get-pip.py
 %CURL_CMD% -o "%PYTHON_DIR%\get-pip.py" https://bootstrap.pypa.io/get-pip.py
 if !errorlevel! neq 0 ( pause & exit /b !errorlevel! )
 
 echo --------------------------------------------------
 echo Installing pip...
 echo --------------------------------------------------
 echo Executing: "%PYTHON_CMD%" "%PYTHON_DIR%\get-pip.py" --no-warn-script-location
 "%PYTHON_CMD%" "%PYTHON_DIR%\get-pip.py" --no-warn-script-location
 if !errorlevel! neq 0 ( pause & exit /b !errorlevel! )
 
 echo --------------------------------------------------
 echo Installing virtualenv...
 echo --------------------------------------------------
 echo Executing: "%PYTHON_CMD%" -m pip install virtualenv --no-warn-script-location
 "%PYTHON_CMD%" -m pip install virtualenv --no-warn-script-location
 if !errorlevel! neq 0 ( pause & exit /b !errorlevel! )
 )
 
 if not exist %VENV_DIR%\ (
 echo --------------------------------------------------
 echo Creating virtual environment...
 echo --------------------------------------------------
 echo Executing: "%PYTHON_CMD%" -m virtualenv --copies "%VENV_DIR%"
 "%PYTHON_CMD%" -m virtualenv --copies "%VENV_DIR%"
 if !errorlevel! neq 0 ( pause & exit /b !errorlevel! )
 )
 
-echo --------------------------------------------------
-echo
-echo --------------------------------------------------
-echo Executing: call "%VENV_DIR%\Scripts\activate.bat"
-call "%VENV_DIR%\Scripts\activate.bat"
-if !errorlevel! neq 0 ( pause & exit /b !errorlevel! )
-
-echo --------------------------------------------------
-echo Upgrading pip...
-echo --------------------------------------------------
-echo Executing: python -m pip install --upgrade pip
-python -m pip install --upgrade pip
-if !errorlevel! neq 0 ( pause & exit /b !errorlevel! )
-
 echo --------------------------------------------------
 echo Completed.
 echo --------------------------------------------------
scripts/Update-Style-Bert-VITS2.bat
CHANGED
@@ -1,62 +1,69 @@
 chcp 65001 > NUL
 @echo off
 
 @REM エラーコードを遅延評価するために設定
 setlocal enabledelayedexpansion
 
 pushd %~dp0
 
 
 pushd Style-Bert-VITS2
 
 echo --------------------------------------------------
 echo Checking Git Installation...
 echo --------------------------------------------------
 git --version
 if !errorlevel! neq 0 (
 echo --------------------------------------------------
 echo Global Git is not installed, so use PortableGit.
 echo Setting up PATH...
 echo --------------------------------------------------
 echo Executing: set "PATH=%~dp0lib\PortableGit\bin;%PATH%"
 set "PATH=%~dp0lib\PortableGit\bin;%PATH%"
 
 echo --------------------------------------------------
 echo Checking Git Installation...
 echo --------------------------------------------------
 echo Executing: git --version
 git --version
 if !errorlevel! neq 0 ( pause & popd & exit /b !errorlevel! )
 )
 
 echo --------------------------------------------------
 echo Git pull...
 echo --------------------------------------------------
 git pull
 if !errorlevel! neq 0 ( pause & popd & exit /b !errorlevel! )
 
 @REM 仮想環境のpip requirements.txtを更新
 
 echo --------------------------------------------------
 echo Activating virtual environment...
 echo --------------------------------------------------
 echo Executing: call ".\venv\Scripts\activate.bat"
 call ".\venv\Scripts\activate.bat"
 if !errorlevel! neq 0 ( pause & popd & exit /b !errorlevel! )
 
 echo --------------------------------------------------
-echo
-echo --------------------------------------------------
-echo Executing: pip install -U
-pip install -U
+echo Installing uv...
+echo --------------------------------------------------
+echo Executing: pip install -U uv
+pip install -U uv
 if !errorlevel! neq 0 ( pause & popd & exit /b !errorlevel! )
+
+echo --------------------------------------------------
+echo Updating dependencies...
+echo --------------------------------------------------
+echo Executing: uv pip install -U -r requirements.txt
+uv pip install -U -r requirements.txt
+if !errorlevel! neq 0 ( pause & popd & exit /b !errorlevel! )
+
+echo ----------------------------------------
+echo Update completed.
+echo ----------------------------------------
+
+pause
+
+popd
+
+popd
server_editor.py
CHANGED
@@ -22,7 +22,6 @@ import numpy as np
 import requests
 import torch
 import uvicorn
-import yaml
 from fastapi import APIRouter, FastAPI, HTTPException, status
 from fastapi.middleware.cors import CORSMiddleware
 from fastapi.responses import JSONResponse, Response
@@ -30,6 +29,7 @@ from fastapi.staticfiles import StaticFiles
 from pydantic import BaseModel
 from scipy.io import wavfile
 
+from config import get_path_config
 from style_bert_vits2.constants import (
     DEFAULT_ASSIST_TEXT_WEIGHT,
     DEFAULT_NOISE,
@@ -127,7 +127,7 @@ def download_and_extract(url, extract_to: Path):
 
 def new_release_available(latest_release):
     if LAST_DOWNLOAD_FILE.exists():
-        with open(LAST_DOWNLOAD_FILE
+        with open(LAST_DOWNLOAD_FILE) as file:
             last_download_str = file.read().strip()
             # 'Z'を除去して日時オブジェクトに変換
             last_download_str = last_download_str.replace("Z", "+00:00")
@@ -174,35 +174,32 @@ origins = [
     "http://127.0.0.1:8000",
 ]
 
-with open(Path("configs/paths.yml"), "r", encoding="utf-8") as f:
-    path_config: dict[str, str] = yaml.safe_load(f.read())
-    # dataset_root = path_config["dataset_root"]
-    assets_root = path_config["assets_root"]
-
+path_config = get_path_config()
 parser = argparse.ArgumentParser()
-parser.add_argument("--model_dir", type=str, default=
+parser.add_argument("--model_dir", type=str, default=path_config.assets_root)
 parser.add_argument("--device", type=str, default="cuda")
 parser.add_argument("--port", type=int, default=8000)
 parser.add_argument("--inbrowser", action="store_true")
 parser.add_argument("--line_length", type=int, default=None)
 parser.add_argument("--line_count", type=int, default=None)
-parser.add_argument(
-)
+# parser.add_argument("--skip_default_models", action="store_true")
+parser.add_argument("--skip_static_files", action="store_true")
 args = parser.parse_args()
 device = args.device
 if device == "cuda" and not torch.cuda.is_available():
     device = "cpu"
 model_dir = Path(args.model_dir)
 port = int(args.port)
+# if not args.skip_default_models:
+#     download_default_models()
+skip_static_files = bool(args.skip_static_files)
 
 model_holder = TTSModelHolder(model_dir, device)
 if len(model_holder.model_names) == 0:
     logger.error(f"Models not found in {model_dir}.")
     sys.exit(1)
 
+
 app = FastAPI()
@@ -444,7 +441,8 @@ def delete_user_dict_word(uuid: str):
 app.include_router(router, prefix="/api")
 
 if __name__ == "__main__":
+    if not skip_static_files:
+        download_static_files("litagin02", "Style-Bert-VITS2-Editor", "out.zip")
     app.mount("/", StaticFiles(directory=STATIC_DIR, html=True), name="static")
     if args.inbrowser:
         webbrowser.open(f"http://localhost:{port}")
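The new_release_available hunk keeps the existing trick of swapping the trailing "Z" for "+00:00" before parsing the GitHub release timestamp. A standalone sketch of why that replacement matters on the Python 3.10 runtime used here (datetime.fromisoformat only accepts the "Z" suffix from Python 3.11 onward); the timestamp value is just an example:

from datetime import datetime

last_download_str = "2024-06-01T12:34:56Z"  # example GitHub-style timestamp
last_download_str = last_download_str.replace("Z", "+00:00")
last_download = datetime.fromisoformat(last_download_str)
print(last_download)  # 2024-06-01 12:34:56+00:00 (timezone-aware)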
server_fastapi.py
CHANGED
@@ -20,7 +20,7 @@ from fastapi.middleware.cors import CORSMiddleware
 from fastapi.responses import FileResponse, Response
 from scipy.io import wavfile
 
-from config import
+from config import get_config
 from style_bert_vits2.constants import (
     DEFAULT_ASSIST_TEXT_WEIGHT,
     DEFAULT_LENGTH,
@@ -40,6 +40,7 @@ from style_bert_vits2.nlp.japanese.user_dict import update_dict
 from style_bert_vits2.tts_model import TTSModel, TTSModelHolder
 
 
+config = get_config()
 ln = config.server_config.language
 
 
@@ -113,6 +114,12 @@ if __name__ == "__main__":
     load_models(model_holder)
 
     limit = config.server_config.limit
+    if limit < 1:
+        limit = None
+    else:
+        logger.info(
+            f"The maximum length of the text is {limit}. If you want to change it, modify config.yml. Set limit to -1 to remove the limit."
+        )
     app = FastAPI()
     allow_origins = config.server_config.origins
     if allow_origins:
@@ -134,6 +141,10 @@
         request: Request,
         text: str = Query(..., min_length=1, max_length=limit, description="セリフ"),
         encoding: str = Query(None, description="textをURLデコードする(ex, `utf-8`)"),
+        model_name: str = Query(
+            None,
+            description="モデル名(model_idより優先)。model_assets内のディレクトリ名を指定",
+        ),
         model_id: int = Query(
             0, description="モデルID。`GET /models/info`のkeyの値を指定ください"
         ),
@@ -191,6 +202,20 @@
         ):  # /models/refresh があるためQuery(le)で表現不可
             raise_validation_error(f"model_id={model_id} not found", "model_id")
 
+        if model_name:
+            # load_models() の 処理内容が i の正当性を担保していることに注意
+            model_ids = [i for i, x in enumerate(model_holder.models_info) if x.name == model_name]
+            if not model_ids:
+                raise_validation_error(
+                    f"model_name={model_name} not found", "model_name"
+                )
+            # 今の実装ではディレクトリ名が重複することは無いはずだが...
+            if len(model_ids) > 1:
+                raise_validation_error(
+                    f"model_name={model_name} is ambiguous", "model_name"
+                )
+            model_id = model_ids[0]
+
         model = loaded_models[model_id]
         if speaker_name is None:
             if speaker_id not in model.id2spk.keys():
@@ -230,6 +255,10 @@
         wavfile.write(wavContent, sr, audio)
         return Response(content=wavContent.getvalue(), media_type="audio/wav")
 
+    @app.post("/g2p")
+    def g2p(text: str):
+        return g2kata_tone(normalize_text(text))
+
     @app.get("/models/info")
     def get_loaded_models_info():
         """ロードされたモデル情報の取得"""
@@ -305,6 +334,9 @@
 
     logger.info(f"server listen: http://127.0.0.1:{config.server_config.port}")
     logger.info(f"API docs: http://127.0.0.1:{config.server_config.port}/docs")
+    logger.info(
+        f"Input text length limit: {limit}. You can change it in server.limit in config.yml"
+    )
    uvicorn.run(
        app, port=config.server_config.port, host="0.0.0.0", log_level="warning"
    )
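Both API additions above can be exercised over plain HTTP once server_fastapi.py is running. A minimal sketch with requests; the host, port, and the /voice path are assumptions based on the server defaults in config.yml and the existing synthesis endpoint, so adjust them to your setup:

import requests

BASE = "http://127.0.0.1:5000"  # assumed host/port; see server_config in config.yml

# Pick a model by its model_assets directory name instead of a numeric model_id
res = requests.get(f"{BASE}/voice", params={"text": "こんにちは", "model_name": "jvnv-F1-jp"})
with open("out.wav", "wb") as f:
    f.write(res.content)

# New /g2p route: returns the kana/tone reading of the normalized text
print(requests.post(f"{BASE}/g2p", params={"text": "こんにちは"}).json())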
slice.py
CHANGED
@@ -7,15 +7,15 @@ from typing import Any, Optional
 
 import soundfile as sf
 import torch
-import yaml
 from tqdm import tqdm
 
+from config import get_path_config
 from style_bert_vits2.logging import logger
 from style_bert_vits2.utils.stdout_wrapper import SAFE_STDOUT
 
 
 def is_audio_file(file: Path) -> bool:
-    supported_extensions = [".wav", ".flac", ".mp3", ".ogg", ".opus"]
+    supported_extensions = [".wav", ".flac", ".mp3", ".ogg", ".opus", ".m4a"]
     return file.suffix.lower() in supported_extensions
 
 
@@ -150,13 +150,12 @@ if __name__ == "__main__":
     )
     args = parser.parse_args()
 
-
-
-    dataset_root = path_config["dataset_root"]
+    path_config = get_path_config()
+    dataset_root = path_config.dataset_root
 
     model_name = str(args.model_name)
     input_dir = Path(args.input_dir)
-    output_dir =
+    output_dir = dataset_root / model_name / "raw"
     min_sec: float = args.min_sec
     max_sec: float = args.max_sec
     min_silence_dur_ms: int = args.min_silence_dur_ms
@@ -198,11 +197,12 @@ if __name__ == "__main__":
                 q.task_done()
                 break
             try:
+                rel_path = file.relative_to(input_dir)
                 time_sec, count = split_wav(
                     vad_model=vad_model,
                     utils=utils,
                     audio_file=file,
-                    target_dir=output_dir,
+                    target_dir=output_dir / rel_path.parent,
                     min_sec=min_sec,
                     max_sec=max_sec,
                     min_silence_dur_ms=min_silence_dur_ms,
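To make the new target_dir handling concrete: a sliced file now keeps its position relative to --input_dir, so subfolders are mirrored under the raw/ output directory. A small sketch with made-up paths (dataset root, model name, and file names are placeholders):

from pathlib import Path

dataset_root = Path("Data")                 # resolved via get_path_config()
model_name = "my_model"                     # --model_name
input_dir = Path("inputs")                  # --input_dir
output_dir = dataset_root / model_name / "raw"

file = input_dir / "chapter1" / "take01.m4a"   # .m4a is now accepted by is_audio_file()
rel_path = file.relative_to(input_dir)
print(output_dir / rel_path.parent)            # Data/my_model/raw/chapter1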
speech_mos.py
CHANGED
@@ -10,7 +10,7 @@ import pandas as pd
 import torch
 from tqdm import tqdm
 
-from config import
+from config import get_path_config
 from style_bert_vits2.logging import logger
 from style_bert_vits2.tts_model import TTSModel
 
@@ -35,6 +35,8 @@ test_texts = [
     "この分野の最新の研究成果を使うと、より自然で表現豊かな音声の生成が可能である。深層学習の応用により、感情やアクセントを含む声質の微妙な変化も再現することが出来る。",
 ]
 
+path_config = get_path_config()
+
 predictor = torch.hub.load(
     "tarepan/SpeechMOS:v1.2.0", "utmos22_strong", trust_repo=True
 )
@@ -48,17 +50,16 @@ args = parser.parse_args()
 model_name: str = args.model_name
 device: str = args.device
 
-model_path =
-
+model_path = path_config.assets_root / model_name
 # .safetensorsファイルを検索
 safetensors_files = model_path.glob("*.safetensors")
 
 
 def get_model(model_file: Path):
     return TTSModel(
-        model_path=
-        config_path=
-        style_vec_path=
+        model_path=model_file,
+        config_path=model_file.parent / "config.json",
+        style_vec_path=model_file.parent / "style_vectors.npy",
        device=device,
     )
 
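As a rough sketch of what the rewritten paths resolve to, assuming the usual model_assets layout (the directory and file names below are placeholders, not part of the change itself):

from pathlib import Path

assets_root = Path("model_assets")      # path_config.assets_root
model_path = assets_root / "my_model"   # --model_name my_model
for model_file in model_path.glob("*.safetensors"):
    print(model_file)                                   # weights file, passed as model_path
    print(model_file.parent / "config.json")            # passed as config_path
    print(model_file.parent / "style_vectors.npy")      # passed as style_vec_path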