smjkshp123 committed on
Commit 6a8b8f4 · verified · 1 Parent(s): 38397b0

Upload folder using huggingface_hub

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. .gitattributes +2 -0
  2. .github/workflows/pre-commit.yaml +14 -0
  3. .github/workflows/publish-docker-image.yaml +60 -0
  4. .github/workflows/sync-hf.yaml +18 -0
  5. .gitignore +173 -0
  6. .pre-commit-config.yaml +14 -0
  7. Dockerfile +24 -0
  8. LICENSE +21 -0
  9. New Text Document.txt +17 -0
  10. README.md +170 -12
  11. pyproject.toml +61 -0
  12. ruff.toml +10 -0
  13. src/ElevenLabs_2024_10_31T10_14_17_Nicoletta_gen_s50_sb75_se10_b_m2.wav +3 -0
  14. src/f5_tts.egg-info/PKG-INFO +208 -0
  15. src/f5_tts.egg-info/SOURCES.txt +60 -0
  16. src/f5_tts.egg-info/dependency_links.txt +1 -0
  17. src/f5_tts.egg-info/entry_points.txt +5 -0
  18. src/f5_tts.egg-info/requires.txt +33 -0
  19. src/f5_tts.egg-info/top_level.txt +1 -0
  20. src/f5_tts/ElevenLabs_2024_10_31T10_14_17_Nicoletta_gen_s50_sb75_se10_b_m2.wav +3 -0
  21. src/f5_tts/api.py +138 -0
  22. src/f5_tts/eval/README.md +49 -0
  23. src/f5_tts/eval/ecapa_tdnn.py +330 -0
  24. src/f5_tts/eval/eval_infer_batch.py +197 -0
  25. src/f5_tts/eval/eval_infer_batch.sh +13 -0
  26. src/f5_tts/eval/eval_librispeech_test_clean.py +73 -0
  27. src/f5_tts/eval/eval_seedtts_testset.py +75 -0
  28. src/f5_tts/eval/utils_eval.py +397 -0
  29. src/f5_tts/infer/README.md +112 -0
  30. src/f5_tts/infer/examples/basic/basic.toml +10 -0
  31. src/f5_tts/infer/examples/basic/basic_ref_en.wav +0 -0
  32. src/f5_tts/infer/examples/basic/basic_ref_zh.wav +0 -0
  33. src/f5_tts/infer/examples/multi/country.flac +0 -0
  34. src/f5_tts/infer/examples/multi/main.flac +0 -0
  35. src/f5_tts/infer/examples/multi/story.toml +19 -0
  36. src/f5_tts/infer/examples/multi/story.txt +1 -0
  37. src/f5_tts/infer/examples/multi/town.flac +0 -0
  38. src/f5_tts/infer/examples/vocab.txt +2545 -0
  39. src/f5_tts/infer/infer_cli.py +200 -0
  40. src/f5_tts/infer/infer_gradio.py +729 -0
  41. src/f5_tts/infer/speech_edit.py +191 -0
  42. src/f5_tts/infer/utils_infer.py +439 -0
  43. src/f5_tts/model/__init__.py +10 -0
  44. src/f5_tts/model/backbones/README.md +20 -0
  45. src/f5_tts/model/backbones/dit.py +163 -0
  46. src/f5_tts/model/backbones/mmdit.py +146 -0
  47. src/f5_tts/model/backbones/unett.py +219 -0
  48. src/f5_tts/model/cfm.py +287 -0
  49. src/f5_tts/model/dataset.py +296 -0
  50. src/f5_tts/model/modules.py +581 -0
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ src/ElevenLabs_2024_10_31T10_14_17_Nicoletta_gen_s50_sb75_se10_b_m2.wav filter=lfs diff=lfs merge=lfs -text
+ src/f5_tts/ElevenLabs_2024_10_31T10_14_17_Nicoletta_gen_s50_sb75_se10_b_m2.wav filter=lfs diff=lfs merge=lfs -text
.github/workflows/pre-commit.yaml ADDED
@@ -0,0 +1,14 @@
name: pre-commit

on:
  pull_request:
  push:
    branches: [main]

jobs:
  pre-commit:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v3
      - uses: pre-commit/[email protected]
.github/workflows/publish-docker-image.yaml ADDED
@@ -0,0 +1,60 @@
name: Create and publish a Docker image

# Configures this workflow to run every time a change is pushed to the branch called `release`.
on:
  push:
    branches: ['main']

# Defines two custom environment variables for the workflow. These are used for the Container registry domain, and a name for the Docker image that this workflow builds.
env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}

# There is a single job in this workflow. It's configured to run on the latest available version of Ubuntu.
jobs:
  build-and-push-image:
    runs-on: ubuntu-latest
    # Sets the permissions granted to the `GITHUB_TOKEN` for the actions in this job.
    permissions:
      contents: read
      packages: write
    #
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Free Up GitHub Actions Ubuntu Runner Disk Space 🔧
        uses: jlumbroso/free-disk-space@main
        with:
          # This might remove tools that are actually needed, if set to "true" but frees about 6 GB
          tool-cache: false

          # All of these default to true, but feel free to set to "false" if necessary for your workflow
          android: true
          dotnet: true
          haskell: true
          large-packages: false
          swap-storage: false
          docker-images: false
      # Uses the `docker/login-action` action to log in to the Container registry registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here.
      - name: Log in to the Container registry
        uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      # This step uses [docker/metadata-action](https://github.com/docker/metadata-action#about) to extract tags and labels that will be applied to the specified image. The `id` "meta" allows the output of this step to be referenced in a subsequent step. The `images` value provides the base name for the tags and labels.
      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
      # This step uses the `docker/build-push-action` action to build the image, based on your repository's `Dockerfile`. If the build succeeds, it pushes the image to GitHub Packages.
      # It uses the `context` parameter to define the build's context as the set of files located in the specified path. For more information, see "[Usage](https://github.com/docker/build-push-action#usage)" in the README of the `docker/build-push-action` repository.
      # It uses the `tags` and `labels` parameters to tag and label the image with the output from the "meta" step.
      - name: Build and push Docker image
        uses: docker/build-push-action@f2a1d5e99d037542a71f64918e516c093c6f3fc4
        with:
          context: .
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
.github/workflows/sync-hf.yaml ADDED
@@ -0,0 +1,18 @@
name: Sync to HF Space

on:
  push:
    branches:
      - main

jobs:
  trigger_curl:
    runs-on: ubuntu-latest

    steps:
      - name: Send cURL POST request
        run: |
          curl -X POST https://mrfakename-sync-f5.hf.space/gradio_api/call/refresh \
            -s \
            -H "Content-Type: application/json" \
            -d "{\"data\": [\"${{ secrets.REFRESH_PASSWORD }}\"]}"
.gitignore ADDED
@@ -0,0 +1,173 @@
# Customed
.vscode/
tests/
runs/
data/
ckpts/
wandb/
results/



# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
.pdm.toml
.pdm-python
.pdm-build/

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
.pre-commit-config.yaml ADDED
@@ -0,0 +1,14 @@
repos:
  - repo: https://github.com/astral-sh/ruff-pre-commit
    # Ruff version.
    rev: v0.7.0
    hooks:
      # Run the linter.
      - id: ruff
        args: [--fix]
      # Run the formatter.
      - id: ruff-format
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v2.3.0
    hooks:
      - id: check-yaml
Dockerfile ADDED
@@ -0,0 +1,24 @@
FROM pytorch/pytorch:2.4.0-cuda12.4-cudnn9-devel

USER root

ARG DEBIAN_FRONTEND=noninteractive

LABEL github_repo="https://github.com/SWivid/F5-TTS"

RUN set -x \
    && apt-get update \
    && apt-get -y install wget curl man git less openssl libssl-dev unzip unar build-essential aria2 tmux vim \
    && apt-get install -y openssh-server sox libsox-fmt-all libsox-fmt-mp3 libsndfile1-dev ffmpeg \
    && rm -rf /var/lib/apt/lists/* \
    && apt-get clean

WORKDIR /workspace

RUN git clone https://github.com/SWivid/F5-TTS.git \
    && cd F5-TTS \
    && pip install -e .[eval]

ENV SHELL=/bin/bash

WORKDIR /workspace/F5-TTS
LICENSE ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2024 Yushen CHEN

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
New Text Document.txt ADDED
@@ -0,0 +1,17 @@
https://github.com/SWivid/F5-TTS



#1 //conda create -n f5-tts python=3.10
conda activate f5-tts

# Launch a Gradio app (web interface)
f5-tts_infer-gradio

# Specify the port/host
f5-tts_infer-gradio --port 7860 --host 0.0.0.0

# Launch a share link
f5-tts_infer-gradio --share

README.md CHANGED
@@ -1,12 +1,170 @@
- ---
- title: F5 TTS
- emoji: 🏢
- colorFrom: blue
- colorTo: red
- sdk: gradio
- sdk_version: 5.7.1
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ ---
+ title: F5-TTS
+ app_file: src\f5_tts\infer\infer_gradio.py
+ sdk: gradio
+ sdk_version: 4.44.1
+ ---
+ # F5-TTS: A Fairytaler that Fakes Fluent and Faithful Speech with Flow Matching
+
+ [![python](https://img.shields.io/badge/Python-3.10-brightgreen)](https://github.com/SWivid/F5-TTS)
+ [![arXiv](https://img.shields.io/badge/arXiv-2410.06885-b31b1b.svg?logo=arXiv)](https://arxiv.org/abs/2410.06885)
+ [![demo](https://img.shields.io/badge/GitHub-Demo%20page-orange.svg)](https://swivid.github.io/F5-TTS/)
+ [![hfspace](https://img.shields.io/badge/🤗-Space%20demo-yellow)](https://huggingface.co/spaces/mrfakename/E2-F5-TTS)
+ [![msspace](https://img.shields.io/badge/🤖-Space%20demo-blue)](https://modelscope.cn/studios/modelscope/E2-F5-TTS)
+ [![lab](https://img.shields.io/badge/X--LANCE-Lab-grey?labelColor=lightgrey)](https://x-lance.sjtu.edu.cn/)
+ <img src="https://github.com/user-attachments/assets/12d7749c-071a-427c-81bf-b87b91def670" alt="Watermark" style="width: 40px; height: auto">
+
+ **F5-TTS**: Diffusion Transformer with ConvNeXt V2, faster trained and inference.
+
+ **E2 TTS**: Flat-UNet Transformer, closest reproduction from [paper](https://arxiv.org/abs/2406.18009).
+
+ **Sway Sampling**: Inference-time flow step sampling strategy, greatly improves performance
+
+ ### Thanks to all the contributors !
+
+ ## News
+ - **2024/10/08**: F5-TTS & E2 TTS base models on [🤗 Hugging Face](https://huggingface.co/SWivid/F5-TTS), [🤖 Model Scope](https://www.modelscope.cn/models/SWivid/F5-TTS_Emilia-ZH-EN), [🟣 Wisemodel](https://wisemodel.cn/models/SJTU_X-LANCE/F5-TTS_Emilia-ZH-EN).
+
+ ## Installation
+
+ ```bash
+ # Create a python 3.10 conda env (you could also use virtualenv)
+ conda create -n f5-tts python=3.10
+ conda activate f5-tts
+
+ # Install pytorch with your CUDA version, e.g.
+ pip install torch==2.3.0+cu118 torchaudio==2.3.0+cu118 --extra-index-url https://download.pytorch.org/whl/cu118
+ ```
+
+ Then you can choose from a few options below:
+
+ ### 1. As a pip package (if just for inference)
+
+ ```bash
+ pip install git+https://github.com/SWivid/F5-TTS.git
+ ```
+
+ ### 2. Local editable (if also do training, finetuning)
+
+ ```bash
+ git clone https://github.com/SWivid/F5-TTS.git
+ cd F5-TTS
+ pip install -e .
+ ```
+
+ ### 3. Docker usage
+ ```bash
+ # Build from Dockerfile
+ docker build -t f5tts:v1 .
+
+ # Or pull from GitHub Container Registry
+ docker pull ghcr.io/swivid/f5-tts:main
+ ```
+
+
+ ## Inference
+
+ ### 1. Gradio App
+
+ Currently supported features:
+
+ - Basic TTS with Chunk Inference
+ - Multi-Style / Multi-Speaker Generation
+ - Voice Chat powered by Qwen2.5-3B-Instruct
+
+ ```bash
+ # Launch a Gradio app (web interface)
+ f5-tts_infer-gradio
+
+ # Specify the port/host
+ f5-tts_infer-gradio --port 7860 --host 0.0.0.0
+
+ # Launch a share link
+ f5-tts_infer-gradio --share
+ ```
+
+ ### 2. CLI Inference
+
+ ```bash
+ # Run with flags
+ # Leave --ref_text "" will have ASR model transcribe (extra GPU memory usage)
+ f5-tts_infer-cli \
+ --model "F5-TTS" \
+ --ref_audio "ref_audio.wav" \
+ --ref_text "The content, subtitle or transcription of reference audio." \
+ --gen_text "Some text you want TTS model generate for you."
+
+ # Run with default setting. src/f5_tts/infer/examples/basic/basic.toml
+ f5-tts_infer-cli
+ # Or with your own .toml file
+ f5-tts_infer-cli -c custom.toml
+
+ # Multi voice. See src/f5_tts/infer/README.md
+ f5-tts_infer-cli -c src/f5_tts/infer/examples/multi/story.toml
+ ```
+
+ ### 3. More instructions
+
+ - In order to have better generation results, take a moment to read [detailed guidance](src/f5_tts/infer).
+ - The [Issues](https://github.com/SWivid/F5-TTS/issues?q=is%3Aissue) are very useful, please try to find the solution by properly searching the keywords of problem encountered. If no answer found, then feel free to open an issue.
+
+
+ ## Training
+
+ ### 1. Gradio App
+
+ Read [training & finetuning guidance](src/f5_tts/train) for more instructions.
+
+ ```bash
+ # Quick start with Gradio web interface
+ f5-tts_finetune-gradio
+ ```
+
+
+ ## [Evaluation](src/f5_tts/eval)
+
+
+ ## Development
+
+ Use pre-commit to ensure code quality (will run linters and formatters automatically)
+
+ ```bash
+ pip install pre-commit
+ pre-commit install
+ ```
+
+ When making a pull request, before each commit, run:
+
+ ```bash
+ pre-commit run --all-files
+ ```
+
+ Note: Some model components have linting exceptions for E722 to accommodate tensor notation
+
+
+ ## Acknowledgements
+
+ - [E2-TTS](https://arxiv.org/abs/2406.18009) brilliant work, simple and effective
+ - [Emilia](https://arxiv.org/abs/2407.05361), [WenetSpeech4TTS](https://arxiv.org/abs/2406.05763) valuable datasets
+ - [lucidrains](https://github.com/lucidrains) initial CFM structure with also [bfs18](https://github.com/bfs18) for discussion
+ - [SD3](https://arxiv.org/abs/2403.03206) & [Hugging Face diffusers](https://github.com/huggingface/diffusers) DiT and MMDiT code structure
+ - [torchdiffeq](https://github.com/rtqichen/torchdiffeq) as ODE solver, [Vocos](https://huggingface.co/charactr/vocos-mel-24khz) as vocoder
+ - [FunASR](https://github.com/modelscope/FunASR), [faster-whisper](https://github.com/SYSTRAN/faster-whisper), [UniSpeech](https://github.com/microsoft/UniSpeech) for evaluation tools
+ - [ctc-forced-aligner](https://github.com/MahmoudAshraf97/ctc-forced-aligner) for speech edit test
+ - [mrfakename](https://x.com/realmrfakename) huggingface space demo ~
+ - [f5-tts-mlx](https://github.com/lucasnewman/f5-tts-mlx/tree/main) Implementation with MLX framework by [Lucas Newman](https://github.com/lucasnewman)
+ - [F5-TTS-ONNX](https://github.com/DakeQQ/F5-TTS-ONNX) ONNX Runtime version by [DakeQQ](https://github.com/DakeQQ)
+
+ ## Citation
+ If our work and codebase is useful for you, please cite as:
+ ```
+ @article{chen-etal-2024-f5tts,
+ title={F5-TTS: A Fairytaler that Fakes Fluent and Faithful Speech with Flow Matching},
+ author={Yushen Chen and Zhikang Niu and Ziyang Ma and Keqi Deng and Chunhui Wang and Jian Zhao and Kai Yu and Xie Chen},
+ journal={arXiv preprint arXiv:2410.06885},
+ year={2024},
+ }
+ ```
+ ## License
+
+ Our code is released under MIT License. The pre-trained models are licensed under the CC-BY-NC license due to the training data Emilia, which is an in-the-wild dataset. Sorry for any inconvenience this may cause.
pyproject.toml ADDED
@@ -0,0 +1,61 @@
[build-system]
requires = ["setuptools >= 61.0", "setuptools-scm>=8.0"]
build-backend = "setuptools.build_meta"

[project]
name = "f5-tts"
dynamic = ["version"]
description = "F5-TTS: A Fairytaler that Fakes Fluent and Faithful Speech with Flow Matching"
readme = "README.md"
license = {text = "MIT License"}
classifiers = [
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
    "Programming Language :: Python :: 3",
]
dependencies = [
    "accelerate>=0.33.0",
    "bitsandbytes>0.37.0",
    "cached_path",
    "click",
    "datasets",
    "ema_pytorch>=0.5.2",
    "gradio>=3.45.2",
    "jieba",
    "librosa",
    "matplotlib",
    "numpy<=1.26.4",
    "pydub",
    "pypinyin",
    "safetensors",
    "soundfile",
    "tomli",
    "torch>=2.0.0",
    "torchaudio>=2.0.0",
    "torchdiffeq",
    "tqdm>=4.65.0",
    "transformers",
    "transformers_stream_generator",
    "vocos",
    "wandb",
    "x_transformers>=1.31.14",
]

[project.optional-dependencies]
eval = [
    "faster_whisper==0.10.1",
    "funasr",
    "jiwer",
    "modelscope",
    "zhconv",
    "zhon",
]

[project.urls]
Homepage = "https://github.com/SWivid/F5-TTS"

[project.scripts]
"f5-tts_infer-cli" = "f5_tts.infer.infer_cli:main"
"f5-tts_infer-gradio" = "f5_tts.infer.infer_gradio:main"
"f5-tts_finetune-cli" = "f5_tts.train.finetune_cli:main"
"f5-tts_finetune-gradio" = "f5_tts.train.finetune_gradio:main"
ruff.toml ADDED
@@ -0,0 +1,10 @@
line-length = 120
target-version = "py310"

[lint]
# Only ignore variables with names starting with "_".
dummy-variable-rgx = "^_.*$"

[lint.isort]
force-single-line = true
lines-after-imports = 2
src/ElevenLabs_2024_10_31T10_14_17_Nicoletta_gen_s50_sb75_se10_b_m2.wav ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:513c11b36a53076548433b6b01cc328742b4885850c20ecd2c11401ffe7ec25f
size 1202732
src/f5_tts.egg-info/PKG-INFO ADDED
@@ -0,0 +1,208 @@
Metadata-Version: 2.1
Name: f5-tts
Version: 0.0.0
Summary: F5-TTS: A Fairytaler that Fakes Fluent and Faithful Speech with Flow Matching
License: MIT License
Project-URL: Homepage, https://github.com/SWivid/F5-TTS
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python :: 3
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: accelerate>=0.33.0
Requires-Dist: bitsandbytes>0.37.0
Requires-Dist: cached_path
Requires-Dist: click
Requires-Dist: datasets
Requires-Dist: ema_pytorch>=0.5.2
Requires-Dist: gradio>=3.45.2
Requires-Dist: jieba
Requires-Dist: librosa
Requires-Dist: matplotlib
Requires-Dist: numpy<=1.26.4
Requires-Dist: pydub
Requires-Dist: pypinyin
Requires-Dist: safetensors
Requires-Dist: soundfile
Requires-Dist: tomli
Requires-Dist: torch>=2.0.0
Requires-Dist: torchaudio>=2.0.0
Requires-Dist: torchdiffeq
Requires-Dist: tqdm>=4.65.0
Requires-Dist: transformers
Requires-Dist: transformers_stream_generator
Requires-Dist: vocos
Requires-Dist: wandb
Requires-Dist: x_transformers>=1.31.14
Provides-Extra: eval
Requires-Dist: faster_whisper==0.10.1; extra == "eval"
Requires-Dist: funasr; extra == "eval"
Requires-Dist: jiwer; extra == "eval"
Requires-Dist: modelscope; extra == "eval"
Requires-Dist: zhconv; extra == "eval"
Requires-Dist: zhon; extra == "eval"

# F5-TTS: A Fairytaler that Fakes Fluent and Faithful Speech with Flow Matching

[![python](https://img.shields.io/badge/Python-3.10-brightgreen)](https://github.com/SWivid/F5-TTS)
[![arXiv](https://img.shields.io/badge/arXiv-2410.06885-b31b1b.svg?logo=arXiv)](https://arxiv.org/abs/2410.06885)
[![demo](https://img.shields.io/badge/GitHub-Demo%20page-orange.svg)](https://swivid.github.io/F5-TTS/)
[![hfspace](https://img.shields.io/badge/🤗-Space%20demo-yellow)](https://huggingface.co/spaces/mrfakename/E2-F5-TTS)
[![msspace](https://img.shields.io/badge/🤖-Space%20demo-blue)](https://modelscope.cn/studios/modelscope/E2-F5-TTS)
[![lab](https://img.shields.io/badge/X--LANCE-Lab-grey?labelColor=lightgrey)](https://x-lance.sjtu.edu.cn/)
<img src="https://github.com/user-attachments/assets/12d7749c-071a-427c-81bf-b87b91def670" alt="Watermark" style="width: 40px; height: auto">

**F5-TTS**: Diffusion Transformer with ConvNeXt V2, faster trained and inference.

**E2 TTS**: Flat-UNet Transformer, closest reproduction from [paper](https://arxiv.org/abs/2406.18009).

**Sway Sampling**: Inference-time flow step sampling strategy, greatly improves performance

### Thanks to all the contributors !

## News
- **2024/10/08**: F5-TTS & E2 TTS base models on [🤗 Hugging Face](https://huggingface.co/SWivid/F5-TTS), [🤖 Model Scope](https://www.modelscope.cn/models/SWivid/F5-TTS_Emilia-ZH-EN), [🟣 Wisemodel](https://wisemodel.cn/models/SJTU_X-LANCE/F5-TTS_Emilia-ZH-EN).

## Installation

```bash
# Create a python 3.10 conda env (you could also use virtualenv)
conda create -n f5-tts python=3.10
conda activate f5-tts

# Install pytorch with your CUDA version, e.g.
pip install torch==2.3.0+cu118 torchaudio==2.3.0+cu118 --extra-index-url https://download.pytorch.org/whl/cu118
```

Then you can choose from a few options below:

### 1. As a pip package (if just for inference)

```bash
pip install git+https://github.com/SWivid/F5-TTS.git
```

### 2. Local editable (if also do training, finetuning)

```bash
git clone https://github.com/SWivid/F5-TTS.git
cd F5-TTS
pip install -e .
```

### 3. Docker usage
```bash
# Build from Dockerfile
docker build -t f5tts:v1 .

# Or pull from GitHub Container Registry
docker pull ghcr.io/swivid/f5-tts:main
```


## Inference

### 1. Gradio App

Currently supported features:

- Basic TTS with Chunk Inference
- Multi-Style / Multi-Speaker Generation
- Voice Chat powered by Qwen2.5-3B-Instruct

```bash
# Launch a Gradio app (web interface)
f5-tts_infer-gradio

# Specify the port/host
f5-tts_infer-gradio --port 7860 --host 0.0.0.0

# Launch a share link
f5-tts_infer-gradio --share
```

### 2. CLI Inference

```bash
# Run with flags
# Leave --ref_text "" will have ASR model transcribe (extra GPU memory usage)
f5-tts_infer-cli \
--model "F5-TTS" \
--ref_audio "ref_audio.wav" \
--ref_text "The content, subtitle or transcription of reference audio." \
--gen_text "Some text you want TTS model generate for you."

# Run with default setting. src/f5_tts/infer/examples/basic/basic.toml
f5-tts_infer-cli
# Or with your own .toml file
f5-tts_infer-cli -c custom.toml

# Multi voice. See src/f5_tts/infer/README.md
f5-tts_infer-cli -c src/f5_tts/infer/examples/multi/story.toml
```

### 3. More instructions

- In order to have better generation results, take a moment to read [detailed guidance](src/f5_tts/infer).
- The [Issues](https://github.com/SWivid/F5-TTS/issues?q=is%3Aissue) are very useful, please try to find the solution by properly searching the keywords of problem encountered. If no answer found, then feel free to open an issue.


## Training

### 1. Gradio App

Read [training & finetuning guidance](src/f5_tts/train) for more instructions.

```bash
# Quick start with Gradio web interface
f5-tts_finetune-gradio
```


## [Evaluation](src/f5_tts/eval)


## Development

Use pre-commit to ensure code quality (will run linters and formatters automatically)

```bash
pip install pre-commit
pre-commit install
```

When making a pull request, before each commit, run:

```bash
pre-commit run --all-files
```

Note: Some model components have linting exceptions for E722 to accommodate tensor notation


## Acknowledgements

- [E2-TTS](https://arxiv.org/abs/2406.18009) brilliant work, simple and effective
- [Emilia](https://arxiv.org/abs/2407.05361), [WenetSpeech4TTS](https://arxiv.org/abs/2406.05763) valuable datasets
- [lucidrains](https://github.com/lucidrains) initial CFM structure with also [bfs18](https://github.com/bfs18) for discussion
- [SD3](https://arxiv.org/abs/2403.03206) & [Hugging Face diffusers](https://github.com/huggingface/diffusers) DiT and MMDiT code structure
- [torchdiffeq](https://github.com/rtqichen/torchdiffeq) as ODE solver, [Vocos](https://huggingface.co/charactr/vocos-mel-24khz) as vocoder
- [FunASR](https://github.com/modelscope/FunASR), [faster-whisper](https://github.com/SYSTRAN/faster-whisper), [UniSpeech](https://github.com/microsoft/UniSpeech) for evaluation tools
- [ctc-forced-aligner](https://github.com/MahmoudAshraf97/ctc-forced-aligner) for speech edit test
- [mrfakename](https://x.com/realmrfakename) huggingface space demo ~
- [f5-tts-mlx](https://github.com/lucasnewman/f5-tts-mlx/tree/main) Implementation with MLX framework by [Lucas Newman](https://github.com/lucasnewman)
- [F5-TTS-ONNX](https://github.com/DakeQQ/F5-TTS-ONNX) ONNX Runtime version by [DakeQQ](https://github.com/DakeQQ)

## Citation
If our work and codebase is useful for you, please cite as:
```
@article{chen-etal-2024-f5tts,
title={F5-TTS: A Fairytaler that Fakes Fluent and Faithful Speech with Flow Matching},
author={Yushen Chen and Zhikang Niu and Ziyang Ma and Keqi Deng and Chunhui Wang and Jian Zhao and Kai Yu and Xie Chen},
journal={arXiv preprint arXiv:2410.06885},
year={2024},
}
```
## License

Our code is released under MIT License. The pre-trained models are licensed under the CC-BY-NC license due to the training data Emilia, which is an in-the-wild dataset. Sorry for any inconvenience this may cause.
src/f5_tts.egg-info/SOURCES.txt ADDED
@@ -0,0 +1,60 @@
.gitignore
.pre-commit-config.yaml
Dockerfile
LICENSE
README.md
pyproject.toml
ruff.toml
.github/workflows/pre-commit.yaml
.github/workflows/publish-docker-image.yaml
.github/workflows/sync-hf.yaml
ckpts/README.md
data/librispeech_pc_test_clean_cross_sentence.lst
data/Emilia_ZH_EN_pinyin/vocab.txt
src/f5_tts/api.py
src/f5_tts.egg-info/PKG-INFO
src/f5_tts.egg-info/SOURCES.txt
src/f5_tts.egg-info/dependency_links.txt
src/f5_tts.egg-info/entry_points.txt
src/f5_tts.egg-info/requires.txt
src/f5_tts.egg-info/top_level.txt
src/f5_tts/eval/README.md
src/f5_tts/eval/ecapa_tdnn.py
src/f5_tts/eval/eval_infer_batch.py
src/f5_tts/eval/eval_infer_batch.sh
src/f5_tts/eval/eval_librispeech_test_clean.py
src/f5_tts/eval/eval_seedtts_testset.py
src/f5_tts/eval/utils_eval.py
src/f5_tts/infer/README.md
src/f5_tts/infer/infer_cli.py
src/f5_tts/infer/infer_gradio.py
src/f5_tts/infer/speech_edit.py
src/f5_tts/infer/utils_infer.py
src/f5_tts/infer/examples/vocab.txt
src/f5_tts/infer/examples/basic/basic.toml
src/f5_tts/infer/examples/basic/basic_ref_en.wav
src/f5_tts/infer/examples/basic/basic_ref_zh.wav
src/f5_tts/infer/examples/multi/country.flac
src/f5_tts/infer/examples/multi/main.flac
src/f5_tts/infer/examples/multi/story.toml
src/f5_tts/infer/examples/multi/story.txt
src/f5_tts/infer/examples/multi/town.flac
src/f5_tts/model/__init__.py
src/f5_tts/model/cfm.py
src/f5_tts/model/dataset.py
src/f5_tts/model/modules.py
src/f5_tts/model/trainer.py
src/f5_tts/model/utils.py
src/f5_tts/model/backbones/README.md
src/f5_tts/model/backbones/dit.py
src/f5_tts/model/backbones/mmdit.py
src/f5_tts/model/backbones/unett.py
src/f5_tts/scripts/count_max_epoch.py
src/f5_tts/scripts/count_params_gflops.py
src/f5_tts/train/README.md
src/f5_tts/train/finetune_cli.py
src/f5_tts/train/finetune_gradio.py
src/f5_tts/train/train.py
src/f5_tts/train/datasets/prepare_csv_wavs.py
src/f5_tts/train/datasets/prepare_emilia.py
src/f5_tts/train/datasets/prepare_wenetspeech4tts.py
src/f5_tts.egg-info/dependency_links.txt ADDED
@@ -0,0 +1 @@

src/f5_tts.egg-info/entry_points.txt ADDED
@@ -0,0 +1,5 @@
[console_scripts]
f5-tts_finetune-cli = f5_tts.train.finetune_cli:main
f5-tts_finetune-gradio = f5_tts.train.finetune_gradio:main
f5-tts_infer-cli = f5_tts.infer.infer_cli:main
f5-tts_infer-gradio = f5_tts.infer.infer_gradio:main
src/f5_tts.egg-info/requires.txt ADDED
@@ -0,0 +1,33 @@
accelerate>=0.33.0
bitsandbytes>0.37.0
cached_path
click
datasets
ema_pytorch>=0.5.2
gradio>=3.45.2
jieba
librosa
matplotlib
numpy<=1.26.4
pydub
pypinyin
safetensors
soundfile
tomli
torch>=2.0.0
torchaudio>=2.0.0
torchdiffeq
tqdm>=4.65.0
transformers
transformers_stream_generator
vocos
wandb
x_transformers>=1.31.14

[eval]
faster_whisper==0.10.1
funasr
jiwer
modelscope
zhconv
zhon
src/f5_tts.egg-info/top_level.txt ADDED
@@ -0,0 +1 @@
f5_tts
src/f5_tts/ElevenLabs_2024_10_31T10_14_17_Nicoletta_gen_s50_sb75_se10_b_m2.wav ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:513c11b36a53076548433b6b01cc328742b4885850c20ecd2c11401ffe7ec25f
size 1202732
src/f5_tts/api.py ADDED
@@ -0,0 +1,138 @@
import random
import sys
import tqdm
from importlib.resources import files

import soundfile as sf
import torch
from cached_path import cached_path

from f5_tts.model import DiT, UNetT
from f5_tts.model.utils import seed_everything
from f5_tts.infer.utils_infer import (
    load_vocoder,
    load_model,
    infer_process,
    remove_silence_for_generated_wav,
    save_spectrogram,
)


class F5TTS:
    def __init__(
        self,
        model_type="F5-TTS",
        ckpt_file="",
        vocab_file="",
        ode_method="euler",
        use_ema=True,
        local_path=None,
        device=None,
    ):
        # Initialize parameters
        self.final_wave = None
        self.target_sample_rate = 24000
        self.n_mel_channels = 100
        self.hop_length = 256
        self.target_rms = 0.1
        self.seed = -1

        # Set device
        self.device = device or (
            "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
        )

        # Load models
        self.load_vocoder_model(local_path)
        self.load_ema_model(model_type, ckpt_file, vocab_file, ode_method, use_ema)

    def load_vocoder_model(self, local_path):
        self.vocos = load_vocoder(local_path is not None, local_path, self.device)

    def load_ema_model(self, model_type, ckpt_file, vocab_file, ode_method, use_ema):
        if model_type == "F5-TTS":
            if not ckpt_file:
                ckpt_file = str(cached_path("hf://SWivid/F5-TTS/F5TTS_Base/model_1200000.safetensors"))
            model_cfg = dict(dim=1024, depth=22, heads=16, ff_mult=2, text_dim=512, conv_layers=4)
            model_cls = DiT
        elif model_type == "E2-TTS":
            if not ckpt_file:
                ckpt_file = str(cached_path("hf://SWivid/E2-TTS/E2TTS_Base/model_1200000.safetensors"))
            model_cfg = dict(dim=1024, depth=24, heads=16, ff_mult=4)
            model_cls = UNetT
        else:
            raise ValueError(f"Unknown model type: {model_type}")

        self.ema_model = load_model(model_cls, model_cfg, ckpt_file, vocab_file, ode_method, use_ema, self.device)

    def export_wav(self, wav, file_wave, remove_silence=False):
        sf.write(file_wave, wav, self.target_sample_rate)

        if remove_silence:
            remove_silence_for_generated_wav(file_wave)

    def export_spectrogram(self, spect, file_spect):
        save_spectrogram(spect, file_spect)

    def infer(
        self,
        ref_file,
        ref_text,
        gen_text,
        show_info=print,
        progress=tqdm,
        target_rms=0.1,
        cross_fade_duration=0.15,
        sway_sampling_coef=-1,
        cfg_strength=2,
        nfe_step=32,
        speed=1.0,
        fix_duration=None,
        remove_silence=False,
        file_wave=None,
        file_spect=None,
        seed=-1,
    ):
        if seed == -1:
            seed = random.randint(0, sys.maxsize)
        seed_everything(seed)
        self.seed = seed
        wav, sr, spect = infer_process(
            ref_file,
            ref_text,
            gen_text,
            self.ema_model,
            show_info=show_info,
            progress=progress,
            target_rms=target_rms,
            cross_fade_duration=cross_fade_duration,
            nfe_step=nfe_step,
            cfg_strength=cfg_strength,
            sway_sampling_coef=sway_sampling_coef,
            speed=speed,
            fix_duration=fix_duration,
            device=self.device,
        )

        if file_wave is not None:
            self.export_wav(wav, file_wave, remove_silence)

        if file_spect is not None:
            self.export_spectrogram(spect, file_spect)

        return wav, sr, spect


if __name__ == "__main__":
    f5tts = F5TTS()

    wav, sr, spect = f5tts.infer(
        ref_file=str(files("f5_tts").joinpath("infer/examples/basic/basic_ref_en.wav")),
        ref_text="some call me nature, others call me mother nature.",
        gen_text="""I don't really care what you call me. I've been a silent spectator, watching species evolve, empires rise and fall. But always remember, I am mighty and enduring. Respect me and I'll nurture you; ignore me and you shall face the consequences.""",
        file_wave=str(files("f5_tts").joinpath("../../tests/api_out.wav")),
        file_spect=str(files("f5_tts").joinpath("../../tests/api_out.png")),
        seed=-1,  # random seed = -1
    )

    print("seed :", f5tts.seed)
src/f5_tts/eval/README.md ADDED
@@ -0,0 +1,49 @@

# Evaluation

Install packages for evaluation:

```bash
pip install -e .[eval]
```

## Generating Samples for Evaluation

### Prepare Test Datasets

1. *Seed-TTS testset*: Download from [seed-tts-eval](https://github.com/BytedanceSpeech/seed-tts-eval).
2. *LibriSpeech test-clean*: Download from [OpenSLR](http://www.openslr.org/12/).
3. Unzip the downloaded datasets and place them in the `data/` directory.
4. Update the path for *LibriSpeech test-clean* data in `src/f5_tts/eval/eval_infer_batch.py`
5. Our filtered LibriSpeech-PC 4-10s subset: `data/librispeech_pc_test_clean_cross_sentence.lst`

### Batch Inference for Test Set

To run batch inference for evaluations, execute the following commands:

```bash
# batch inference for evaluations
accelerate config  # if not set before
bash src/f5_tts/eval/eval_infer_batch.sh
```

## Objective Evaluation on Generated Results

### Download Evaluation Model Checkpoints

1. Chinese ASR Model: [Paraformer-zh](https://huggingface.co/funasr/paraformer-zh)
2. English ASR Model: [Faster-Whisper](https://huggingface.co/Systran/faster-whisper-large-v3)
3. WavLM Model: Download from [Google Drive](https://drive.google.com/file/d/1-aE1NfzpRCLxA4GUxX9ITI3F9LlbtEGP/view).

Then update in the following scripts with the paths you put evaluation model ckpts to.

### Objective Evaluation

Update the path with your batch-inferenced results, and carry out WER / SIM evaluations:
```bash
# Evaluation for Seed-TTS test set
python src/f5_tts/eval/eval_seedtts_testset.py

# Evaluation for LibriSpeech-PC test-clean (cross-sentence)
python src/f5_tts/eval/eval_librispeech_test_clean.py
```
src/f5_tts/eval/ecapa_tdnn.py ADDED
@@ -0,0 +1,330 @@
# just for speaker similarity evaluation, third-party code

# From https://github.com/microsoft/UniSpeech/blob/main/downstreams/speaker_verification/models/
# part of the code is borrowed from https://github.com/lawlict/ECAPA-TDNN

import os
import torch
import torch.nn as nn
import torch.nn.functional as F


""" Res2Conv1d + BatchNorm1d + ReLU
"""


class Res2Conv1dReluBn(nn.Module):
    """
    in_channels == out_channels == channels
    """

    def __init__(self, channels, kernel_size=1, stride=1, padding=0, dilation=1, bias=True, scale=4):
        super().__init__()
        assert channels % scale == 0, "{} % {} != 0".format(channels, scale)
        self.scale = scale
        self.width = channels // scale
        self.nums = scale if scale == 1 else scale - 1

        self.convs = []
        self.bns = []
        for i in range(self.nums):
            self.convs.append(nn.Conv1d(self.width, self.width, kernel_size, stride, padding, dilation, bias=bias))
            self.bns.append(nn.BatchNorm1d(self.width))
        self.convs = nn.ModuleList(self.convs)
        self.bns = nn.ModuleList(self.bns)

    def forward(self, x):
        out = []
        spx = torch.split(x, self.width, 1)
        for i in range(self.nums):
            if i == 0:
                sp = spx[i]
            else:
                sp = sp + spx[i]
            # Order: conv -> relu -> bn
            sp = self.convs[i](sp)
            sp = self.bns[i](F.relu(sp))
            out.append(sp)
        if self.scale != 1:
            out.append(spx[self.nums])
        out = torch.cat(out, dim=1)

        return out


""" Conv1d + BatchNorm1d + ReLU
"""


class Conv1dReluBn(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1, bias=True):
        super().__init__()
        self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, stride, padding, dilation, bias=bias)
        self.bn = nn.BatchNorm1d(out_channels)

    def forward(self, x):
        return self.bn(F.relu(self.conv(x)))


""" The SE connection of 1D case.
"""


class SE_Connect(nn.Module):
    def __init__(self, channels, se_bottleneck_dim=128):
        super().__init__()
        self.linear1 = nn.Linear(channels, se_bottleneck_dim)
        self.linear2 = nn.Linear(se_bottleneck_dim, channels)

    def forward(self, x):
        out = x.mean(dim=2)
        out = F.relu(self.linear1(out))
        out = torch.sigmoid(self.linear2(out))
        out = x * out.unsqueeze(2)

        return out


""" SE-Res2Block of the ECAPA-TDNN architecture.
"""

# def SE_Res2Block(channels, kernel_size, stride, padding, dilation, scale):
#     return nn.Sequential(
#         Conv1dReluBn(channels, 512, kernel_size=1, stride=1, padding=0),
#         Res2Conv1dReluBn(512, kernel_size, stride, padding, dilation, scale=scale),
#         Conv1dReluBn(512, channels, kernel_size=1, stride=1, padding=0),
#         SE_Connect(channels)
#     )


class SE_Res2Block(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation, scale, se_bottleneck_dim):
        super().__init__()
        self.Conv1dReluBn1 = Conv1dReluBn(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
        self.Res2Conv1dReluBn = Res2Conv1dReluBn(out_channels, kernel_size, stride, padding, dilation, scale=scale)
        self.Conv1dReluBn2 = Conv1dReluBn(out_channels, out_channels, kernel_size=1, stride=1, padding=0)
        self.SE_Connect = SE_Connect(out_channels, se_bottleneck_dim)

        self.shortcut = None
        if in_channels != out_channels:
            self.shortcut = nn.Conv1d(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=1,
            )

    def forward(self, x):
        residual = x
        if self.shortcut:
            residual = self.shortcut(x)

        x = self.Conv1dReluBn1(x)
        x = self.Res2Conv1dReluBn(x)
        x = self.Conv1dReluBn2(x)
        x = self.SE_Connect(x)

        return x + residual


""" Attentive weighted mean and standard deviation pooling.
"""


class AttentiveStatsPool(nn.Module):
    def __init__(self, in_dim, attention_channels=128, global_context_att=False):
        super().__init__()
        self.global_context_att = global_context_att

        # Use Conv1d with stride == 1 rather than Linear, then we don't need to transpose inputs.
        if global_context_att:
            self.linear1 = nn.Conv1d(in_dim * 3, attention_channels, kernel_size=1)  # equals W and b in the paper
        else:
            self.linear1 = nn.Conv1d(in_dim, attention_channels, kernel_size=1)  # equals W and b in the paper
        self.linear2 = nn.Conv1d(attention_channels, in_dim, kernel_size=1)  # equals V and k in the paper

    def forward(self, x):
        if self.global_context_att:
            context_mean = torch.mean(x, dim=-1, keepdim=True).expand_as(x)
            context_std = torch.sqrt(torch.var(x, dim=-1, keepdim=True) + 1e-10).expand_as(x)
            x_in = torch.cat((x, context_mean, context_std), dim=1)
        else:
            x_in = x

        # DON'T use ReLU here! In experiments, I find ReLU hard to converge.
        alpha = torch.tanh(self.linear1(x_in))
        # alpha = F.relu(self.linear1(x_in))
        alpha = torch.softmax(self.linear2(alpha), dim=2)
        mean = torch.sum(alpha * x, dim=2)
        residuals = torch.sum(alpha * (x**2), dim=2) - mean**2
        std = torch.sqrt(residuals.clamp(min=1e-9))
        return torch.cat([mean, std], dim=1)


class ECAPA_TDNN(nn.Module):
    def __init__(
        self,
        feat_dim=80,
        channels=512,
        emb_dim=192,
        global_context_att=False,
        feat_type="wavlm_large",
        sr=16000,
        feature_selection="hidden_states",
        update_extract=False,
        config_path=None,
    ):
        super().__init__()

        self.feat_type = feat_type
        self.feature_selection = feature_selection
        self.update_extract = update_extract
        self.sr = sr

        torch.hub._validate_not_a_forked_repo = lambda a, b, c: True
        try:
            local_s3prl_path = os.path.expanduser("~/.cache/torch/hub/s3prl_s3prl_main")
            self.feature_extract = torch.hub.load(local_s3prl_path, feat_type, source="local", config_path=config_path)
        except:  # noqa: E722
            self.feature_extract = torch.hub.load("s3prl/s3prl", feat_type)

        if len(self.feature_extract.model.encoder.layers) == 24 and hasattr(
            self.feature_extract.model.encoder.layers[23].self_attn, "fp32_attention"
        ):
            self.feature_extract.model.encoder.layers[23].self_attn.fp32_attention = False
        if len(self.feature_extract.model.encoder.layers) == 24 and hasattr(
            self.feature_extract.model.encoder.layers[11].self_attn, "fp32_attention"
        ):
            self.feature_extract.model.encoder.layers[11].self_attn.fp32_attention = False

        self.feat_num = self.get_feat_num()
        self.feature_weight = nn.Parameter(torch.zeros(self.feat_num))

        if feat_type != "fbank" and feat_type != "mfcc":
            freeze_list = ["final_proj", "label_embs_concat", "mask_emb", "project_q", "quantizer"]
            for name, param in self.feature_extract.named_parameters():
                for freeze_val in freeze_list:
                    if freeze_val in name:
                        param.requires_grad = False
                        break

        if not self.update_extract:
            for param in self.feature_extract.parameters():
                param.requires_grad = False

        self.instance_norm = nn.InstanceNorm1d(feat_dim)
        # self.channels = [channels] * 4 + [channels * 3]
        self.channels = [channels] * 4 + [1536]

        self.layer1 = Conv1dReluBn(feat_dim, self.channels[0], kernel_size=5, padding=2)
        self.layer2 = SE_Res2Block(
            self.channels[0],
            self.channels[1],
            kernel_size=3,
            stride=1,
            padding=2,
            dilation=2,
            scale=8,
            se_bottleneck_dim=128,
        )
        self.layer3 = SE_Res2Block(
            self.channels[1],
            self.channels[2],
            kernel_size=3,
            stride=1,
            padding=3,
            dilation=3,
            scale=8,
            se_bottleneck_dim=128,
        )
        self.layer4 = SE_Res2Block(
            self.channels[2],
            self.channels[3],
            kernel_size=3,
            stride=1,
            padding=4,
            dilation=4,
            scale=8,
            se_bottleneck_dim=128,
        )

        # self.conv = nn.Conv1d(self.channels[-1], self.channels[-1], kernel_size=1)
        cat_channels = channels * 3
        self.conv = nn.Conv1d(cat_channels, self.channels[-1], kernel_size=1)
        self.pooling = AttentiveStatsPool(
            self.channels[-1], attention_channels=128, global_context_att=global_context_att
        )
        self.bn = nn.BatchNorm1d(self.channels[-1] * 2)
        self.linear = nn.Linear(self.channels[-1] * 2, emb_dim)

    def get_feat_num(self):
        self.feature_extract.eval()
        wav = [torch.randn(self.sr).to(next(self.feature_extract.parameters()).device)]
        with torch.no_grad():
            features = self.feature_extract(wav)
        select_feature = features[self.feature_selection]
        if isinstance(select_feature, (list, tuple)):
            return len(select_feature)
        else:
            return 1

    def get_feat(self, x):
        if self.update_extract:
            x = self.feature_extract([sample for sample in x])
        else:
            with torch.no_grad():
                if self.feat_type == "fbank" or self.feat_type == "mfcc":
                    x = self.feature_extract(x) + 1e-6  # B x feat_dim x time_len
                else:
                    x = self.feature_extract([sample for sample in x])

        if self.feat_type == "fbank":
            x = x.log()

        if self.feat_type != "fbank" and self.feat_type != "mfcc":
            x = x[self.feature_selection]
            if isinstance(x, (list, tuple)):
                x = torch.stack(x, dim=0)
            else:
                x = x.unsqueeze(0)
            norm_weights = F.softmax(self.feature_weight, dim=-1).unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)
            x = (norm_weights * x).sum(dim=0)
            x = torch.transpose(x, 1, 2) + 1e-6

        x = self.instance_norm(x)
        return x

    def forward(self, x):
        x = self.get_feat(x)

        out1 = self.layer1(x)
        out2 = self.layer2(out1)
        out3 = self.layer3(out2)
        out4 = self.layer4(out3)

        out = torch.cat([out2, out3, out4], dim=1)
        out = F.relu(self.conv(out))
        out = self.bn(self.pooling(out))
        out = self.linear(out)

        return out


def ECAPA_TDNN_SMALL(
    feat_dim,
    emb_dim=256,
    feat_type="wavlm_large",
    sr=16000,
    feature_selection="hidden_states",
    update_extract=False,
    config_path=None,
):
    return ECAPA_TDNN(
        feat_dim=feat_dim,
        channels=512,
        emb_dim=emb_dim,
        feat_type=feat_type,
        sr=sr,
        feature_selection=feature_selection,
        update_extract=update_extract,
        config_path=config_path,
    )
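A hypothetical sketch of how `ECAPA_TDNN_SMALL` above can score speaker similarity (the SIM metric mentioned in `src/f5_tts/eval/README.md`) between a generated and a reference utterance. The WavLM checkpoint path, its state-dict layout, and the audio file names are assumptions; see the eval README for the actual checkpoint download:

```python
# Sketch only: cosine similarity between speaker embeddings from the ECAPA-TDNN above.
import torch
import torch.nn.functional as F
import torchaudio

from f5_tts.eval.ecapa_tdnn import ECAPA_TDNN_SMALL

# feat_dim=1024 matches the hidden size of a WavLM-large feature extractor (assumption).
model = ECAPA_TDNN_SMALL(feat_dim=1024, emb_dim=256, feat_type="wavlm_large")
state = torch.load("ckpts/wavlm_large_finetune.pth", map_location="cpu")  # assumed path/layout
model.load_state_dict(state["model"], strict=False)
model.eval()


def embed(path):
    wav, sr = torchaudio.load(path)
    wav = torchaudio.functional.resample(wav, sr, 16000)  # extractor expects 16 kHz input
    with torch.no_grad():
        return model(wav)  # (1, emb_dim) speaker embedding


sim = F.cosine_similarity(embed("generated.wav"), embed("reference.wav"))
print(f"SIM: {sim.item():.3f}")
```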
src/f5_tts/eval/eval_infer_batch.py ADDED
@@ -0,0 +1,197 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ import os
3
+
4
+ sys.path.append(os.getcwd())
5
+
6
+ import time
7
+ from tqdm import tqdm
8
+ import argparse
9
+ from importlib.resources import files
10
+
11
+ import torch
12
+ import torchaudio
13
+ from accelerate import Accelerator
14
+ from vocos import Vocos
15
+
16
+ from f5_tts.model import CFM, UNetT, DiT
17
+ from f5_tts.model.utils import get_tokenizer
18
+ from f5_tts.infer.utils_infer import load_checkpoint
19
+ from f5_tts.eval.utils_eval import (
20
+ get_seedtts_testset_metainfo,
21
+ get_librispeech_test_clean_metainfo,
22
+ get_inference_prompt,
23
+ )
24
+
25
+ accelerator = Accelerator()
26
+ device = f"cuda:{accelerator.process_index}"
27
+
28
+
29
+ # --------------------- Dataset Settings -------------------- #
30
+
31
+ target_sample_rate = 24000
32
+ n_mel_channels = 100
33
+ hop_length = 256
34
+ target_rms = 0.1
35
+
36
+ tokenizer = "pinyin"
37
+ rel_path = str(files("f5_tts").joinpath("../../"))
38
+
39
+
40
+ def main():
41
+ # ---------------------- infer setting ---------------------- #
42
+
43
+ parser = argparse.ArgumentParser(description="batch inference")
44
+
45
+ parser.add_argument("-s", "--seed", default=None, type=int)
46
+ parser.add_argument("-d", "--dataset", default="Emilia_ZH_EN")
47
+ parser.add_argument("-n", "--expname", required=True)
48
+ parser.add_argument("-c", "--ckptstep", default=1200000, type=int)
49
+
50
+ parser.add_argument("-nfe", "--nfestep", default=32, type=int)
51
+ parser.add_argument("-o", "--odemethod", default="euler")
52
+ parser.add_argument("-ss", "--swaysampling", default=-1, type=float)
53
+
54
+ parser.add_argument("-t", "--testset", required=True)
55
+
56
+ args = parser.parse_args()
57
+
58
+ seed = args.seed
59
+ dataset_name = args.dataset
60
+ exp_name = args.expname
61
+ ckpt_step = args.ckptstep
62
+ ckpt_path = rel_path + f"/ckpts/{exp_name}/model_{ckpt_step}.pt"
63
+
64
+ nfe_step = args.nfestep
65
+ ode_method = args.odemethod
66
+ sway_sampling_coef = args.swaysampling
67
+
68
+ testset = args.testset
69
+
70
+ infer_batch_size = 1 # batch budget in mel frames; 1 means one utterance per batch, i.e. ddp single inference (recommended)
71
+ cfg_strength = 2.0
72
+ speed = 1.0
73
+ use_truth_duration = False
74
+ no_ref_audio = False
75
+
76
+ if exp_name == "F5TTS_Base":
77
+ model_cls = DiT
78
+ model_cfg = dict(dim=1024, depth=22, heads=16, ff_mult=2, text_dim=512, conv_layers=4)
79
+
80
+ elif exp_name == "E2TTS_Base":
81
+ model_cls = UNetT
82
+ model_cfg = dict(dim=1024, depth=24, heads=16, ff_mult=4)
83
+
84
+ if testset == "ls_pc_test_clean":
85
+ metalst = rel_path + "/data/librispeech_pc_test_clean_cross_sentence.lst"
86
+ librispeech_test_clean_path = "<SOME_PATH>/LibriSpeech/test-clean" # test-clean path
87
+ metainfo = get_librispeech_test_clean_metainfo(metalst, librispeech_test_clean_path)
88
+
89
+ elif testset == "seedtts_test_zh":
90
+ metalst = rel_path + "/data/seedtts_testset/zh/meta.lst"
91
+ metainfo = get_seedtts_testset_metainfo(metalst)
92
+
93
+ elif testset == "seedtts_test_en":
94
+ metalst = rel_path + "/data/seedtts_testset/en/meta.lst"
95
+ metainfo = get_seedtts_testset_metainfo(metalst)
96
+
97
+ # path to save generated wavs
98
+ output_dir = (
99
+ f"{rel_path}/"
100
+ f"results/{exp_name}_{ckpt_step}/{testset}/"
101
+ f"seed{seed}_{ode_method}_nfe{nfe_step}"
102
+ f"{f'_ss{sway_sampling_coef}' if sway_sampling_coef else ''}"
103
+ f"_cfg{cfg_strength}_speed{speed}"
104
+ f"{'_gt-dur' if use_truth_duration else ''}"
105
+ f"{'_no-ref-audio' if no_ref_audio else ''}"
106
+ )
107
+
108
+ # -------------------------------------------------#
109
+
110
+ use_ema = True
111
+
112
+ prompts_all = get_inference_prompt(
113
+ metainfo,
114
+ speed=speed,
115
+ tokenizer=tokenizer,
116
+ target_sample_rate=target_sample_rate,
117
+ n_mel_channels=n_mel_channels,
118
+ hop_length=hop_length,
119
+ target_rms=target_rms,
120
+ use_truth_duration=use_truth_duration,
121
+ infer_batch_size=infer_batch_size,
122
+ )
123
+
124
+ # Vocoder model
125
+ local = False
126
+ if local:
127
+ vocos_local_path = "../checkpoints/charactr/vocos-mel-24khz"
128
+ vocos = Vocos.from_hparams(f"{vocos_local_path}/config.yaml")
129
+ state_dict = torch.load(f"{vocos_local_path}/pytorch_model.bin", weights_only=True, map_location=device)
130
+ vocos.load_state_dict(state_dict)
131
+ vocos.eval()
132
+ else:
133
+ vocos = Vocos.from_pretrained("charactr/vocos-mel-24khz")
134
+
135
+ # Tokenizer
136
+ vocab_char_map, vocab_size = get_tokenizer(dataset_name, tokenizer)
137
+
138
+ # Model
139
+ model = CFM(
140
+ transformer=model_cls(**model_cfg, text_num_embeds=vocab_size, mel_dim=n_mel_channels),
141
+ mel_spec_kwargs=dict(
142
+ target_sample_rate=target_sample_rate,
143
+ n_mel_channels=n_mel_channels,
144
+ hop_length=hop_length,
145
+ ),
146
+ odeint_kwargs=dict(
147
+ method=ode_method,
148
+ ),
149
+ vocab_char_map=vocab_char_map,
150
+ ).to(device)
151
+
152
+ model = load_checkpoint(model, ckpt_path, device, use_ema=use_ema)
153
+
154
+ if not os.path.exists(output_dir) and accelerator.is_main_process:
155
+ os.makedirs(output_dir)
156
+
157
+ # start batch inference
158
+ accelerator.wait_for_everyone()
159
+ start = time.time()
160
+
161
+ with accelerator.split_between_processes(prompts_all) as prompts:
162
+ for prompt in tqdm(prompts, disable=not accelerator.is_local_main_process):
163
+ utts, ref_rms_list, ref_mels, ref_mel_lens, total_mel_lens, final_text_list = prompt
164
+ ref_mels = ref_mels.to(device)
165
+ ref_mel_lens = torch.tensor(ref_mel_lens, dtype=torch.long).to(device)
166
+ total_mel_lens = torch.tensor(total_mel_lens, dtype=torch.long).to(device)
167
+
168
+ # Inference
169
+ with torch.inference_mode():
170
+ generated, _ = model.sample(
171
+ cond=ref_mels,
172
+ text=final_text_list,
173
+ duration=total_mel_lens,
174
+ lens=ref_mel_lens,
175
+ steps=nfe_step,
176
+ cfg_strength=cfg_strength,
177
+ sway_sampling_coef=sway_sampling_coef,
178
+ no_ref_audio=no_ref_audio,
179
+ seed=seed,
180
+ )
181
+ # Final result
182
+ for i, gen in enumerate(generated):
183
+ gen = gen[ref_mel_lens[i] : total_mel_lens[i], :].unsqueeze(0)
184
+ gen_mel_spec = gen.permute(0, 2, 1)
185
+ generated_wave = vocos.decode(gen_mel_spec.cpu())
186
+ if ref_rms_list[i] < target_rms:
187
+ generated_wave = generated_wave * ref_rms_list[i] / target_rms
188
+ torchaudio.save(f"{output_dir}/{utts[i]}.wav", generated_wave, target_sample_rate)
189
+
190
+ accelerator.wait_for_everyone()
191
+ if accelerator.is_main_process:
192
+ timediff = time.time() - start
193
+ print(f"Done batch inference in {timediff / 60 :.2f} minutes.")
194
+
195
+
196
+ if __name__ == "__main__":
197
+ main()
src/f5_tts/eval/eval_infer_batch.sh ADDED
@@ -0,0 +1,13 @@
1
+ #!/bin/bash
2
+
3
+ # e.g. F5-TTS, 16 NFE
4
+ accelerate launch src/f5_tts/eval/eval_infer_batch.py -s 0 -n "F5TTS_Base" -t "seedtts_test_zh" -nfe 16
5
+ accelerate launch src/f5_tts/eval/eval_infer_batch.py -s 0 -n "F5TTS_Base" -t "seedtts_test_en" -nfe 16
6
+ accelerate launch src/f5_tts/eval/eval_infer_batch.py -s 0 -n "F5TTS_Base" -t "ls_pc_test_clean" -nfe 16
7
+
8
+ # e.g. Vanilla E2 TTS, 32 NFE
9
+ accelerate launch src/f5_tts/eval/eval_infer_batch.py -s 0 -n "E2TTS_Base" -t "seedtts_test_zh" -o "midpoint" -ss 0
10
+ accelerate launch src/f5_tts/eval/eval_infer_batch.py -s 0 -n "E2TTS_Base" -t "seedtts_test_en" -o "midpoint" -ss 0
11
+ accelerate launch src/f5_tts/eval/eval_infer_batch.py -s 0 -n "E2TTS_Base" -t "ls_pc_test_clean" -o "midpoint" -ss 0
12
+
13
+ # etc.
src/f5_tts/eval/eval_librispeech_test_clean.py ADDED
@@ -0,0 +1,73 @@
1
+ # Evaluate with Librispeech test-clean, ~3s prompt to generate 4-10s audio (the way of valle/voicebox evaluation)
2
+
3
+ import sys
4
+ import os
5
+
6
+ sys.path.append(os.getcwd())
7
+
8
+ import multiprocessing as mp
9
+ from importlib.resources import files
10
+
11
+ import numpy as np
12
+
13
+ from f5_tts.eval.utils_eval import (
14
+ get_librispeech_test,
15
+ run_asr_wer,
16
+ run_sim,
17
+ )
18
+
19
+ rel_path = str(files("f5_tts").joinpath("../../"))
20
+
21
+
22
+ eval_task = "wer" # sim | wer
23
+ lang = "en"
24
+ metalst = rel_path + "/data/librispeech_pc_test_clean_cross_sentence.lst"
25
+ librispeech_test_clean_path = "<SOME_PATH>/LibriSpeech/test-clean" # test-clean path
26
+ gen_wav_dir = "PATH_TO_GENERATED" # generated wavs
27
+
28
+ gpus = [0, 1, 2, 3, 4, 5, 6, 7]
29
+ test_set = get_librispeech_test(metalst, gen_wav_dir, gpus, librispeech_test_clean_path)
30
+
31
+ ## In LibriSpeech, some speakers utilized varying voice characteristics for different characters in the book,
32
+ ## leading to a low similarity for the ground truth in some cases.
33
+ # test_set = get_librispeech_test(metalst, gen_wav_dir, gpus, librispeech_test_clean_path, eval_ground_truth = True) # eval ground truth
34
+
35
+ local = False
36
+ if local: # use local custom checkpoint dir
37
+ asr_ckpt_dir = "../checkpoints/Systran/faster-whisper-large-v3"
38
+ else:
39
+ asr_ckpt_dir = "" # auto download to cache dir
40
+
41
+ wavlm_ckpt_dir = "../checkpoints/UniSpeech/wavlm_large_finetune.pth"
42
+
43
+
44
+ # --------------------------- WER ---------------------------
45
+
46
+ if eval_task == "wer":
47
+ wers = []
48
+
49
+ with mp.Pool(processes=len(gpus)) as pool:
50
+ args = [(rank, lang, sub_test_set, asr_ckpt_dir) for (rank, sub_test_set) in test_set]
51
+ results = pool.map(run_asr_wer, args)
52
+ for wers_ in results:
53
+ wers.extend(wers_)
54
+
55
+ wer = round(np.mean(wers) * 100, 3)
56
+ print(f"\nTotal {len(wers)} samples")
57
+ print(f"WER : {wer}%")
58
+
59
+
60
+ # --------------------------- SIM ---------------------------
61
+
62
+ if eval_task == "sim":
63
+ sim_list = []
64
+
65
+ with mp.Pool(processes=len(gpus)) as pool:
66
+ args = [(rank, sub_test_set, wavlm_ckpt_dir) for (rank, sub_test_set) in test_set]
67
+ results = pool.map(run_sim, args)
68
+ for sim_ in results:
69
+ sim_list.extend(sim_)
70
+
71
+ sim = round(sum(sim_list) / len(sim_list), 3)
72
+ print(f"\nTotal {len(sim_list)} samples")
73
+ print(f"SIM : {sim}")
src/f5_tts/eval/eval_seedtts_testset.py ADDED
@@ -0,0 +1,75 @@
1
+ # Evaluate with Seed-TTS testset
2
+
3
+ import sys
4
+ import os
5
+
6
+ sys.path.append(os.getcwd())
7
+
8
+ import multiprocessing as mp
9
+ from importlib.resources import files
10
+
11
+ import numpy as np
12
+
13
+ from f5_tts.eval.utils_eval import (
14
+ get_seed_tts_test,
15
+ run_asr_wer,
16
+ run_sim,
17
+ )
18
+
19
+ rel_path = str(files("f5_tts").joinpath("../../"))
20
+
21
+
22
+ eval_task = "wer" # sim | wer
23
+ lang = "zh" # zh | en
24
+ metalst = rel_path + f"/data/seedtts_testset/{lang}/meta.lst" # seed-tts testset
25
+ # gen_wav_dir = rel_path + f"/data/seedtts_testset/{lang}/wavs" # ground truth wavs
26
+ gen_wav_dir = "PATH_TO_GENERATED" # generated wavs
27
+
28
+
29
+ # NOTE: the paraformer-zh result will differ slightly with the number of GPUs, since the batch size differs
30
+ # zh 1.254 seems to be the result with 4 workers (wer_seed_tts)
31
+ gpus = [0, 1, 2, 3, 4, 5, 6, 7]
32
+ test_set = get_seed_tts_test(metalst, gen_wav_dir, gpus)
33
+
34
+ local = False
35
+ if local: # use local custom checkpoint dir
36
+ if lang == "zh":
37
+ asr_ckpt_dir = "../checkpoints/funasr" # paraformer-zh dir under funasr
38
+ elif lang == "en":
39
+ asr_ckpt_dir = "../checkpoints/Systran/faster-whisper-large-v3"
40
+ else:
41
+ asr_ckpt_dir = "" # auto download to cache dir
42
+
43
+ wavlm_ckpt_dir = "../checkpoints/UniSpeech/wavlm_large_finetune.pth"
44
+
45
+
46
+ # --------------------------- WER ---------------------------
47
+
48
+ if eval_task == "wer":
49
+ wers = []
50
+
51
+ with mp.Pool(processes=len(gpus)) as pool:
52
+ args = [(rank, lang, sub_test_set, asr_ckpt_dir) for (rank, sub_test_set) in test_set]
53
+ results = pool.map(run_asr_wer, args)
54
+ for wers_ in results:
55
+ wers.extend(wers_)
56
+
57
+ wer = round(np.mean(wers) * 100, 3)
58
+ print(f"\nTotal {len(wers)} samples")
59
+ print(f"WER : {wer}%")
60
+
61
+
62
+ # --------------------------- SIM ---------------------------
63
+
64
+ if eval_task == "sim":
65
+ sim_list = []
66
+
67
+ with mp.Pool(processes=len(gpus)) as pool:
68
+ args = [(rank, sub_test_set, wavlm_ckpt_dir) for (rank, sub_test_set) in test_set]
69
+ results = pool.map(run_sim, args)
70
+ for sim_ in results:
71
+ sim_list.extend(sim_)
72
+
73
+ sim = round(sum(sim_list) / len(sim_list), 3)
74
+ print(f"\nTotal {len(sim_list)} samples")
75
+ print(f"SIM : {sim}")
src/f5_tts/eval/utils_eval.py ADDED
@@ -0,0 +1,397 @@
1
+ import math
2
+ import os
3
+ import random
4
+ import string
5
+ from tqdm import tqdm
6
+
7
+ import torch
8
+ import torch.nn.functional as F
9
+ import torchaudio
10
+
11
+ from f5_tts.model.modules import MelSpec
12
+ from f5_tts.model.utils import convert_char_to_pinyin
13
+ from f5_tts.eval.ecapa_tdnn import ECAPA_TDNN_SMALL
14
+
15
+
16
+ # seedtts testset metainfo: utt, prompt_text, prompt_wav, gt_text, gt_wav
17
+ def get_seedtts_testset_metainfo(metalst):
18
+ f = open(metalst)
19
+ lines = f.readlines()
20
+ f.close()
21
+ metainfo = []
22
+ for line in lines:
23
+ if len(line.strip().split("|")) == 5:
24
+ utt, prompt_text, prompt_wav, gt_text, gt_wav = line.strip().split("|")
25
+ elif len(line.strip().split("|")) == 4:
26
+ utt, prompt_text, prompt_wav, gt_text = line.strip().split("|")
27
+ gt_wav = os.path.join(os.path.dirname(metalst), "wavs", utt + ".wav")
28
+ if not os.path.isabs(prompt_wav):
29
+ prompt_wav = os.path.join(os.path.dirname(metalst), prompt_wav)
30
+ metainfo.append((utt, prompt_text, prompt_wav, gt_text, gt_wav))
31
+ return metainfo
32
+
33
+
34
+ # librispeech test-clean metainfo: gen_utt, ref_txt, ref_wav, gen_txt, gen_wav
35
+ def get_librispeech_test_clean_metainfo(metalst, librispeech_test_clean_path):
36
+ f = open(metalst)
37
+ lines = f.readlines()
38
+ f.close()
39
+ metainfo = []
40
+ for line in lines:
41
+ ref_utt, ref_dur, ref_txt, gen_utt, gen_dur, gen_txt = line.strip().split("\t")
42
+
43
+ # ref_txt = ref_txt[0] + ref_txt[1:].lower() + '.' # if use librispeech test-clean (no-pc)
44
+ ref_spk_id, ref_chaptr_id, _ = ref_utt.split("-")
45
+ ref_wav = os.path.join(librispeech_test_clean_path, ref_spk_id, ref_chaptr_id, ref_utt + ".flac")
46
+
47
+ # gen_txt = gen_txt[0] + gen_txt[1:].lower() + '.' # if use librispeech test-clean (no-pc)
48
+ gen_spk_id, gen_chaptr_id, _ = gen_utt.split("-")
49
+ gen_wav = os.path.join(librispeech_test_clean_path, gen_spk_id, gen_chaptr_id, gen_utt + ".flac")
50
+
51
+ metainfo.append((gen_utt, ref_txt, ref_wav, " " + gen_txt, gen_wav))
52
+
53
+ return metainfo
54
+
55
+
56
+ # padded to max length mel batch
57
+ def padded_mel_batch(ref_mels):
58
+ max_mel_length = torch.LongTensor([mel.shape[-1] for mel in ref_mels]).amax()
59
+ padded_ref_mels = []
60
+ for mel in ref_mels:
61
+ padded_ref_mel = F.pad(mel, (0, max_mel_length - mel.shape[-1]), value=0)
62
+ padded_ref_mels.append(padded_ref_mel)
63
+ padded_ref_mels = torch.stack(padded_ref_mels)
64
+ padded_ref_mels = padded_ref_mels.permute(0, 2, 1)
65
+ return padded_ref_mels
66
+
67
+
68
+ # get prompts from metainfo containing: utt, prompt_text, prompt_wav, gt_text, gt_wav
69
+
70
+
71
+ def get_inference_prompt(
72
+ metainfo,
73
+ speed=1.0,
74
+ tokenizer="pinyin",
75
+ polyphone=True,
76
+ target_sample_rate=24000,
77
+ n_mel_channels=100,
78
+ hop_length=256,
79
+ target_rms=0.1,
80
+ use_truth_duration=False,
81
+ infer_batch_size=1,
82
+ num_buckets=200,
83
+ min_secs=3,
84
+ max_secs=40,
85
+ ):
86
+ prompts_all = []
87
+
88
+ min_tokens = min_secs * target_sample_rate // hop_length
89
+ max_tokens = max_secs * target_sample_rate // hop_length
90
+
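+ # bucket utterances by target length so each flushed batch contains similar-duration samples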
91
+ batch_accum = [0] * num_buckets
92
+ utts, ref_rms_list, ref_mels, ref_mel_lens, total_mel_lens, final_text_list = (
93
+ [[] for _ in range(num_buckets)] for _ in range(6)
94
+ )
95
+
96
+ mel_spectrogram = MelSpec(
97
+ target_sample_rate=target_sample_rate, n_mel_channels=n_mel_channels, hop_length=hop_length
98
+ )
99
+
100
+ for utt, prompt_text, prompt_wav, gt_text, gt_wav in tqdm(metainfo, desc="Processing prompts..."):
101
+ # Audio
102
+ ref_audio, ref_sr = torchaudio.load(prompt_wav)
103
+ ref_rms = torch.sqrt(torch.mean(torch.square(ref_audio)))
104
+ if ref_rms < target_rms:
105
+ ref_audio = ref_audio * target_rms / ref_rms
106
+ assert ref_audio.shape[-1] > 5000, f"Empty prompt wav: {prompt_wav}, or torchaudio backend issue."
107
+ if ref_sr != target_sample_rate:
108
+ resampler = torchaudio.transforms.Resample(ref_sr, target_sample_rate)
109
+ ref_audio = resampler(ref_audio)
110
+
111
+ # Text
112
+ if len(prompt_text[-1].encode("utf-8")) == 1:
113
+ prompt_text = prompt_text + " "
114
+ text = [prompt_text + gt_text]
115
+ if tokenizer == "pinyin":
116
+ text_list = convert_char_to_pinyin(text, polyphone=polyphone)
117
+ else:
118
+ text_list = text
119
+
120
+ # Duration, mel frame length
121
+ ref_mel_len = ref_audio.shape[-1] // hop_length
122
+ if use_truth_duration:
123
+ gt_audio, gt_sr = torchaudio.load(gt_wav)
124
+ if gt_sr != target_sample_rate:
125
+ resampler = torchaudio.transforms.Resample(gt_sr, target_sample_rate)
126
+ gt_audio = resampler(gt_audio)
127
+ total_mel_len = ref_mel_len + int(gt_audio.shape[-1] / hop_length / speed)
128
+
129
+ # # test vocoder resynthesis
130
+ # ref_audio = gt_audio
131
+ else:
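+ # no ground-truth duration available: estimate it from the byte-length ratio of generated to reference text, scaled by speed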
132
+ ref_text_len = len(prompt_text.encode("utf-8"))
133
+ gen_text_len = len(gt_text.encode("utf-8"))
134
+ total_mel_len = ref_mel_len + int(ref_mel_len / ref_text_len * gen_text_len / speed)
135
+
136
+ # to mel spectrogram
137
+ ref_mel = mel_spectrogram(ref_audio)
138
+ ref_mel = ref_mel.squeeze(0)
139
+
140
+ # deal with batch
141
+ assert infer_batch_size > 0, "infer_batch_size should be greater than 0."
142
+ assert (
143
+ min_tokens <= total_mel_len <= max_tokens
144
+ ), f"Audio {utt} has duration {total_mel_len*hop_length//target_sample_rate}s out of range [{min_secs}, {max_secs}]."
145
+ bucket_i = math.floor((total_mel_len - min_tokens) / (max_tokens - min_tokens + 1) * num_buckets)
146
+
147
+ utts[bucket_i].append(utt)
148
+ ref_rms_list[bucket_i].append(ref_rms)
149
+ ref_mels[bucket_i].append(ref_mel)
150
+ ref_mel_lens[bucket_i].append(ref_mel_len)
151
+ total_mel_lens[bucket_i].append(total_mel_len)
152
+ final_text_list[bucket_i].extend(text_list)
153
+
154
+ batch_accum[bucket_i] += total_mel_len
155
+
156
+ if batch_accum[bucket_i] >= infer_batch_size:
157
+ # print(f"\n{len(ref_mels[bucket_i][0][0])}\n{ref_mel_lens[bucket_i]}\n{total_mel_lens[bucket_i]}")
158
+ prompts_all.append(
159
+ (
160
+ utts[bucket_i],
161
+ ref_rms_list[bucket_i],
162
+ padded_mel_batch(ref_mels[bucket_i]),
163
+ ref_mel_lens[bucket_i],
164
+ total_mel_lens[bucket_i],
165
+ final_text_list[bucket_i],
166
+ )
167
+ )
168
+ batch_accum[bucket_i] = 0
169
+ (
170
+ utts[bucket_i],
171
+ ref_rms_list[bucket_i],
172
+ ref_mels[bucket_i],
173
+ ref_mel_lens[bucket_i],
174
+ total_mel_lens[bucket_i],
175
+ final_text_list[bucket_i],
176
+ ) = [], [], [], [], [], []
177
+
178
+ # add residual
179
+ for bucket_i, bucket_frames in enumerate(batch_accum):
180
+ if bucket_frames > 0:
181
+ prompts_all.append(
182
+ (
183
+ utts[bucket_i],
184
+ ref_rms_list[bucket_i],
185
+ padded_mel_batch(ref_mels[bucket_i]),
186
+ ref_mel_lens[bucket_i],
187
+ total_mel_lens[bucket_i],
188
+ final_text_list[bucket_i],
189
+ )
190
+ )
191
+ # shuffle so the last workers are not left with only the short, easy batches
192
+ random.seed(666)
193
+ random.shuffle(prompts_all)
194
+
195
+ return prompts_all
196
+
197
+
198
+ # get wav_res_ref_text of seed-tts test metalst
199
+ # https://github.com/BytedanceSpeech/seed-tts-eval
200
+
201
+
202
+ def get_seed_tts_test(metalst, gen_wav_dir, gpus):
203
+ f = open(metalst)
204
+ lines = f.readlines()
205
+ f.close()
206
+
207
+ test_set_ = []
208
+ for line in tqdm(lines):
209
+ if len(line.strip().split("|")) == 5:
210
+ utt, prompt_text, prompt_wav, gt_text, gt_wav = line.strip().split("|")
211
+ elif len(line.strip().split("|")) == 4:
212
+ utt, prompt_text, prompt_wav, gt_text = line.strip().split("|")
213
+
214
+ if not os.path.exists(os.path.join(gen_wav_dir, utt + ".wav")):
215
+ continue
216
+ gen_wav = os.path.join(gen_wav_dir, utt + ".wav")
217
+ if not os.path.isabs(prompt_wav):
218
+ prompt_wav = os.path.join(os.path.dirname(metalst), prompt_wav)
219
+
220
+ test_set_.append((gen_wav, prompt_wav, gt_text))
221
+
222
+ num_jobs = len(gpus)
223
+ if num_jobs == 1:
224
+ return [(gpus[0], test_set_)]
225
+
226
+ wav_per_job = len(test_set_) // num_jobs + 1
227
+ test_set = []
228
+ for i in range(num_jobs):
229
+ test_set.append((gpus[i], test_set_[i * wav_per_job : (i + 1) * wav_per_job]))
230
+
231
+ return test_set
232
+
233
+
234
+ # get librispeech test-clean cross sentence test
235
+
236
+
237
+ def get_librispeech_test(metalst, gen_wav_dir, gpus, librispeech_test_clean_path, eval_ground_truth=False):
238
+ f = open(metalst)
239
+ lines = f.readlines()
240
+ f.close()
241
+
242
+ test_set_ = []
243
+ for line in tqdm(lines):
244
+ ref_utt, ref_dur, ref_txt, gen_utt, gen_dur, gen_txt = line.strip().split("\t")
245
+
246
+ if eval_ground_truth:
247
+ gen_spk_id, gen_chaptr_id, _ = gen_utt.split("-")
248
+ gen_wav = os.path.join(librispeech_test_clean_path, gen_spk_id, gen_chaptr_id, gen_utt + ".flac")
249
+ else:
250
+ if not os.path.exists(os.path.join(gen_wav_dir, gen_utt + ".wav")):
251
+ raise FileNotFoundError(f"Generated wav not found: {gen_utt}")
252
+ gen_wav = os.path.join(gen_wav_dir, gen_utt + ".wav")
253
+
254
+ ref_spk_id, ref_chaptr_id, _ = ref_utt.split("-")
255
+ ref_wav = os.path.join(librispeech_test_clean_path, ref_spk_id, ref_chaptr_id, ref_utt + ".flac")
256
+
257
+ test_set_.append((gen_wav, ref_wav, gen_txt))
258
+
259
+ num_jobs = len(gpus)
260
+ if num_jobs == 1:
261
+ return [(gpus[0], test_set_)]
262
+
263
+ wav_per_job = len(test_set_) // num_jobs + 1
264
+ test_set = []
265
+ for i in range(num_jobs):
266
+ test_set.append((gpus[i], test_set_[i * wav_per_job : (i + 1) * wav_per_job]))
267
+
268
+ return test_set
269
+
270
+
271
+ # load asr model
272
+
273
+
274
+ def load_asr_model(lang, ckpt_dir=""):
275
+ if lang == "zh":
276
+ from funasr import AutoModel
277
+
278
+ model = AutoModel(
279
+ model=os.path.join(ckpt_dir, "paraformer-zh"),
280
+ # vad_model = os.path.join(ckpt_dir, "fsmn-vad"),
281
+ # punc_model = os.path.join(ckpt_dir, "ct-punc"),
282
+ # spk_model = os.path.join(ckpt_dir, "cam++"),
283
+ disable_update=True,
284
+ ) # following seed-tts setting
285
+ elif lang == "en":
286
+ from faster_whisper import WhisperModel
287
+
288
+ model_size = "large-v3" if ckpt_dir == "" else ckpt_dir
289
+ model = WhisperModel(model_size, device="cuda", compute_type="float16")
290
+ return model
291
+
292
+
293
+ # WER Evaluation, the way Seed-TTS does
294
+
295
+
296
+ def run_asr_wer(args):
297
+ rank, lang, test_set, ckpt_dir = args
298
+
299
+ if lang == "zh":
300
+ import zhconv
301
+
302
+ torch.cuda.set_device(rank)
303
+ elif lang == "en":
304
+ os.environ["CUDA_VISIBLE_DEVICES"] = str(rank)
305
+ else:
306
+ raise NotImplementedError(
307
+ "lang support only 'zh' (funasr paraformer-zh), 'en' (faster-whisper-large-v3), for now."
308
+ )
309
+
310
+ asr_model = load_asr_model(lang, ckpt_dir=ckpt_dir)
311
+
312
+ from zhon.hanzi import punctuation
313
+
314
+ punctuation_all = punctuation + string.punctuation
315
+ wers = []
316
+
317
+ from jiwer import compute_measures
318
+
319
+ for gen_wav, prompt_wav, truth in tqdm(test_set):
320
+ if lang == "zh":
321
+ res = asr_model.generate(input=gen_wav, batch_size_s=300, disable_pbar=True)
322
+ hypo = res[0]["text"]
323
+ hypo = zhconv.convert(hypo, "zh-cn")
324
+ elif lang == "en":
325
+ segments, _ = asr_model.transcribe(gen_wav, beam_size=5, language="en")
326
+ hypo = ""
327
+ for segment in segments:
328
+ hypo = hypo + " " + segment.text
329
+
330
+ # raw_truth = truth
331
+ # raw_hypo = hypo
332
+
333
+ for x in punctuation_all:
334
+ truth = truth.replace(x, "")
335
+ hypo = hypo.replace(x, "")
336
+
337
+ truth = truth.replace(" ", " ")
338
+ hypo = hypo.replace(" ", " ")
339
+
340
+ if lang == "zh":
341
+ truth = " ".join([x for x in truth])
342
+ hypo = " ".join([x for x in hypo])
343
+ elif lang == "en":
344
+ truth = truth.lower()
345
+ hypo = hypo.lower()
346
+
347
+ measures = compute_measures(truth, hypo)
348
+ wer = measures["wer"]
349
+
350
+ # ref_list = truth.split(" ")
351
+ # subs = measures["substitutions"] / len(ref_list)
352
+ # dele = measures["deletions"] / len(ref_list)
353
+ # inse = measures["insertions"] / len(ref_list)
354
+
355
+ wers.append(wer)
356
+
357
+ return wers
358
+
359
+
360
+ # SIM Evaluation
361
+
362
+
363
+ def run_sim(args):
364
+ rank, test_set, ckpt_dir = args
365
+ device = f"cuda:{rank}"
366
+
367
+ model = ECAPA_TDNN_SMALL(feat_dim=1024, feat_type="wavlm_large", config_path=None)
368
+ state_dict = torch.load(ckpt_dir, weights_only=True, map_location=lambda storage, loc: storage)
369
+ model.load_state_dict(state_dict["model"], strict=False)
370
+
371
+ use_gpu = True if torch.cuda.is_available() else False
372
+ if use_gpu:
373
+ model = model.cuda(device)
374
+ model.eval()
375
+
376
+ sim_list = []
377
+ for wav1, wav2, truth in tqdm(test_set):
378
+ wav1, sr1 = torchaudio.load(wav1)
379
+ wav2, sr2 = torchaudio.load(wav2)
380
+
381
+ resample1 = torchaudio.transforms.Resample(orig_freq=sr1, new_freq=16000)
382
+ resample2 = torchaudio.transforms.Resample(orig_freq=sr2, new_freq=16000)
383
+ wav1 = resample1(wav1)
384
+ wav2 = resample2(wav2)
385
+
386
+ if use_gpu:
387
+ wav1 = wav1.cuda(device)
388
+ wav2 = wav2.cuda(device)
389
+ with torch.no_grad():
390
+ emb1 = model(wav1)
391
+ emb2 = model(wav2)
392
+
393
+ sim = F.cosine_similarity(emb1, emb2)[0].item()
394
+ # print(f"VSim score between two audios: {sim:.4f} (-1.0, 1.0).")
395
+ sim_list.append(sim)
396
+
397
+ return sim_list
src/f5_tts/infer/README.md ADDED
@@ -0,0 +1,112 @@
1
+ # Inference
2
+
3
+ The pretrained model checkpoints are available at [🤗 Hugging Face](https://huggingface.co/SWivid/F5-TTS) and [🤖 Model Scope](https://www.modelscope.cn/models/SWivid/F5-TTS_Emilia-ZH-EN), or will be downloaded automatically when running the inference scripts.
4
+
5
+ Currently supports up to **30s for a single** generation, which is the **total length** including both prompt and output audio. However, you can provide `infer_cli` and `infer_gradio` with longer text; they will automatically do chunked generation. Long reference audio will be **clipped to ~15s**.
6
+
7
+ To avoid possible inference failures, make sure you have read through the following instructions.
8
+
9
+ - Use reference audio <15s and leave some silence (e.g. 1s) at the end. Otherwise there is a risk of truncating in the middle of a word, leading to suboptimal generation.
10
+ - Uppercase letters will be uttered letter by letter, so use lowercase letters for normal words.
11
+ - Add some spaces (blank: " ") or punctuation (e.g. "," ".") to explicitly introduce pauses.
12
+ - Convert numbers to Chinese characters if you want them read in Chinese; otherwise they will be read in English.
13
+
14
+
15
+ ## Gradio App
16
+
17
+ Currently supported features:
18
+
19
+ - Basic TTS with Chunk Inference
20
+ - Multi-Style / Multi-Speaker Generation
21
+ - Voice Chat powered by Qwen2.5-3B-Instruct
22
+
23
+ The CLI command `f5-tts_infer-gradio` is equivalent to `python src/f5_tts/infer/infer_gradio.py`, which launches a Gradio app (web interface) for inference.
24
+
25
+ The script will load model checkpoints from Hugging Face. You can also manually download the files and update the path passed to `load_model()` in `infer_gradio.py`. Only the TTS model is loaded at startup; the ASR model is loaded to do transcription if `ref_text` is not provided, and the LLM is loaded if Voice Chat is used.
26
+
27
+ It can also be used as a component of a larger application:
28
+ ```python
29
+ import gradio as gr
30
+ from f5_tts.infer.infer_gradio import app
31
+
32
+ with gr.Blocks() as main_app:
33
+ gr.Markdown("# This is an example of using F5-TTS within a bigger Gradio app")
34
+
35
+ # ... other Gradio components
36
+
37
+ app.render()
38
+
39
+ main_app.launch()
40
+ ```
41
+
42
+
43
+ ## CLI Inference
44
+
45
+ The CLI command `f5-tts_infer-cli` is equivalent to `python src/f5_tts/infer/infer_cli.py`, which is a command-line tool for inference.
46
+
47
+ The script will load model checkpoints from Hugging Face. You can also manually download the files and use `--ckpt_file` to specify the model you want to load, or directly update the path in `infer_cli.py`.
48
+
49
+ To use a custom vocabulary, pass `--vocab_file` with the path to your `vocab.txt` file.
50
+
51
+ Basically, you can run inference with flags:
52
+ ```bash
53
+ # Leave --ref_text "" will have ASR model transcribe (extra GPU memory usage)
54
+ f5-tts_infer-cli \
55
+ --model "F5-TTS" \
56
+ --ref_audio "ref_audio.wav" \
57
+ --ref_text "The content, subtitle or transcription of reference audio." \
58
+ --gen_text "Some text you want TTS model generate for you."
59
+ ```
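+
+ For example, to run with a locally downloaded checkpoint and a custom vocabulary, combine the flags above (the paths below are placeholders): `f5-tts_infer-cli --model "F5-TTS" --ckpt_file ckpts/F5TTS_Base/model_1200000.pt --vocab_file data/vocab.txt --ref_audio "ref_audio.wav" --gen_text "Some text you want TTS model generate for you."`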
60
+
61
+ A `.toml` file allows for more flexible usage.
62
+
63
+ ```bash
64
+ f5-tts_infer-cli -c custom.toml
65
+ ```
66
+
67
+ For example, you can use a `.toml` file to pass in variables; refer to `src/f5_tts/infer/examples/basic/basic.toml`:
68
+
69
+ ```toml
70
+ # F5-TTS | E2-TTS
71
+ model = "F5-TTS"
72
+ ref_audio = "infer/examples/basic/basic_ref_en.wav"
73
+ # If an empty "", transcribes the reference audio automatically.
74
+ ref_text = "Some call me nature, others call me mother nature."
75
+ gen_text = "I don't really care what you call me. I've been a silent spectator, watching species evolve, empires rise and fall. But always remember, I am mighty and enduring."
76
+ # File with text to generate. Ignores the text above.
77
+ gen_file = ""
78
+ remove_silence = false
79
+ output_dir = "tests"
80
+ ```
81
+
82
+ You can also leverage a `.toml` file to do multi-style generation; refer to `src/f5_tts/infer/examples/multi/story.toml`.
83
+
84
+ ```toml
85
+ # F5-TTS | E2-TTS
86
+ model = "F5-TTS"
87
+ ref_audio = "infer/examples/multi/main.flac"
88
+ # If an empty "", transcribes the reference audio automatically.
89
+ ref_text = ""
90
+ gen_text = ""
91
+ # File with text to generate. Ignores the text above.
92
+ gen_file = "infer/examples/multi/story.txt"
93
+ remove_silence = true
94
+ output_dir = "tests"
95
+
96
+ [voices.town]
97
+ ref_audio = "infer/examples/multi/town.flac"
98
+ ref_text = ""
99
+
100
+ [voices.country]
101
+ ref_audio = "infer/examples/multi/country.flac"
102
+ ref_text = ""
103
+ ```
104
+ You should mark the text with `[main]` `[town]` `[country]` wherever you want to change the voice; refer to `src/f5_tts/infer/examples/multi/story.txt` and the excerpt below.
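+
+ For example, in `story.txt` the narration switches voices like this: `... presently he broke out with [town] “My poor dear friend, you live here no better than the ants.” [main] So when he returned to town he took the Country Mouse with him ...` Each tag switches the voice used for the text that follows it.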
105
+
106
+ ## Speech Editing
107
+
108
+ To test speech editing capabilities, use the following command:
109
+
110
+ ```bash
111
+ python src/f5_tts/infer/speech_edit.py
112
+ ```
src/f5_tts/infer/examples/basic/basic.toml ADDED
@@ -0,0 +1,10 @@
1
+ # F5-TTS | E2-TTS
2
+ model = "F5-TTS"
3
+ ref_audio = "infer/examples/basic/basic_ref_en.wav"
4
+ # If an empty "", transcribes the reference audio automatically.
5
+ ref_text = "Some call me nature, others call me mother nature."
6
+ gen_text = "I don't really care what you call me. I've been a silent spectator, watching species evolve, empires rise and fall. But always remember, I am mighty and enduring."
7
+ # File with text to generate. Ignores the text above.
8
+ gen_file = ""
9
+ remove_silence = false
10
+ output_dir = "tests"
src/f5_tts/infer/examples/basic/basic_ref_en.wav ADDED
Binary file (256 kB). View file
 
src/f5_tts/infer/examples/basic/basic_ref_zh.wav ADDED
Binary file (325 kB). View file
 
src/f5_tts/infer/examples/multi/country.flac ADDED
Binary file (180 kB). View file
 
src/f5_tts/infer/examples/multi/main.flac ADDED
Binary file (279 kB). View file
 
src/f5_tts/infer/examples/multi/story.toml ADDED
@@ -0,0 +1,19 @@
1
+ # F5-TTS | E2-TTS
2
+ model = "F5-TTS"
3
+ ref_audio = "infer/examples/multi/main.flac"
4
+ # If an empty "", transcribes the reference audio automatically.
5
+ ref_text = ""
6
+ gen_text = ""
7
+ # File with text to generate. Ignores the text above.
8
+ gen_file = "infer/examples/multi/story.txt"
9
+ remove_silence = true
10
+ output_dir = "tests"
11
+
12
+ [voices.town]
13
+ ref_audio = "infer/examples/multi/town.flac"
14
+ ref_text = ""
15
+
16
+ [voices.country]
17
+ ref_audio = "infer/examples/multi/country.flac"
18
+ ref_text = ""
19
+
src/f5_tts/infer/examples/multi/story.txt ADDED
@@ -0,0 +1 @@
1
+ A Town Mouse and a Country Mouse were acquaintances, and the Country Mouse one day invited his friend to come and see him at his home in the fields. The Town Mouse came, and they sat down to a dinner of barleycorns and roots, the latter of which had a distinctly earthy flavour. The fare was not much to the taste of the guest, and presently he broke out with [town] “My poor dear friend, you live here no better than the ants. Now, you should just see how I fare! My larder is a regular horn of plenty. You must come and stay with me, and I promise you you shall live on the fat of the land.” [main] So when he returned to town he took the Country Mouse with him, and showed him into a larder containing flour and oatmeal and figs and honey and dates. The Country Mouse had never seen anything like it, and sat down to enjoy the luxuries his friend provided: but before they had well begun, the door of the larder opened and someone came in. The two Mice scampered off and hid themselves in a narrow and exceedingly uncomfortable hole. Presently, when all was quiet, they ventured out again; but someone else came in, and off they scuttled again. This was too much for the visitor. [country] “Goodbye,” [main] said he, [country] “I’m off. You live in the lap of luxury, I can see, but you are surrounded by dangers; whereas at home I can enjoy my simple dinner of roots and corn in peace.”
src/f5_tts/infer/examples/multi/town.flac ADDED
Binary file (229 kB). View file
 
src/f5_tts/infer/examples/vocab.txt ADDED
@@ -0,0 +1,2545 @@
1
+
2
+ !
3
+ "
4
+ #
5
+ $
6
+ %
7
+ &
8
+ '
9
+ (
10
+ )
11
+ *
12
+ +
13
+ ,
14
+ -
15
+ .
16
+ /
17
+ 0
18
+ 1
19
+ 2
20
+ 3
21
+ 4
22
+ 5
23
+ 6
24
+ 7
25
+ 8
26
+ 9
27
+ :
28
+ ;
29
+ =
30
+ >
31
+ ?
32
+ @
33
+ A
34
+ B
35
+ C
36
+ D
37
+ E
38
+ F
39
+ G
40
+ H
41
+ I
42
+ J
43
+ K
44
+ L
45
+ M
46
+ N
47
+ O
48
+ P
49
+ Q
50
+ R
51
+ S
52
+ T
53
+ U
54
+ V
55
+ W
56
+ X
57
+ Y
58
+ Z
59
+ [
60
+ \
61
+ ]
62
+ _
63
+ a
64
+ a1
65
+ ai1
66
+ ai2
67
+ ai3
68
+ ai4
69
+ an1
70
+ an3
71
+ an4
72
+ ang1
73
+ ang2
74
+ ang4
75
+ ao1
76
+ ao2
77
+ ao3
78
+ ao4
79
+ b
80
+ ba
81
+ ba1
82
+ ba2
83
+ ba3
84
+ ba4
85
+ bai1
86
+ bai2
87
+ bai3
88
+ bai4
89
+ ban1
90
+ ban2
91
+ ban3
92
+ ban4
93
+ bang1
94
+ bang2
95
+ bang3
96
+ bang4
97
+ bao1
98
+ bao2
99
+ bao3
100
+ bao4
101
+ bei
102
+ bei1
103
+ bei2
104
+ bei3
105
+ bei4
106
+ ben1
107
+ ben2
108
+ ben3
109
+ ben4
110
+ beng
111
+ beng1
112
+ beng2
113
+ beng3
114
+ beng4
115
+ bi1
116
+ bi2
117
+ bi3
118
+ bi4
119
+ bian1
120
+ bian2
121
+ bian3
122
+ bian4
123
+ biao1
124
+ biao2
125
+ biao3
126
+ bie1
127
+ bie2
128
+ bie3
129
+ bie4
130
+ bin1
131
+ bin4
132
+ bing1
133
+ bing2
134
+ bing3
135
+ bing4
136
+ bo
137
+ bo1
138
+ bo2
139
+ bo3
140
+ bo4
141
+ bu2
142
+ bu3
143
+ bu4
144
+ c
145
+ ca1
146
+ cai1
147
+ cai2
148
+ cai3
149
+ cai4
150
+ can1
151
+ can2
152
+ can3
153
+ can4
154
+ cang1
155
+ cang2
156
+ cao1
157
+ cao2
158
+ cao3
159
+ ce4
160
+ cen1
161
+ cen2
162
+ ceng1
163
+ ceng2
164
+ ceng4
165
+ cha1
166
+ cha2
167
+ cha3
168
+ cha4
169
+ chai1
170
+ chai2
171
+ chan1
172
+ chan2
173
+ chan3
174
+ chan4
175
+ chang1
176
+ chang2
177
+ chang3
178
+ chang4
179
+ chao1
180
+ chao2
181
+ chao3
182
+ che1
183
+ che2
184
+ che3
185
+ che4
186
+ chen1
187
+ chen2
188
+ chen3
189
+ chen4
190
+ cheng1
191
+ cheng2
192
+ cheng3
193
+ cheng4
194
+ chi1
195
+ chi2
196
+ chi3
197
+ chi4
198
+ chong1
199
+ chong2
200
+ chong3
201
+ chong4
202
+ chou1
203
+ chou2
204
+ chou3
205
+ chou4
206
+ chu1
207
+ chu2
208
+ chu3
209
+ chu4
210
+ chua1
211
+ chuai1
212
+ chuai2
213
+ chuai3
214
+ chuai4
215
+ chuan1
216
+ chuan2
217
+ chuan3
218
+ chuan4
219
+ chuang1
220
+ chuang2
221
+ chuang3
222
+ chuang4
223
+ chui1
224
+ chui2
225
+ chun1
226
+ chun2
227
+ chun3
228
+ chuo1
229
+ chuo4
230
+ ci1
231
+ ci2
232
+ ci3
233
+ ci4
234
+ cong1
235
+ cong2
236
+ cou4
237
+ cu1
238
+ cu4
239
+ cuan1
240
+ cuan2
241
+ cuan4
242
+ cui1
243
+ cui3
244
+ cui4
245
+ cun1
246
+ cun2
247
+ cun4
248
+ cuo1
249
+ cuo2
250
+ cuo4
251
+ d
252
+ da
253
+ da1
254
+ da2
255
+ da3
256
+ da4
257
+ dai1
258
+ dai2
259
+ dai3
260
+ dai4
261
+ dan1
262
+ dan2
263
+ dan3
264
+ dan4
265
+ dang1
266
+ dang2
267
+ dang3
268
+ dang4
269
+ dao1
270
+ dao2
271
+ dao3
272
+ dao4
273
+ de
274
+ de1
275
+ de2
276
+ dei3
277
+ den4
278
+ deng1
279
+ deng2
280
+ deng3
281
+ deng4
282
+ di1
283
+ di2
284
+ di3
285
+ di4
286
+ dia3
287
+ dian1
288
+ dian2
289
+ dian3
290
+ dian4
291
+ diao1
292
+ diao3
293
+ diao4
294
+ die1
295
+ die2
296
+ die4
297
+ ding1
298
+ ding2
299
+ ding3
300
+ ding4
301
+ diu1
302
+ dong1
303
+ dong3
304
+ dong4
305
+ dou1
306
+ dou2
307
+ dou3
308
+ dou4
309
+ du1
310
+ du2
311
+ du3
312
+ du4
313
+ duan1
314
+ duan2
315
+ duan3
316
+ duan4
317
+ dui1
318
+ dui4
319
+ dun1
320
+ dun3
321
+ dun4
322
+ duo1
323
+ duo2
324
+ duo3
325
+ duo4
326
+ e
327
+ e1
328
+ e2
329
+ e3
330
+ e4
331
+ ei2
332
+ en1
333
+ en4
334
+ er
335
+ er2
336
+ er3
337
+ er4
338
+ f
339
+ fa1
340
+ fa2
341
+ fa3
342
+ fa4
343
+ fan1
344
+ fan2
345
+ fan3
346
+ fan4
347
+ fang1
348
+ fang2
349
+ fang3
350
+ fang4
351
+ fei1
352
+ fei2
353
+ fei3
354
+ fei4
355
+ fen1
356
+ fen2
357
+ fen3
358
+ fen4
359
+ feng1
360
+ feng2
361
+ feng3
362
+ feng4
363
+ fo2
364
+ fou2
365
+ fou3
366
+ fu1
367
+ fu2
368
+ fu3
369
+ fu4
370
+ g
371
+ ga1
372
+ ga2
373
+ ga3
374
+ ga4
375
+ gai1
376
+ gai2
377
+ gai3
378
+ gai4
379
+ gan1
380
+ gan2
381
+ gan3
382
+ gan4
383
+ gang1
384
+ gang2
385
+ gang3
386
+ gang4
387
+ gao1
388
+ gao2
389
+ gao3
390
+ gao4
391
+ ge1
392
+ ge2
393
+ ge3
394
+ ge4
395
+ gei2
396
+ gei3
397
+ gen1
398
+ gen2
399
+ gen3
400
+ gen4
401
+ geng1
402
+ geng3
403
+ geng4
404
+ gong1
405
+ gong3
406
+ gong4
407
+ gou1
408
+ gou2
409
+ gou3
410
+ gou4
411
+ gu
412
+ gu1
413
+ gu2
414
+ gu3
415
+ gu4
416
+ gua1
417
+ gua2
418
+ gua3
419
+ gua4
420
+ guai1
421
+ guai2
422
+ guai3
423
+ guai4
424
+ guan1
425
+ guan2
426
+ guan3
427
+ guan4
428
+ guang1
429
+ guang2
430
+ guang3
431
+ guang4
432
+ gui1
433
+ gui2
434
+ gui3
435
+ gui4
436
+ gun3
437
+ gun4
438
+ guo1
439
+ guo2
440
+ guo3
441
+ guo4
442
+ h
443
+ ha1
444
+ ha2
445
+ ha3
446
+ hai1
447
+ hai2
448
+ hai3
449
+ hai4
450
+ han1
451
+ han2
452
+ han3
453
+ han4
454
+ hang1
455
+ hang2
456
+ hang4
457
+ hao1
458
+ hao2
459
+ hao3
460
+ hao4
461
+ he1
462
+ he2
463
+ he4
464
+ hei1
465
+ hen2
466
+ hen3
467
+ hen4
468
+ heng1
469
+ heng2
470
+ heng4
471
+ hong1
472
+ hong2
473
+ hong3
474
+ hong4
475
+ hou1
476
+ hou2
477
+ hou3
478
+ hou4
479
+ hu1
480
+ hu2
481
+ hu3
482
+ hu4
483
+ hua1
484
+ hua2
485
+ hua4
486
+ huai2
487
+ huai4
488
+ huan1
489
+ huan2
490
+ huan3
491
+ huan4
492
+ huang1
493
+ huang2
494
+ huang3
495
+ huang4
496
+ hui1
497
+ hui2
498
+ hui3
499
+ hui4
500
+ hun1
501
+ hun2
502
+ hun4
503
+ huo
504
+ huo1
505
+ huo2
506
+ huo3
507
+ huo4
508
+ i
509
+ j
510
+ ji1
511
+ ji2
512
+ ji3
513
+ ji4
514
+ jia
515
+ jia1
516
+ jia2
517
+ jia3
518
+ jia4
519
+ jian1
520
+ jian2
521
+ jian3
522
+ jian4
523
+ jiang1
524
+ jiang2
525
+ jiang3
526
+ jiang4
527
+ jiao1
528
+ jiao2
529
+ jiao3
530
+ jiao4
531
+ jie1
532
+ jie2
533
+ jie3
534
+ jie4
535
+ jin1
536
+ jin2
537
+ jin3
538
+ jin4
539
+ jing1
540
+ jing2
541
+ jing3
542
+ jing4
543
+ jiong3
544
+ jiu1
545
+ jiu2
546
+ jiu3
547
+ jiu4
548
+ ju1
549
+ ju2
550
+ ju3
551
+ ju4
552
+ juan1
553
+ juan2
554
+ juan3
555
+ juan4
556
+ jue1
557
+ jue2
558
+ jue4
559
+ jun1
560
+ jun4
561
+ k
562
+ ka1
563
+ ka2
564
+ ka3
565
+ kai1
566
+ kai2
567
+ kai3
568
+ kai4
569
+ kan1
570
+ kan2
571
+ kan3
572
+ kan4
573
+ kang1
574
+ kang2
575
+ kang4
576
+ kao1
577
+ kao2
578
+ kao3
579
+ kao4
580
+ ke1
581
+ ke2
582
+ ke3
583
+ ke4
584
+ ken3
585
+ keng1
586
+ kong1
587
+ kong3
588
+ kong4
589
+ kou1
590
+ kou2
591
+ kou3
592
+ kou4
593
+ ku1
594
+ ku2
595
+ ku3
596
+ ku4
597
+ kua1
598
+ kua3
599
+ kua4
600
+ kuai3
601
+ kuai4
602
+ kuan1
603
+ kuan2
604
+ kuan3
605
+ kuang1
606
+ kuang2
607
+ kuang4
608
+ kui1
609
+ kui2
610
+ kui3
611
+ kui4
612
+ kun1
613
+ kun3
614
+ kun4
615
+ kuo4
616
+ l
617
+ la
618
+ la1
619
+ la2
620
+ la3
621
+ la4
622
+ lai2
623
+ lai4
624
+ lan2
625
+ lan3
626
+ lan4
627
+ lang1
628
+ lang2
629
+ lang3
630
+ lang4
631
+ lao1
632
+ lao2
633
+ lao3
634
+ lao4
635
+ le
636
+ le1
637
+ le4
638
+ lei
639
+ lei1
640
+ lei2
641
+ lei3
642
+ lei4
643
+ leng1
644
+ leng2
645
+ leng3
646
+ leng4
647
+ li
648
+ li1
649
+ li2
650
+ li3
651
+ li4
652
+ lia3
653
+ lian2
654
+ lian3
655
+ lian4
656
+ liang2
657
+ liang3
658
+ liang4
659
+ liao1
660
+ liao2
661
+ liao3
662
+ liao4
663
+ lie1
664
+ lie2
665
+ lie3
666
+ lie4
667
+ lin1
668
+ lin2
669
+ lin3
670
+ lin4
671
+ ling2
672
+ ling3
673
+ ling4
674
+ liu1
675
+ liu2
676
+ liu3
677
+ liu4
678
+ long1
679
+ long2
680
+ long3
681
+ long4
682
+ lou1
683
+ lou2
684
+ lou3
685
+ lou4
686
+ lu1
687
+ lu2
688
+ lu3
689
+ lu4
690
+ luan2
691
+ luan3
692
+ luan4
693
+ lun1
694
+ lun2
695
+ lun4
696
+ luo1
697
+ luo2
698
+ luo3
699
+ luo4
700
+ lv2
701
+ lv3
702
+ lv4
703
+ lve3
704
+ lve4
705
+ m
706
+ ma
707
+ ma1
708
+ ma2
709
+ ma3
710
+ ma4
711
+ mai2
712
+ mai3
713
+ mai4
714
+ man1
715
+ man2
716
+ man3
717
+ man4
718
+ mang2
719
+ mang3
720
+ mao1
721
+ mao2
722
+ mao3
723
+ mao4
724
+ me
725
+ mei2
726
+ mei3
727
+ mei4
728
+ men
729
+ men1
730
+ men2
731
+ men4
732
+ meng
733
+ meng1
734
+ meng2
735
+ meng3
736
+ meng4
737
+ mi1
738
+ mi2
739
+ mi3
740
+ mi4
741
+ mian2
742
+ mian3
743
+ mian4
744
+ miao1
745
+ miao2
746
+ miao3
747
+ miao4
748
+ mie1
749
+ mie4
750
+ min2
751
+ min3
752
+ ming2
753
+ ming3
754
+ ming4
755
+ miu4
756
+ mo1
757
+ mo2
758
+ mo3
759
+ mo4
760
+ mou1
761
+ mou2
762
+ mou3
763
+ mu2
764
+ mu3
765
+ mu4
766
+ n
767
+ n2
768
+ na1
769
+ na2
770
+ na3
771
+ na4
772
+ nai2
773
+ nai3
774
+ nai4
775
+ nan1
776
+ nan2
777
+ nan3
778
+ nan4
779
+ nang1
780
+ nang2
781
+ nang3
782
+ nao1
783
+ nao2
784
+ nao3
785
+ nao4
786
+ ne
787
+ ne2
788
+ ne4
789
+ nei3
790
+ nei4
791
+ nen4
792
+ neng2
793
+ ni1
794
+ ni2
795
+ ni3
796
+ ni4
797
+ nian1
798
+ nian2
799
+ nian3
800
+ nian4
801
+ niang2
802
+ niang4
803
+ niao2
804
+ niao3
805
+ niao4
806
+ nie1
807
+ nie4
808
+ nin2
809
+ ning2
810
+ ning3
811
+ ning4
812
+ niu1
813
+ niu2
814
+ niu3
815
+ niu4
816
+ nong2
817
+ nong4
818
+ nou4
819
+ nu2
820
+ nu3
821
+ nu4
822
+ nuan3
823
+ nuo2
824
+ nuo4
825
+ nv2
826
+ nv3
827
+ nve4
828
+ o
829
+ o1
830
+ o2
831
+ ou1
832
+ ou2
833
+ ou3
834
+ ou4
835
+ p
836
+ pa1
837
+ pa2
838
+ pa4
839
+ pai1
840
+ pai2
841
+ pai3
842
+ pai4
843
+ pan1
844
+ pan2
845
+ pan4
846
+ pang1
847
+ pang2
848
+ pang4
849
+ pao1
850
+ pao2
851
+ pao3
852
+ pao4
853
+ pei1
854
+ pei2
855
+ pei4
856
+ pen1
857
+ pen2
858
+ pen4
859
+ peng1
860
+ peng2
861
+ peng3
862
+ peng4
863
+ pi1
864
+ pi2
865
+ pi3
866
+ pi4
867
+ pian1
868
+ pian2
869
+ pian4
870
+ piao1
871
+ piao2
872
+ piao3
873
+ piao4
874
+ pie1
875
+ pie2
876
+ pie3
877
+ pin1
878
+ pin2
879
+ pin3
880
+ pin4
881
+ ping1
882
+ ping2
883
+ po1
884
+ po2
885
+ po3
886
+ po4
887
+ pou1
888
+ pu1
889
+ pu2
890
+ pu3
891
+ pu4
892
+ q
893
+ qi1
894
+ qi2
895
+ qi3
896
+ qi4
897
+ qia1
898
+ qia3
899
+ qia4
900
+ qian1
901
+ qian2
902
+ qian3
903
+ qian4
904
+ qiang1
905
+ qiang2
906
+ qiang3
907
+ qiang4
908
+ qiao1
909
+ qiao2
910
+ qiao3
911
+ qiao4
912
+ qie1
913
+ qie2
914
+ qie3
915
+ qie4
916
+ qin1
917
+ qin2
918
+ qin3
919
+ qin4
920
+ qing1
921
+ qing2
922
+ qing3
923
+ qing4
924
+ qiong1
925
+ qiong2
926
+ qiu1
927
+ qiu2
928
+ qiu3
929
+ qu1
930
+ qu2
931
+ qu3
932
+ qu4
933
+ quan1
934
+ quan2
935
+ quan3
936
+ quan4
937
+ que1
938
+ que2
939
+ que4
940
+ qun2
941
+ r
942
+ ran2
943
+ ran3
944
+ rang1
945
+ rang2
946
+ rang3
947
+ rang4
948
+ rao2
949
+ rao3
950
+ rao4
951
+ re2
952
+ re3
953
+ re4
954
+ ren2
955
+ ren3
956
+ ren4
957
+ reng1
958
+ reng2
959
+ ri4
960
+ rong1
961
+ rong2
962
+ rong3
963
+ rou2
964
+ rou4
965
+ ru2
966
+ ru3
967
+ ru4
968
+ ruan2
969
+ ruan3
970
+ rui3
971
+ rui4
972
+ run4
973
+ ruo4
974
+ s
975
+ sa1
976
+ sa2
977
+ sa3
978
+ sa4
979
+ sai1
980
+ sai4
981
+ san1
982
+ san2
983
+ san3
984
+ san4
985
+ sang1
986
+ sang3
987
+ sang4
988
+ sao1
989
+ sao2
990
+ sao3
991
+ sao4
992
+ se4
993
+ sen1
994
+ seng1
995
+ sha1
996
+ sha2
997
+ sha3
998
+ sha4
999
+ shai1
1000
+ shai2
1001
+ shai3
1002
+ shai4
1003
+ shan1
1004
+ shan3
1005
+ shan4
1006
+ shang
1007
+ shang1
1008
+ shang3
1009
+ shang4
1010
+ shao1
1011
+ shao2
1012
+ shao3
1013
+ shao4
1014
+ she1
1015
+ she2
1016
+ she3
1017
+ she4
1018
+ shei2
1019
+ shen1
1020
+ shen2
1021
+ shen3
1022
+ shen4
1023
+ sheng1
1024
+ sheng2
1025
+ sheng3
1026
+ sheng4
1027
+ shi
1028
+ shi1
1029
+ shi2
1030
+ shi3
1031
+ shi4
1032
+ shou1
1033
+ shou2
1034
+ shou3
1035
+ shou4
1036
+ shu1
1037
+ shu2
1038
+ shu3
1039
+ shu4
1040
+ shua1
1041
+ shua2
1042
+ shua3
1043
+ shua4
1044
+ shuai1
1045
+ shuai3
1046
+ shuai4
1047
+ shuan1
1048
+ shuan4
1049
+ shuang1
1050
+ shuang3
1051
+ shui2
1052
+ shui3
1053
+ shui4
1054
+ shun3
1055
+ shun4
1056
+ shuo1
1057
+ shuo4
1058
+ si1
1059
+ si2
1060
+ si3
1061
+ si4
1062
+ song1
1063
+ song3
1064
+ song4
1065
+ sou1
1066
+ sou3
1067
+ sou4
1068
+ su1
1069
+ su2
1070
+ su4
1071
+ suan1
1072
+ suan4
1073
+ sui1
1074
+ sui2
1075
+ sui3
1076
+ sui4
1077
+ sun1
1078
+ sun3
1079
+ suo
1080
+ suo1
1081
+ suo2
1082
+ suo3
1083
+ t
1084
+ ta1
1085
+ ta2
1086
+ ta3
1087
+ ta4
1088
+ tai1
1089
+ tai2
1090
+ tai4
1091
+ tan1
1092
+ tan2
1093
+ tan3
1094
+ tan4
1095
+ tang1
1096
+ tang2
1097
+ tang3
1098
+ tang4
1099
+ tao1
1100
+ tao2
1101
+ tao3
1102
+ tao4
1103
+ te4
1104
+ teng2
1105
+ ti1
1106
+ ti2
1107
+ ti3
1108
+ ti4
1109
+ tian1
1110
+ tian2
1111
+ tian3
1112
+ tiao1
1113
+ tiao2
1114
+ tiao3
1115
+ tiao4
1116
+ tie1
1117
+ tie2
1118
+ tie3
1119
+ tie4
1120
+ ting1
1121
+ ting2
1122
+ ting3
1123
+ tong1
1124
+ tong2
1125
+ tong3
1126
+ tong4
1127
+ tou
1128
+ tou1
1129
+ tou2
1130
+ tou4
1131
+ tu1
1132
+ tu2
1133
+ tu3
1134
+ tu4
1135
+ tuan1
1136
+ tuan2
1137
+ tui1
1138
+ tui2
1139
+ tui3
1140
+ tui4
1141
+ tun1
1142
+ tun2
1143
+ tun4
1144
+ tuo1
1145
+ tuo2
1146
+ tuo3
1147
+ tuo4
1148
+ u
1149
+ v
1150
+ w
1151
+ wa
1152
+ wa1
1153
+ wa2
1154
+ wa3
1155
+ wa4
1156
+ wai1
1157
+ wai3
1158
+ wai4
1159
+ wan1
1160
+ wan2
1161
+ wan3
1162
+ wan4
1163
+ wang1
1164
+ wang2
1165
+ wang3
1166
+ wang4
1167
+ wei1
1168
+ wei2
1169
+ wei3
1170
+ wei4
1171
+ wen1
1172
+ wen2
1173
+ wen3
1174
+ wen4
1175
+ weng1
1176
+ weng4
1177
+ wo1
1178
+ wo2
1179
+ wo3
1180
+ wo4
1181
+ wu1
1182
+ wu2
1183
+ wu3
1184
+ wu4
1185
+ x
1186
+ xi1
1187
+ xi2
1188
+ xi3
1189
+ xi4
1190
+ xia1
1191
+ xia2
1192
+ xia4
1193
+ xian1
1194
+ xian2
1195
+ xian3
1196
+ xian4
1197
+ xiang1
1198
+ xiang2
1199
+ xiang3
1200
+ xiang4
1201
+ xiao1
1202
+ xiao2
1203
+ xiao3
1204
+ xiao4
1205
+ xie1
1206
+ xie2
1207
+ xie3
1208
+ xie4
1209
+ xin1
1210
+ xin2
1211
+ xin4
1212
+ xing1
1213
+ xing2
1214
+ xing3
1215
+ xing4
1216
+ xiong1
1217
+ xiong2
1218
+ xiu1
1219
+ xiu3
1220
+ xiu4
1221
+ xu
1222
+ xu1
1223
+ xu2
1224
+ xu3
1225
+ xu4
1226
+ xuan1
1227
+ xuan2
1228
+ xuan3
1229
+ xuan4
1230
+ xue1
1231
+ xue2
1232
+ xue3
1233
+ xue4
1234
+ xun1
1235
+ xun2
1236
+ xun4
1237
+ y
1238
+ ya
1239
+ ya1
1240
+ ya2
1241
+ ya3
1242
+ ya4
1243
+ yan1
1244
+ yan2
1245
+ yan3
1246
+ yan4
1247
+ yang1
1248
+ yang2
1249
+ yang3
1250
+ yang4
1251
+ yao1
1252
+ yao2
1253
+ yao3
1254
+ yao4
1255
+ ye1
1256
+ ye2
1257
+ ye3
1258
+ ye4
1259
+ yi
1260
+ yi1
1261
+ yi2
1262
+ yi3
1263
+ yi4
1264
+ yin1
1265
+ yin2
1266
+ yin3
1267
+ yin4
1268
+ ying1
1269
+ ying2
1270
+ ying3
1271
+ ying4
1272
+ yo1
1273
+ yong1
1274
+ yong2
1275
+ yong3
1276
+ yong4
1277
+ you1
1278
+ you2
1279
+ you3
1280
+ you4
1281
+ yu1
1282
+ yu2
1283
+ yu3
1284
+ yu4
1285
+ yuan1
1286
+ yuan2
1287
+ yuan3
1288
+ yuan4
1289
+ yue1
1290
+ yue4
1291
+ yun1
1292
+ yun2
1293
+ yun3
1294
+ yun4
1295
+ z
1296
+ za1
1297
+ za2
1298
+ za3
1299
+ zai1
1300
+ zai3
1301
+ zai4
1302
+ zan1
1303
+ zan2
1304
+ zan3
1305
+ zan4
1306
+ zang1
1307
+ zang4
1308
+ zao1
1309
+ zao2
1310
+ zao3
1311
+ zao4
1312
+ ze2
1313
+ ze4
1314
+ zei2
1315
+ zen3
1316
+ zeng1
1317
+ zeng4
1318
+ zha1
1319
+ zha2
1320
+ zha3
1321
+ zha4
1322
+ zhai1
1323
+ zhai2
1324
+ zhai3
1325
+ zhai4
1326
+ zhan1
1327
+ zhan2
1328
+ zhan3
1329
+ zhan4
1330
+ zhang1
1331
+ zhang2
1332
+ zhang3
1333
+ zhang4
1334
+ zhao1
1335
+ zhao2
1336
+ zhao3
1337
+ zhao4
1338
+ zhe
1339
+ zhe1
1340
+ zhe2
1341
+ zhe3
1342
+ zhe4
1343
+ zhen1
1344
+ zhen2
1345
+ zhen3
1346
+ zhen4
1347
+ zheng1
1348
+ zheng2
1349
+ zheng3
1350
+ zheng4
1351
+ zhi1
1352
+ zhi2
1353
+ zhi3
1354
+ zhi4
1355
+ zhong1
1356
+ zhong2
1357
+ zhong3
1358
+ zhong4
1359
+ zhou1
1360
+ zhou2
1361
+ zhou3
1362
+ zhou4
1363
+ zhu1
1364
+ zhu2
1365
+ zhu3
1366
+ zhu4
1367
+ zhua1
1368
+ zhua2
1369
+ zhua3
1370
+ zhuai1
1371
+ zhuai3
1372
+ zhuai4
1373
+ zhuan1
1374
+ zhuan2
1375
+ zhuan3
1376
+ zhuan4
1377
+ zhuang1
1378
+ zhuang4
1379
+ zhui1
1380
+ zhui4
1381
+ zhun1
1382
+ zhun2
1383
+ zhun3
1384
+ zhuo1
1385
+ zhuo2
1386
+ zi
1387
+ zi1
1388
+ zi2
1389
+ zi3
1390
+ zi4
1391
+ zong1
1392
+ zong2
1393
+ zong3
1394
+ zong4
1395
+ zou1
1396
+ zou2
1397
+ zou3
1398
+ zou4
1399
+ zu1
1400
+ zu2
1401
+ zu3
1402
+ zuan1
1403
+ zuan3
1404
+ zuan4
1405
+ zui2
1406
+ zui3
1407
+ zui4
1408
+ zun1
1409
+ zuo
1410
+ zuo1
1411
+ zuo2
1412
+ zuo3
1413
+ zuo4
1414
+ {
1415
+ ~
1416
+ ¡
1417
+ ¢
1418
+ £
1419
+ ¥
1420
+ §
1421
+ ¨
1422
+ ©
1423
+ «
1424
+ ®
1425
+ ¯
1426
+ °
1427
+ ±
1428
+ ²
1429
+ ³
1430
+ ´
1431
+ µ
1432
+ ·
1433
+ ¹
1434
+ º
1435
+ »
1436
+ ¼
1437
+ ½
1438
+ ¾
1439
+ ¿
1440
+ À
1441
+ Á
1442
+ Â
1443
+ Ã
1444
+ Ä
1445
+ Å
1446
+ Æ
1447
+ Ç
1448
+ È
1449
+ É
1450
+ Ê
1451
+ Í
1452
+ Î
1453
+ Ñ
1454
+ Ó
1455
+ Ö
1456
+ ×
1457
+ Ø
1458
+ Ú
1459
+ Ü
1460
+ Ý
1461
+ Þ
1462
+ ß
1463
+ à
1464
+ á
1465
+ â
1466
+ ã
1467
+ ä
1468
+ å
1469
+ æ
1470
+ ç
1471
+ è
1472
+ é
1473
+ ê
1474
+ ë
1475
+ ì
1476
+ í
1477
+ î
1478
+ ï
1479
+ ð
1480
+ ñ
1481
+ ò
1482
+ ó
1483
+ ô
1484
+ õ
1485
+ ö
1486
+ ø
1487
+ ù
1488
+ ú
1489
+ û
1490
+ ü
1491
+ ý
1492
+ Ā
1493
+ ā
1494
+ ă
1495
+ ą
1496
+ ć
1497
+ Č
1498
+ č
1499
+ Đ
1500
+ đ
1501
+ ē
1502
+ ė
1503
+ ę
1504
+ ě
1505
+ ĝ
1506
+ ğ
1507
+ ħ
1508
+ ī
1509
+ į
1510
+ İ
1511
+ ı
1512
+ Ł
1513
+ ł
1514
+ ń
1515
+ ņ
1516
+ ň
1517
+ ŋ
1518
+ Ō
1519
+ ō
1520
+ ő
1521
+ œ
1522
+ ř
1523
+ Ś
1524
+ ś
1525
+ Ş
1526
+ ş
1527
+ Š
1528
+ š
1529
+ Ť
1530
+ ť
1531
+ ũ
1532
+ ū
1533
+ ź
1534
+ Ż
1535
+ ż
1536
+ Ž
1537
+ ž
1538
+ ơ
1539
+ ư
1540
+ ǎ
1541
+ ǐ
1542
+ ǒ
1543
+ ǔ
1544
+ ǚ
1545
+ ș
1546
+ ț
1547
+ ɑ
1548
+ ɔ
1549
+ ɕ
1550
+ ə
1551
+ ɛ
1552
+ ɜ
1553
+ ɡ
1554
+ ɣ
1555
+ ɪ
1556
+ ɫ
1557
+ ɴ
1558
+ ɹ
1559
+ ɾ
1560
+ ʃ
1561
+ ʊ
1562
+ ʌ
1563
+ ʒ
1564
+ ʔ
1565
+ ʰ
1566
+ ʷ
1567
+ ʻ
1568
+ ʾ
1569
+ ʿ
1570
+ ˈ
1571
+ ː
1572
+ ˙
1573
+ ˜
1574
+ ˢ
1575
+ ́
1576
+ ̅
1577
+ Α
1578
+ Β
1579
+ Δ
1580
+ Ε
1581
+ Θ
1582
+ Κ
1583
+ Λ
1584
+ Μ
1585
+ Ξ
1586
+ Π
1587
+ Σ
1588
+ Τ
1589
+ Φ
1590
+ Χ
1591
+ Ψ
1592
+ Ω
1593
+ ά
1594
+ έ
1595
+ ή
1596
+ ί
1597
+ α
1598
+ β
1599
+ γ
1600
+ δ
1601
+ ε
1602
+ ζ
1603
+ η
1604
+ θ
1605
+ ι
1606
+ κ
1607
+ λ
1608
+ μ
1609
+ ν
1610
+ ξ
1611
+ ο
1612
+ π
1613
+ ρ
1614
+ ς
1615
+ σ
1616
+ τ
1617
+ υ
1618
+ φ
1619
+ χ
1620
+ ψ
1621
+ ω
1622
+ ϊ
1623
+ ό
1624
+ ύ
1625
+ ώ
1626
+ ϕ
1627
+ ϵ
1628
+ Ё
1629
+ А
1630
+ Б
1631
+ В
1632
+ Г
1633
+ Д
1634
+ Е
1635
+ Ж
1636
+ З
1637
+ И
1638
+ Й
1639
+ К
1640
+ Л
1641
+ М
1642
+ Н
1643
+ О
1644
+ П
1645
+ Р
1646
+ С
1647
+ Т
1648
+ У
1649
+ Ф
1650
+ Х
1651
+ Ц
1652
+ Ч
1653
+ Ш
1654
+ Щ
1655
+ Ы
1656
+ Ь
1657
+ Э
1658
+ Ю
1659
+ Я
1660
+ а
1661
+ б
1662
+ в
1663
+ г
1664
+ д
1665
+ е
1666
+ ж
1667
+ з
1668
+ и
1669
+ й
1670
+ к
1671
+ л
1672
+ м
1673
+ н
1674
+ о
1675
+ п
1676
+ р
1677
+ с
1678
+ т
1679
+ у
1680
+ ф
1681
+ х
1682
+ ц
1683
+ ч
1684
+ ш
1685
+ щ
1686
+ ъ
1687
+ ы
1688
+ ь
1689
+ э
1690
+ ю
1691
+ я
1692
+ ё
1693
+ і
1694
+ ְ
1695
+ ִ
1696
+ ֵ
1697
+ ֶ
1698
+ ַ
1699
+ ָ
1700
+ ֹ
1701
+ ּ
1702
+ ־
1703
+ ׁ
1704
+ א
1705
+ ב
1706
+ ג
1707
+ ד
1708
+ ה
1709
+ ו
1710
+ ז
1711
+ ח
1712
+ ט
1713
+ י
1714
+ כ
1715
+ ל
1716
+ ם
1717
+ מ
1718
+ ן
1719
+ נ
1720
+ ס
1721
+ ע
1722
+ פ
1723
+ ק
1724
+ ר
1725
+ ש
1726
+ ת
1727
+ أ
1728
+ ب
1729
+ ة
1730
+ ت
1731
+ ج
1732
+ ح
1733
+ د
1734
+ ر
1735
+ ز
1736
+ س
1737
+ ص
1738
+ ط
1739
+ ع
1740
+ ق
1741
+ ك
1742
+ ل
1743
+ م
1744
+ ن
1745
+ ه
1746
+ و
1747
+ ي
1748
+ َ
1749
+ ُ
1750
+ ِ
1751
+ ْ
1752
+ [entries 1752 through 2544: single-character tokens that did not render in this view (presumably the Chinese-character portion of the vocabulary); omitted here]
+ 𠮶
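The listing above is the tokenizer vocabulary, one token per line (Latin letters, pinyin syllables with tone digits, punctuation, and non-Latin characters), where a token's index is its line position. Below is a minimal sketch of how such a file can be turned into a char-to-index map; it is an illustrative loader, not the repository's own `get_tokenizer`, whose exact handling of the first line and of unknown tokens may differ.

```python
# Illustrative loader for a vocab.txt laid out one token per line.
# Assumption: token index == zero-based line number, as the listing above suggests.
def load_vocab(vocab_path: str):
    vocab_char_map = {}
    with open(vocab_path, encoding="utf-8") as f:
        for i, line in enumerate(f):
            vocab_char_map[line.rstrip("\n")] = i
    return vocab_char_map, len(vocab_char_map)


if __name__ == "__main__":
    char_map, size = load_vocab("src/f5_tts/infer/examples/vocab.txt")
    print(size)                  # the file added above has 2545 lines
    print(char_map.get("zuo4"))  # index of one of the pinyin tokens listed earlier
```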
src/f5_tts/infer/infer_cli.py ADDED
@@ -0,0 +1,200 @@
1
+ import argparse
2
+ import codecs
3
+ import os
4
+ import re
5
+ from pathlib import Path
6
+ from importlib.resources import files
7
+
8
+ import numpy as np
9
+ import soundfile as sf
10
+ import tomli
11
+ from cached_path import cached_path
12
+
13
+ from f5_tts.model import DiT, UNetT
14
+ from f5_tts.infer.utils_infer import (
15
+ load_vocoder,
16
+ load_model,
17
+ preprocess_ref_audio_text,
18
+ infer_process,
19
+ remove_silence_for_generated_wav,
20
+ )
21
+
22
+
23
+ parser = argparse.ArgumentParser(
24
+ prog="python3 infer_cli.py",
25
+ description="Command-line interface for E2/F5 TTS with advanced batch processing.",
26
+ epilog="Specify options above to override one or more settings from config.",
27
+ )
28
+ parser.add_argument(
29
+ "-c",
30
+ "--config",
31
+ help="Configuration file. Default=infer/examples/basic/basic.toml",
32
+ default=os.path.join(files("f5_tts").joinpath("infer/examples/basic"), "basic.toml"),
33
+ )
34
+ parser.add_argument(
35
+ "-m",
36
+ "--model",
37
+ help="F5-TTS | E2-TTS",
38
+ )
39
+ parser.add_argument(
40
+ "-p",
41
+ "--ckpt_file",
42
+ help="The Checkpoint .pt",
43
+ )
44
+ parser.add_argument(
45
+ "-v",
46
+ "--vocab_file",
47
+ help="The vocab .txt",
48
+ )
49
+ parser.add_argument("-r", "--ref_audio", type=str, help="Reference audio file < 15 seconds.")
50
+ parser.add_argument("-s", "--ref_text", type=str, default="666", help="Transcript of the reference audio (the sentinel default '666' falls back to the config value).")
51
+ parser.add_argument(
52
+ "-t",
53
+ "--gen_text",
54
+ type=str,
55
+ help="Text to generate.",
56
+ )
57
+ parser.add_argument(
58
+ "-f",
59
+ "--gen_file",
60
+ type=str,
61
+ help="File with text to generate. Overrides --gen_text.",
62
+ )
63
+ parser.add_argument(
64
+ "-o",
65
+ "--output_dir",
66
+ type=str,
67
+ help="Path to the output folder.",
68
+ )
69
+ parser.add_argument(
70
+ "--remove_silence",
71
+ help="Remove silence.",
72
+ )
73
+ parser.add_argument(
74
+ "--load_vocoder_from_local",
75
+ action="store_true",
76
+ help="Load the vocoder from a local path. Default: ../checkpoints/charactr/vocos-mel-24khz",
77
+ )
78
+ parser.add_argument(
79
+ "--speed",
80
+ type=float,
81
+ default=1.0,
82
+ help="Adjust the speed of the audio generation (default: 1.0)",
83
+ )
84
+ args = parser.parse_args()
85
+
86
+ config = tomli.load(open(args.config, "rb"))
87
+
88
+ ref_audio = args.ref_audio if args.ref_audio else config["ref_audio"]
89
+ ref_text = args.ref_text if args.ref_text != "666" else config["ref_text"]
90
+ gen_text = args.gen_text if args.gen_text else config["gen_text"]
91
+ gen_file = args.gen_file if args.gen_file else config["gen_file"]
92
+
93
+ # patches for pip pkg user
94
+ if "infer/examples/" in ref_audio:
95
+ ref_audio = str(files("f5_tts").joinpath(f"{ref_audio}"))
96
+ if "infer/examples/" in gen_file:
97
+ gen_file = str(files("f5_tts").joinpath(f"{gen_file}"))
98
+ if "voices" in config:
99
+ for voice in config["voices"]:
100
+ voice_ref_audio = config["voices"][voice]["ref_audio"]
101
+ if "infer/examples/" in voice_ref_audio:
102
+ config["voices"][voice]["ref_audio"] = str(files("f5_tts").joinpath(f"{voice_ref_audio}"))
103
+
104
+ if gen_file:
105
+ gen_text = codecs.open(gen_file, "r", "utf-8").read()
106
+ output_dir = args.output_dir if args.output_dir else config["output_dir"]
107
+ model = args.model if args.model else config["model"]
108
+ ckpt_file = args.ckpt_file if args.ckpt_file else ""
109
+ vocab_file = args.vocab_file if args.vocab_file else ""
110
+ remove_silence = args.remove_silence if args.remove_silence else config["remove_silence"]
111
+ speed = args.speed
112
+ wave_path = Path(output_dir) / "infer_cli_out.wav"
113
+ # spectrogram_path = Path(output_dir) / "infer_cli_out.png"
114
+ vocos_local_path = "../checkpoints/charactr/vocos-mel-24khz"
115
+
116
+ vocos = load_vocoder(is_local=args.load_vocoder_from_local, local_path=vocos_local_path)
117
+
118
+
119
+ # load models
120
+ if model == "F5-TTS":
121
+ model_cls = DiT
122
+ model_cfg = dict(dim=1024, depth=22, heads=16, ff_mult=2, text_dim=512, conv_layers=4)
123
+ if ckpt_file == "":
124
+ repo_name = "F5-TTS"
125
+ exp_name = "F5TTS_Base"
126
+ ckpt_step = 1200000
127
+ ckpt_file = str(cached_path(f"hf://SWivid/{repo_name}/{exp_name}/model_{ckpt_step}.safetensors"))
128
+ # ckpt_file = f"ckpts/{exp_name}/model_{ckpt_step}.pt" # .pt | .safetensors; local path
129
+
130
+ elif model == "E2-TTS":
131
+ model_cls = UNetT
132
+ model_cfg = dict(dim=1024, depth=24, heads=16, ff_mult=4)
133
+ if ckpt_file == "":
134
+ repo_name = "E2-TTS"
135
+ exp_name = "E2TTS_Base"
136
+ ckpt_step = 1200000
137
+ ckpt_file = str(cached_path(f"hf://SWivid/{repo_name}/{exp_name}/model_{ckpt_step}.safetensors"))
138
+ # ckpt_file = f"ckpts/{exp_name}/model_{ckpt_step}.pt" # .pt | .safetensors; local path
139
+
140
+ print(f"Using {model}...")
141
+ ema_model = load_model(model_cls, model_cfg, ckpt_file, vocab_file)
142
+
143
+
144
+ def main_process(ref_audio, ref_text, text_gen, model_obj, remove_silence, speed):
145
+ main_voice = {"ref_audio": ref_audio, "ref_text": ref_text}
146
+ if "voices" not in config:
147
+ voices = {"main": main_voice}
148
+ else:
149
+ voices = config["voices"]
150
+ voices["main"] = main_voice
151
+ for voice in voices:
152
+ voices[voice]["ref_audio"], voices[voice]["ref_text"] = preprocess_ref_audio_text(
153
+ voices[voice]["ref_audio"], voices[voice]["ref_text"]
154
+ )
155
+ print("Voice:", voice)
156
+ print("Ref_audio:", voices[voice]["ref_audio"])
157
+ print("Ref_text:", voices[voice]["ref_text"])
158
+
159
+ generated_audio_segments = []
160
+ reg1 = r"(?=\[\w+\])"
161
+ chunks = re.split(reg1, text_gen)
162
+ reg2 = r"\[(\w+)\]"
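+ # e.g. reg1 splits "[main] Hello. [town] Hi there." just before each [voice] tag,
+ # and reg2 then pulls the tag name ("main", "town") off the front of each chunk.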
163
+ for text in chunks:
164
+ match = re.match(reg2, text)
165
+ if match:
166
+ voice = match[1]
167
+ else:
168
+ print("No voice tag found, using main.")
169
+ voice = "main"
170
+ if voice not in voices:
171
+ print(f"Voice {voice} not found, using main.")
172
+ voice = "main"
173
+ text = re.sub(reg2, "", text)
174
+ gen_text = text.strip()
175
+ ref_audio = voices[voice]["ref_audio"]
176
+ ref_text = voices[voice]["ref_text"]
177
+ print(f"Voice: {voice}")
178
+ audio, final_sample_rate, spectrogram = infer_process(ref_audio, ref_text, gen_text, model_obj, speed=speed)
179
+ generated_audio_segments.append(audio)
180
+
181
+ if generated_audio_segments:
182
+ final_wave = np.concatenate(generated_audio_segments)
183
+
184
+ if not os.path.exists(output_dir):
185
+ os.makedirs(output_dir)
186
+
187
+ with open(wave_path, "wb") as f:
188
+ sf.write(f.name, final_wave, final_sample_rate)
189
+ # Remove silence
190
+ if remove_silence:
191
+ remove_silence_for_generated_wav(f.name)
192
+ print(f.name)
193
+
194
+
195
+ def main():
196
+ main_process(ref_audio, ref_text, gen_text, ema_model, remove_silence, speed)
197
+
198
+
199
+ if __name__ == "__main__":
200
+ main()
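As a quick orientation to the flow above: every setting first comes from the TOML config (basic.toml by default) and is overridden by the matching CLI flag when one is given. The snippet below sketches that precedence with the same key names the script reads; the literal values are only illustrative.

```python
# Sketch of the config/CLI precedence used by infer_cli.py (illustrative values).
import tomli

with open("src/f5_tts/infer/examples/basic/basic.toml", "rb") as f:
    config = tomli.load(f)

cli_gen_text = "Hello from the command line."  # stand-in for args.gen_text
gen_text = cli_gen_text if cli_gen_text else config["gen_text"]  # CLI wins when set
ref_audio = config["ref_audio"]                                  # otherwise the TOML value is used
output_dir = config["output_dir"]
print(gen_text, ref_audio, output_dir)
```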
src/f5_tts/infer/infer_gradio.py ADDED
@@ -0,0 +1,729 @@
1
+ # ruff: noqa: E402
2
+ # Above allows ruff to ignore E402: module level import not at top of file
3
+
4
+ import re
5
+ import tempfile
6
+
7
+ import click
8
+ import gradio as gr
9
+ import numpy as np
10
+ import soundfile as sf
11
+ import torchaudio
12
+ from cached_path import cached_path
13
+ from transformers import AutoModelForCausalLM, AutoTokenizer
14
+
15
+ try:
16
+ import spaces
17
+
18
+ USING_SPACES = True
19
+ except ImportError:
20
+ USING_SPACES = False
21
+
22
+
23
+ def gpu_decorator(func):
24
+ if USING_SPACES:
25
+ return spaces.GPU(func)
26
+ else:
27
+ return func
28
+
29
+
30
+ from f5_tts.model import DiT, UNetT
31
+ from f5_tts.infer.utils_infer import (
32
+ load_vocoder,
33
+ load_model,
34
+ preprocess_ref_audio_text,
35
+ infer_process,
36
+ remove_silence_for_generated_wav,
37
+ save_spectrogram,
38
+ )
39
+
40
+ vocos = load_vocoder()
41
+
42
+
43
+ # load models
44
+ F5TTS_model_cfg = dict(dim=1024, depth=22, heads=16, ff_mult=2, text_dim=512, conv_layers=4)
45
+ F5TTS_ema_model = load_model(
46
+ DiT, F5TTS_model_cfg, str(cached_path("hf://SWivid/F5-TTS/F5TTS_Base/model_1200000.safetensors"))
47
+ )
48
+
49
+ E2TTS_model_cfg = dict(dim=1024, depth=24, heads=16, ff_mult=4)
50
+ E2TTS_ema_model = load_model(
51
+ UNetT, E2TTS_model_cfg, str(cached_path("hf://SWivid/E2-TTS/E2TTS_Base/model_1200000.safetensors"))
52
+ )
53
+
54
+ chat_model_state = None
55
+ chat_tokenizer_state = None
56
+
57
+
58
+ @gpu_decorator
59
+ def generate_response(messages, model, tokenizer):
60
+ """Generate response using Qwen"""
61
+ text = tokenizer.apply_chat_template(
62
+ messages,
63
+ tokenize=False,
64
+ add_generation_prompt=True,
65
+ )
66
+
67
+ model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
68
+ generated_ids = model.generate(
69
+ **model_inputs,
70
+ max_new_tokens=512,
71
+ temperature=0.7,
72
+ top_p=0.95,
73
+ )
74
+
75
+ generated_ids = [
76
+ output_ids[len(input_ids) :] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
77
+ ]
78
+ return tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
79
+
80
+
81
+ @gpu_decorator
82
+ def infer(
83
+ ref_audio_orig, ref_text, gen_text, model, remove_silence, cross_fade_duration=0.15, speed=1, show_info=gr.Info
84
+ ):
85
+ ref_audio, ref_text = preprocess_ref_audio_text(ref_audio_orig, ref_text, show_info=show_info)
86
+
87
+ if model == "F5-TTS":
88
+ ema_model = F5TTS_ema_model
89
+ elif model == "E2-TTS":
90
+ ema_model = E2TTS_ema_model
91
+
92
+ final_wave, final_sample_rate, combined_spectrogram = infer_process(
93
+ ref_audio,
94
+ ref_text,
95
+ gen_text,
96
+ ema_model,
97
+ cross_fade_duration=cross_fade_duration,
98
+ speed=speed,
99
+ show_info=show_info,
100
+ progress=gr.Progress(),
101
+ )
102
+
103
+ # Remove silence
104
+ if remove_silence:
105
+ with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as f:
106
+ sf.write(f.name, final_wave, final_sample_rate)
107
+ remove_silence_for_generated_wav(f.name)
108
+ final_wave, _ = torchaudio.load(f.name)
109
+ final_wave = final_wave.squeeze().cpu().numpy()
110
+
111
+ # Save the spectrogram
112
+ with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp_spectrogram:
113
+ spectrogram_path = tmp_spectrogram.name
114
+ save_spectrogram(combined_spectrogram, spectrogram_path)
115
+
116
+ return (final_sample_rate, final_wave), spectrogram_path
117
+
118
+
119
+ with gr.Blocks() as app_credits:
120
+ gr.Markdown("""
121
+ # Credits
122
+
123
+ * [mrfakename](https://github.com/fakerybakery) for the original [online demo](https://huggingface.co/spaces/mrfakename/E2-F5-TTS)
124
+ * [RootingInLoad](https://github.com/RootingInLoad) for initial chunk generation and podcast app exploration
125
+ * [jpgallegoar](https://github.com/jpgallegoar) for multiple speech-type generation & voice chat
126
+ """)
127
+ with gr.Blocks() as app_tts:
128
+ gr.Markdown("# Batched TTS")
129
+ ref_audio_input = gr.Audio(label="Reference Audio", type="filepath")
130
+ gen_text_input = gr.Textbox(label="Text to Generate", lines=10)
131
+ model_choice = gr.Radio(choices=["F5-TTS", "E2-TTS"], label="Choose TTS Model", value="F5-TTS")
132
+ generate_btn = gr.Button("Synthesize", variant="primary")
133
+ with gr.Accordion("Advanced Settings", open=False):
134
+ ref_text_input = gr.Textbox(
135
+ label="Reference Text",
136
+ info="Leave blank to automatically transcribe the reference audio. If you enter text, it overrides the automatic transcription.",
137
+ lines=2,
138
+ )
139
+ remove_silence = gr.Checkbox(
140
+ label="Remove Silences",
141
+ info="The model tends to produce silences, especially on longer audio. We can manually remove silences if needed. Note that this is an experimental feature and may produce strange results. This will also increase generation time.",
142
+ value=False,
143
+ )
144
+ speed_slider = gr.Slider(
145
+ label="Speed",
146
+ minimum=0.3,
147
+ maximum=2.0,
148
+ value=1.0,
149
+ step=0.1,
150
+ info="Adjust the speed of the audio.",
151
+ )
152
+ cross_fade_duration_slider = gr.Slider(
153
+ label="Cross-Fade Duration (s)",
154
+ minimum=0.0,
155
+ maximum=1.0,
156
+ value=0.15,
157
+ step=0.01,
158
+ info="Set the duration of the cross-fade between audio clips.",
159
+ )
160
+
161
+ audio_output = gr.Audio(label="Synthesized Audio")
162
+ spectrogram_output = gr.Image(label="Spectrogram")
163
+
164
+ generate_btn.click(
165
+ infer,
166
+ inputs=[
167
+ ref_audio_input,
168
+ ref_text_input,
169
+ gen_text_input,
170
+ model_choice,
171
+ remove_silence,
172
+ cross_fade_duration_slider,
173
+ speed_slider,
174
+ ],
175
+ outputs=[audio_output, spectrogram_output],
176
+ )
177
+
178
+
179
+ def parse_speechtypes_text(gen_text):
180
+ # Pattern to find {speechtype}
181
+ pattern = r"\{(.*?)\}"
182
+
183
+ # Split the text by the pattern
184
+ tokens = re.split(pattern, gen_text)
185
+
186
+ segments = []
187
+
188
+ current_style = "Regular"
189
+
190
+ for i in range(len(tokens)):
191
+ if i % 2 == 0:
192
+ # This is text
193
+ text = tokens[i].strip()
194
+ if text:
195
+ segments.append({"style": current_style, "text": text})
196
+ else:
197
+ # This is style
198
+ style = tokens[i].strip()
199
+ current_style = style
200
+
201
+ return segments
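+ # Illustrative behavior: parse_speechtypes_text("{Regular} Hello there. {Whisper} Keep it quiet.")
+ # returns [{"style": "Regular", "text": "Hello there."},
+ #          {"style": "Whisper", "text": "Keep it quiet."}]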
202
+
203
+
204
+ with gr.Blocks() as app_multistyle:
205
+ # New section for multistyle generation
206
+ gr.Markdown(
207
+ """
208
+ # Multiple Speech-Type Generation
209
+
210
+ This section allows you to generate multiple speech types or multiple people's voices. Enter your text in the format shown below, and the system will generate speech using the appropriate type. If unspecified, the model will use the regular speech type. The current speech type will be used until the next speech type is specified.
211
+ """
212
+ )
213
+
214
+ with gr.Row():
215
+ gr.Markdown(
216
+ """
217
+ **Example Input:**
218
+ {Regular} Hello, I'd like to order a sandwich please.
219
+ {Surprised} What do you mean you're out of bread?
220
+ {Sad} I really wanted a sandwich though...
221
+ {Angry} You know what, darn you and your little shop!
222
+ {Whisper} I'll just go back home and cry now.
223
+ {Shouting} Why me?!
224
+ """
225
+ )
226
+
227
+ gr.Markdown(
228
+ """
229
+ **Example Input 2:**
230
+ {Speaker1_Happy} Hello, I'd like to order a sandwich please.
231
+ {Speaker2_Regular} Sorry, we're out of bread.
232
+ {Speaker1_Sad} I really wanted a sandwich though...
233
+ {Speaker2_Whisper} I'll give you the last one I was hiding.
234
+ """
235
+ )
236
+
237
+ gr.Markdown(
238
+ "Upload different audio clips for each speech type. The first speech type is mandatory. You can add additional speech types by clicking the 'Add Speech Type' button."
239
+ )
240
+
241
+ # Regular speech type (mandatory)
242
+ with gr.Row():
243
+ with gr.Column():
244
+ regular_name = gr.Textbox(value="Regular", label="Speech Type Name")
245
+ regular_insert = gr.Button("Insert", variant="secondary")
246
+ regular_audio = gr.Audio(label="Regular Reference Audio", type="filepath")
247
+ regular_ref_text = gr.Textbox(label="Reference Text (Regular)", lines=2)
248
+
249
+ # Additional speech types (up to 99 more)
250
+ max_speech_types = 100
251
+ speech_type_rows = []
252
+ speech_type_names = [regular_name]
253
+ speech_type_audios = []
254
+ speech_type_ref_texts = []
255
+ speech_type_delete_btns = []
256
+ speech_type_insert_btns = []
257
+ speech_type_insert_btns.append(regular_insert)
258
+
259
+ for i in range(max_speech_types - 1):
260
+ with gr.Row(visible=False) as row:
261
+ with gr.Column():
262
+ name_input = gr.Textbox(label="Speech Type Name")
263
+ delete_btn = gr.Button("Delete", variant="secondary")
264
+ insert_btn = gr.Button("Insert", variant="secondary")
265
+ audio_input = gr.Audio(label="Reference Audio", type="filepath")
266
+ ref_text_input = gr.Textbox(label="Reference Text", lines=2)
267
+ speech_type_rows.append(row)
268
+ speech_type_names.append(name_input)
269
+ speech_type_audios.append(audio_input)
270
+ speech_type_ref_texts.append(ref_text_input)
271
+ speech_type_delete_btns.append(delete_btn)
272
+ speech_type_insert_btns.append(insert_btn)
273
+
274
+ # Button to add speech type
275
+ add_speech_type_btn = gr.Button("Add Speech Type")
276
+
277
+ # Keep track of current number of speech types
278
+ speech_type_count = gr.State(value=0)
279
+
280
+ # Function to add a speech type
281
+ def add_speech_type_fn(speech_type_count):
282
+ if speech_type_count < max_speech_types - 1:
283
+ speech_type_count += 1
284
+ # Prepare updates for the rows
285
+ row_updates = []
286
+ for i in range(max_speech_types - 1):
287
+ if i < speech_type_count:
288
+ row_updates.append(gr.update(visible=True))
289
+ else:
290
+ row_updates.append(gr.update())
291
+ else:
292
+ # Optionally, show a warning
293
+ row_updates = [gr.update() for _ in range(max_speech_types - 1)]
294
+ return [speech_type_count] + row_updates
295
+
296
+ add_speech_type_btn.click(
297
+ add_speech_type_fn, inputs=speech_type_count, outputs=[speech_type_count] + speech_type_rows
298
+ )
299
+
300
+ # Function to delete a speech type
301
+ def make_delete_speech_type_fn(index):
302
+ def delete_speech_type_fn(speech_type_count):
303
+ # Prepare updates
304
+ row_updates = []
305
+
306
+ for i in range(max_speech_types - 1):
307
+ if i == index:
308
+ row_updates.append(gr.update(visible=False))
309
+ else:
310
+ row_updates.append(gr.update())
311
+
312
+ speech_type_count = max(0, speech_type_count - 1)
313
+
314
+ return [speech_type_count] + row_updates
315
+
316
+ return delete_speech_type_fn
317
+
318
+ # Update delete button clicks
319
+ for i, delete_btn in enumerate(speech_type_delete_btns):
320
+ delete_fn = make_delete_speech_type_fn(i)
321
+ delete_btn.click(delete_fn, inputs=speech_type_count, outputs=[speech_type_count] + speech_type_rows)
322
+
323
+ # Text input for the prompt
324
+ gen_text_input_multistyle = gr.Textbox(
325
+ label="Text to Generate",
326
+ lines=10,
327
+ placeholder="Enter the script with speaker names (or emotion types) at the start of each block, e.g.:\n\n{Regular} Hello, I'd like to order a sandwich please.\n{Surprised} What do you mean you're out of bread?\n{Sad} I really wanted a sandwich though...\n{Angry} You know what, darn you and your little shop!\n{Whisper} I'll just go back home and cry now.\n{Shouting} Why me?!",
328
+ )
329
+
330
+ def make_insert_speech_type_fn(index):
331
+ def insert_speech_type_fn(current_text, speech_type_name):
332
+ current_text = current_text or ""
333
+ speech_type_name = speech_type_name or "None"
334
+ updated_text = current_text + f"{{{speech_type_name}}} "
335
+ return gr.update(value=updated_text)
336
+
337
+ return insert_speech_type_fn
338
+
339
+ for i, insert_btn in enumerate(speech_type_insert_btns):
340
+ insert_fn = make_insert_speech_type_fn(i)
341
+ insert_btn.click(
342
+ insert_fn,
343
+ inputs=[gen_text_input_multistyle, speech_type_names[i]],
344
+ outputs=gen_text_input_multistyle,
345
+ )
346
+
347
+ # Model choice
348
+ model_choice_multistyle = gr.Radio(choices=["F5-TTS", "E2-TTS"], label="Choose TTS Model", value="F5-TTS")
349
+
350
+ with gr.Accordion("Advanced Settings", open=False):
351
+ remove_silence_multistyle = gr.Checkbox(
352
+ label="Remove Silences",
353
+ value=False,
354
+ )
355
+
356
+ # Generate button
357
+ generate_multistyle_btn = gr.Button("Generate Multi-Style Speech", variant="primary")
358
+
359
+ # Output audio
360
+ audio_output_multistyle = gr.Audio(label="Synthesized Audio")
361
+
362
+ @gpu_decorator
363
+ def generate_multistyle_speech(
364
+ regular_audio,
365
+ regular_ref_text,
366
+ gen_text,
367
+ *args,
368
+ ):
369
+ num_additional_speech_types = max_speech_types - 1
370
+ speech_type_names_list = args[:num_additional_speech_types]
371
+ speech_type_audios_list = args[num_additional_speech_types : 2 * num_additional_speech_types]
372
+ speech_type_ref_texts_list = args[2 * num_additional_speech_types : 3 * num_additional_speech_types]
373
+ model_choice = args[3 * num_additional_speech_types + 1]
374
+ remove_silence = args[3 * num_additional_speech_types + 2]  # last element of *args (the checkbox); the +1 slot above is the model choice
375
+
376
+ # Collect the speech types and their audios into a dict
377
+ speech_types = {"Regular": {"audio": regular_audio, "ref_text": regular_ref_text}}
378
+
379
+ for name_input, audio_input, ref_text_input in zip(
380
+ speech_type_names_list, speech_type_audios_list, speech_type_ref_texts_list
381
+ ):
382
+ if name_input and audio_input:
383
+ speech_types[name_input] = {"audio": audio_input, "ref_text": ref_text_input}
384
+
385
+ # Parse the gen_text into segments
386
+ segments = parse_speechtypes_text(gen_text)
387
+
388
+ # For each segment, generate speech
389
+ generated_audio_segments = []
390
+ current_style = "Regular"
391
+
392
+ for segment in segments:
393
+ style = segment["style"]
394
+ text = segment["text"]
395
+
396
+ if style in speech_types:
397
+ current_style = style
398
+ else:
399
+ # If style not available, default to Regular
400
+ current_style = "Regular"
401
+
402
+ ref_audio = speech_types[current_style]["audio"]
403
+ ref_text = speech_types[current_style].get("ref_text", "")
404
+
405
+ # Generate speech for this segment
406
+ audio, _ = infer(
407
+ ref_audio, ref_text, text, model_choice, remove_silence, 0, show_info=print
408
+ )  # show_info=print avoids pulling the page to the top while generating
409
+ sr, audio_data = audio
410
+
411
+ generated_audio_segments.append(audio_data)
412
+
413
+ # Concatenate all audio segments
414
+ if generated_audio_segments:
415
+ final_audio_data = np.concatenate(generated_audio_segments)
416
+ return (sr, final_audio_data)
417
+ else:
418
+ gr.Warning("No audio generated.")
419
+ return None
420
+
421
+ generate_multistyle_btn.click(
422
+ generate_multistyle_speech,
423
+ inputs=[
424
+ regular_audio,
425
+ regular_ref_text,
426
+ gen_text_input_multistyle,
427
+ ]
428
+ + speech_type_names
429
+ + speech_type_audios
430
+ + speech_type_ref_texts
431
+ + [
432
+ model_choice_multistyle,
433
+ remove_silence_multistyle,
434
+ ],
435
+ outputs=audio_output_multistyle,
436
+ )
437
+
438
+ # Validation function to disable Generate button if speech types are missing
439
+ def validate_speech_types(gen_text, regular_name, *args):
440
+ num_additional_speech_types = max_speech_types - 1
441
+ speech_type_names_list = args[:num_additional_speech_types]
442
+
443
+ # Collect the speech types names
444
+ speech_types_available = set()
445
+ if regular_name:
446
+ speech_types_available.add(regular_name)
447
+ for name_input in speech_type_names_list:
448
+ if name_input:
449
+ speech_types_available.add(name_input)
450
+
451
+ # Parse the gen_text to get the speech types used
452
+ segments = parse_speechtypes_text(gen_text)
453
+ speech_types_in_text = set(segment["style"] for segment in segments)
454
+
455
+ # Check if all speech types in text are available
456
+ missing_speech_types = speech_types_in_text - speech_types_available
457
+
458
+ if missing_speech_types:
459
+ # Disable the generate button
460
+ return gr.update(interactive=False)
461
+ else:
462
+ # Enable the generate button
463
+ return gr.update(interactive=True)
464
+
465
+ gen_text_input_multistyle.change(
466
+ validate_speech_types,
467
+ inputs=[gen_text_input_multistyle, regular_name] + speech_type_names,
468
+ outputs=generate_multistyle_btn,
469
+ )
470
+
471
+
472
+ with gr.Blocks() as app_chat:
473
+ gr.Markdown(
474
+ """
475
+ # Voice Chat
476
+ Have a conversation with an AI using your reference voice!
477
+ 1. Upload a reference audio clip and optionally its transcript.
478
+ 2. Load the chat model.
479
+ 3. Record your message through your microphone.
480
+ 4. The AI will respond using the reference voice.
481
+ """
482
+ )
483
+
484
+ if not USING_SPACES:
485
+ load_chat_model_btn = gr.Button("Load Chat Model", variant="primary")
486
+
487
+ chat_interface_container = gr.Column(visible=False)
488
+
489
+ @gpu_decorator
490
+ def load_chat_model():
491
+ global chat_model_state, chat_tokenizer_state
492
+ if chat_model_state is None:
493
+ show_info = gr.Info
494
+ show_info("Loading chat model...")
495
+ model_name = "Qwen/Qwen2.5-3B-Instruct"
496
+ chat_model_state = AutoModelForCausalLM.from_pretrained(
497
+ model_name, torch_dtype="auto", device_map="auto"
498
+ )
499
+ chat_tokenizer_state = AutoTokenizer.from_pretrained(model_name)
500
+ show_info("Chat model loaded.")
501
+
502
+ return gr.update(visible=False), gr.update(visible=True)
503
+
504
+ load_chat_model_btn.click(load_chat_model, outputs=[load_chat_model_btn, chat_interface_container])
505
+
506
+ else:
507
+ chat_interface_container = gr.Column()
508
+
509
+ if chat_model_state is None:
510
+ model_name = "Qwen/Qwen2.5-3B-Instruct"
511
+ chat_model_state = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype="auto", device_map="auto")
512
+ chat_tokenizer_state = AutoTokenizer.from_pretrained(model_name)
513
+
514
+ with chat_interface_container:
515
+ with gr.Row():
516
+ with gr.Column():
517
+ ref_audio_chat = gr.Audio(label="Reference Audio", type="filepath")
518
+ with gr.Column():
519
+ with gr.Accordion("Advanced Settings", open=False):
520
+ model_choice_chat = gr.Radio(
521
+ choices=["F5-TTS", "E2-TTS"],
522
+ label="TTS Model",
523
+ value="F5-TTS",
524
+ )
525
+ remove_silence_chat = gr.Checkbox(
526
+ label="Remove Silences",
527
+ value=True,
528
+ )
529
+ ref_text_chat = gr.Textbox(
530
+ label="Reference Text",
531
+ info="Optional: Leave blank to auto-transcribe",
532
+ lines=2,
533
+ )
534
+ system_prompt_chat = gr.Textbox(
535
+ label="System Prompt",
536
+ value="You are not an AI assistant, you are whoever the user says you are. You must stay in character. Keep your responses concise since they will be spoken out loud.",
537
+ lines=2,
538
+ )
539
+
540
+ chatbot_interface = gr.Chatbot(label="Conversation")
541
+
542
+ with gr.Row():
543
+ with gr.Column():
544
+ audio_input_chat = gr.Microphone(
545
+ label="Speak your message",
546
+ type="filepath",
547
+ )
548
+ audio_output_chat = gr.Audio(autoplay=True)
549
+ with gr.Column():
550
+ text_input_chat = gr.Textbox(
551
+ label="Type your message",
552
+ lines=1,
553
+ )
554
+ send_btn_chat = gr.Button("Send")
555
+ clear_btn_chat = gr.Button("Clear Conversation")
556
+
557
+ conversation_state = gr.State(
558
+ value=[
559
+ {
560
+ "role": "system",
561
+ "content": "You are not an AI assistant, you are whoever the user says you are. You must stay in character. Keep your responses concise since they will be spoken out loud.",
562
+ }
563
+ ]
564
+ )
565
+
566
+ # Modify process_audio_input to use model and tokenizer from state
567
+ @gpu_decorator
568
+ def process_audio_input(audio_path, text, history, conv_state):
569
+ """Handle audio or text input from user"""
570
+
571
+ if not audio_path and not text.strip():
572
+ return history, conv_state, ""
573
+
574
+ if audio_path:
575
+ text = preprocess_ref_audio_text(audio_path, text)[1]
576
+
577
+ if not text.strip():
578
+ return history, conv_state, ""
579
+
580
+ conv_state.append({"role": "user", "content": text})
581
+ history.append((text, None))
582
+
583
+ response = generate_response(conv_state, chat_model_state, chat_tokenizer_state)
584
+
585
+ conv_state.append({"role": "assistant", "content": response})
586
+ history[-1] = (text, response)
587
+
588
+ return history, conv_state, ""
589
+
590
+ @gpu_decorator
591
+ def generate_audio_response(history, ref_audio, ref_text, model, remove_silence):
592
+ """Generate TTS audio for AI response"""
593
+ if not history or not ref_audio:
594
+ return None
595
+
596
+ last_user_message, last_ai_response = history[-1]
597
+ if not last_ai_response:
598
+ return None
599
+
600
+ audio_result, _ = infer(
601
+ ref_audio,
602
+ ref_text,
603
+ last_ai_response,
604
+ model,
605
+ remove_silence,
606
+ cross_fade_duration=0.15,
607
+ speed=1.0,
608
+ show_info=print,  # avoids pulling the page to the top while generating
609
+ )
610
+ return audio_result
611
+
612
+ def clear_conversation():
613
+ """Reset the conversation"""
614
+ return [], [
615
+ {
616
+ "role": "system",
617
+ "content": "You are not an AI assistant, you are whoever the user says you are. You must stay in character. Keep your responses concise since they will be spoken out loud.",
618
+ }
619
+ ]
620
+
621
+ def update_system_prompt(new_prompt):
622
+ """Update the system prompt and reset the conversation"""
623
+ new_conv_state = [{"role": "system", "content": new_prompt}]
624
+ return [], new_conv_state
625
+
626
+ # Handle audio input
627
+ audio_input_chat.stop_recording(
628
+ process_audio_input,
629
+ inputs=[audio_input_chat, text_input_chat, chatbot_interface, conversation_state],
630
+ outputs=[chatbot_interface, conversation_state],
631
+ ).then(
632
+ generate_audio_response,
633
+ inputs=[chatbot_interface, ref_audio_chat, ref_text_chat, model_choice_chat, remove_silence_chat],
634
+ outputs=[audio_output_chat],
635
+ ).then(
636
+ lambda: None,
637
+ None,
638
+ audio_input_chat,
639
+ )
640
+
641
+ # Handle text input
642
+ text_input_chat.submit(
643
+ process_audio_input,
644
+ inputs=[audio_input_chat, text_input_chat, chatbot_interface, conversation_state],
645
+ outputs=[chatbot_interface, conversation_state],
646
+ ).then(
647
+ generate_audio_response,
648
+ inputs=[chatbot_interface, ref_audio_chat, ref_text_chat, model_choice_chat, remove_silence_chat],
649
+ outputs=[audio_output_chat],
650
+ ).then(
651
+ lambda: None,
652
+ None,
653
+ text_input_chat,
654
+ )
655
+
656
+ # Handle send button
657
+ send_btn_chat.click(
658
+ process_audio_input,
659
+ inputs=[audio_input_chat, text_input_chat, chatbot_interface, conversation_state],
660
+ outputs=[chatbot_interface, conversation_state],
661
+ ).then(
662
+ generate_audio_response,
663
+ inputs=[chatbot_interface, ref_audio_chat, ref_text_chat, model_choice_chat, remove_silence_chat],
664
+ outputs=[audio_output_chat],
665
+ ).then(
666
+ lambda: None,
667
+ None,
668
+ text_input_chat,
669
+ )
670
+
671
+ # Handle clear button
672
+ clear_btn_chat.click(
673
+ clear_conversation,
674
+ outputs=[chatbot_interface, conversation_state],
675
+ )
676
+
677
+ # Handle system prompt change and reset conversation
678
+ system_prompt_chat.change(
679
+ update_system_prompt,
680
+ inputs=system_prompt_chat,
681
+ outputs=[chatbot_interface, conversation_state],
682
+ )
683
+
684
+
685
+ with gr.Blocks() as app:
686
+ gr.Markdown(
687
+ """
688
+ # E2/F5 TTS
689
+
690
+ This is a local web UI for F5 TTS with advanced batch processing support. This app supports the following TTS models:
691
+
692
+ * [F5-TTS](https://arxiv.org/abs/2410.06885) (A Fairytaler that Fakes Fluent and Faithful Speech with Flow Matching)
693
+ * [E2 TTS](https://arxiv.org/abs/2406.18009) (Embarrassingly Easy Fully Non-Autoregressive Zero-Shot TTS)
694
+
695
+ The checkpoints support English and Chinese.
696
+
697
+ If you're having issues, try converting your reference audio to WAV or MP3, clipping it to 15s, and shortening your prompt.
698
+
699
+ **NOTE: Reference text will be automatically transcribed with Whisper if not provided. For best results, keep your reference clips short (<15s). Ensure the audio is fully uploaded before generating.**
700
+ """
701
+ )
702
+ gr.TabbedInterface(
703
+ [app_tts, app_multistyle, app_chat, app_credits],
704
+ ["TTS", "Multi-Speech", "Voice-Chat", "Credits"],
705
+ )
706
+
707
+
708
+ @click.command()
709
+ @click.option("--port", "-p", default=None, type=int, help="Port to run the app on")
710
+ @click.option("--host", "-H", default=None, help="Host to run the app on")
711
+ @click.option(
712
+ "--share",
713
+ "-s",
714
+ default=False,
715
+ is_flag=True,
716
+ help="Share the app via Gradio share link",
717
+ )
718
+ @click.option("--api", "-a", default=True, is_flag=True, help="Allow API access")
719
+ def main(port, host, share, api):
720
+ global app
721
+ print("Starting app...")
722
+ app.queue(api_open=api).launch(server_name=host, server_port=port, share=share, show_api=api)
723
+
724
+
725
+ if __name__ == "__main__":
726
+ if not USING_SPACES:
727
+ main()
728
+ else:
729
+ app.queue().launch()
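For completeness, a hedged sketch of launching this UI programmatically instead of through the click entry point; it assumes the package is importable as `f5_tts`, and importing the module downloads the vocoder and both model checkpoints at import time.

```python
# Sketch: launch the Gradio Blocks app defined above on a fixed host/port.
# Importing infer_gradio triggers the vocoder/model downloads at import time.
from f5_tts.infer.infer_gradio import app

if __name__ == "__main__":
    app.queue(api_open=True).launch(server_name="127.0.0.1", server_port=7860, share=False, show_api=True)
```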
src/f5_tts/infer/speech_edit.py ADDED
@@ -0,0 +1,191 @@
1
+ import os
2
+
3
+ import torch
4
+ import torch.nn.functional as F
5
+ import torchaudio
6
+ from vocos import Vocos
7
+
8
+ from f5_tts.model import CFM, UNetT, DiT
9
+ from f5_tts.model.utils import (
10
+ get_tokenizer,
11
+ convert_char_to_pinyin,
12
+ )
13
+ from f5_tts.infer.utils_infer import (
14
+ load_checkpoint,
15
+ save_spectrogram,
16
+ )
17
+
18
+ device = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
19
+
20
+
21
+ # --------------------- Dataset Settings -------------------- #
22
+
23
+ target_sample_rate = 24000
24
+ n_mel_channels = 100
25
+ hop_length = 256
26
+ target_rms = 0.1
27
+
28
+ tokenizer = "pinyin"
29
+ dataset_name = "Emilia_ZH_EN"
30
+
31
+
32
+ # ---------------------- infer setting ---------------------- #
33
+
34
+ seed = None # int | None
35
+
36
+ exp_name = "F5TTS_Base" # F5TTS_Base | E2TTS_Base
37
+ ckpt_step = 1200000
38
+
39
+ nfe_step = 32 # 16, 32
40
+ cfg_strength = 2.0
41
+ ode_method = "euler" # euler | midpoint
42
+ sway_sampling_coef = -1.0
43
+ speed = 1.0
44
+
45
+ if exp_name == "F5TTS_Base":
46
+ model_cls = DiT
47
+ model_cfg = dict(dim=1024, depth=22, heads=16, ff_mult=2, text_dim=512, conv_layers=4)
48
+
49
+ elif exp_name == "E2TTS_Base":
50
+ model_cls = UNetT
51
+ model_cfg = dict(dim=1024, depth=24, heads=16, ff_mult=4)
52
+
53
+ ckpt_path = f"ckpts/{exp_name}/model_{ckpt_step}.safetensors"
54
+ output_dir = "tests"
55
+
56
+ # [leverage https://github.com/MahmoudAshraf97/ctc-forced-aligner to get char level alignment]
57
+ # pip install git+https://github.com/MahmoudAshraf97/ctc-forced-aligner.git
58
+ # [write the origin_text into a file, e.g. tests/test_edit.txt]
59
+ # ctc-forced-aligner --audio_path "src/f5_tts/infer/examples/basic/basic_ref_en.wav" --text_path "tests/test_edit.txt" --language "zho" --romanize --split_size "char"
60
+ # [result will be saved at same path of audio file]
61
+ # [--language "zho" for Chinese, "eng" for English]
62
+ # [if local ckpt, set --alignment_model "../checkpoints/mms-300m-1130-forced-aligner"]
63
+
64
+ audio_to_edit = "src/f5_tts/infer/examples/basic/basic_ref_en.wav"
65
+ origin_text = "Some call me nature, others call me mother nature."
66
+ target_text = "Some call me optimist, others call me realist."
67
+ parts_to_edit = [
68
+ [1.42, 2.44],
69
+ [4.04, 4.9],
70
+ ] # start/end times of "nature" & "mother nature", in seconds
71
+ fix_duration = [
72
+ 1.2,
73
+ 1,
74
+ ] # fix duration for "optimist" & "realist", in seconds
75
+
76
+ # audio_to_edit = "src/f5_tts/infer/examples/basic/basic_ref_zh.wav"
77
+ # origin_text = "对,这就是我,万人敬仰的太乙真人。"
78
+ # target_text = "对,那就是你,万人敬仰的太白金星。"
79
+ # parts_to_edit = [[0.84, 1.4], [1.92, 2.4], [4.26, 6.26], ]
80
+ # fix_duration = None # use origin text duration
81
+
82
+
83
+ # -------------------------------------------------#
84
+
85
+ use_ema = True
86
+
87
+ if not os.path.exists(output_dir):
88
+ os.makedirs(output_dir)
89
+
90
+ # Vocoder model
91
+ local = False
92
+ if local:
93
+ vocos_local_path = "../checkpoints/charactr/vocos-mel-24khz"
94
+ vocos = Vocos.from_hparams(f"{vocos_local_path}/config.yaml")
95
+ state_dict = torch.load(f"{vocos_local_path}/pytorch_model.bin", weights_only=True, map_location=device)
96
+ vocos.load_state_dict(state_dict)
97
+
98
+ vocos.eval()
99
+ else:
100
+ vocos = Vocos.from_pretrained("charactr/vocos-mel-24khz")
101
+
102
+ # Tokenizer
103
+ vocab_char_map, vocab_size = get_tokenizer(dataset_name, tokenizer)
104
+
105
+ # Model
106
+ model = CFM(
107
+ transformer=model_cls(**model_cfg, text_num_embeds=vocab_size, mel_dim=n_mel_channels),
108
+ mel_spec_kwargs=dict(
109
+ target_sample_rate=target_sample_rate,
110
+ n_mel_channels=n_mel_channels,
111
+ hop_length=hop_length,
112
+ ),
113
+ odeint_kwargs=dict(
114
+ method=ode_method,
115
+ ),
116
+ vocab_char_map=vocab_char_map,
117
+ ).to(device)
118
+
119
+ model = load_checkpoint(model, ckpt_path, device, use_ema=use_ema)
120
+
121
+ # Audio
122
+ audio, sr = torchaudio.load(audio_to_edit)
123
+ if audio.shape[0] > 1:
124
+ audio = torch.mean(audio, dim=0, keepdim=True)
125
+ rms = torch.sqrt(torch.mean(torch.square(audio)))
126
+ if rms < target_rms:
127
+ audio = audio * target_rms / rms
128
+ if sr != target_sample_rate:
129
+ resampler = torchaudio.transforms.Resample(sr, target_sample_rate)
130
+ audio = resampler(audio)
131
+ offset = 0
132
+ audio_ = torch.zeros(1, 0)
133
+ edit_mask = torch.zeros(1, 0, dtype=torch.bool)
134
+ for part in parts_to_edit:
135
+ start, end = part
136
+ part_dur = end - start if fix_duration is None else fix_duration.pop(0)
137
+ part_dur = part_dur * target_sample_rate
138
+ start = start * target_sample_rate
139
+ audio_ = torch.cat((audio_, audio[:, round(offset) : round(start)], torch.zeros(1, round(part_dur))), dim=-1)
140
+ edit_mask = torch.cat(
141
+ (
142
+ edit_mask,
143
+ torch.ones(1, round((start - offset) / hop_length), dtype=torch.bool),
144
+ torch.zeros(1, round(part_dur / hop_length), dtype=torch.bool),
145
+ ),
146
+ dim=-1,
147
+ )
148
+ offset = end * target_sample_rate
149
+ # audio = torch.cat((audio_, audio[:, round(offset):]), dim = -1)
150
+ edit_mask = F.pad(edit_mask, (0, audio.shape[-1] // hop_length - edit_mask.shape[-1] + 1), value=True)
151
+ audio = audio.to(device)
152
+ edit_mask = edit_mask.to(device)
153
+
154
+ # Text
155
+ text_list = [target_text]
156
+ if tokenizer == "pinyin":
157
+ final_text_list = convert_char_to_pinyin(text_list)
158
+ else:
159
+ final_text_list = [text_list]
160
+ print(f"text : {text_list}")
161
+ print(f"pinyin: {final_text_list}")
162
+
163
+ # Duration
164
+ ref_audio_len = 0
165
+ duration = audio.shape[-1] // hop_length
166
+
167
+ # Inference
168
+ with torch.inference_mode():
169
+ generated, trajectory = model.sample(
170
+ cond=audio,
171
+ text=final_text_list,
172
+ duration=duration,
173
+ steps=nfe_step,
174
+ cfg_strength=cfg_strength,
175
+ sway_sampling_coef=sway_sampling_coef,
176
+ seed=seed,
177
+ edit_mask=edit_mask,
178
+ )
179
+ print(f"Generated mel: {generated.shape}")
180
+
181
+ # Final result
182
+ generated = generated.to(torch.float32)
183
+ generated = generated[:, ref_audio_len:, :]
184
+ generated_mel_spec = generated.permute(0, 2, 1)
185
+ generated_wave = vocos.decode(generated_mel_spec.cpu())
186
+ if rms < target_rms:
187
+ generated_wave = generated_wave * rms / target_rms
188
+
189
+ save_spectrogram(generated_mel_spec[0].cpu().numpy(), f"{output_dir}/speech_edit_out.png")
190
+ torchaudio.save(f"{output_dir}/speech_edit_out.wav", generated_wave, target_sample_rate)
191
+ print(f"Generated wav: {generated_wave.shape}")
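Since the edit regions above are specified in seconds while the model works in mel frames, a small helper makes that conversion explicit. This is an illustrative sketch using the same constants as the script (24 kHz sample rate, hop length 256), not code from the repository.

```python
# Illustrative seconds-to-mel-frames conversion with the constants used above.
target_sample_rate = 24000
hop_length = 256


def seconds_to_frames(seconds: float) -> int:
    return round(seconds * target_sample_rate / hop_length)


print(seconds_to_frames(1.2))  # -> 112 frames for the 1.2 s "optimist" slot
print(seconds_to_frames(1.0))  # -> 94 frames for the 1.0 s "realist" slot
```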
src/f5_tts/infer/utils_infer.py ADDED
@@ -0,0 +1,439 @@
1
+ # A unified script for inference process
2
+ # Make adjustments inside the functions; if a function's output format changes, update both the gradio and cli scripts accordingly
3
+
4
+ import hashlib
5
+ import re
6
+ import tempfile
7
+ from importlib.resources import files
8
+
9
+ import matplotlib
10
+
11
+ matplotlib.use("Agg")
12
+
13
+ import matplotlib.pylab as plt
14
+ import numpy as np
15
+ import torch
16
+ import torchaudio
17
+ import tqdm
18
+ from pydub import AudioSegment, silence
19
+ from transformers import pipeline
20
+ from vocos import Vocos
21
+
22
+ from f5_tts.model import CFM
23
+ from f5_tts.model.utils import (
24
+ get_tokenizer,
25
+ convert_char_to_pinyin,
26
+ )
27
+
28
+ _ref_audio_cache = {}
29
+
30
+ device = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
31
+
32
+ vocos = Vocos.from_pretrained("charactr/vocos-mel-24khz")
33
+
34
+
35
+ # -----------------------------------------
36
+
37
+ target_sample_rate = 24000
38
+ n_mel_channels = 100
39
+ hop_length = 256
40
+ target_rms = 0.1
41
+ cross_fade_duration = 0.15
42
+ ode_method = "euler"
43
+ nfe_step = 32 # 16, 32
44
+ cfg_strength = 2.0
45
+ sway_sampling_coef = -1.0
46
+ speed = 1.0
47
+ fix_duration = None
48
+
49
+ # -----------------------------------------
50
+
51
+
52
+ # chunk text into smaller pieces
53
+
54
+
55
+ def chunk_text(text, max_chars=135):
56
+ """
57
+ Splits the input text into chunks, each with a maximum number of characters.
58
+
59
+ Args:
60
+ text (str): The text to be split.
61
+ max_chars (int): The maximum number of characters per chunk.
62
+
63
+ Returns:
64
+ List[str]: A list of text chunks.
65
+ """
66
+ chunks = []
67
+ current_chunk = ""
68
+ # Split the text into sentences based on punctuation followed by whitespace
69
+ sentences = re.split(r"(?<=[;:,.!?])\s+|(?<=[;:,。!?])", text)
70
+
71
+ for sentence in sentences:
72
+ if len(current_chunk.encode("utf-8")) + len(sentence.encode("utf-8")) <= max_chars:
73
+ current_chunk += sentence + " " if sentence and len(sentence[-1].encode("utf-8")) == 1 else sentence
74
+ else:
75
+ if current_chunk:
76
+ chunks.append(current_chunk.strip())
77
+ current_chunk = sentence + " " if sentence and len(sentence[-1].encode("utf-8")) == 1 else sentence
78
+
79
+ if current_chunk:
80
+ chunks.append(current_chunk.strip())
81
+
82
+ return chunks
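+ # Illustrative behavior with a small limit: chunk_text("First sentence. Second, longer sentence!", max_chars=20)
+ # returns ["First sentence.", "Second,", "longer sentence!"]; splits occur after sentence
+ # punctuation and each chunk is capped by UTF-8 byte length, so CJK text reaches the cap sooner.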
83
+
84
+
85
+ # load vocoder
86
+ def load_vocoder(is_local=False, local_path="", device=device):
87
+ if is_local:
88
+ print(f"Load vocos from local path {local_path}")
89
+ vocos = Vocos.from_hparams(f"{local_path}/config.yaml")
90
+ state_dict = torch.load(f"{local_path}/pytorch_model.bin", map_location=device)
91
+ vocos.load_state_dict(state_dict)
92
+ vocos.eval()
93
+ else:
94
+ print("Download Vocos from huggingface charactr/vocos-mel-24khz")
95
+ vocos = Vocos.from_pretrained("charactr/vocos-mel-24khz")
96
+ return vocos
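+ # load_vocoder() downloads charactr/vocos-mel-24khz from the Hub;
+ # load_vocoder(is_local=True, local_path="../checkpoints/charactr/vocos-mel-24khz") reuses a local copy.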
97
+
98
+
99
+ # load asr pipeline
100
+
101
+ asr_pipe = None
102
+
103
+
104
+ def initialize_asr_pipeline(device=device):
105
+ global asr_pipe
106
+ asr_pipe = pipeline(
107
+ "automatic-speech-recognition",
108
+ model="openai/whisper-large-v3-turbo",
109
+ torch_dtype=torch.float16,
110
+ device=device,
111
+ )
112
+
113
+
114
+ # load model checkpoint for inference
115
+
116
+
117
+ def load_checkpoint(model, ckpt_path, device, use_ema=True):
118
+ if device == "cuda":
119
+ model = model.half()
120
+
121
+ ckpt_type = ckpt_path.split(".")[-1]
122
+ if ckpt_type == "safetensors":
123
+ from safetensors.torch import load_file
124
+
125
+ checkpoint = load_file(ckpt_path)
126
+ else:
127
+ checkpoint = torch.load(ckpt_path, weights_only=True)
128
+
129
+ if use_ema:
130
+ if ckpt_type == "safetensors":
131
+ checkpoint = {"ema_model_state_dict": checkpoint}
132
+ checkpoint["model_state_dict"] = {
133
+ k.replace("ema_model.", ""): v
134
+ for k, v in checkpoint["ema_model_state_dict"].items()
135
+ if k not in ["initted", "step"]
136
+ }
137
+ model.load_state_dict(checkpoint["model_state_dict"])
138
+ else:
139
+ if ckpt_type == "safetensors":
140
+ checkpoint = {"model_state_dict": checkpoint}
141
+ model.load_state_dict(checkpoint["model_state_dict"])
142
+
143
+ return model.to(device)
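+ # e.g. load_checkpoint(model, "ckpts/F5TTS_Base/model_1200000.safetensors", "cuda", use_ema=True)
+ # halves the model on CUDA and strips the "ema_model." key prefix so EMA weights load into it.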
144
+
145
+
146
+ # load model for inference
147
+
148
+
149
+ def load_model(model_cls, model_cfg, ckpt_path, vocab_file="", ode_method=ode_method, use_ema=True, device=device):
150
+ if vocab_file == "":
151
+ vocab_file = str(files("f5_tts").joinpath("infer/examples/vocab.txt"))
152
+ tokenizer = "custom"
153
+
154
+ print("\nvocab : ", vocab_file)
155
+ print("tokenizer : ", tokenizer)
156
+ print("model : ", ckpt_path, "\n")
157
+
158
+ vocab_char_map, vocab_size = get_tokenizer(vocab_file, tokenizer)
159
+ model = CFM(
160
+ transformer=model_cls(**model_cfg, text_num_embeds=vocab_size, mel_dim=n_mel_channels),
161
+ mel_spec_kwargs=dict(
162
+ target_sample_rate=target_sample_rate,
163
+ n_mel_channels=n_mel_channels,
164
+ hop_length=hop_length,
165
+ ),
166
+ odeint_kwargs=dict(
167
+ method=ode_method,
168
+ ),
169
+ vocab_char_map=vocab_char_map,
170
+ ).to(device)
171
+
172
+ model = load_checkpoint(model, ckpt_path, device, use_ema=use_ema)
173
+
174
+ return model
175
+
176
+
177
+ # preprocess reference audio and text
178
+
179
+
180
+ def preprocess_ref_audio_text(ref_audio_orig, ref_text, clip_short=True, show_info=print, device=device):
181
+ show_info("Converting audio...")
182
+ with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as f:
183
+ aseg = AudioSegment.from_file(ref_audio_orig)
184
+
185
+ if clip_short:
186
+ # 1. try to find long silence for clipping
187
+ non_silent_segs = silence.split_on_silence(
188
+ aseg, min_silence_len=1000, silence_thresh=-50, keep_silence=1000
189
+ )
190
+ non_silent_wave = AudioSegment.silent(duration=0)
191
+ for non_silent_seg in non_silent_segs:
192
+ if len(non_silent_wave) > 6000 and len(non_silent_wave + non_silent_seg) > 15000:
193
+ show_info("Audio is over 15s, clipping short. (1)")
194
+ break
195
+ non_silent_wave += non_silent_seg
196
+
197
+ # 2. try to find short silence for clipping if 1. failed
198
+ if len(non_silent_wave) > 15000:
199
+ non_silent_segs = silence.split_on_silence(
200
+ aseg, min_silence_len=100, silence_thresh=-40, keep_silence=1000
201
+ )
202
+ non_silent_wave = AudioSegment.silent(duration=0)
203
+ for non_silent_seg in non_silent_segs:
204
+ if len(non_silent_wave) > 6000 and len(non_silent_wave + non_silent_seg) > 15000:
205
+ show_info("Audio is over 15s, clipping short. (2)")
206
+ break
207
+ non_silent_wave += non_silent_seg
208
+
209
+ aseg = non_silent_wave
210
+
211
+ # 3. if no proper silence found for clipping
212
+ if len(aseg) > 15000:
213
+ aseg = aseg[:15000]
214
+ show_info("Audio is over 15s, clipping short. (3)")
215
+
216
+ aseg.export(f.name, format="wav")
217
+ ref_audio = f.name
218
+
219
+ # Compute a hash of the reference audio file
220
+ with open(ref_audio, "rb") as audio_file:
221
+ audio_data = audio_file.read()
222
+ audio_hash = hashlib.md5(audio_data).hexdigest()
223
+
224
+ global _ref_audio_cache
225
+ if audio_hash in _ref_audio_cache:
226
+ # Use cached reference text
227
+ show_info("Using cached reference text...")
228
+ ref_text = _ref_audio_cache[audio_hash]
229
+ else:
230
+ if not ref_text.strip():
231
+ global asr_pipe
232
+ if asr_pipe is None:
233
+ initialize_asr_pipeline(device=device)
234
+ show_info("No reference text provided, transcribing reference audio...")
235
+ ref_text = asr_pipe(
236
+ ref_audio,
237
+ chunk_length_s=30,
238
+ batch_size=128,
239
+ generate_kwargs={"task": "transcribe"},
240
+ return_timestamps=False,
241
+ )["text"].strip()
242
+ show_info("Finished transcription")
243
+ else:
244
+ show_info("Using custom reference text...")
245
+ # Cache the transcribed text
246
+ _ref_audio_cache[audio_hash] = ref_text
247
+
248
+ # Ensure ref_text ends with a proper sentence-ending punctuation
249
+ if not ref_text.endswith(". ") and not ref_text.endswith("。"):
250
+ if ref_text.endswith("."):
251
+ ref_text += " "
252
+ else:
253
+ ref_text += ". "
254
+
255
+ return ref_audio, ref_text
256
+
257
+
258
+ # infer process: chunk text -> infer batches [i.e. infer_batch_process()]
259
+
260
+
261
+ def infer_process(
262
+ ref_audio,
263
+ ref_text,
264
+ gen_text,
265
+ model_obj,
266
+ show_info=print,
267
+ progress=tqdm,
268
+ target_rms=target_rms,
269
+ cross_fade_duration=cross_fade_duration,
270
+ nfe_step=nfe_step,
271
+ cfg_strength=cfg_strength,
272
+ sway_sampling_coef=sway_sampling_coef,
273
+ speed=speed,
274
+ fix_duration=fix_duration,
275
+ device=device,
276
+ ):
277
+ # Split the input text into batches
278
+ audio, sr = torchaudio.load(ref_audio)
279
+ max_chars = int(len(ref_text.encode("utf-8")) / (audio.shape[-1] / sr) * (25 - audio.shape[-1] / sr))
280
+ gen_text_batches = chunk_text(gen_text, max_chars=max_chars)
281
+ for i, gen_text in enumerate(gen_text_batches):
282
+ print(f"gen_text {i}", gen_text)
283
+
284
+ show_info(f"Generating audio in {len(gen_text_batches)} batches...")
285
+ return infer_batch_process(
286
+ (audio, sr),
287
+ ref_text,
288
+ gen_text_batches,
289
+ model_obj,
290
+ progress=progress,
291
+ target_rms=target_rms,
292
+ cross_fade_duration=cross_fade_duration,
293
+ nfe_step=nfe_step,
294
+ cfg_strength=cfg_strength,
295
+ sway_sampling_coef=sway_sampling_coef,
296
+ speed=speed,
297
+ fix_duration=fix_duration,
298
+ device=device,
299
+ )
300
+
301
+
302
+ # infer batches
303
+
304
+
305
+ def infer_batch_process(
306
+ ref_audio,
307
+ ref_text,
308
+ gen_text_batches,
309
+ model_obj,
310
+ progress=tqdm,
311
+ target_rms=0.1,
312
+ cross_fade_duration=0.15,
313
+ nfe_step=32,
314
+ cfg_strength=2.0,
315
+ sway_sampling_coef=-1,
316
+ speed=1,
317
+ fix_duration=None,
318
+ device=None,
319
+ ):
320
+ audio, sr = ref_audio
321
+ if audio.shape[0] > 1:
322
+ audio = torch.mean(audio, dim=0, keepdim=True)
323
+
324
+ rms = torch.sqrt(torch.mean(torch.square(audio)))
325
+ if rms < target_rms:
326
+ audio = audio * target_rms / rms
327
+ if sr != target_sample_rate:
328
+ resampler = torchaudio.transforms.Resample(sr, target_sample_rate)
329
+ audio = resampler(audio)
330
+ audio = audio.to(device)
331
+
332
+ generated_waves = []
333
+ spectrograms = []
334
+
335
+ if len(ref_text[-1].encode("utf-8")) == 1:
336
+ ref_text = ref_text + " "
337
+ for i, gen_text in enumerate(progress.tqdm(gen_text_batches)):
338
+ # Prepare the text
339
+ text_list = [ref_text + gen_text]
340
+ final_text_list = convert_char_to_pinyin(text_list)
341
+
342
+ ref_audio_len = audio.shape[-1] // hop_length
343
+ if fix_duration is not None:
344
+ duration = int(fix_duration * target_sample_rate / hop_length)
345
+ else:
346
+ # Calculate duration
347
+ ref_text_len = len(ref_text.encode("utf-8"))
348
+ gen_text_len = len(gen_text.encode("utf-8"))
349
+ duration = ref_audio_len + int(ref_audio_len / ref_text_len * gen_text_len / speed)
350
+
351
+ # inference
352
+ with torch.inference_mode():
353
+ generated, _ = model_obj.sample(
354
+ cond=audio,
355
+ text=final_text_list,
356
+ duration=duration,
357
+ steps=nfe_step,
358
+ cfg_strength=cfg_strength,
359
+ sway_sampling_coef=sway_sampling_coef,
360
+ )
361
+
362
+ generated = generated.to(torch.float32)
363
+ generated = generated[:, ref_audio_len:, :]
364
+ generated_mel_spec = generated.permute(0, 2, 1)
365
+ generated_wave = vocos.decode(generated_mel_spec.cpu())
366
+ if rms < target_rms:
367
+ generated_wave = generated_wave * rms / target_rms
368
+
369
+ # wav -> numpy
370
+ generated_wave = generated_wave.squeeze().cpu().numpy()
371
+
372
+ generated_waves.append(generated_wave)
373
+ spectrograms.append(generated_mel_spec[0].cpu().numpy())
374
+
375
+ # Combine all generated waves with cross-fading
376
+ if cross_fade_duration <= 0:
377
+ # Simply concatenate
378
+ final_wave = np.concatenate(generated_waves)
379
+ else:
380
+ final_wave = generated_waves[0]
381
+ for i in range(1, len(generated_waves)):
382
+ prev_wave = final_wave
383
+ next_wave = generated_waves[i]
384
+
385
+ # Calculate cross-fade samples, ensuring it does not exceed wave lengths
386
+ cross_fade_samples = int(cross_fade_duration * target_sample_rate)
387
+ cross_fade_samples = min(cross_fade_samples, len(prev_wave), len(next_wave))
388
+
389
+ if cross_fade_samples <= 0:
390
+ # No overlap possible, concatenate
391
+ final_wave = np.concatenate([prev_wave, next_wave])
392
+ continue
393
+
394
+ # Overlapping parts
395
+ prev_overlap = prev_wave[-cross_fade_samples:]
396
+ next_overlap = next_wave[:cross_fade_samples]
397
+
398
+ # Fade out and fade in
399
+ fade_out = np.linspace(1, 0, cross_fade_samples)
400
+ fade_in = np.linspace(0, 1, cross_fade_samples)
401
+
402
+ # Cross-faded overlap
403
+ cross_faded_overlap = prev_overlap * fade_out + next_overlap * fade_in
404
+
405
+ # Combine
406
+ new_wave = np.concatenate(
407
+ [prev_wave[:-cross_fade_samples], cross_faded_overlap, next_wave[cross_fade_samples:]]
408
+ )
409
+
410
+ final_wave = new_wave
411
+
412
+ # Create a combined spectrogram
413
+ combined_spectrogram = np.concatenate(spectrograms, axis=1)
414
+
415
+ return final_wave, target_sample_rate, combined_spectrogram
416
+
417
+
418
+ # remove silence from generated wav
419
+
420
+
421
+ def remove_silence_for_generated_wav(filename):
422
+ aseg = AudioSegment.from_file(filename)
423
+ non_silent_segs = silence.split_on_silence(aseg, min_silence_len=1000, silence_thresh=-50, keep_silence=500)
424
+ non_silent_wave = AudioSegment.silent(duration=0)
425
+ for non_silent_seg in non_silent_segs:
426
+ non_silent_wave += non_silent_seg
427
+ aseg = non_silent_wave
428
+ aseg.export(filename, format="wav")
429
+
430
+
431
+ # save spectrogram
432
+
433
+
434
+ def save_spectrogram(spectrogram, path):
435
+ plt.figure(figsize=(12, 4))
436
+ plt.imshow(spectrogram, origin="lower", aspect="auto")
437
+ plt.colorbar()
438
+ plt.savefig(path)
439
+ plt.close()
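
Taken together, the helpers above form the full inference path: load a checkpoint, normalize the reference audio and text, then chunk the target text and synthesize batch by batch. A minimal usage sketch, assuming a DiT-based checkpoint; the checkpoint path, reference clip, and hyperparameters below are illustrative only:

```python
# Hedged sketch of chaining the utilities above; paths and model_cfg are assumptions.
import soundfile as sf

from f5_tts.model import DiT
from f5_tts.infer.utils_infer import load_model, preprocess_ref_audio_text, infer_process

model_cfg = dict(dim=1024, depth=22, heads=16, ff_mult=2, text_dim=512, conv_layers=4)
ema_model = load_model(DiT, model_cfg, "ckpts/F5TTS_Base/model_1200000.safetensors")

# clips/ref.wav is a placeholder; passing ref_text="" triggers ASR transcription
ref_audio, ref_text = preprocess_ref_audio_text("clips/ref.wav", "")
wave, sample_rate, _spec = infer_process(ref_audio, ref_text, "Hello from F5-TTS.", ema_model)

sf.write("out.wav", wave, sample_rate)
```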
src/f5_tts/model/__init__.py ADDED
@@ -0,0 +1,10 @@
1
+ from f5_tts.model.cfm import CFM
2
+
3
+ from f5_tts.model.backbones.unett import UNetT
4
+ from f5_tts.model.backbones.dit import DiT
5
+ from f5_tts.model.backbones.mmdit import MMDiT
6
+
7
+ from f5_tts.model.trainer import Trainer
8
+
9
+
10
+ __all__ = ["CFM", "UNetT", "DiT", "MMDiT", "Trainer"]
src/f5_tts/model/backbones/README.md ADDED
@@ -0,0 +1,20 @@
1
+ ## Backbones quick introduction
2
+
3
+
4
+ ### unett.py
5
+ - flat unet transformer
6
+ - structure same as in e2-tts & voicebox paper except using rotary pos emb
7
+ - update: allow possible abs pos emb & convnextv2 blocks for embedded text before concat
8
+
9
+ ### dit.py
10
+ - adaln-zero dit
11
+ - embedded timestep as condition
12
+ - concatenated noised_input + masked_cond + embedded_text, linear proj in
13
+ - possible abs pos emb & convnextv2 blocks for embedded text before concat
14
+ - possible long skip connection (first layer to last layer)
15
+
16
+ ### mmdit.py
17
+ - sd3 structure
18
+ - timestep as condition
19
+ - left stream: text embedded, with an abs pos emb applied
20
+ - right stream: masked_cond & noised_input concatenated, with the same conv pos emb as unett
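
For orientation, a rough instantiation sketch of the three backbones described above; the dims, depths, and other hyperparameters are illustrative assumptions, not values this README prescribes:

```python
# Illustrative construction of the three backbones; hyperparameters are assumptions.
from f5_tts.model import DiT, MMDiT, UNetT

dit = DiT(dim=1024, depth=22, heads=16, ff_mult=2, mel_dim=100,
          text_dim=512, conv_layers=4, long_skip_connection=False)

unett = UNetT(dim=1024, depth=24, heads=16, ff_mult=4, mel_dim=100,
              skip_connect_type="concat")   # flat transformer with U-Net style skips

mmdit = MMDiT(dim=1024, depth=22, heads=16, ff_mult=2,
              text_num_embeds=256, mel_dim=100)   # SD3-style dual-stream blocks
```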
src/f5_tts/model/backbones/dit.py ADDED
@@ -0,0 +1,163 @@
1
+ """
2
+ ein notation:
3
+ b - batch
4
+ n - sequence
5
+ nt - text sequence
6
+ nw - raw wave length
7
+ d - dimension
8
+ """
9
+
10
+ from __future__ import annotations
11
+
12
+ import torch
13
+ from torch import nn
14
+ import torch.nn.functional as F
15
+
16
+ from x_transformers.x_transformers import RotaryEmbedding
17
+
18
+ from f5_tts.model.modules import (
19
+ TimestepEmbedding,
20
+ ConvNeXtV2Block,
21
+ ConvPositionEmbedding,
22
+ DiTBlock,
23
+ AdaLayerNormZero_Final,
24
+ precompute_freqs_cis,
25
+ get_pos_embed_indices,
26
+ )
27
+
28
+
29
+ # Text embedding
30
+
31
+
32
+ class TextEmbedding(nn.Module):
33
+ def __init__(self, text_num_embeds, text_dim, conv_layers=0, conv_mult=2):
34
+ super().__init__()
35
+ self.text_embed = nn.Embedding(text_num_embeds + 1, text_dim) # use 0 as filler token
36
+
37
+ if conv_layers > 0:
38
+ self.extra_modeling = True
39
+ self.precompute_max_pos = 4096 # ~44s of 24khz audio
40
+ self.register_buffer("freqs_cis", precompute_freqs_cis(text_dim, self.precompute_max_pos), persistent=False)
41
+ self.text_blocks = nn.Sequential(
42
+ *[ConvNeXtV2Block(text_dim, text_dim * conv_mult) for _ in range(conv_layers)]
43
+ )
44
+ else:
45
+ self.extra_modeling = False
46
+
47
+ def forward(self, text: int["b nt"], seq_len, drop_text=False): # noqa: F722
48
+ text = text + 1 # use 0 as filler token. preprocess of batch pad -1, see list_str_to_idx()
49
+ text = text[:, :seq_len] # curtail if character tokens are more than the mel spec tokens
50
+ batch, text_len = text.shape[0], text.shape[1]
51
+ text = F.pad(text, (0, seq_len - text_len), value=0)
52
+
53
+ if drop_text: # cfg for text
54
+ text = torch.zeros_like(text)
55
+
56
+ text = self.text_embed(text) # b n -> b n d
57
+
58
+ # possible extra modeling
59
+ if self.extra_modeling:
60
+ # sinus pos emb
61
+ batch_start = torch.zeros((batch,), dtype=torch.long)
62
+ pos_idx = get_pos_embed_indices(batch_start, seq_len, max_pos=self.precompute_max_pos)
63
+ text_pos_embed = self.freqs_cis[pos_idx]
64
+ text = text + text_pos_embed
65
+
66
+ # convnextv2 blocks
67
+ text = self.text_blocks(text)
68
+
69
+ return text
70
+
71
+
72
+ # noised input audio and context mixing embedding
73
+
74
+
75
+ class InputEmbedding(nn.Module):
76
+ def __init__(self, mel_dim, text_dim, out_dim):
77
+ super().__init__()
78
+ self.proj = nn.Linear(mel_dim * 2 + text_dim, out_dim)
79
+ self.conv_pos_embed = ConvPositionEmbedding(dim=out_dim)
80
+
81
+ def forward(self, x: float["b n d"], cond: float["b n d"], text_embed: float["b n d"], drop_audio_cond=False): # noqa: F722
82
+ if drop_audio_cond: # cfg for cond audio
83
+ cond = torch.zeros_like(cond)
84
+
85
+ x = self.proj(torch.cat((x, cond, text_embed), dim=-1))
86
+ x = self.conv_pos_embed(x) + x
87
+ return x
88
+
89
+
90
+ # Transformer backbone using DiT blocks
91
+
92
+
93
+ class DiT(nn.Module):
94
+ def __init__(
95
+ self,
96
+ *,
97
+ dim,
98
+ depth=8,
99
+ heads=8,
100
+ dim_head=64,
101
+ dropout=0.1,
102
+ ff_mult=4,
103
+ mel_dim=100,
104
+ text_num_embeds=256,
105
+ text_dim=None,
106
+ conv_layers=0,
107
+ long_skip_connection=False,
108
+ ):
109
+ super().__init__()
110
+
111
+ self.time_embed = TimestepEmbedding(dim)
112
+ if text_dim is None:
113
+ text_dim = mel_dim
114
+ self.text_embed = TextEmbedding(text_num_embeds, text_dim, conv_layers=conv_layers)
115
+ self.input_embed = InputEmbedding(mel_dim, text_dim, dim)
116
+
117
+ self.rotary_embed = RotaryEmbedding(dim_head)
118
+
119
+ self.dim = dim
120
+ self.depth = depth
121
+
122
+ self.transformer_blocks = nn.ModuleList(
123
+ [DiTBlock(dim=dim, heads=heads, dim_head=dim_head, ff_mult=ff_mult, dropout=dropout) for _ in range(depth)]
124
+ )
125
+ self.long_skip_connection = nn.Linear(dim * 2, dim, bias=False) if long_skip_connection else None
126
+
127
+ self.norm_out = AdaLayerNormZero_Final(dim) # final modulation
128
+ self.proj_out = nn.Linear(dim, mel_dim)
129
+
130
+ def forward(
131
+ self,
132
+ x: float["b n d"], # noised input audio # noqa: F722
133
+ cond: float["b n d"], # masked cond audio # noqa: F722
134
+ text: int["b nt"], # text # noqa: F722
135
+ time: float["b"] | float[""], # time step # noqa: F821 F722
136
+ drop_audio_cond, # cfg for cond audio
137
+ drop_text, # cfg for text
138
+ mask: bool["b n"] | None = None, # noqa: F722
139
+ ):
140
+ batch, seq_len = x.shape[0], x.shape[1]
141
+ if time.ndim == 0:
142
+ time = time.repeat(batch)
143
+
144
+ # t: conditioning time, c: context (text + masked cond audio), x: noised input audio
145
+ t = self.time_embed(time)
146
+ text_embed = self.text_embed(text, seq_len, drop_text=drop_text)
147
+ x = self.input_embed(x, cond, text_embed, drop_audio_cond=drop_audio_cond)
148
+
149
+ rope = self.rotary_embed.forward_from_seq_len(seq_len)
150
+
151
+ if self.long_skip_connection is not None:
152
+ residual = x
153
+
154
+ for block in self.transformer_blocks:
155
+ x = block(x, t, mask=mask, rope=rope)
156
+
157
+ if self.long_skip_connection is not None:
158
+ x = self.long_skip_connection(torch.cat((x, residual), dim=-1))
159
+
160
+ x = self.norm_out(x, t)
161
+ output = self.proj_out(x)
162
+
163
+ return output
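
As a quick interface check for the class above, a hedged shape walk-through of `DiT.forward`; all sizes are arbitrary assumptions:

```python
# Shape sketch for DiT.forward; batch, sequence, and model sizes are assumptions.
import torch

from f5_tts.model import DiT

model = DiT(dim=256, depth=4, heads=4, mel_dim=100, text_num_embeds=256, conv_layers=2)

b, n, nt = 2, 120, 40
x = torch.randn(b, n, 100)               # noised mel frames
cond = torch.randn(b, n, 100)            # masked conditioning mel
text = torch.randint(0, 256, (b, nt))    # character token ids (batch padding uses -1 in training)
time = torch.rand(b)                     # flow-matching time step per sample

out = model(x=x, cond=cond, text=text, time=time, drop_audio_cond=False, drop_text=False)
print(out.shape)  # torch.Size([2, 120, 100]), the predicted flow in mel space
```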
src/f5_tts/model/backbones/mmdit.py ADDED
@@ -0,0 +1,146 @@
1
+ """
2
+ ein notation:
3
+ b - batch
4
+ n - sequence
5
+ nt - text sequence
6
+ nw - raw wave length
7
+ d - dimension
8
+ """
9
+
10
+ from __future__ import annotations
11
+
12
+ import torch
13
+ from torch import nn
14
+
15
+ from x_transformers.x_transformers import RotaryEmbedding
16
+
17
+ from f5_tts.model.modules import (
18
+ TimestepEmbedding,
19
+ ConvPositionEmbedding,
20
+ MMDiTBlock,
21
+ AdaLayerNormZero_Final,
22
+ precompute_freqs_cis,
23
+ get_pos_embed_indices,
24
+ )
25
+
26
+
27
+ # text embedding
28
+
29
+
30
+ class TextEmbedding(nn.Module):
31
+ def __init__(self, out_dim, text_num_embeds):
32
+ super().__init__()
33
+ self.text_embed = nn.Embedding(text_num_embeds + 1, out_dim) # will use 0 as filler token
34
+
35
+ self.precompute_max_pos = 1024
36
+ self.register_buffer("freqs_cis", precompute_freqs_cis(out_dim, self.precompute_max_pos), persistent=False)
37
+
38
+ def forward(self, text: int["b nt"], drop_text=False) -> int["b nt d"]: # noqa: F722
39
+ text = text + 1
40
+ if drop_text:
41
+ text = torch.zeros_like(text)
42
+ text = self.text_embed(text)
43
+
44
+ # sinus pos emb
45
+ batch_start = torch.zeros((text.shape[0],), dtype=torch.long)
46
+ batch_text_len = text.shape[1]
47
+ pos_idx = get_pos_embed_indices(batch_start, batch_text_len, max_pos=self.precompute_max_pos)
48
+ text_pos_embed = self.freqs_cis[pos_idx]
49
+
50
+ text = text + text_pos_embed
51
+
52
+ return text
53
+
54
+
55
+ # noised input & masked cond audio embedding
56
+
57
+
58
+ class AudioEmbedding(nn.Module):
59
+ def __init__(self, in_dim, out_dim):
60
+ super().__init__()
61
+ self.linear = nn.Linear(2 * in_dim, out_dim)
62
+ self.conv_pos_embed = ConvPositionEmbedding(out_dim)
63
+
64
+ def forward(self, x: float["b n d"], cond: float["b n d"], drop_audio_cond=False): # noqa: F722
65
+ if drop_audio_cond:
66
+ cond = torch.zeros_like(cond)
67
+ x = torch.cat((x, cond), dim=-1)
68
+ x = self.linear(x)
69
+ x = self.conv_pos_embed(x) + x
70
+ return x
71
+
72
+
73
+ # Transformer backbone using MM-DiT blocks
74
+
75
+
76
+ class MMDiT(nn.Module):
77
+ def __init__(
78
+ self,
79
+ *,
80
+ dim,
81
+ depth=8,
82
+ heads=8,
83
+ dim_head=64,
84
+ dropout=0.1,
85
+ ff_mult=4,
86
+ text_num_embeds=256,
87
+ mel_dim=100,
88
+ ):
89
+ super().__init__()
90
+
91
+ self.time_embed = TimestepEmbedding(dim)
92
+ self.text_embed = TextEmbedding(dim, text_num_embeds)
93
+ self.audio_embed = AudioEmbedding(mel_dim, dim)
94
+
95
+ self.rotary_embed = RotaryEmbedding(dim_head)
96
+
97
+ self.dim = dim
98
+ self.depth = depth
99
+
100
+ self.transformer_blocks = nn.ModuleList(
101
+ [
102
+ MMDiTBlock(
103
+ dim=dim,
104
+ heads=heads,
105
+ dim_head=dim_head,
106
+ dropout=dropout,
107
+ ff_mult=ff_mult,
108
+ context_pre_only=i == depth - 1,
109
+ )
110
+ for i in range(depth)
111
+ ]
112
+ )
113
+ self.norm_out = AdaLayerNormZero_Final(dim) # final modulation
114
+ self.proj_out = nn.Linear(dim, mel_dim)
115
+
116
+ def forward(
117
+ self,
118
+ x: float["b n d"], # noised input audio # noqa: F722
119
+ cond: float["b n d"], # masked cond audio # noqa: F722
120
+ text: int["b nt"], # text # noqa: F722
121
+ time: float["b"] | float[""], # time step # noqa: F821 F722
122
+ drop_audio_cond, # cfg for cond audio
123
+ drop_text, # cfg for text
124
+ mask: bool["b n"] | None = None, # noqa: F722
125
+ ):
126
+ batch = x.shape[0]
127
+ if time.ndim == 0:
128
+ time = time.repeat(batch)
129
+
130
+ # t: conditioning (time), c: context (text + masked cond audio), x: noised input audio
131
+ t = self.time_embed(time)
132
+ c = self.text_embed(text, drop_text=drop_text)
133
+ x = self.audio_embed(x, cond, drop_audio_cond=drop_audio_cond)
134
+
135
+ seq_len = x.shape[1]
136
+ text_len = text.shape[1]
137
+ rope_audio = self.rotary_embed.forward_from_seq_len(seq_len)
138
+ rope_text = self.rotary_embed.forward_from_seq_len(text_len)
139
+
140
+ for block in self.transformer_blocks:
141
+ c, x = block(x, c, t, mask=mask, rope=rope_audio, c_rope=rope_text)
142
+
143
+ x = self.norm_out(x, t)
144
+ output = self.proj_out(x)
145
+
146
+ return output
src/f5_tts/model/backbones/unett.py ADDED
@@ -0,0 +1,219 @@
1
+ """
2
+ ein notation:
3
+ b - batch
4
+ n - sequence
5
+ nt - text sequence
6
+ nw - raw wave length
7
+ d - dimension
8
+ """
9
+
10
+ from __future__ import annotations
11
+ from typing import Literal
12
+
13
+ import torch
14
+ from torch import nn
15
+ import torch.nn.functional as F
16
+
17
+ from x_transformers import RMSNorm
18
+ from x_transformers.x_transformers import RotaryEmbedding
19
+
20
+ from f5_tts.model.modules import (
21
+ TimestepEmbedding,
22
+ ConvNeXtV2Block,
23
+ ConvPositionEmbedding,
24
+ Attention,
25
+ AttnProcessor,
26
+ FeedForward,
27
+ precompute_freqs_cis,
28
+ get_pos_embed_indices,
29
+ )
30
+
31
+
32
+ # Text embedding
33
+
34
+
35
+ class TextEmbedding(nn.Module):
36
+ def __init__(self, text_num_embeds, text_dim, conv_layers=0, conv_mult=2):
37
+ super().__init__()
38
+ self.text_embed = nn.Embedding(text_num_embeds + 1, text_dim) # use 0 as filler token
39
+
40
+ if conv_layers > 0:
41
+ self.extra_modeling = True
42
+ self.precompute_max_pos = 4096 # ~44s of 24khz audio
43
+ self.register_buffer("freqs_cis", precompute_freqs_cis(text_dim, self.precompute_max_pos), persistent=False)
44
+ self.text_blocks = nn.Sequential(
45
+ *[ConvNeXtV2Block(text_dim, text_dim * conv_mult) for _ in range(conv_layers)]
46
+ )
47
+ else:
48
+ self.extra_modeling = False
49
+
50
+ def forward(self, text: int["b nt"], seq_len, drop_text=False): # noqa: F722
51
+ text = text + 1 # use 0 as filler token. preprocess of batch pad -1, see list_str_to_idx()
52
+ text = text[:, :seq_len] # curtail if character tokens are more than the mel spec tokens
53
+ batch, text_len = text.shape[0], text.shape[1]
54
+ text = F.pad(text, (0, seq_len - text_len), value=0)
55
+
56
+ if drop_text: # cfg for text
57
+ text = torch.zeros_like(text)
58
+
59
+ text = self.text_embed(text) # b n -> b n d
60
+
61
+ # possible extra modeling
62
+ if self.extra_modeling:
63
+ # sinus pos emb
64
+ batch_start = torch.zeros((batch,), dtype=torch.long)
65
+ pos_idx = get_pos_embed_indices(batch_start, seq_len, max_pos=self.precompute_max_pos)
66
+ text_pos_embed = self.freqs_cis[pos_idx]
67
+ text = text + text_pos_embed
68
+
69
+ # convnextv2 blocks
70
+ text = self.text_blocks(text)
71
+
72
+ return text
73
+
74
+
75
+ # noised input audio and context mixing embedding
76
+
77
+
78
+ class InputEmbedding(nn.Module):
79
+ def __init__(self, mel_dim, text_dim, out_dim):
80
+ super().__init__()
81
+ self.proj = nn.Linear(mel_dim * 2 + text_dim, out_dim)
82
+ self.conv_pos_embed = ConvPositionEmbedding(dim=out_dim)
83
+
84
+ def forward(self, x: float["b n d"], cond: float["b n d"], text_embed: float["b n d"], drop_audio_cond=False): # noqa: F722
85
+ if drop_audio_cond: # cfg for cond audio
86
+ cond = torch.zeros_like(cond)
87
+
88
+ x = self.proj(torch.cat((x, cond, text_embed), dim=-1))
89
+ x = self.conv_pos_embed(x) + x
90
+ return x
91
+
92
+
93
+ # Flat UNet Transformer backbone
94
+
95
+
96
+ class UNetT(nn.Module):
97
+ def __init__(
98
+ self,
99
+ *,
100
+ dim,
101
+ depth=8,
102
+ heads=8,
103
+ dim_head=64,
104
+ dropout=0.1,
105
+ ff_mult=4,
106
+ mel_dim=100,
107
+ text_num_embeds=256,
108
+ text_dim=None,
109
+ conv_layers=0,
110
+ skip_connect_type: Literal["add", "concat", "none"] = "concat",
111
+ ):
112
+ super().__init__()
113
+ assert depth % 2 == 0, "UNet-Transformer's depth should be even."
114
+
115
+ self.time_embed = TimestepEmbedding(dim)
116
+ if text_dim is None:
117
+ text_dim = mel_dim
118
+ self.text_embed = TextEmbedding(text_num_embeds, text_dim, conv_layers=conv_layers)
119
+ self.input_embed = InputEmbedding(mel_dim, text_dim, dim)
120
+
121
+ self.rotary_embed = RotaryEmbedding(dim_head)
122
+
123
+ # transformer layers & skip connections
124
+
125
+ self.dim = dim
126
+ self.skip_connect_type = skip_connect_type
127
+ needs_skip_proj = skip_connect_type == "concat"
128
+
129
+ self.depth = depth
130
+ self.layers = nn.ModuleList([])
131
+
132
+ for idx in range(depth):
133
+ is_later_half = idx >= (depth // 2)
134
+
135
+ attn_norm = RMSNorm(dim)
136
+ attn = Attention(
137
+ processor=AttnProcessor(),
138
+ dim=dim,
139
+ heads=heads,
140
+ dim_head=dim_head,
141
+ dropout=dropout,
142
+ )
143
+
144
+ ff_norm = RMSNorm(dim)
145
+ ff = FeedForward(dim=dim, mult=ff_mult, dropout=dropout, approximate="tanh")
146
+
147
+ skip_proj = nn.Linear(dim * 2, dim, bias=False) if needs_skip_proj and is_later_half else None
148
+
149
+ self.layers.append(
150
+ nn.ModuleList(
151
+ [
152
+ skip_proj,
153
+ attn_norm,
154
+ attn,
155
+ ff_norm,
156
+ ff,
157
+ ]
158
+ )
159
+ )
160
+
161
+ self.norm_out = RMSNorm(dim)
162
+ self.proj_out = nn.Linear(dim, mel_dim)
163
+
164
+ def forward(
165
+ self,
166
+ x: float["b n d"], # noised input audio # noqa: F722
167
+ cond: float["b n d"], # masked cond audio # noqa: F722
168
+ text: int["b nt"], # text # noqa: F722
169
+ time: float["b"] | float[""], # time step # noqa: F821 F722
170
+ drop_audio_cond, # cfg for cond audio
171
+ drop_text, # cfg for text
172
+ mask: bool["b n"] | None = None, # noqa: F722
173
+ ):
174
+ batch, seq_len = x.shape[0], x.shape[1]
175
+ if time.ndim == 0:
176
+ time = time.repeat(batch)
177
+
178
+ # t: conditioning time, c: context (text + masked cond audio), x: noised input audio
179
+ t = self.time_embed(time)
180
+ text_embed = self.text_embed(text, seq_len, drop_text=drop_text)
181
+ x = self.input_embed(x, cond, text_embed, drop_audio_cond=drop_audio_cond)
182
+
183
+ # postfix time t to input x, [b n d] -> [b n+1 d]
184
+ x = torch.cat([t.unsqueeze(1), x], dim=1) # pack t to x
185
+ if mask is not None:
186
+ mask = F.pad(mask, (1, 0), value=1)
187
+
188
+ rope = self.rotary_embed.forward_from_seq_len(seq_len + 1)
189
+
190
+ # flat unet transformer
191
+ skip_connect_type = self.skip_connect_type
192
+ skips = []
193
+ for idx, (maybe_skip_proj, attn_norm, attn, ff_norm, ff) in enumerate(self.layers):
194
+ layer = idx + 1
195
+
196
+ # skip connection logic
197
+ is_first_half = layer <= (self.depth // 2)
198
+ is_later_half = not is_first_half
199
+
200
+ if is_first_half:
201
+ skips.append(x)
202
+
203
+ if is_later_half:
204
+ skip = skips.pop()
205
+ if skip_connect_type == "concat":
206
+ x = torch.cat((x, skip), dim=-1)
207
+ x = maybe_skip_proj(x)
208
+ elif skip_connect_type == "add":
209
+ x = x + skip
210
+
211
+ # attention and feedforward blocks
212
+ x = attn(attn_norm(x), rope=rope, mask=mask) + x
213
+ x = ff(ff_norm(x)) + x
214
+
215
+ assert len(skips) == 0
216
+
217
+ x = self.norm_out(x)[:, 1:, :] # unpack t from x
218
+
219
+ return self.proj_out(x)
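
The only U-Net-specific piece above is the skip bookkeeping: the first half of the layers pushes activations onto a stack, the second half pops them and (in the "concat" variant) projects the concatenation back down to `dim`. A toy sketch of that pattern in isolation, with the attention and feed-forward blocks omitted and sizes chosen arbitrarily:

```python
# Toy illustration of UNetT's concat-style long skips; not the class itself.
import torch
from torch import nn

dim, depth = 64, 4
skip_projs = nn.ModuleList([nn.Linear(dim * 2, dim, bias=False) for _ in range(depth // 2)])

x = torch.randn(2, 10, dim)
skips = []
for idx in range(depth):
    if idx < depth // 2:      # first half: push activations
        skips.append(x)
    else:                     # second half: pop, concatenate, project back to dim
        x = skip_projs[idx - depth // 2](torch.cat((x, skips.pop()), dim=-1))
print(x.shape, len(skips))    # torch.Size([2, 10, 64]) 0
```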
src/f5_tts/model/cfm.py ADDED
@@ -0,0 +1,287 @@
1
+ """
2
+ ein notation:
3
+ b - batch
4
+ n - sequence
5
+ nt - text sequence
6
+ nw - raw wave length
7
+ d - dimension
8
+ """
9
+
10
+ from __future__ import annotations
11
+ from typing import Callable
12
+ from random import random
13
+
14
+ import torch
15
+ from torch import nn
16
+ import torch.nn.functional as F
17
+ from torch.nn.utils.rnn import pad_sequence
18
+
19
+ from torchdiffeq import odeint
20
+
21
+ from f5_tts.model.modules import MelSpec
22
+ from f5_tts.model.utils import (
23
+ default,
24
+ exists,
25
+ list_str_to_idx,
26
+ list_str_to_tensor,
27
+ lens_to_mask,
28
+ mask_from_frac_lengths,
29
+ )
30
+
31
+
32
+ class CFM(nn.Module):
33
+ def __init__(
34
+ self,
35
+ transformer: nn.Module,
36
+ sigma=0.0,
37
+ odeint_kwargs: dict = dict(
38
+ # atol = 1e-5,
39
+ # rtol = 1e-5,
40
+ method="euler" # 'midpoint'
41
+ ),
42
+ audio_drop_prob=0.3,
43
+ cond_drop_prob=0.2,
44
+ num_channels=None,
45
+ mel_spec_module: nn.Module | None = None,
46
+ mel_spec_kwargs: dict = dict(),
47
+ frac_lengths_mask: tuple[float, float] = (0.7, 1.0),
48
+ vocab_char_map: dict[str:int] | None = None,
49
+ ):
50
+ super().__init__()
51
+
52
+ self.frac_lengths_mask = frac_lengths_mask
53
+
54
+ # mel spec
55
+ self.mel_spec = default(mel_spec_module, MelSpec(**mel_spec_kwargs))
56
+ num_channels = default(num_channels, self.mel_spec.n_mel_channels)
57
+ self.num_channels = num_channels
58
+
59
+ # classifier-free guidance
60
+ self.audio_drop_prob = audio_drop_prob
61
+ self.cond_drop_prob = cond_drop_prob
62
+
63
+ # transformer
64
+ self.transformer = transformer
65
+ dim = transformer.dim
66
+ self.dim = dim
67
+
68
+ # conditional flow related
69
+ self.sigma = sigma
70
+
71
+ # sampling related
72
+ self.odeint_kwargs = odeint_kwargs
73
+
74
+ # vocab map for tokenization
75
+ self.vocab_char_map = vocab_char_map
76
+
77
+ @property
78
+ def device(self):
79
+ return next(self.parameters()).device
80
+
81
+ @torch.no_grad()
82
+ def sample(
83
+ self,
84
+ cond: float["b n d"] | float["b nw"], # noqa: F722
85
+ text: int["b nt"] | list[str], # noqa: F722
86
+ duration: int | int["b"], # noqa: F821
87
+ *,
88
+ lens: int["b"] | None = None, # noqa: F821
89
+ steps=32,
90
+ cfg_strength=1.0,
91
+ sway_sampling_coef=None,
92
+ seed: int | None = None,
93
+ max_duration=4096,
94
+ vocoder: Callable[[float["b d n"]], float["b nw"]] | None = None, # noqa: F722
95
+ no_ref_audio=False,
96
+ duplicate_test=False,
97
+ t_inter=0.1,
98
+ edit_mask=None,
99
+ ):
100
+ self.eval()
101
+
102
+ if next(self.parameters()).dtype == torch.float16:
103
+ cond = cond.half()
104
+
105
+ # raw wave
106
+
107
+ if cond.ndim == 2:
108
+ cond = self.mel_spec(cond)
109
+ cond = cond.permute(0, 2, 1)
110
+ assert cond.shape[-1] == self.num_channels
111
+
112
+ batch, cond_seq_len, device = *cond.shape[:2], cond.device
113
+ if not exists(lens):
114
+ lens = torch.full((batch,), cond_seq_len, device=device, dtype=torch.long)
115
+
116
+ # text
117
+
118
+ if isinstance(text, list):
119
+ if exists(self.vocab_char_map):
120
+ text = list_str_to_idx(text, self.vocab_char_map).to(device)
121
+ else:
122
+ text = list_str_to_tensor(text).to(device)
123
+ assert text.shape[0] == batch
124
+
125
+ if exists(text):
126
+ text_lens = (text != -1).sum(dim=-1)
127
+ lens = torch.maximum(text_lens, lens) # make sure lengths are at least those of the text characters
128
+
129
+ # duration
130
+
131
+ cond_mask = lens_to_mask(lens)
132
+ if edit_mask is not None:
133
+ cond_mask = cond_mask & edit_mask
134
+
135
+ if isinstance(duration, int):
136
+ duration = torch.full((batch,), duration, device=device, dtype=torch.long)
137
+
138
+ duration = torch.maximum(lens + 1, duration) # just add one token so something is generated
139
+ duration = duration.clamp(max=max_duration)
140
+ max_duration = duration.amax()
141
+
142
+ # duplicate test corner for inner time step observation
143
+ if duplicate_test:
144
+ test_cond = F.pad(cond, (0, 0, cond_seq_len, max_duration - 2 * cond_seq_len), value=0.0)
145
+
146
+ cond = F.pad(cond, (0, 0, 0, max_duration - cond_seq_len), value=0.0)
147
+ cond_mask = F.pad(cond_mask, (0, max_duration - cond_mask.shape[-1]), value=False)
148
+ cond_mask = cond_mask.unsqueeze(-1)
149
+ step_cond = torch.where(
150
+ cond_mask, cond, torch.zeros_like(cond)
151
+ ) # allow direct control (cut cond audio) with lens passed in
152
+
153
+ if batch > 1:
154
+ mask = lens_to_mask(duration)
155
+ else: # save memory and speed up, as single inference need no mask currently
156
+ mask = None
157
+
158
+ # test for no ref audio
159
+ if no_ref_audio:
160
+ cond = torch.zeros_like(cond)
161
+
162
+ # neural ode
163
+
164
+ def fn(t, x):
165
+ # at each step, conditioning is fixed
166
+ # step_cond = torch.where(cond_mask, cond, torch.zeros_like(cond))
167
+
168
+ # predict flow
169
+ pred = self.transformer(
170
+ x=x, cond=step_cond, text=text, time=t, mask=mask, drop_audio_cond=False, drop_text=False
171
+ )
172
+ if cfg_strength < 1e-5:
173
+ return pred
174
+
175
+ null_pred = self.transformer(
176
+ x=x, cond=step_cond, text=text, time=t, mask=mask, drop_audio_cond=True, drop_text=True
177
+ )
178
+ return pred + (pred - null_pred) * cfg_strength
179
+
180
+ # noise input
181
+ # to make sure batched inference gives the same result as single inference regardless of batch size
182
+ # some difference may still remain, likely due to the convolutional layers
183
+ y0 = []
184
+ for dur in duration:
185
+ if exists(seed):
186
+ torch.manual_seed(seed)
187
+ y0.append(torch.randn(dur, self.num_channels, device=self.device, dtype=step_cond.dtype))
188
+ y0 = pad_sequence(y0, padding_value=0, batch_first=True)
189
+
190
+ t_start = 0
191
+
192
+ # duplicate test corner for inner time step observation
193
+ if duplicate_test:
194
+ t_start = t_inter
195
+ y0 = (1 - t_start) * y0 + t_start * test_cond
196
+ steps = int(steps * (1 - t_start))
197
+
198
+ t = torch.linspace(t_start, 1, steps, device=self.device, dtype=step_cond.dtype)
199
+ if sway_sampling_coef is not None:
200
+ t = t + sway_sampling_coef * (torch.cos(torch.pi / 2 * t) - 1 + t)
201
+
202
+ trajectory = odeint(fn, y0, t, **self.odeint_kwargs)
203
+
204
+ sampled = trajectory[-1]
205
+ out = sampled
206
+ out = torch.where(cond_mask, cond, out)
207
+
208
+ if exists(vocoder):
209
+ out = out.permute(0, 2, 1)
210
+ out = vocoder(out)
211
+
212
+ return out, trajectory
213
+
214
+ def forward(
215
+ self,
216
+ inp: float["b n d"] | float["b nw"], # mel or raw wave # noqa: F722
217
+ text: int["b nt"] | list[str], # noqa: F722
218
+ *,
219
+ lens: int["b"] | None = None, # noqa: F821
220
+ noise_scheduler: str | None = None,
221
+ ):
222
+ # handle raw wave
223
+ if inp.ndim == 2:
224
+ inp = self.mel_spec(inp)
225
+ inp = inp.permute(0, 2, 1)
226
+ assert inp.shape[-1] == self.num_channels
227
+
228
+ batch, seq_len, dtype, device, _σ1 = *inp.shape[:2], inp.dtype, self.device, self.sigma
229
+
230
+ # handle text as string
231
+ if isinstance(text, list):
232
+ if exists(self.vocab_char_map):
233
+ text = list_str_to_idx(text, self.vocab_char_map).to(device)
234
+ else:
235
+ text = list_str_to_tensor(text).to(device)
236
+ assert text.shape[0] == batch
237
+
238
+ # lens and mask
239
+ if not exists(lens):
240
+ lens = torch.full((batch,), seq_len, device=device)
241
+
242
+ mask = lens_to_mask(lens, length=seq_len) # useless here, as collate_fn will pad to max length in batch
243
+
244
+ # get a random span to mask out for training conditionally
245
+ frac_lengths = torch.zeros((batch,), device=self.device).float().uniform_(*self.frac_lengths_mask)
246
+ rand_span_mask = mask_from_frac_lengths(lens, frac_lengths)
247
+
248
+ if exists(mask):
249
+ rand_span_mask &= mask
250
+
251
+ # mel is x1
252
+ x1 = inp
253
+
254
+ # x0 is gaussian noise
255
+ x0 = torch.randn_like(x1)
256
+
257
+ # time step
258
+ time = torch.rand((batch,), dtype=dtype, device=self.device)
259
+ # TODO. noise_scheduler
260
+
261
+ # sample xt (φ_t(x) in the paper)
262
+ t = time.unsqueeze(-1).unsqueeze(-1)
263
+ φ = (1 - t) * x0 + t * x1
264
+ flow = x1 - x0
265
+
266
+ # only predict what is within the random mask span for infilling
267
+ cond = torch.where(rand_span_mask[..., None], torch.zeros_like(x1), x1)
268
+
269
+ # transformer and cfg training with a drop rate
270
+ drop_audio_cond = random() < self.audio_drop_prob # p_drop in voicebox paper
271
+ if random() < self.cond_drop_prob: # p_uncond in voicebox paper
272
+ drop_audio_cond = True
273
+ drop_text = True
274
+ else:
275
+ drop_text = False
276
+
277
+ # if you want to rigorously mask out padding, record it in collate_fn in dataset.py and pass it in here
278
+ # adding mask will use more memory, thus also need to adjust batchsampler with scaled down threshold for long sequences
279
+ pred = self.transformer(
280
+ x=φ, cond=cond, text=text, time=time, drop_audio_cond=drop_audio_cond, drop_text=drop_text
281
+ )
282
+
283
+ # flow matching loss
284
+ loss = F.mse_loss(pred, flow, reduction="none")
285
+ loss = loss[rand_span_mask]
286
+
287
+ return loss.mean(), cond, pred
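
Two details in the class above are easy to miss: the sway-sampling warp applied to the ODE time grid in `sample`, and the linear interpolation with constant flow target used in `forward`. A small numerical sketch of both (values are illustrative):

```python
# Numerical sketch of sway sampling and the flow-matching target used by CFM above.
import torch

# sway sampling: warp a uniform grid; a negative coefficient spends more steps near t = 0
steps, sway_sampling_coef = 8, -1.0
t = torch.linspace(0, 1, steps)
t_sway = t + sway_sampling_coef * (torch.cos(torch.pi / 2 * t) - 1 + t)
print(t_sway)  # endpoints 0 and 1 are unchanged, interior points are pulled toward 0

# training: interpolate noise x0 toward data x1 and regress the constant flow x1 - x0
x0, x1 = torch.randn(2, 5, 100), torch.randn(2, 5, 100)
time = torch.rand(2)[:, None, None]
phi_t = (1 - time) * x0 + time * x1   # the noisy sample fed to the transformer
flow = x1 - x0                        # the regression target at phi_t
```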
src/f5_tts/model/dataset.py ADDED
@@ -0,0 +1,296 @@
1
+ import json
2
+ import random
3
+ from importlib.resources import files
4
+ from tqdm import tqdm
5
+
6
+ import torch
7
+ import torch.nn.functional as F
8
+ import torchaudio
9
+ from torch import nn
10
+ from torch.utils.data import Dataset, Sampler
11
+ from datasets import load_from_disk
12
+ from datasets import Dataset as Dataset_
13
+
14
+ from f5_tts.model.modules import MelSpec
15
+ from f5_tts.model.utils import default
16
+
17
+
18
+ class HFDataset(Dataset):
19
+ def __init__(
20
+ self,
21
+ hf_dataset: Dataset,
22
+ target_sample_rate=24_000,
23
+ n_mel_channels=100,
24
+ hop_length=256,
25
+ ):
26
+ self.data = hf_dataset
27
+ self.target_sample_rate = target_sample_rate
28
+ self.hop_length = hop_length
29
+ self.mel_spectrogram = MelSpec(
30
+ target_sample_rate=target_sample_rate, n_mel_channels=n_mel_channels, hop_length=hop_length
31
+ )
32
+
33
+ def get_frame_len(self, index):
34
+ row = self.data[index]
35
+ audio = row["audio"]["array"]
36
+ sample_rate = row["audio"]["sampling_rate"]
37
+ return audio.shape[-1] / sample_rate * self.target_sample_rate / self.hop_length
38
+
39
+ def __len__(self):
40
+ return len(self.data)
41
+
42
+ def __getitem__(self, index):
43
+ row = self.data[index]
44
+ audio = row["audio"]["array"]
45
+
46
+ # logger.info(f"Audio shape: {audio.shape}")
47
+
48
+ sample_rate = row["audio"]["sampling_rate"]
49
+ duration = audio.shape[-1] / sample_rate
50
+
51
+ if duration > 30 or duration < 0.3:
52
+ return self.__getitem__((index + 1) % len(self.data))
53
+
54
+ audio_tensor = torch.from_numpy(audio).float()
55
+
56
+ if sample_rate != self.target_sample_rate:
57
+ resampler = torchaudio.transforms.Resample(sample_rate, self.target_sample_rate)
58
+ audio_tensor = resampler(audio_tensor)
59
+
60
+ audio_tensor = audio_tensor.unsqueeze(0) # 't -> 1 t')
61
+
62
+ mel_spec = self.mel_spectrogram(audio_tensor)
63
+
64
+ mel_spec = mel_spec.squeeze(0) # '1 d t -> d t'
65
+
66
+ text = row["text"]
67
+
68
+ return dict(
69
+ mel_spec=mel_spec,
70
+ text=text,
71
+ )
72
+
73
+
74
+ class CustomDataset(Dataset):
75
+ def __init__(
76
+ self,
77
+ custom_dataset: Dataset,
78
+ durations=None,
79
+ target_sample_rate=24_000,
80
+ hop_length=256,
81
+ n_mel_channels=100,
82
+ preprocessed_mel=False,
83
+ mel_spec_module: nn.Module | None = None,
84
+ ):
85
+ self.data = custom_dataset
86
+ self.durations = durations
87
+ self.target_sample_rate = target_sample_rate
88
+ self.hop_length = hop_length
89
+ self.preprocessed_mel = preprocessed_mel
90
+
91
+ if not preprocessed_mel:
92
+ self.mel_spectrogram = default(
93
+ mel_spec_module,
94
+ MelSpec(
95
+ target_sample_rate=target_sample_rate,
96
+ hop_length=hop_length,
97
+ n_mel_channels=n_mel_channels,
98
+ ),
99
+ )
100
+
101
+ def get_frame_len(self, index):
102
+ if (
103
+ self.durations is not None
104
+ ): # Please make sure the separately provided durations are correct, otherwise 99.99% OOM
105
+ return self.durations[index] * self.target_sample_rate / self.hop_length
106
+ return self.data[index]["duration"] * self.target_sample_rate / self.hop_length
107
+
108
+ def __len__(self):
109
+ return len(self.data)
110
+
111
+ def __getitem__(self, index):
112
+ row = self.data[index]
113
+ audio_path = row["audio_path"]
114
+ text = row["text"]
115
+ duration = row["duration"]
116
+
117
+ if self.preprocessed_mel:
118
+ mel_spec = torch.tensor(row["mel_spec"])
119
+
120
+ else:
121
+ audio, source_sample_rate = torchaudio.load(audio_path)
122
+ if audio.shape[0] > 1:
123
+ audio = torch.mean(audio, dim=0, keepdim=True)
124
+
125
+ if duration > 30 or duration < 0.3:
126
+ return self.__getitem__((index + 1) % len(self.data))
127
+
128
+ if source_sample_rate != self.target_sample_rate:
129
+ resampler = torchaudio.transforms.Resample(source_sample_rate, self.target_sample_rate)
130
+ audio = resampler(audio)
131
+
132
+ mel_spec = self.mel_spectrogram(audio)
133
+ mel_spec = mel_spec.squeeze(0) # '1 d t -> d t')
134
+
135
+ return dict(
136
+ mel_spec=mel_spec,
137
+ text=text,
138
+ )
139
+
140
+
141
+ # Dynamic Batch Sampler
142
+
143
+
144
+ class DynamicBatchSampler(Sampler[list[int]]):
145
+ """Extension of Sampler that will do the following:
146
+ 1. Change the batch size (essentially number of sequences)
147
+ in a batch to ensure that the total number of frames are less
148
+ than a certain threshold.
149
+ 2. Make sure the padding efficiency in the batch is high.
150
+ """
151
+
152
+ def __init__(
153
+ self, sampler: Sampler[int], frames_threshold: int, max_samples=0, random_seed=None, drop_last: bool = False
154
+ ):
155
+ self.sampler = sampler
156
+ self.frames_threshold = frames_threshold
157
+ self.max_samples = max_samples
158
+
159
+ indices, batches = [], []
160
+ data_source = self.sampler.data_source
161
+
162
+ for idx in tqdm(
163
+ self.sampler, desc="Sorting with sampler... if slow, check whether dataset is provided with duration"
164
+ ):
165
+ indices.append((idx, data_source.get_frame_len(idx)))
166
+ indices.sort(key=lambda elem: elem[1])
167
+
168
+ batch = []
169
+ batch_frames = 0
170
+ for idx, frame_len in tqdm(
171
+ indices, desc=f"Creating dynamic batches with {frames_threshold} audio frames per gpu"
172
+ ):
173
+ if batch_frames + frame_len <= self.frames_threshold and (max_samples == 0 or len(batch) < max_samples):
174
+ batch.append(idx)
175
+ batch_frames += frame_len
176
+ else:
177
+ if len(batch) > 0:
178
+ batches.append(batch)
179
+ if frame_len <= self.frames_threshold:
180
+ batch = [idx]
181
+ batch_frames = frame_len
182
+ else:
183
+ batch = []
184
+ batch_frames = 0
185
+
186
+ if not drop_last and len(batch) > 0:
187
+ batches.append(batch)
188
+
189
+ del indices
190
+
191
+ # if you want different batches between epochs, just set a seed and log it in the ckpt
192
+ # because during multi-gpu training, although the per-gpu batches stay the same between epochs, the resulting global minibatch differs
193
+ # e.g. for epoch n, use (random_seed + n)
194
+ random.seed(random_seed)
195
+ random.shuffle(batches)
196
+
197
+ self.batches = batches
198
+
199
+ def __iter__(self):
200
+ return iter(self.batches)
201
+
202
+ def __len__(self):
203
+ return len(self.batches)
204
+
205
+
206
+ # Load dataset
207
+
208
+
209
+ def load_dataset(
210
+ dataset_name: str,
211
+ tokenizer: str = "pinyin",
212
+ dataset_type: str = "CustomDataset",
213
+ audio_type: str = "raw",
214
+ mel_spec_module: nn.Module | None = None,
215
+ mel_spec_kwargs: dict = dict(),
216
+ ) -> CustomDataset | HFDataset:
217
+ """
218
+ dataset_type - "CustomDataset" if you want to use tokenizer name and default data path to load for train_dataset
219
+ - "CustomDatasetPath" if you just want to pass the full path to a preprocessed dataset without relying on tokenizer
220
+ """
221
+
222
+ print("Loading dataset ...")
223
+
224
+ if dataset_type == "CustomDataset":
225
+ rel_data_path = str(files("f5_tts").joinpath(f"../../data/{dataset_name}_{tokenizer}"))
226
+ if audio_type == "raw":
227
+ try:
228
+ train_dataset = load_from_disk(f"{rel_data_path}/raw")
229
+ except: # noqa: E722
230
+ train_dataset = Dataset_.from_file(f"{rel_data_path}/raw.arrow")
231
+ preprocessed_mel = False
232
+ elif audio_type == "mel":
233
+ train_dataset = Dataset_.from_file(f"{rel_data_path}/mel.arrow")
234
+ preprocessed_mel = True
235
+ with open(f"{rel_data_path}/duration.json", "r", encoding="utf-8") as f:
236
+ data_dict = json.load(f)
237
+ durations = data_dict["duration"]
238
+ train_dataset = CustomDataset(
239
+ train_dataset,
240
+ durations=durations,
241
+ preprocessed_mel=preprocessed_mel,
242
+ mel_spec_module=mel_spec_module,
243
+ **mel_spec_kwargs,
244
+ )
245
+
246
+ elif dataset_type == "CustomDatasetPath":
247
+ try:
248
+ train_dataset = load_from_disk(f"{dataset_name}/raw")
249
+ except: # noqa: E722
250
+ train_dataset = Dataset_.from_file(f"{dataset_name}/raw.arrow")
251
+
252
+ with open(f"{dataset_name}/duration.json", "r", encoding="utf-8") as f:
253
+ data_dict = json.load(f)
254
+ durations = data_dict["duration"]
255
+ train_dataset = CustomDataset(
256
+ train_dataset, durations=durations, preprocessed_mel=preprocessed_mel, **mel_spec_kwargs
257
+ )
258
+
259
+ elif dataset_type == "HFDataset":
260
+ print(
261
+ "Should manually modify the path of huggingface dataset to your need.\n"
262
+ + "May also the corresponding script cuz different dataset may have different format."
263
+ )
264
+ pre, post = dataset_name.split("_")
265
+ train_dataset = HFDataset(
266
+ load_dataset(f"{pre}/{pre}", split=f"train.{post}", cache_dir=str(files("f5_tts").joinpath("../../data"))),
267
+ )
268
+
269
+ return train_dataset
270
+
271
+
272
+ # collation
273
+
274
+
275
+ def collate_fn(batch):
276
+ mel_specs = [item["mel_spec"].squeeze(0) for item in batch]
277
+ mel_lengths = torch.LongTensor([spec.shape[-1] for spec in mel_specs])
278
+ max_mel_length = mel_lengths.amax()
279
+
280
+ padded_mel_specs = []
281
+ for spec in mel_specs: # TODO. maybe records mask for attention here
282
+ padding = (0, max_mel_length - spec.size(-1))
283
+ padded_spec = F.pad(spec, padding, value=0)
284
+ padded_mel_specs.append(padded_spec)
285
+
286
+ mel_specs = torch.stack(padded_mel_specs)
287
+
288
+ text = [item["text"] for item in batch]
289
+ text_lengths = torch.LongTensor([len(item) for item in text])
290
+
291
+ return dict(
292
+ mel=mel_specs,
293
+ mel_lengths=mel_lengths,
294
+ text=text,
295
+ text_lengths=text_lengths,
296
+ )
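
The sampler and collate function above are meant to plug into a standard `DataLoader`; a hedged wiring sketch, where the dataset name, frame threshold, and worker counts are illustrative assumptions:

```python
# Hedged sketch of wiring load_dataset, DynamicBatchSampler and collate_fn together.
from torch.utils.data import DataLoader, SequentialSampler

from f5_tts.model.dataset import DynamicBatchSampler, collate_fn, load_dataset

train_dataset = load_dataset("Emilia_ZH_EN", tokenizer="pinyin")  # dataset name is an example
batch_sampler = DynamicBatchSampler(
    SequentialSampler(train_dataset),
    frames_threshold=38400,   # max mel frames per batch per GPU (assumed value)
    max_samples=64,
    random_seed=666,
    drop_last=False,
)
train_loader = DataLoader(
    train_dataset,
    collate_fn=collate_fn,
    batch_sampler=batch_sampler,
    num_workers=4,
    pin_memory=True,
)
```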
src/f5_tts/model/modules.py ADDED
@@ -0,0 +1,581 @@
1
+ """
2
+ ein notation:
3
+ b - batch
4
+ n - sequence
5
+ nt - text sequence
6
+ nw - raw wave length
7
+ d - dimension
8
+ """
9
+
10
+ from __future__ import annotations
11
+ from typing import Optional
12
+ import math
13
+
14
+ import torch
15
+ from torch import nn
16
+ import torch.nn.functional as F
17
+ import torchaudio
18
+
19
+ from x_transformers.x_transformers import apply_rotary_pos_emb
20
+
21
+
22
+ # raw wav to mel spec
23
+
24
+
25
+ class MelSpec(nn.Module):
26
+ def __init__(
27
+ self,
28
+ filter_length=1024,
29
+ hop_length=256,
30
+ win_length=1024,
31
+ n_mel_channels=100,
32
+ target_sample_rate=24_000,
33
+ normalize=False,
34
+ power=1,
35
+ norm=None,
36
+ center=True,
37
+ ):
38
+ super().__init__()
39
+ self.n_mel_channels = n_mel_channels
40
+
41
+ self.mel_stft = torchaudio.transforms.MelSpectrogram(
42
+ sample_rate=target_sample_rate,
43
+ n_fft=filter_length,
44
+ win_length=win_length,
45
+ hop_length=hop_length,
46
+ n_mels=n_mel_channels,
47
+ power=power,
48
+ center=center,
49
+ normalized=normalize,
50
+ norm=norm,
51
+ )
52
+
53
+ self.register_buffer("dummy", torch.tensor(0), persistent=False)
54
+
55
+ def forward(self, inp):
56
+ if len(inp.shape) == 3:
57
+ inp = inp.squeeze(1) # 'b 1 nw -> b nw'
58
+
59
+ assert len(inp.shape) == 2
60
+
61
+ if self.dummy.device != inp.device:
62
+ self.to(inp.device)
63
+
64
+ mel = self.mel_stft(inp)
65
+ mel = mel.clamp(min=1e-5).log()
66
+ return mel
67
+
68
+
69
+ # sinusoidal position embedding
70
+
71
+
72
+ class SinusPositionEmbedding(nn.Module):
73
+ def __init__(self, dim):
74
+ super().__init__()
75
+ self.dim = dim
76
+
77
+ def forward(self, x, scale=1000):
78
+ device = x.device
79
+ half_dim = self.dim // 2
80
+ emb = math.log(10000) / (half_dim - 1)
81
+ emb = torch.exp(torch.arange(half_dim, device=device).float() * -emb)
82
+ emb = scale * x.unsqueeze(1) * emb.unsqueeze(0)
83
+ emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
84
+ return emb
85
+
86
+
87
+ # convolutional position embedding
88
+
89
+
90
+ class ConvPositionEmbedding(nn.Module):
91
+ def __init__(self, dim, kernel_size=31, groups=16):
92
+ super().__init__()
93
+ assert kernel_size % 2 != 0
94
+ self.conv1d = nn.Sequential(
95
+ nn.Conv1d(dim, dim, kernel_size, groups=groups, padding=kernel_size // 2),
96
+ nn.Mish(),
97
+ nn.Conv1d(dim, dim, kernel_size, groups=groups, padding=kernel_size // 2),
98
+ nn.Mish(),
99
+ )
100
+
101
+ def forward(self, x: float["b n d"], mask: bool["b n"] | None = None): # noqa: F722
102
+ if mask is not None:
103
+ mask = mask[..., None]
104
+ x = x.masked_fill(~mask, 0.0)
105
+
106
+ x = x.permute(0, 2, 1)
107
+ x = self.conv1d(x)
108
+ out = x.permute(0, 2, 1)
109
+
110
+ if mask is not None:
111
+ out = out.masked_fill(~mask, 0.0)
112
+
113
+ return out
114
+
115
+
116
+ # rotary positional embedding related
117
+
118
+
119
+ def precompute_freqs_cis(dim: int, end: int, theta: float = 10000.0, theta_rescale_factor=1.0):
120
+ # proposed by reddit user bloc97, to rescale rotary embeddings to longer sequence length without fine-tuning
121
+ # has some connection to NTK literature
122
+ # https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/
123
+ # https://github.com/lucidrains/rotary-embedding-torch/blob/main/rotary_embedding_torch/rotary_embedding_torch.py
124
+ theta *= theta_rescale_factor ** (dim / (dim - 2))
125
+ freqs = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim))
126
+ t = torch.arange(end, device=freqs.device) # type: ignore
127
+ freqs = torch.outer(t, freqs).float() # type: ignore
128
+ freqs_cos = torch.cos(freqs) # real part
129
+ freqs_sin = torch.sin(freqs) # imaginary part
130
+ return torch.cat([freqs_cos, freqs_sin], dim=-1)
131
+
132
+
133
+ def get_pos_embed_indices(start, length, max_pos, scale=1.0):
134
+ # length = length if isinstance(length, int) else length.max()
135
+ scale = scale * torch.ones_like(start, dtype=torch.float32) # in case scale is a scalar
136
+ pos = (
137
+ start.unsqueeze(1)
138
+ + (torch.arange(length, device=start.device, dtype=torch.float32).unsqueeze(0) * scale.unsqueeze(1)).long()
139
+ )
140
+ # avoid extra long error.
141
+ pos = torch.where(pos < max_pos, pos, max_pos - 1)
142
+ return pos
143
+
144
+
145
+ # Global Response Normalization layer (Instance Normalization ?)
146
+
147
+
148
+ class GRN(nn.Module):
149
+ def __init__(self, dim):
150
+ super().__init__()
151
+ self.gamma = nn.Parameter(torch.zeros(1, 1, dim))
152
+ self.beta = nn.Parameter(torch.zeros(1, 1, dim))
153
+
154
+ def forward(self, x):
155
+ Gx = torch.norm(x, p=2, dim=1, keepdim=True)
156
+ Nx = Gx / (Gx.mean(dim=-1, keepdim=True) + 1e-6)
157
+ return self.gamma * (x * Nx) + self.beta + x
158
+
159
+
160
+ # ConvNeXt-V2 Block https://github.com/facebookresearch/ConvNeXt-V2/blob/main/models/convnextv2.py
161
+ # ref: https://github.com/bfs18/e2_tts/blob/main/rfwave/modules.py#L108
162
+
163
+
164
+ class ConvNeXtV2Block(nn.Module):
165
+ def __init__(
166
+ self,
167
+ dim: int,
168
+ intermediate_dim: int,
169
+ dilation: int = 1,
170
+ ):
171
+ super().__init__()
172
+ padding = (dilation * (7 - 1)) // 2
173
+ self.dwconv = nn.Conv1d(
174
+ dim, dim, kernel_size=7, padding=padding, groups=dim, dilation=dilation
175
+ ) # depthwise conv
176
+ self.norm = nn.LayerNorm(dim, eps=1e-6)
177
+ self.pwconv1 = nn.Linear(dim, intermediate_dim) # pointwise/1x1 convs, implemented with linear layers
178
+ self.act = nn.GELU()
179
+ self.grn = GRN(intermediate_dim)
180
+ self.pwconv2 = nn.Linear(intermediate_dim, dim)
181
+
182
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
183
+ residual = x
184
+ x = x.transpose(1, 2) # b n d -> b d n
185
+ x = self.dwconv(x)
186
+ x = x.transpose(1, 2) # b d n -> b n d
187
+ x = self.norm(x)
188
+ x = self.pwconv1(x)
189
+ x = self.act(x)
190
+ x = self.grn(x)
191
+ x = self.pwconv2(x)
192
+ return residual + x
193
+
194
+
195
+ # AdaLayerNormZero
196
+ # return with modulated x for attn input, and params for later mlp modulation
197
+
198
+
199
+ class AdaLayerNormZero(nn.Module):
200
+ def __init__(self, dim):
201
+ super().__init__()
202
+
203
+ self.silu = nn.SiLU()
204
+ self.linear = nn.Linear(dim, dim * 6)
205
+
206
+ self.norm = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)
207
+
208
+ def forward(self, x, emb=None):
209
+ emb = self.linear(self.silu(emb))
210
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = torch.chunk(emb, 6, dim=1)
211
+
212
+ x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
213
+ return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
214
+
215
+
216
+ # AdaLayerNormZero for final layer
217
+ # return only with modulated x for attn input, cuz no more mlp modulation
218
+
219
+
220
+ class AdaLayerNormZero_Final(nn.Module):
221
+ def __init__(self, dim):
222
+ super().__init__()
223
+
224
+ self.silu = nn.SiLU()
225
+ self.linear = nn.Linear(dim, dim * 2)
226
+
227
+ self.norm = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)
228
+
229
+ def forward(self, x, emb):
230
+ emb = self.linear(self.silu(emb))
231
+ scale, shift = torch.chunk(emb, 2, dim=1)
232
+
233
+ x = self.norm(x) * (1 + scale)[:, None, :] + shift[:, None, :]
234
+ return x
235
+
236
+
237
+ # FeedForward
238
+
239
+
240
+ class FeedForward(nn.Module):
241
+ def __init__(self, dim, dim_out=None, mult=4, dropout=0.0, approximate: str = "none"):
242
+ super().__init__()
243
+ inner_dim = int(dim * mult)
244
+ dim_out = dim_out if dim_out is not None else dim
245
+
246
+ activation = nn.GELU(approximate=approximate)
247
+ project_in = nn.Sequential(nn.Linear(dim, inner_dim), activation)
248
+ self.ff = nn.Sequential(project_in, nn.Dropout(dropout), nn.Linear(inner_dim, dim_out))
249
+
250
+ def forward(self, x):
251
+ return self.ff(x)
252
+
253
+
254
+ # Attention with possible joint part
255
+ # modified from diffusers/src/diffusers/models/attention_processor.py
256
+
257
+
258
+ class Attention(nn.Module):
259
+ def __init__(
260
+ self,
261
+ processor: JointAttnProcessor | AttnProcessor,
262
+ dim: int,
263
+ heads: int = 8,
264
+ dim_head: int = 64,
265
+ dropout: float = 0.0,
266
+ context_dim: Optional[int] = None, # if not None -> joint attention
267
+ context_pre_only=None,
268
+ ):
269
+ super().__init__()
270
+
271
+ if not hasattr(F, "scaled_dot_product_attention"):
272
+ raise ImportError("Attention equires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.")
273
+
274
+ self.processor = processor
275
+
276
+ self.dim = dim
277
+ self.heads = heads
278
+ self.inner_dim = dim_head * heads
279
+ self.dropout = dropout
280
+
281
+ self.context_dim = context_dim
282
+ self.context_pre_only = context_pre_only
283
+
284
+ self.to_q = nn.Linear(dim, self.inner_dim)
285
+ self.to_k = nn.Linear(dim, self.inner_dim)
286
+ self.to_v = nn.Linear(dim, self.inner_dim)
287
+
288
+ if self.context_dim is not None:
289
+ self.to_k_c = nn.Linear(context_dim, self.inner_dim)
290
+ self.to_v_c = nn.Linear(context_dim, self.inner_dim)
291
+ if self.context_pre_only is not None:
292
+ self.to_q_c = nn.Linear(context_dim, self.inner_dim)
293
+
294
+ self.to_out = nn.ModuleList([])
295
+ self.to_out.append(nn.Linear(self.inner_dim, dim))
296
+ self.to_out.append(nn.Dropout(dropout))
297
+
298
+ if self.context_pre_only is not None and not self.context_pre_only:
299
+ self.to_out_c = nn.Linear(self.inner_dim, dim)
300
+
301
+ def forward(
302
+ self,
303
+ x: float["b n d"], # noised input x # noqa: F722
304
+ c: float["b n d"] = None, # context c # noqa: F722
305
+ mask: bool["b n"] | None = None, # noqa: F722
306
+ rope=None, # rotary position embedding for x
307
+ c_rope=None, # rotary position embedding for c
308
+ ) -> torch.Tensor:
309
+ if c is not None:
310
+ return self.processor(self, x, c=c, mask=mask, rope=rope, c_rope=c_rope)
311
+ else:
312
+ return self.processor(self, x, mask=mask, rope=rope)
313
+
314
+
315
+ # Attention processor
316
+
317
+
318
+ class AttnProcessor:
319
+ def __init__(self):
320
+ pass
321
+
322
+ def __call__(
323
+ self,
324
+ attn: Attention,
325
+ x: float["b n d"], # noised input x # noqa: F722
326
+ mask: bool["b n"] | None = None, # noqa: F722
327
+ rope=None, # rotary position embedding
328
+ ) -> torch.FloatTensor:
329
+ batch_size = x.shape[0]
330
+
331
+ # `sample` projections.
332
+ query = attn.to_q(x)
333
+ key = attn.to_k(x)
334
+ value = attn.to_v(x)
335
+
336
+ # apply rotary position embedding
337
+ if rope is not None:
338
+ freqs, xpos_scale = rope
339
+ q_xpos_scale, k_xpos_scale = (xpos_scale, xpos_scale**-1.0) if xpos_scale is not None else (1.0, 1.0)
340
+
341
+ query = apply_rotary_pos_emb(query, freqs, q_xpos_scale)
342
+ key = apply_rotary_pos_emb(key, freqs, k_xpos_scale)
343
+
344
+ # attention
345
+ inner_dim = key.shape[-1]
346
+ head_dim = inner_dim // attn.heads
347
+ query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
348
+ key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
349
+ value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
350
+
351
+ # mask, e.g. when an inference batch has samples with different target durations, mask out the padding
352
+ if mask is not None:
353
+ attn_mask = mask
354
+ attn_mask = attn_mask.unsqueeze(1).unsqueeze(1) # 'b n -> b 1 1 n'
355
+ attn_mask = attn_mask.expand(batch_size, attn.heads, query.shape[-2], key.shape[-2])
356
+ else:
357
+ attn_mask = None
358
+
359
+ x = F.scaled_dot_product_attention(query, key, value, attn_mask=attn_mask, dropout_p=0.0, is_causal=False)
360
+ x = x.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
361
+ x = x.to(query.dtype)
362
+
363
+ # linear proj
364
+ x = attn.to_out[0](x)
365
+ # dropout
366
+ x = attn.to_out[1](x)
367
+
368
+ if mask is not None:
369
+ mask = mask.unsqueeze(-1)
370
+ x = x.masked_fill(~mask, 0.0)
371
+
372
+ return x
373
+
374
+
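# --- Editor's illustrative sketch, not part of the uploaded file. It shows how the
# Attention module and AttnProcessor above compose for plain self-attention; the
# helper name, sizes, and padding mask are assumptions for illustration only.
def _self_attention_usage_sketch():
    import torch

    attn = Attention(processor=AttnProcessor(), dim=512, heads=8, dim_head=64)
    x = torch.randn(2, 100, 512)                 # (batch, sequence, dim)
    mask = torch.ones(2, 100, dtype=torch.bool)
    mask[1, 80:] = False                         # pretend the second sample is padded past length 80
    out = attn(x, mask=mask)                     # padded positions are masked in attention and zeroed in the output
    assert out.shape == x.shape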
375
+ # Joint Attention processor for MM-DiT
376
+ # modified from diffusers/src/diffusers/models/attention_processor.py
377
+
378
+
379
+ class JointAttnProcessor:
380
+ def __init__(self):
381
+ pass
382
+
383
+ def __call__(
384
+ self,
385
+ attn: Attention,
386
+ x: float["b n d"], # noised input x # noqa: F722
387
+ c: float["b nt d"] = None, # context c, here text # noqa: F722
388
+ mask: bool["b n"] | None = None, # noqa: F722
389
+ rope=None, # rotary position embedding for x
390
+ c_rope=None, # rotary position embedding for c
391
+ ) -> torch.FloatTensor:
392
+ residual = x
393
+
394
+ batch_size = c.shape[0]
395
+
396
+ # `sample` projections.
397
+ query = attn.to_q(x)
398
+ key = attn.to_k(x)
399
+ value = attn.to_v(x)
400
+
401
+ # `context` projections.
402
+ c_query = attn.to_q_c(c)
403
+ c_key = attn.to_k_c(c)
404
+ c_value = attn.to_v_c(c)
405
+
406
+ # apply rope for context and noised input independently
407
+ if rope is not None:
408
+ freqs, xpos_scale = rope
409
+ q_xpos_scale, k_xpos_scale = (xpos_scale, xpos_scale**-1.0) if xpos_scale is not None else (1.0, 1.0)
410
+ query = apply_rotary_pos_emb(query, freqs, q_xpos_scale)
411
+ key = apply_rotary_pos_emb(key, freqs, k_xpos_scale)
412
+ if c_rope is not None:
413
+ freqs, xpos_scale = c_rope
414
+ q_xpos_scale, k_xpos_scale = (xpos_scale, xpos_scale**-1.0) if xpos_scale is not None else (1.0, 1.0)
415
+ c_query = apply_rotary_pos_emb(c_query, freqs, q_xpos_scale)
416
+ c_key = apply_rotary_pos_emb(c_key, freqs, k_xpos_scale)
417
+
418
+ # attention
419
+ query = torch.cat([query, c_query], dim=1)
420
+ key = torch.cat([key, c_key], dim=1)
421
+ value = torch.cat([value, c_value], dim=1)
422
+
423
+ inner_dim = key.shape[-1]
424
+ head_dim = inner_dim // attn.heads
425
+ query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
426
+ key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
427
+ value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
428
+
429
+ # mask, e.g. when an inference batch has samples with different target durations, mask out the padding
430
+ if mask is not None:
431
+ attn_mask = F.pad(mask, (0, c.shape[1]), value=True) # no mask for c (text)
432
+ attn_mask = attn_mask.unsqueeze(1).unsqueeze(1) # 'b n -> b 1 1 n'
433
+ attn_mask = attn_mask.expand(batch_size, attn.heads, query.shape[-2], key.shape[-2])
434
+ else:
435
+ attn_mask = None
436
+
437
+ x = F.scaled_dot_product_attention(query, key, value, attn_mask=attn_mask, dropout_p=0.0, is_causal=False)
438
+ x = x.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
439
+ x = x.to(query.dtype)
440
+
441
+ # Split the attention outputs.
442
+ x, c = (
443
+ x[:, : residual.shape[1]],
444
+ x[:, residual.shape[1] :],
445
+ )
446
+
447
+ # linear proj
448
+ x = attn.to_out[0](x)
449
+ # dropout
450
+ x = attn.to_out[1](x)
451
+ if not attn.context_pre_only:
452
+ c = attn.to_out_c(c)
453
+
454
+ if mask is not None:
455
+ mask = mask.unsqueeze(-1)
456
+ x = x.masked_fill(~mask, 0.0)
457
+ # c = c.masked_fill(~mask, 0.) # no mask for c (text)
458
+
459
+ return x, c
460
+
461
+
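# --- Editor's illustrative sketch, not part of the uploaded file. It wires
# JointAttnProcessor into Attention so the noised-input stream and the context
# (text) stream attend jointly; the helper name and sizes are assumptions.
def _joint_attention_usage_sketch():
    import torch

    attn = Attention(
        processor=JointAttnProcessor(),
        dim=512,
        heads=8,
        dim_head=64,
        context_dim=512,          # not None -> the to_*_c context projections are created
        context_pre_only=False,   # not the last layer, so the context also gets an output projection
    )
    x = torch.randn(2, 100, 512)  # noised input tokens
    c = torch.randn(2, 30, 512)   # context (text) tokens
    x_out, c_out = attn(x, c=c)   # both streams are updated and returned
    assert x_out.shape == x.shape and c_out.shape == c.shape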
462
+ # DiT Block
463
+
464
+
465
+ class DiTBlock(nn.Module):
466
+ def __init__(self, dim, heads, dim_head, ff_mult=4, dropout=0.1):
467
+ super().__init__()
468
+
469
+ self.attn_norm = AdaLayerNormZero(dim)
470
+ self.attn = Attention(
471
+ processor=AttnProcessor(),
472
+ dim=dim,
473
+ heads=heads,
474
+ dim_head=dim_head,
475
+ dropout=dropout,
476
+ )
477
+
478
+ self.ff_norm = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)
479
+ self.ff = FeedForward(dim=dim, mult=ff_mult, dropout=dropout, approximate="tanh")
480
+
481
+ def forward(self, x, t, mask=None, rope=None): # x: noised input, t: time embedding
482
+ # pre-norm & modulation for attention input
483
+ norm, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.attn_norm(x, emb=t)
484
+
485
+ # attention
486
+ attn_output = self.attn(x=norm, mask=mask, rope=rope)
487
+
488
+ # process attention output for input x
489
+ x = x + gate_msa.unsqueeze(1) * attn_output
490
+
491
+ norm = self.ff_norm(x) * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
492
+ ff_output = self.ff(norm)
493
+ x = x + gate_mlp.unsqueeze(1) * ff_output
494
+
495
+ return x
496
+
497
+
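# --- Editor's illustrative sketch, not part of the uploaded file. A single DiTBlock
# pass with a random tensor standing in for the timestep conditioning embedding
# (normally produced by TimestepEmbedding below); the helper name and sizes are assumptions.
def _dit_block_usage_sketch():
    import torch

    block = DiTBlock(dim=512, heads=8, dim_head=64, ff_mult=4, dropout=0.1)
    x = torch.randn(2, 100, 512)  # noised input tokens
    t = torch.randn(2, 512)       # stand-in for the (batch, dim) time embedding
    y = block(x, t)               # AdaLN-Zero modulation, self-attention, then gated feed-forward
    assert y.shape == x.shape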
498
+ # MMDiT Block https://arxiv.org/abs/2403.03206
499
+
500
+
501
+ class MMDiTBlock(nn.Module):
502
+ r"""
503
+ modified from diffusers/src/diffusers/models/attention.py
504
+
505
+ notes.
506
+ _c: context related. text, cond, etc. (left part in sd3 fig2.b)
507
+ _x: noised input related. (right part)
508
+ context_pre_only: last layer only do prenorm + modulation cuz no more ffn
509
+ """
510
+
511
+ def __init__(self, dim, heads, dim_head, ff_mult=4, dropout=0.1, context_pre_only=False):
512
+ super().__init__()
513
+
514
+ self.context_pre_only = context_pre_only
515
+
516
+ self.attn_norm_c = AdaLayerNormZero_Final(dim) if context_pre_only else AdaLayerNormZero(dim)
517
+ self.attn_norm_x = AdaLayerNormZero(dim)
518
+ self.attn = Attention(
519
+ processor=JointAttnProcessor(),
520
+ dim=dim,
521
+ heads=heads,
522
+ dim_head=dim_head,
523
+ dropout=dropout,
524
+ context_dim=dim,
525
+ context_pre_only=context_pre_only,
526
+ )
527
+
528
+ if not context_pre_only:
529
+ self.ff_norm_c = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)
530
+ self.ff_c = FeedForward(dim=dim, mult=ff_mult, dropout=dropout, approximate="tanh")
531
+ else:
532
+ self.ff_norm_c = None
533
+ self.ff_c = None
534
+ self.ff_norm_x = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)
535
+ self.ff_x = FeedForward(dim=dim, mult=ff_mult, dropout=dropout, approximate="tanh")
536
+
537
+ def forward(self, x, c, t, mask=None, rope=None, c_rope=None): # x: noised input, c: context, t: time embedding
538
+ # pre-norm & modulation for attention input
539
+ if self.context_pre_only:
540
+ norm_c = self.attn_norm_c(c, t)
541
+ else:
542
+ norm_c, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.attn_norm_c(c, emb=t)
543
+ norm_x, x_gate_msa, x_shift_mlp, x_scale_mlp, x_gate_mlp = self.attn_norm_x(x, emb=t)
544
+
545
+ # attention
546
+ x_attn_output, c_attn_output = self.attn(x=norm_x, c=norm_c, mask=mask, rope=rope, c_rope=c_rope)
547
+
548
+ # process attention output for context c
549
+ if self.context_pre_only:
550
+ c = None
551
+ else: # if not last layer
552
+ c = c + c_gate_msa.unsqueeze(1) * c_attn_output
553
+
554
+ norm_c = self.ff_norm_c(c) * (1 + c_scale_mlp[:, None]) + c_shift_mlp[:, None]
555
+ c_ff_output = self.ff_c(norm_c)
556
+ c = c + c_gate_mlp.unsqueeze(1) * c_ff_output
557
+
558
+ # process attention output for input x
559
+ x = x + x_gate_msa.unsqueeze(1) * x_attn_output
560
+
561
+ norm_x = self.ff_norm_x(x) * (1 + x_scale_mlp[:, None]) + x_shift_mlp[:, None]
562
+ x_ff_output = self.ff_x(norm_x)
563
+ x = x + x_gate_mlp.unsqueeze(1) * x_ff_output
564
+
565
+ return c, x
566
+
567
+
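# --- Editor's illustrative sketch, not part of the uploaded file. A single MMDiTBlock
# pass over the two streams; the helper name and sizes are assumptions. With
# context_pre_only=True (last layer) the returned context would be None instead.
def _mmdit_block_usage_sketch():
    import torch

    block = MMDiTBlock(dim=512, heads=8, dim_head=64, ff_mult=4, dropout=0.1, context_pre_only=False)
    x = torch.randn(2, 100, 512)   # noised input stream
    c = torch.randn(2, 30, 512)    # context (text) stream
    t = torch.randn(2, 512)        # stand-in for the (batch, dim) time embedding
    c_out, x_out = block(x, c, t)  # note the (c, x) return order
    assert x_out.shape == x.shape and c_out.shape == c.shape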
568
+ # time step conditioning embedding
569
+
570
+
571
+ class TimestepEmbedding(nn.Module):
572
+ def __init__(self, dim, freq_embed_dim=256):
573
+ super().__init__()
574
+ self.time_embed = SinusPositionEmbedding(freq_embed_dim)
575
+ self.time_mlp = nn.Sequential(nn.Linear(freq_embed_dim, dim), nn.SiLU(), nn.Linear(dim, dim))
576
+
577
+ def forward(self, timestep: float["b"]): # noqa: F821
578
+ time_hidden = self.time_embed(timestep)
579
+ time_hidden = time_hidden.to(timestep.dtype)
580
+ time = self.time_mlp(time_hidden) # b d
581
+ return time
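# --- Editor's illustrative sketch, not part of the uploaded file. It shows the
# expected input/output shapes of TimestepEmbedding; the helper name and dim are
# assumptions for illustration only.
def _timestep_embedding_usage_sketch():
    import torch

    time_embed = TimestepEmbedding(dim=512)
    t = torch.rand(2)               # one flow-matching time value per batch item
    t_emb = time_embed(t)           # sinusoidal features (b, 256) -> MLP -> (b, 512)
    assert t_emb.shape == (2, 512)
    return t_emb                    # this is the `t` consumed by DiTBlock / MMDiTBlock above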