init

Files changed:
- attach_speaker_embedding_s2s.py +10 -5
- main_s2s.sh +1 -14
- speaker_embedding_pyannote.py +36 -0
- tokenize_dataset_s2s.py +1 -0
attach_speaker_embedding_s2s.py
CHANGED

```diff
@@ -5,9 +5,6 @@ import shutil
 from soundfile import LibsndfileError
 from datasets import load_dataset, DatasetDict, Audio
 
-from speaker_embedding_metavoice import MetaVoiceSE
-
-
 direction = os.getenv("DIRECTION", "enA-jaA")
 sides = set(direction.split("-"))
 dataset_id = os.getenv("DATASET_ID", 0)
@@ -16,7 +13,15 @@ hf_org = os.getenv("HF_ORG", "asahi417")
 hf_dataset = f"seamless-align-{direction}"
 dataset = load_dataset(f"{hf_org}/{hf_dataset}", f"subset_{dataset_id}", split="train")
 audio_loader = Audio()
-
+se_model = os.getenv("SE_MODEL", "metavoice")
+if se_model == "metavoice":
+    from speaker_embedding_metavoice import MetaVoiceSE
+    speaker_embedder = MetaVoiceSE()
+elif se_model == "pyannote":
+    from speaker_embedding_pyannote import PyannoteSE
+    speaker_embedder = PyannoteSE()
+else:
+    raise ValueError(f"unknown speaker embedding: {se_model}")
 
 
 def error_file(example):
@@ -51,7 +56,7 @@ dataset = dataset.map(
     num_proc=num_proc,
     desc="attach speaker embedding dataset"
 )
-DatasetDict({"train": dataset}).push_to_hub(f"{hf_org}/{hf_dataset}.speaker-embedding.
+DatasetDict({"train": dataset}).push_to_hub(f"{hf_org}/{hf_dataset}.speaker-embedding.{se_model}", config_name=f"subset_{dataset_id}")
 cache_dir = f"{expanduser('~')}/.cache/huggingface/datasets/{hf_org}___{hf_dataset}/subset_{dataset_id}"
 if os.path.exists(cache_dir):
     shutil.rmtree(cache_dir)
```
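The `dataset.map` callback that consumes `speaker_embedder` sits outside the hunks shown above. Below is a minimal sketch of how it is presumably wired, assuming one decoded `Audio` column per side of the direction; the column names `{side}.audio` and `{side}.speaker_embedding` are assumptions, not taken from the diff.

```python
# Hypothetical map callback; column names are assumed, not from the diff.
def attach_speaker_embedding(example):
    for side in sides:  # e.g. {"enA", "jaA"}
        audio = example[f"{side}.audio"]  # decoded dict with "array" and "sampling_rate"
        embedding = speaker_embedder.get_speaker_embedding(
            audio["array"], audio["sampling_rate"]
        )
        example[f"{side}.speaker_embedding"] = embedding.tolist()
    return example
```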
main_s2s.sh
CHANGED

```diff
@@ -26,13 +26,13 @@ do
   python tokenize_dataset_s2s.py
 done
 # speaker embedding
+export SE_MODEL="metavoice"
 for i in $(seq 1 144);
 do
   export DATASET_ID=${i}
   export DIRECTION="enA-jaA"
   python attach_speaker_embedding_s2s.py
 done
-
 for i in $(seq 2 40);
 do
   export DATASET_ID=${i}
@@ -45,15 +45,12 @@ do
   export DIRECTION="enA-jaA"
   python attach_speaker_embedding_s2s.py
 done
-
 for i in $(seq 81 120);
 do
   export DATASET_ID=${i}
   export DIRECTION="enA-jaA"
   python attach_speaker_embedding_s2s.py
 done
-
-
 for i in $(seq 121 144);
 do
   export DATASET_ID=${i}
@@ -109,16 +106,6 @@ do
   echo ${LINE_NO_START}
   python fetch_dataset_s2s.py
 done
-for i in 114 77 78 79 80;
-do
-  export N_POOL=15
-  export DATASET_ID=${i}
-  export DIRECTION="enA-viA"
-  export LINE_NO_START=$(((DATASET_ID-1) * 2500))
-  export LINE_NO_END=$((DATASET_ID * 2500))
-  echo ${LINE_NO_START}
-  python fetch_dataset_s2s.py
-done
 # tokenize
 for i in $(seq 120 140);
 do
```
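The deleted fetch loop relied on the same fixed-width sharding used throughout the script: subset `i` covers a half-open window of 2,500 manifest lines. The helper below just restates that arithmetic in Python (the function name is ours):

```python
def shard_range(dataset_id: int, lines_per_shard: int = 2500) -> tuple[int, int]:
    # Mirrors LINE_NO_START / LINE_NO_END in main_s2s.sh.
    start = (dataset_id - 1) * lines_per_shard
    end = dataset_id * lines_per_shard
    return start, end

assert shard_range(1) == (0, 2500)
assert shard_range(114) == (282500, 285000)
```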
speaker_embedding_pyannote.py
ADDED

```python
"""Pyannote speaker embedding model.
- pip install pyannote.audio
- feature dimension: 512
- source: https://huggingface.co/pyannote/embedding
"""
from typing import Optional, Union, Tuple
import torch
import numpy as np
from pyannote.audio import Model
from pyannote.audio import Inference
from pyannote.audio.core.inference import fix_reproducibility, map_with_specifications


class PyannoteSE:

    def __init__(self):
        model = Model.from_pretrained("pyannote/embedding")
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        model.to(self.device)
        model.eval()
        self.inference = Inference(model, window="whole")

    def get_speaker_embedding(self, wav: np.ndarray, sampling_rate: Optional[int] = None) -> np.ndarray:
        wav = torch.as_tensor(wav.reshape(1, -1), dtype=torch.float32).to(self.device)
        fix_reproducibility(self.inference.device)
        if self.inference.window == "sliding":
            return self.inference.slide(wav, sampling_rate, hook=None)

        outputs: Union[np.ndarray, Tuple[np.ndarray]] = self.inference.infer(wav[None])

        def __first_sample(outputs: np.ndarray, **kwargs) -> np.ndarray:
            return outputs[0]

        return map_with_specifications(
            self.inference.model.specifications, __first_sample, outputs
        )
```
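A hedged usage sketch for the new class. `sample.wav` is a placeholder path, and `pyannote/embedding` is a gated checkpoint, so `Model.from_pretrained` may additionally require a Hugging Face access token. Per the docstring and model card, the returned embedding is 512-dimensional:

```python
import soundfile as sf  # the repo already depends on soundfile (see LibsndfileError above)

wav, sr = sf.read("sample.wav")          # placeholder; expects a mono waveform
se = PyannoteSE()
emb = se.get_speaker_embedding(wav, sr)  # expected shape: (512,)
print(emb.shape)
```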
tokenize_dataset_s2s.py
CHANGED

```diff
@@ -54,6 +54,7 @@ dataset = dataset.map(
     desc="tokenize dataset"
 )
 DatasetDict({"train": dataset}).push_to_hub(f"{hf_org}/{hf_dataset}.tokenized", config_name=f"subset_{dataset_id}")
+# DatasetDict({"train": dataset}).push_to_hub(f"{hf_org}/{hf_dataset}.tokenized.encodec", config_name=f"subset_{dataset_id}")
 cache_dir = f"{expanduser('~')}/.cache/huggingface/datasets/{hf_org}___{hf_dataset}/subset_{dataset_id}"
 if os.path.exists(cache_dir):
     shutil.rmtree(cache_dir)
```
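Once pushed, a subset can be read back the same way the scripts load the raw split; the repo and config names below simply mirror the `push_to_hub` target above, assuming the default `HF_ORG` and `DIRECTION`:

```python
from datasets import load_dataset

# Names mirror the push_to_hub call above (defaults: HF_ORG=asahi417, DIRECTION=enA-jaA).
dataset = load_dataset(
    "asahi417/seamless-align-enA-jaA.tokenized",
    "subset_1",
    split="train",
)
```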