Upload ParsiGoo.py with huggingface_hub
Browse files- ParsiGoo.py +29 -14
ParsiGoo.py
CHANGED
@@ -1,6 +1,14 @@
|
|
1 |
import os
|
2 |
import datasets
|
3 |
logger = datasets.logging.get_logger(__name__)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
4 |
class ParsiGoo(datasets.GeneratorBasedBuilder):
|
5 |
VERSION = datasets.Version("1.0.0")
|
6 |
|
@@ -22,32 +30,39 @@ class ParsiGoo(datasets.GeneratorBasedBuilder):
|
|
22 |
|
23 |
def _split_generators(self, dl_manager):
|
24 |
logger.info("| > ")
|
|
|
25 |
logger.info(dl_manager.manual_dir)
|
26 |
# logger.info(os.path.join(os.path.dirname(os.path.abspath(__file__)), "datasets"))
|
27 |
data_dir = dl_manager.download("datasets")
|
28 |
-
|
29 |
-
|
|
|
|
|
|
|
30 |
if not os.path.isdir(os.path.join(data_dir, speaker_name)):
|
31 |
continue
|
32 |
root_path = os.path.join(data_dir, speaker_name)
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
name=
|
37 |
gen_kwargs={
|
38 |
-
"
|
39 |
-
"
|
40 |
"root_path": root_path
|
41 |
}
|
42 |
-
)
|
43 |
-
)
|
44 |
-
return splits
|
45 |
|
46 |
-
def _generate_examples(self,
|
47 |
-
|
|
|
|
|
48 |
for i, line in enumerate(ttf):
|
49 |
cols = line.split("|")
|
50 |
wav_file = cols[1].strip()
|
51 |
text = cols[0].strip()
|
52 |
wav_file = os.path.join(root_path, "wavs", wav_file)
|
53 |
-
|
|
|
|
|
|
|
|
1 |
import os
|
2 |
import datasets
|
3 |
logger = datasets.logging.get_logger(__name__)
|
4 |
+
|
5 |
+
|
6 |
+
_speaker_names = [
|
7 |
+
'ariana_Male2',
|
8 |
+
'moujeze_Female1',
|
9 |
+
'ariana_Male1',
|
10 |
+
'ariana_Female1'
|
11 |
+
]
|
12 |
class ParsiGoo(datasets.GeneratorBasedBuilder):
|
13 |
VERSION = datasets.Version("1.0.0")
|
14 |
|
|
|
30 |
|
31 |
def _split_generators(self, dl_manager):
    """Download the data and build the single "train" split.

    Scans each known speaker directory under the downloaded data dir and
    collects its ``metadata.csv``.  The metadata files and the speaker names
    forwarded to ``_generate_examples`` are kept index-aligned: speakers whose
    directory is missing are skipped from BOTH lists.  (The original code
    forwarded the full ``_speaker_names`` list, so ``speaker_names[ind]``
    could label examples with the wrong speaker whenever a directory was
    absent.)  A leftover debug ``print("4544444")`` was also removed.

    Args:
        dl_manager: ``datasets.DownloadManager`` used to fetch the data.

    Returns:
        list[datasets.SplitGenerator]: a single "train" split generator.
    """
    logger.info("| > ")
    logger.info(dl_manager.manual_dir)
    data_dir = dl_manager.download("datasets")
    logger.info(data_dir)

    meta_files = []
    found_speakers = []  # kept index-aligned with meta_files
    root_path = ""
    for speaker_name in _speaker_names:
        speaker_dir = os.path.join(data_dir, speaker_name)
        if not os.path.isdir(speaker_dir):
            # Missing speaker folder: skip it entirely so the two lists stay aligned.
            continue
        root_path = speaker_dir  # NOTE: last found dir; kept for backward compatibility
        meta_files.append(os.path.join(speaker_dir, "metadata.csv"))
        found_speakers.append(speaker_name)

    return [datasets.SplitGenerator(
        name="train",
        gen_kwargs={
            "txt_files": meta_files,
            "speaker_names": found_speakers,
            "root_path": root_path,
        },
    )]
|
|
|
|
|
55 |
|
56 |
+
def _generate_examples(self, txt_files, speaker_names, root_path):
|
57 |
+
id=-1
|
58 |
+
for ind,txt_file in enumerate(txt_files):
|
59 |
+
with open(txt_file, "r", encoding="utf-8") as ttf:
|
60 |
for i, line in enumerate(ttf):
|
61 |
cols = line.split("|")
|
62 |
wav_file = cols[1].strip()
|
63 |
text = cols[0].strip()
|
64 |
wav_file = os.path.join(root_path, "wavs", wav_file)
|
65 |
+
id+=1
|
66 |
+
yield id, {"text": text, "audio_file": wav_file, "speaker_name": speaker_names[ind], "root_path": root_path}
|
67 |
+
|
68 |
+
|