update modelscope model
- README.md +6 -6
- cosyvoice/cli/frontend.py +1 -1
README.md
CHANGED
@@ -40,18 +40,18 @@ If you are expert in this field, and you are only interested in training your ow
  ``` python
  # SDK model download
  from modelscope import snapshot_download
- snapshot_download('
- snapshot_download('
- snapshot_download('
+ snapshot_download('iic/CosyVoice-300M', local_dir='pretrained_models/CosyVoice-300M')
+ snapshot_download('iic/CosyVoice-300M-SFT', local_dir='pretrained_models/CosyVoice-300M-SFT')
+ snapshot_download('iic/CosyVoice-300M-Instruct', local_dir='pretrained_models/CosyVoice-300M-Instruct')
  snapshot_download('speech_tts/speech_kantts_ttsfrd', local_dir='pretrained_models/speech_kantts_ttsfrd')
  ```

  ``` sh
  # git model download, please make sure git lfs is installed
  mkdir -p pretrained_models
- git clone https://www.modelscope.cn/
- git clone https://www.modelscope.cn/
- git clone https://www.modelscope.cn/
+ git clone https://www.modelscope.cn/iic/CosyVoice-300M.git pretrained_models/CosyVoice-300M
+ git clone https://www.modelscope.cn/iic/CosyVoice-300M-SFT.git pretrained_models/CosyVoice-300M-SFT
+ git clone https://www.modelscope.cn/iic/CosyVoice-300M-Instruct.git pretrained_models/CosyVoice-300M-Instruct
  git clone https://www.modelscope.cn/speech_tts/speech_kantts_ttsfrd.git pretrained_models/speech_kantts_ttsfrd
  ```
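The change above points the three CosyVoice-300M downloads at the `iic/*` model IDs on ModelScope, while the ttsfrd resource stays under `speech_tts`. As a minimal sketch, not part of the commit, the same downloads can be wrapped in a loop that skips models whose target directory already exists; the `MODELS` list and the existence check are our own additions:

``` python
# Sketch only: download the models from the updated README, skipping any
# that are already present under pretrained_models/.
import os
from modelscope import snapshot_download

MODELS = [
    'iic/CosyVoice-300M',
    'iic/CosyVoice-300M-SFT',
    'iic/CosyVoice-300M-Instruct',
    'speech_tts/speech_kantts_ttsfrd',
]

for model_id in MODELS:
    # Mirror the README's layout: pretrained_models/<model name>
    local_dir = os.path.join('pretrained_models', model_id.split('/')[-1])
    if not os.path.isdir(local_dir):
        snapshot_download(model_id, local_dir=local_dir)
```

The git-clone route in the README is equivalent but requires git lfs; the SDK route only needs the modelscope package.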
cosyvoice/cli/frontend.py
CHANGED
@@ -42,7 +42,7 @@ class CosyVoiceFrontEnd:
      option.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
      option.intra_op_num_threads = 1
      self.campplus_session = onnxruntime.InferenceSession(campplus_model, sess_options=option, providers=["CPUExecutionProvider"])
-     self.speech_tokenizer_session = onnxruntime.InferenceSession(speech_tokenizer_model, sess_options=option, providers=["CUDAExecutionProvider"])
+     self.speech_tokenizer_session = onnxruntime.InferenceSession(speech_tokenizer_model, sess_options=option, providers=["CUDAExecutionProvider" if torch.cuda.is_available() else "CPUExecutionProvider"])
      if os.path.exists(spk2info):
          self.spk2info = torch.load(spk2info, map_location=self.device)
      self.instruct = instruct
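The frontend.py change lets the speech tokenizer session fall back to CPU when no CUDA device is available (for example on a CPU-only Space), instead of always requesting CUDAExecutionProvider. A hedged, standalone sketch of the same pattern; the `make_session` helper and its model-path argument are illustrative and not part of the repo:

``` python
# Sketch of the provider-selection pattern introduced in the diff above.
import onnxruntime
import torch

def make_session(model_path: str) -> onnxruntime.InferenceSession:
    option = onnxruntime.SessionOptions()
    option.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
    option.intra_op_num_threads = 1
    # Pick CUDA only when a device is actually visible, matching the changed line.
    providers = ["CUDAExecutionProvider" if torch.cuda.is_available() else "CPUExecutionProvider"]
    return onnxruntime.InferenceSession(model_path, sess_options=option, providers=providers)
```

Keying the check on `torch.cuda.is_available()` mirrors the committed line, so the ONNX session ends up on the same device as the torch modules.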