Update README.md
README.md CHANGED
```diff
@@ -39,18 +39,18 @@ It achieves the following results on the evaluation set:
 
 ## Model description
 
-from transformers import pipeline
-import torch
-
-modelName = "ajibs75/whisper-small-yoruba"
-device = 0 if torch.cuda.is_available() else "cpu"
-pipe = pipeline(task="automatic-speech-recognition", model=modelName, chunk_length_s=30, device=device)
-pipe.model.config.forced_decoder_ids = pipe.tokenizer.get_decoder_prompt_ids(language="yo", task="transcribe")
-
-audio = "sample.mp3"
-text = pipe(audio)
-transcribed_audio = text["text"]
-print(transcribed_audio)
+>>> from transformers import pipeline
+>>> import torch
+
+>>> modelName = "ajibs75/whisper-small-yoruba"
+>>> device = 0 if torch.cuda.is_available() else "cpu"
+>>> pipe = pipeline(task="automatic-speech-recognition", model=modelName, chunk_length_s=30, device=device)
+>>> pipe.model.config.forced_decoder_ids = pipe.tokenizer.get_decoder_prompt_ids(language="yo", task="transcribe")
+
+>>> audio = "sample.mp3"
+>>> text = pipe(audio)
+>>> transcribed_audio = text["text"]
+>>> print(transcribed_audio)
 
 
 ## Intended uses & limitations
```
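For readers who want to run the updated snippet outside a Python REPL, below is a minimal script form of the same example. It is a sketch rather than part of the card: it assumes the `ajibs75/whisper-small-yoruba` checkpoint named above and uses `sample.mp3` as a placeholder for any local audio file. On recent `transformers` releases the language and task can alternatively be passed per call via `generate_kwargs` instead of setting `forced_decoder_ids`.

```python
# Minimal sketch of the card's example as a plain script.
# Assumption (not from the card): "sample.mp3" stands in for any local audio file.
import torch
from transformers import pipeline

model_name = "ajibs75/whisper-small-yoruba"
device = 0 if torch.cuda.is_available() else "cpu"

# chunk_length_s=30 lets the pipeline transcribe audio longer than Whisper's
# 30-second window by chunking the input and stitching the partial transcripts.
pipe = pipeline(
    task="automatic-speech-recognition",
    model=model_name,
    chunk_length_s=30,
    device=device,
)

# Pin decoding to Yoruba transcription, as in the snippet above.
pipe.model.config.forced_decoder_ids = pipe.tokenizer.get_decoder_prompt_ids(
    language="yo", task="transcribe"
)

result = pipe("sample.mp3")
print(result["text"])
```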