eddiegulay
committed on
Commit
•
b8a0301
1
Parent(s):
da91b13
Create README.md
Browse files
README.md
ADDED
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
```python
|
2 |
+
# torchaudio is required below for loading/resampling audio; it was missing
# from the original snippet, which raised NameError at transcription time.
import torchaudio
from transformers import WhisperProcessor, WhisperForConditionalGeneration

# Load the fine-tuned Swahili Whisper checkpoint and its processor.
processor = WhisperProcessor.from_pretrained("eddiegulay/Whisperer_Mozilla_Sw_2000")
model = WhisperForConditionalGeneration.from_pretrained("eddiegulay/Whisperer_Mozilla_Sw_2000")
# Force the decoder to transcribe in Swahili (rather than auto-detect/translate).
forced_decoder_ids = processor.get_decoder_prompt_ids(language="swahili", task="transcribe")
def transcribe(audio_path):
    """Transcribe an audio file with the fine-tuned Swahili Whisper model.

    Parameters:
        audio_path: path to an audio file readable by ``torchaudio.load``.

    Returns:
        list[str]: decoded transcription(s), one entry per batch item
        (a single-element list for a single input file).
    """
    # Load the waveform; torchaudio returns (channels, samples) and the
    # file's native sample rate.
    audio_input, sample_rate = torchaudio.load(audio_path)

    # Whisper expects 16 kHz input; resample only when needed (the original
    # resampled unconditionally, which is wasted work for 16 kHz files).
    target_sample_rate = 16000
    if sample_rate != target_sample_rate:
        resampler = torchaudio.transforms.Resample(
            orig_freq=sample_rate, new_freq=target_sample_rate
        )
        audio_input = resampler(audio_input)

    # Use the first channel only — the processor expects a 1-D waveform.
    input_features = processor(
        audio_input[0], sampling_rate=target_sample_rate, return_tensors="pt"
    ).input_features

    # Generate token ids, forcing Swahili transcription via the decoder prompt.
    predicted_ids = model.generate(input_features, forced_decoder_ids=forced_decoder_ids)

    # Decode token ids to text, dropping Whisper's special tokens.
    transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)
    return transcription
# Example usage: print the result — the original bare call silently
# discarded the transcription, so the example produced no visible output.
print(transcribe('your_audio_file.mp3'))
28 |
+
```
|