import torch
import gradio as gr
from transformers import (
    AutomaticSpeechRecognitionPipeline,
    WhisperForConditionalGeneration,
    WhisperTokenizer,
    WhisperProcessor,
)
from peft import PeftModel, PeftConfig
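
# Load the base Whisper Small checkpoint referenced by the PEFT config,
# then attach the LoRA adapter weights fine-tuned for Wolof on top of it.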
peft_model_id = "Moustapha91/whisper-small-wolof"
language = "French"
task = "transcribe"

peft_config = PeftConfig.from_pretrained(peft_model_id)
model = WhisperForConditionalGeneration.from_pretrained(
    peft_config.base_model_name_or_path,
    device_map="auto",  # 8-bit quantization removed
)
model = PeftModel.from_pretrained(model, peft_model_id)

tokenizer = WhisperTokenizer.from_pretrained(peft_config.base_model_name_or_path, language=language, task=task)
processor = WhisperProcessor.from_pretrained(peft_config.base_model_name_or_path, language=language, task=task)
feature_extractor = processor.feature_extractor
forced_decoder_ids = processor.get_decoder_prompt_ids(language=language, task=task)

pipe = AutomaticSpeechRecognitionPipeline(model=model, tokenizer=tokenizer, feature_extractor=feature_extractor)
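
# Quick sanity check outside the UI (a sketch only; the file path below is hypothetical):
# print(pipe("example_wolof_clip.wav",
#            generate_kwargs={"forced_decoder_ids": forced_decoder_ids, "max_new_tokens": 255})["text"])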
def transcribe(audio):
    text = pipe(
        audio,
        generate_kwargs={"forced_decoder_ids": forced_decoder_ids, "max_new_tokens": 255},
    )["text"]
    return text

iface = gr.Interface(
    fn=transcribe,
    inputs=gr.Audio(type="filepath"),  # 'source' argument removed to avoid an error on newer Gradio versions
    outputs="text",
    title="PEFT LoRA + Whisper Small Wolof",
    description="Real-time demo for Wolof speech recognition using a `PEFT-LoRA` fine-tuned Whisper Small model.",
)

iface.launch(share=True)