Pranjal12345's picture
dsdasd
f2021d6
raw
history blame
908 Bytes
#uvicorn app:app --host 0.0.0.0 --port 8000 --reload
from fastapi import FastAPI
from transformers import WhisperProcessor, WhisperForConditionalGeneration
import librosa
# FastAPI application exposing the transcription endpoint below.
app = FastAPI()
# Load the Whisper tokenizer/feature-extractor and model once at import
# time so every request reuses them instead of reloading the weights.
processor = WhisperProcessor.from_pretrained("openai/whisper-small")
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")
# NOTE(review): clearing forced_decoder_ids presumably lets the model pick
# the language/task tokens itself — confirm against transformers docs.
model.config.forced_decoder_ids = None
# NOTE(review): audio path is hard-coded and read at import time, so the
# server fails to start if "output.mp3" is absent — confirm this is intended.
audio_file_path = "output.mp3"
# Resample to 16 kHz, the rate the Whisper feature extractor expects.
audio_data, _ = librosa.load(audio_file_path, sr=16000)
@app.get("/")
def transcribe_audio():
    """Transcribe the module-level pre-loaded audio clip.

    Returns:
        dict: ``{"transcription": <str>}`` — the decoded transcript of
        ``audio_data`` loaded at startup.
    """
    # Pass the numpy array directly (no .tolist() copy needed) and state
    # the sampling rate explicitly: omitting it makes transformers warn and
    # assume a rate, which can silently corrupt the extracted features.
    input_features = processor(
        audio_data, sampling_rate=16000, return_tensors="pt"
    ).input_features
    # Generate token ids, then decode to text, dropping special tokens
    # such as <|startoftranscript|>.
    predicted_ids = model.generate(input_features)
    transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)
    # batch size is 1, so return the single decoded string.
    return {"transcription": transcription[0]}
# Entry point: run this file directly to serve the API with uvicorn.
if __name__ == "__main__":
    import uvicorn

    # Bind to all interfaces so the endpoint is reachable beyond localhost.
    uvicorn.run(app, host="0.0.0.0", port=8000)