import os
from contextlib import asynccontextmanager
from logging import getLogger
from multiprocessing import freeze_support
from typing import Union
from uuid import uuid1

import numpy as np
from fastapi import FastAPI, WebSocket, WebSocketDisconnect
from fastapi.responses import RedirectResponse
from fastapi.staticfiles import StaticFiles

from config import BASE_DIR
from transcribe.translatepipes import TranslatePipes
from transcribe.utils import pcm_bytes_to_np_array
from transcribe.whisper_llm_serve import WhisperTranscriptionService

logger = getLogger(__name__)


async def get_audio_from_websocket(websocket) -> Union[np.ndarray, bool]:
    """
    Receives an audio buffer from the websocket and converts it to a numpy array.

    Args:
        websocket: The websocket to receive audio from.

    Returns:
        A numpy array containing the audio, or False once the client sends
        the b"END_OF_AUDIO" marker.
    """
    frame_data = await websocket.receive_bytes()
    if frame_data == b"END_OF_AUDIO":
        return False
    return pcm_bytes_to_np_array(frame_data)


@asynccontextmanager
async def lifespan(app: FastAPI):
    # Build the shared translation/transcription pipeline once at startup and
    # block until its workers are ready before serving any requests.
    global pipe
    pipe = TranslatePipes()
    pipe.wait_ready()
    logger.info("Pipeline is ready.")
    yield


FRONTEND_DIR = os.path.join(BASE_DIR, "frontend")

app = FastAPI(lifespan=lifespan)
app.mount("/app", StaticFiles(directory=FRONTEND_DIR, html=True), name="frontend")

# Shared TranslatePipes instance, created by lifespan() at startup.
pipe = None


@app.get("/")
async def root():
    return RedirectResponse(url="/app/")
@app.websocket("/ws")
async def translate(websocket: WebSocket):
query_parameters_dict = websocket.query_params
from_lang, to_lang = query_parameters_dict.get('from'), query_parameters_dict.get('to')
client = WhisperTranscriptionService(
websocket,
pipe,
language=from_lang,
dst_lang=to_lang,
client_uid=f"{uuid1()}",
)
if from_lang and to_lang and client:
logger.info(f"Source lange: {from_lang} -> Dst lange: {to_lang}")
await websocket.accept()
try:
while True:
frame_data = await get_audio_from_websocket(websocket)
client.add_frames(frame_data)
except WebSocketDisconnect:
return
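

# Hypothetical usage sketch (assumption, not part of the running server): a
# minimal client showing the wire protocol the /ws endpoint expects, namely
# one websocket message per chunk of raw PCM bytes, ended by the literal
# b"END_OF_AUDIO" marker. It assumes the third-party `websockets` package and
# audio encoded the way pcm_bytes_to_np_array expects; adjust the host, port
# and language codes for your deployment.
async def example_client(pcm_chunks, uri="ws://localhost:9191/ws?from=en&to=zh"):
    import websockets  # local import: the server itself does not need this package

    async with websockets.connect(uri) as ws:
        for chunk in pcm_chunks:           # each chunk: raw PCM bytes
            await ws.send(chunk)
        await ws.send(b"END_OF_AUDIO")     # tell the server the stream has ended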


if __name__ == '__main__':
    freeze_support()
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=9191)