# whisperdocker/main.py — FastAPI app: Swahili speech-to-text (Whisper),
# GPT-4 chat about "viazi lishe", and OpenAI text-to-speech.
# (Origin: lyimo's Hugging Face Space, commit a13ba9b.)
import uuid

from fastapi import FastAPI, Request
from fastapi.responses import FileResponse, HTMLResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from openai import OpenAI
from transformers import pipeline
# Application wiring: static assets, Jinja2 templates, the ASR model, and
# the OpenAI client shared by the route handlers below.
app = FastAPI()
app.mount("/static", StaticFiles(directory="static"), name="static")
templates = Jinja2Templates(directory="templates")
# Swahili automatic-speech-recognition pipeline — presumably a Whisper
# fine-tune; see the seeafricatz/kiaziboraasr model card to confirm.
pipe = pipeline(model="seeafricatz/kiaziboraasr")
# Reads OPENAI_API_KEY from the environment by default.
client = OpenAI()
@app.post("/transcribe")
async def transcribe(request: Request):
    """Transcribe an uploaded audio file to Swahili text.

    Expects multipart form data with an "audio" file field.
    Returns ``{"text": <transcription>}``.
    """
    form_data = await request.form()
    audio_file = form_data["audio"]
    # BUG FIX: form_data["audio"] is a Starlette UploadFile, which the
    # transformers ASR pipeline does not accept directly (it takes a file
    # path, raw bytes, or an ndarray) — read the raw bytes first.
    audio_bytes = await audio_file.read()
    text = pipe(audio_bytes)["text"]
    return {"text": text}
@app.post("/generate_response")
async def generate_response(request: Request):
    """Answer a Swahili question about "viazi lishe" with GPT-4.

    Reads the (transcribed) question from the "text" form field, prepends a
    few-shot Swahili-only conversation, and returns
    ``{"response": <assistant reply>}``.
    """
    form = await request.form()
    question = form["text"]
    # Few-shot priming that locks the assistant into Swahili-only answers
    # about vitamin-A-rich sweet potatoes.
    conversation = [
        {"role": "system", "content": "All your answers should be in swahili only, users undertands swahili only so here we start... Wewe ni mtaalamu wa viazi lishe na utajibu maswali yote kwa kiswahili tu!"},
        {"role": "user", "content": "Mambo vipi?"},
        {"role": "assistant", "content": "Salama je una swali lolote kuhusu viazi lishe?"},
        {"role": "user", "content": "nini maana ya Viazi lishe?"},
        {"role": "assistant", "content": "viazi lishe ni Viazi vitamu vyenye rangi ya karoti kwa ndani ambavyo vina vitamin A kwa wingi"},
        {"role": "user", "content": "nini matumizi ya viazi lishe?"},
        {"role": "assistant", "content": "viazi lishe vinaweza kutengenezea chakula kama Keki, Maandazi, Kalimati na tambi: Ukisaga unga wa viazi lishe, unaweza kutumika kupika vyakula ambavyo huwa watu hutumia unga wa ngano kupika, unga wa viazi lishe una virutubisho vingi zaidi kuliko unga wa ngano na ukitumika kupikia vyakula tajwa hapo juu watumiaji watakuwa wanakula vyakula vyenye virutubisho Zaidi."},
        {"role": "user", "content": question},
    ]
    completion = client.chat.completions.create(messages=conversation, model="gpt-4")
    reply = completion.choices[0].message.content
    return {"response": reply}
@app.post("/inference")
async def inference(request: Request):
    """Synthesize speech for the given text and serve it as a static MP3.

    Expects a "text" form field; runs OpenAI TTS ("tts-1", voice "nova")
    and returns ``{"audio_url": <path under /static>}``.
    """
    form_data = await request.form()
    text = form_data["text"]
    response = client.audio.speech.create(
        model="tts-1",
        voice="nova",
        input=text
    )
    # BUG FIX: a single fixed filename meant concurrent requests overwrote
    # each other's audio, and the constant URL let browsers serve a stale
    # cached clip. A unique per-request name avoids both.
    filename = f"tts_output_{uuid.uuid4().hex}.mp3"
    output_file = f"static/{filename}"
    response.stream_to_file(output_file)
    return {"audio_url": f"/static/{filename}"}
@app.get("/", response_class=HTMLResponse)
async def index(request: Request):
    """Render the landing page from the Jinja2 "index.html" template."""
    context = {"request": request}
    return templates.TemplateResponse("index.html", context)