# llm_server/main.py
from wrapper import LLMWrapper

import uvicorn
from fastapi import FastAPI, HTTPException, Request

app = FastAPI()

# Load the model once at startup and reuse it across requests.
llm_wrapper = LLMWrapper()
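# `wrapper` is not part of this file; the sketch below shows one plausible
# shape for LLMWrapper, assuming a Hugging Face transformers text-generation
# pipeline. The model name and method signature are assumptions, not the
# repo's actual implementation:
#
#     from transformers import pipeline
#
#     class LLMWrapper:
#         def __init__(self):
#             self.pipe = pipeline('text-generation', model='gpt2')
#
#         def generate_text(self, prompt: str) -> str:
#             return self.pipe(prompt, max_new_tokens=64)[0]['generated_text']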
@app.post("/llm_on_cpu")
async def generate_text(request: Request):
    # The prompt arrives as the raw request body rather than JSON.
    raw_data = await request.body()
    prompt = raw_data.decode('utf-8')
    if not prompt:
        # A Flask-style `(body, 400)` tuple is not honored by FastAPI;
        # raise HTTPException so the client actually receives a 400.
        raise HTTPException(status_code=400, detail='Prompt is required')
    generated_text = llm_wrapper.generate_text(prompt)
    print(generated_text)
    return {'generated_text': generated_text}


if __name__ == "__main__":
    # Run the server directly; host and port are typical defaults.
    uvicorn.run(app, host="0.0.0.0", port=8000)
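# Example client call (a sketch): the endpoint reads the raw body as the
# prompt, so a plain-text POST is enough. The host and port match the
# uvicorn defaults above and are assumptions for a local run:
#
#     import requests
#
#     resp = requests.post('http://127.0.0.1:8000/llm_on_cpu', data='Hello!')
#     print(resp.json()['generated_text'])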