# llm_server/main.py
# Minimal FastAPI server exposing LLMWrapper text generation over HTTP.
from typing import Union

import uvicorn
from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse

from wrapper import LLMWrapper
# ASGI application instance served by uvicorn (see the __main__ guard below).
app = FastAPI()
# Single shared model wrapper; constructed once at import time so the model
# is loaded before the first request arrives.
llm_wrapper = LLMWrapper()
@app.post("/")
async def generate_text(request: Request):
    """Generate text from a raw-body prompt.

    Reads the POST body as UTF-8 text, forwards it to the shared
    ``llm_wrapper``, and returns ``{'generated_text': ...}``.  An empty
    (or whitespace-only) body yields an HTTP 400 with
    ``{'error': 'Prompt is required'}``.
    """
    raw_data = await request.body()  # raw bytes of the POST body
    prompt = raw_data.decode('utf-8')
    if not prompt.strip():
        # BUG FIX: the original `return {...}, 400` is Flask idiom -- FastAPI
        # serializes the tuple as a JSON array with status 200.  Use an
        # explicit JSONResponse so the client actually sees HTTP 400.
        return JSONResponse(status_code=400,
                            content={'error': 'Prompt is required'})
    generated_text = llm_wrapper.generate_text(prompt)
    print(generated_text)  # server-side log of the model output
    return {'generated_text': generated_text}
if __name__ == '__main__':
    # Run the ASGI server directly (development entry point).  Binds to
    # loopback only, so the service is not reachable from other hosts.
    uvicorn.run(app, host='127.0.0.1', port=8001)