|
import logging

import uvicorn
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from langchain_ollama import OllamaLLM
from pydantic import BaseModel
|
|
|
app = FastAPI()

# Register CORS handling exactly once. The original code added
# CORSMiddleware twice; middleware added last wraps outermost, so the
# second (allow_origins=["*"]) registration silently overrode the first.
# Starlette's CORSMiddleware compares allow_origins entries literally —
# "chrome-extension://*" never matches any real origin — so matching the
# browser extension's "chrome-extension://<id>" origin requires
# allow_origin_regex instead.
app.add_middleware(
    CORSMiddleware,
    allow_origin_regex=r"chrome-extension://.*",
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
|
|
|
|
|
# Request body for POST /get_meaning.
# (Documented with comments rather than a docstring so the OpenAPI
# schema description is not altered.)
class MeaningRequest(BaseModel):

    # The raw text snippet the client wants explained.
    text: str
|
|
|
|
|
# Response body for POST /get_meaning.
# (Documented with comments rather than a docstring so the OpenAPI
# schema description is not altered.)
class MeaningResponse(BaseModel):

    # The LLM-generated one-to-two-line explanation of the input text.
    meaning: str
|
|
|
def get_meaning_from_llm(
    text: str,
    model: str = "llama3.2",
    base_url: str = "https://earwig-exact-slug.ngrok-free.app",
    temperature: float = 0.25,
) -> str:
    """Ask an Ollama-hosted LLM for a short explanation of *text*.

    Args:
        text: The snippet to explain.
        model: Ollama model name; default matches the deployed server.
        base_url: Ollama server URL (an ngrok tunnel by default).
        temperature: Sampling temperature; kept low for focused answers.

    Returns:
        The model's explanation as a plain string.
    """
    prompt = (
        "Explain the meaning of the following text in simple terms in "
        f"only one or two lines not more than that: '{text}'"
    )

    llm = OllamaLLM(
        model=model,
        base_url=base_url,
        temperature=temperature,
    )

    # ``llm(prompt)`` used the deprecated BaseLLM.__call__ API (removed
    # in recent langchain-core releases); ``invoke`` is the supported
    # Runnable entry point and returns the same string.
    return llm.invoke(prompt)
|
|
|
@app.post("/get_meaning", response_model=MeaningResponse)
async def get_meaning(request: MeaningRequest) -> MeaningResponse:
    """Receive text and return its 'meaning' as generated by an LLM.

    Args:
        request: Parsed request body carrying the ``text`` to explain.

    Returns:
        A ``MeaningResponse`` wrapping the model's explanation.
    """
    # logging (with lazy %-args) instead of print(): level-filterable,
    # timestamped, and routed wherever the app configures handlers.
    logging.getLogger(__name__).info("Received text: %s", request.text)

    # NOTE(review): get_meaning_from_llm performs a blocking network
    # call inside an async endpoint, which stalls the event loop while
    # the model responds. A plain `def` endpoint (FastAPI runs those in
    # a threadpool) would avoid that — confirm before changing.
    meaning = get_meaning_from_llm(request.text)

    return MeaningResponse(meaning=meaning)
|
|
|
if __name__ == "__main__":

    # Development entry point: listen on all interfaces, port 8000, with
    # auto-reload on code changes. The "app:app" import string requires
    # this module to be named app.py — NOTE(review): filename is unknown
    # here; confirm it matches.
    uvicorn.run("app:app", host="0.0.0.0", port=8000, reload=True)