sushku committed on
Commit 0b25702 · verified · 1 Parent(s): 3da981f

Upload 3 files

Files changed (3)
  1. Dockerfile +25 -0
  2. main.py +61 -0
  3. requirements.txt +4 -0
Dockerfile ADDED
@@ -0,0 +1,25 @@
+ # For more information, please refer to https://aka.ms/vscode-docker-python
+ FROM python:3-slim
+
+ EXPOSE 8000
+
+ # Keeps Python from generating .pyc files in the container
+ ENV PYTHONDONTWRITEBYTECODE=1
+
+ # Turns off buffering for easier container logging
+ ENV PYTHONUNBUFFERED=1
+
+ # Install pip requirements
+ COPY requirements.txt .
+ RUN python -m pip install -r requirements.txt
+
+ WORKDIR /app
+ COPY . /app
+
+ # Create a non-root user with an explicit UID and give it access to the /app folder
+ # For more info, please refer to https://aka.ms/vscode-docker-python-configure-containers
+ RUN adduser -u 5678 --disabled-password --gecos "" appuser && chown -R appuser /app
+ USER appuser
+
+ # During debugging, this entry point will be overridden. For more information, please refer to https://aka.ms/vscode-docker-python-debug
+ CMD ["gunicorn", "--bind", "0.0.0.0:8000", "-k", "uvicorn.workers.UvicornWorker", "-t", "3000", "--keep-alive", "3000", "main:app"]
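Note that since `COPY . /app` places main.py at /app/main.py and the working directory is /app, the Gunicorn module path is `main:app`. Once the image is built and running (for example, `docker build -t meaning-api .` followed by `docker run -p 8000:8000 meaning-api`; the image tag is arbitrary), a stdlib-only Python snippet along these lines can exercise the exposed endpoint. The route and payload shape come from main.py below; the host, port mapping, and sample text are assumptions:

```python
# Minimal smoke test for the containerized API.
# Assumes the container is running with port 8000 published on localhost.
import json
import urllib.request

payload = json.dumps({"text": "a bird in the hand is worth two in the bush"}).encode()
req = urllib.request.Request(
    "http://localhost:8000/get_meaning",
    data=payload,
    headers={"Content-Type": "application/json"},
    method="POST",
)
with urllib.request.urlopen(req) as resp:
    print(json.load(resp))  # e.g. {"meaning": "..."}
```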
main.py ADDED
@@ -0,0 +1,61 @@
+ from fastapi import FastAPI
+ from pydantic import BaseModel
+ from fastapi.middleware.cors import CORSMiddleware
+ import uvicorn
+ from langchain_ollama import OllamaLLM
+
+ app = FastAPI()
+
+ # Allow requests from the front-end's origin. CORSMiddleware matches
+ # allow_origins entries exactly, so "chrome-extension://*" is not treated
+ # as a wildcard; "*" below covers extension origins as well, and one
+ # registration of the middleware is sufficient.
+ app.add_middleware(
+     CORSMiddleware,
+     allow_origins=["*"],
+     allow_credentials=True,
+     allow_methods=["*"],
+     allow_headers=["*"],
+ )
+
+ # Request model: expects a JSON body with "text"
+ class MeaningRequest(BaseModel):
+     text: str
+
+ # Response model: returns the generated meaning
+ class MeaningResponse(BaseModel):
+     meaning: str
+
+ def get_meaning_from_llm(text: str) -> str:
+     """
+     Get the meaning of text using an Ollama LLM.
+     """
+     # Create a prompt for the LLM
+     prompt = f"Explain the meaning of the following text in simple terms, in no more than one or two lines: '{text}'"
+
+     # Make sure this URL is accessible and valid
+     llm = OllamaLLM(
+         model="llama3.2",
+         base_url="https://earwig-exact-slug.ngrok-free.app",
+         temperature=0.25
+     )
+     # invoke() is the supported entry point; calling the LLM object directly is deprecated
+     meaning = llm.invoke(prompt)
+     return meaning
+
+ @app.post("/get_meaning", response_model=MeaningResponse)
+ async def get_meaning(request: MeaningRequest):
+     """
+     Endpoint that receives text and returns its 'meaning' as generated by the LLM.
+     """
+     print(f"Received text: {request.text}")
+     # Extract the text from the request
+     text = request.text
+     # Generate the meaning using the LLM call
+     meaning = get_meaning_from_llm(text)
+     # Return the meaning in a JSON response
+     return MeaningResponse(meaning=meaning)
+
+ if __name__ == "__main__":
+     # Run the FastAPI app with Uvicorn
+     uvicorn.run("main:app", host="0.0.0.0", port=8000, reload=True)
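For exercising the route without starting a server, a minimal sketch using FastAPI's TestClient (which depends on the httpx package, not listed in requirements.txt) could look like this — assuming the Ollama base_url above is reachable; the sample text is arbitrary:

```python
# In-process test of the /get_meaning route (no running server needed).
# Assumes the Ollama endpoint configured in get_meaning_from_llm is reachable.
from fastapi.testclient import TestClient
from main import app

client = TestClient(app)
response = client.post("/get_meaning", json={"text": "break a leg"})
assert response.status_code == 200
print(response.json()["meaning"])
```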
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ langchain-ollama
+ fastapi
+ uvicorn
+ gunicorn