Commit d350b44
Parent(s): 2d6aff1
Add caching functionality for model download and prediction
main.py CHANGED
@@ -12,6 +12,11 @@ from huggingface_hub import hf_hub_download
 from huggingface_hub.utils import logging
 from toolz import concat, groupby, valmap
 from starlette.responses import RedirectResponse
+from cashews import cache
+from datetime import timedelta
+
+cache.setup("mem://")
+
 
 app = FastAPI()
 logger = logging.get_logger(__name__)
@@ -205,6 +210,7 @@ def root():
 
 
 @app.get("/predict_dataset_language/{hub_id}")
+@cache(ttl=timedelta(minutes=10))
 async def predict_language(
     hub_id: str,
     config: str | None = None,
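The sketch below shows how the pieces added in this commit fit together: cashews is configured with an in-process memory backend, and the cache decorator wraps the prediction endpoint so repeated calls with the same hub_id/config within 10 minutes return the stored result instead of recomputing it. The endpoint body and result shape here are placeholders, not the real logic in main.py.

# Minimal sketch, assuming a simplified endpoint body; only the cache setup,
# the decorator, and the 10-minute TTL come from the commit itself.
from datetime import timedelta

from cashews import cache
from fastapi import FastAPI

cache.setup("mem://")  # in-process memory backend, as in the commit

app = FastAPI()


@app.get("/predict_dataset_language/{hub_id}")
@cache(ttl=timedelta(minutes=10))  # same hub_id/config within 10 min hits the cache
async def predict_language(hub_id: str, config: str | None = None):
    # Placeholder for the real work (downloading dataset files via
    # hf_hub_download and running language detection over them).
    predictions = {"en": 0.95}  # hypothetical result shape
    return {"hub_id": hub_id, "config": config, "predictions": predictions}

Note the decorator order: @cache sits closest to the function, so FastAPI registers the cached wrapper; putting @cache above @app.get would leave the route bound to the uncached function.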