# Dockerfile
FROM python:3.12-slim

ENV DEBIAN_FRONTEND=noninteractive \
    PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1 \
    PIP_NO_CACHE_DIR=1

# OS deps (gcc for building the llama-cpp-python wheel if needed; libgomp for BLAS)
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential curl ca-certificates git \
    && rm -rf /var/lib/apt/lists/*

RUN useradd -m -u 1000 user
USER user
ENV PATH="/home/user/.local/bin:$PATH"

WORKDIR /app

COPY --chown=user ./requirements.txt requirements.txt
RUN pip install --upgrade pip && pip install --no-cache-dir --upgrade -r requirements.txt
RUN pip install llama-cpp-python --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cpu

COPY --chown=user . .

# Create the volume mount points (root is required to create and chown directories under /)
USER root
RUN mkdir -p /models /hf_cache && chown -R user: /models /hf_cache
USER user

EXPOSE 7860

ENV MODELS_DIR=/models HF_HOME=/hf_cache

CMD ["gunicorn", "-w", "1", "-b", "0.0.0.0:7860", "app:app"]
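
# Example build/run invocation (a sketch: the image tag "llm-app" and the host
# paths are placeholders, not part of the project). The port and mount points
# match EXPOSE 7860 and the /models and /hf_cache directories created above:
#   docker build -t llm-app .
#   docker run -p 7860:7860 \
#     -v "$PWD/models:/models" \
#     -v "$PWD/hf_cache:/hf_cache" \
#     llm-app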