Spaces: Runtime error

Update Dockerfile

Dockerfile (CHANGED, +102 -13)
@@ -1,24 +1,113 @@
-ARG UBUNTU_VERSION=22.04
-
-FROM ubuntu:$UBUNTU_VERSION as build
-
-RUN apt-get update && \
-    apt-get install -y build-essential git
-
-RUN git clone https://github.com/ggerganov/llama.cpp
-
-WORKDIR /llama.cpp
-
-RUN make
-
-COPY . .
-
-# FROM ubuntu:$UBUNTU_VERSION as runtime
-
-# COPY --from=build /llama.cpp/main /main
-
-ENV LC_ALL=C.utf8
-
-ENTRYPOINT ["/main", "-m", "./models/ggml-model-q4_1.gguf", "-p", "### Instruction:\nତୁମେ କିଏ?\n\n### Response:\n"]
-
-# ./main -ngl 16 -m /content/openllama_odia_3b_base/ggml-model-q5_1.gguf -n 512 -p "### Instruction:\nତୁମେ କିଏ?\n\n### Response:\n"
+ARG CUDA_IMAGE="12.1.1-devel-ubuntu22.04"
+FROM nvidia/cuda:${CUDA_IMAGE}
+
+# We need to set the host to 0.0.0.0 to allow outside access
+ENV HOST 0.0.0.0
+
+RUN apt-get update && apt-get upgrade -y \
+    && apt-get install -y git build-essential \
+    python3 python3-pip gcc wget \
+    ocl-icd-opencl-dev opencl-headers clinfo \
+    libclblast-dev libopenblas-dev \
+    && mkdir -p /etc/OpenCL/vendors && echo "libnvidia-opencl.so.1" > /etc/OpenCL/vendors/nvidia.icd
+
+COPY . .
+
+# setting build related env vars
+ENV CUDA_DOCKER_ARCH=all
+ENV LLAMA_CUBLAS=1
+
+# Install dependencies
+RUN python3 -m pip install --upgrade pip pytest cmake scikit-build setuptools fastapi uvicorn sse-starlette pydantic-settings gradio huggingface_hub hf_transfer
+
+# Install llama-cpp-python (build with cuda)
+# RUN CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 pip install llama-cpp-python
+RUN FORCE_CMAKE=1 pip install llama-cpp-python
+
+RUN useradd -m -u 1000 user
+# Switch to the "user" user
+USER user
+# Set home to the user's home directory
+ENV HOME=/home/user \
+    PATH=/home/user/.local/bin:$PATH \
+    PYTHONPATH=$HOME/app \
+    PYTHONUNBUFFERED=1 \
+    GRADIO_ALLOW_FLAGGING=never \
+    GRADIO_NUM_PORTS=1 \
+    GRADIO_SERVER_NAME=0.0.0.0 \
+    GRADIO_THEME=huggingface \
+    SYSTEM=spaces
+
+WORKDIR $HOME/app
+
+# Copy the current directory contents into the container at $HOME/app setting the owner to the user
+COPY --chown=user . $HOME/app
+
+CMD ["python3", "app.py"]
+
+# ARG UBUNTU_VERSION=22.04
+
+# FROM ubuntu:$UBUNTU_VERSION as build
+
+# RUN apt-get update && \
+#     apt-get install -y build-essential git
+
+# RUN git clone https://github.com/ggerganov/llama.cpp
+
+# WORKDIR /llama.cpp
+
+# RUN make
+
+# COPY . .
+
+# # FROM ubuntu:$UBUNTU_VERSION as runtime
+
+# # COPY --from=build /llama.cpp/main /main
+
+# ENV LC_ALL=C.utf8
+
+# ENTRYPOINT ["/main", "-m", "./models/ggml-model-q4_1.gguf", "-p", "### Instruction:\nତୁମେ କିଏ?\n\n### Response:\n"]
+
+# # ./main -ngl 16 -m /content/openllama_odia_3b_base/ggml-model-q5_1.gguf -n 512 -p "### Instruction:\nତୁମେ କିଏ?\n\n### Response:\n"