ARG UBUNTU_VERSION=22.04
# This needs to generally match the container host's environment.
ARG CUDA_VERSION=11.7.1
# Target the CUDA build image
ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}
# Target the CUDA runtime image
ARG BASE_CUDA_RUN_CONTAINER=nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}

FROM ${BASE_CUDA_DEV_CONTAINER} AS build

# Unless otherwise specified, we make a fat build.
ARG CUDA_DOCKER_ARCH=all
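# Overriding this at build time can shrink the image considerably. Assuming the
# Makefile forwards CUDA_DOCKER_ARCH to nvcc's -arch flag, a single-target build
# (sm_86 is an illustrative value for Ampere; the tag name is also an assumption)
# would look like:
#   docker build --build-arg CUDA_DOCKER_ARCH=sm_86 -t llama-server-cuda .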

RUN apt-get update && \
    apt-get install -y build-essential git libcurl4-openssl-dev

WORKDIR /app

COPY . .

# Set nvcc architecture
ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
# Enable CUDA
ENV GGML_CUDA=1
# Enable cURL
ENV LLAMA_CURL=1
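# LLAMA_CURL=1 links llama-server against libcurl, which in this revision is
# what enables fetching models over HTTP (e.g. via a flag like --model-url).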

RUN make -j$(nproc) llama-server
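# With WORKDIR /app, the binary built above lands at /app/llama-server, which
# is what the runtime stage copies out below.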

FROM ${BASE_CUDA_RUN_CONTAINER} AS runtime

# Runtime dependencies: libcurl for the cURL support enabled above, libgomp
# for the OpenMP runtime, and curl for the HEALTHCHECK below.
RUN apt-get update && \
    apt-get install -y libcurl4-openssl-dev libgomp1 curl

COPY --from=build /app/llama-server /llama-server

HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ]

ENTRYPOINT [ "/llama-server" ]
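
# A minimal sketch of building and running this image. The image tag, Dockerfile
# path, model path, and port are illustrative assumptions, not fixed names:
#   docker build -t llama-server-cuda -f llama-server-cuda.Dockerfile .
#   docker run --gpus all -p 8080:8080 -v /path/to/models:/models \
#     llama-server-cuda -m /models/model.gguf --host 0.0.0.0 --port 8080
# --host 0.0.0.0 makes the server reachable from outside the container; by
# default it binds to 127.0.0.1, which the published port cannot reach.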