Mirror of https://github.com/ggerganov/llama.cpp.git, synced 2024-12-30 13:24:35 +00:00
Commit 6b91b1e0a9:
* add vulkan dockerfile
* intel dockerfile: compile sycl by default
* fix vulkan dockerfile
* add docs for vulkan
* docs: sycl build in docker
* docs: remove trailing spaces
* docs: sycl: add docker section
* docs: clarify install vulkan SDK outside docker
* sycl: use intel/oneapi-basekit docker image
* docs: correct TOC
* docs: correct docker image for Intel oneMKL
Docker · 29 lines · 690 B
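# llama.cpp server image built with Intel oneAPI (SYCL), in two stages:
# "build" compiles the server target, "runtime" ships only the binary.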
ARG ONEAPI_VERSION=2024.0.1-devel-ubuntu22.04

FROM intel/oneapi-basekit:$ONEAPI_VERSION as build
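# Pass --build-arg LLAMA_SYCL_F16=ON to compile the SYCL kernels with FP16 support.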
ARG LLAMA_SYCL_F16=OFF

RUN apt-get update && \
    apt-get install -y git
WORKDIR /app

COPY . .
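# Configure with the oneAPI icx/icpx compilers and build only the server target.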
RUN mkdir build && \
    cd build && \
    if [ "${LLAMA_SYCL_F16}" = "ON" ]; then \
        echo "LLAMA_SYCL_F16 is set" && \
        export OPT_SYCL_F16="-DLLAMA_SYCL_F16=ON"; \
    fi && \
    cmake .. -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx ${OPT_SYCL_F16} && \
    cmake --build . --config Release --target server
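# Runtime stage: reuse the oneAPI base image so the SYCL runtime libraries
# needed by the server binary are available.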
FROM intel/oneapi-basekit:$ONEAPI_VERSION as runtime

COPY --from=build /app/build/bin/server /server
ENV LC_ALL=C.utf8

ENTRYPOINT [ "/server" ]
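A minimal usage sketch, assuming this Dockerfile is saved as
.devops/server-intel.Dockerfile in a llama.cpp checkout; the image tag and
model path are placeholders:

    docker build -t llama-cpp-sycl-server \
        --build-arg LLAMA_SYCL_F16=ON \
        -f .devops/server-intel.Dockerfile .

    docker run -it --rm --device /dev/dri \
        -v /path/to/models:/models -p 8080:8080 \
        llama-cpp-sycl-server -m /models/model.gguf --host 0.0.0.0 --port 8080

The --device /dev/dri flag exposes the Intel GPU to the container; everything
after the image name is forwarded to the /server entrypoint.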