main/server: fix targets

Olivier Chafik 2024-06-06 15:53:25 +01:00
parent 8b7c734473
commit 9a03341094
9 changed files with 10 additions and 12 deletions


@@ -23,7 +23,7 @@ RUN if [ "${LLAMA_SYCL_F16}" = "ON" ]; then \
 export OPT_SYCL_F16="-DLLAMA_SYCL_F16=ON"; \
 fi && \
 cmake -B build -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx ${OPT_SYCL_F16} && \
-cmake --build build --config Release --target main
+cmake --build build --config Release --target llama
 FROM intel/oneapi-basekit:$ONEAPI_VERSION as runtime
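
The binary that this image builds is now produced by the `llama` target instead of `main`. Outside Docker, the rename can be sanity-checked with the flags from the RUN step above (a sketch; assumes a oneAPI/SYCL toolchain on PATH and llama.cpp's usual `build/bin/` output directory):

```bash
# Same configure flags as the Dockerfile's RUN step (F16 variant omitted)
cmake -B build -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx

# `--target main` no longer exists; the target is now `llama`
cmake --build build --config Release --target llama
ls build/bin/llama
```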


@@ -15,7 +15,7 @@ RUN wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key
 WORKDIR /app
 COPY . .
 RUN cmake -B build -DLLAMA_VULKAN=1 && \
-cmake --build build --config Release --target main
+cmake --build build --config Release --target llama
 # Clean up
 WORKDIR /


@@ -23,7 +23,7 @@ RUN if [ "${LLAMA_SYCL_F16}" = "ON" ]; then \
 export OPT_SYCL_F16="-DLLAMA_SYCL_F16=ON"; \
 fi && \
 cmake -B build -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_CURL=ON ${OPT_SYCL_F16} && \
-cmake --build build --config Release --target server
+cmake --build build --config Release --target llama-server
 FROM intel/oneapi-basekit:$ONEAPI_VERSION as runtime


@@ -19,7 +19,7 @@ RUN apt-get update && \
 WORKDIR /app
 COPY . .
 RUN cmake -B build -DLLAMA_VULKAN=1 -DLLAMA_CURL=1 && \
-cmake --build build --config Release --target server
+cmake --build build --config Release --target llama-server
 # Clean up
 WORKDIR /
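
The server images get the same treatment: `--target server` becomes `--target llama-server`. A rough local equivalent of this Vulkan build (a sketch; assumes the Vulkan SDK and libcurl development headers are installed, and the default `build/bin/` output path):

```bash
# Mirror the RUN step from the Vulkan server image
cmake -B build -DLLAMA_VULKAN=1 -DLLAMA_CURL=1
cmake --build build --config Release --target llama-server

# The binary name now matches the target name
./build/bin/llama-server --help
```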


@@ -119,7 +119,7 @@ jobs:
 -DLLAMA_FATAL_WARNINGS=OFF \
 -DLLAMA_ALL_WARNINGS=OFF \
 -DCMAKE_BUILD_TYPE=Release;
-cmake --build build --config Release -j $(nproc) --target server
+cmake --build build --config Release -j $(nproc) --target llama-server
 - name: Download the dataset
 id: download_dataset


@@ -98,7 +98,7 @@ jobs:
 -DLLAMA_CURL=ON \
 -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} \
 -DLLAMA_SANITIZE_${{ matrix.sanitizer }}=ON ;
-cmake --build build --config ${{ matrix.build_type }} -j $(nproc) --target server
+cmake --build build --config ${{ matrix.build_type }} -j $(nproc) --target llama-server
 - name: Tests
 id: server_integration_tests
@@ -138,7 +138,7 @@ jobs:
 id: cmake_build
 run: |
 cmake -B build -DLLAMA_CURL=ON -DCURL_LIBRARY="$env:RUNNER_TEMP/libcurl/lib/libcurl.dll.a" -DCURL_INCLUDE_DIR="$env:RUNNER_TEMP/libcurl/include"
-cmake --build build --config Release -j ${env:NUMBER_OF_PROCESSORS} --target server
+cmake --build build --config Release -j ${env:NUMBER_OF_PROCESSORS} --target llama-server
 - name: Python setup
 id: setup_python
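
Both CI workflows only swap the target name in their build steps; the rest of the build matrix is untouched. To reproduce the Linux server job's build step locally (a sketch; the matrix values are pinned here purely for illustration):

```bash
# Roughly the cmake_build step with build_type=Debug and sanitizer=ADDRESS assumed
cmake -B build \
  -DLLAMA_CURL=ON \
  -DCMAKE_BUILD_TYPE=Debug \
  -DLLAMA_SANITIZE_ADDRESS=ON
cmake --build build --config Debug -j "$(nproc)" --target llama-server
```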


@@ -1,5 +1,4 @@
-set(TARGET main)
-set_target_properties(${TARGET} PROPERTIES OUTPUT_NAME llama)
+set(TARGET llama)
 add_executable(${TARGET} main.cpp)
 install(TARGETS ${TARGET} RUNTIME)
 target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
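
With the target itself renamed, the separate OUTPUT_NAME override becomes redundant: target name and produced binary are now both `llama`. A quick local check (a sketch; `build/bin/` is the assumed output directory and the model path is a placeholder):

```bash
# Configure and build only the renamed CLI target
cmake -B build
cmake --build build --config Release --target llama

# Run it; models/model.gguf is a placeholder, -m/-p are the usual CLI options
./build/bin/llama -m models/model.gguf -p "Hello"
```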


@@ -1,4 +1,4 @@
-set(TARGET server)
+set(TARGET llama-server)
 option(LLAMA_SERVER_VERBOSE "Build verbose logging option for Server" ON)
 option(LLAMA_SERVER_SSL "Build SSL support for the server" OFF)
 include_directories(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
@@ -34,7 +34,6 @@ foreach(asset ${PUBLIC_ASSETS})
 COMMAND "${CMAKE_COMMAND}" "-DINPUT=${input}" "-DOUTPUT=${output}" -P "${PROJECT_SOURCE_DIR}/scripts/xxd.cmake"
 )
 endforeach()
-set_target_properties(${TARGET} PROPERTIES OUTPUT_NAME llama-server)
 add_executable(${TARGET} ${TARGET_SRCS})
 install(TARGETS ${TARGET} RUNTIME)
 target_compile_definitions(${TARGET} PRIVATE
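
Same idea for the server example: once the target is `llama-server`, the explicit OUTPUT_NAME line adds nothing. Anything still driving the build with the old target name will now fail, which is easy to verify (a sketch; assumes a curl-enabled configure as in the CI jobs above):

```bash
cmake -B build -DLLAMA_CURL=ON

# Old target name: the build tool reports an unknown target
cmake --build build --target server || echo "target 'server' no longer exists"

# New target name builds the executable, now also named llama-server
cmake --build build --target llama-server
```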


@@ -30,7 +30,7 @@ cd ../../..
 mkdir build
 cd build
 cmake -DLLAMA_CURL=ON ../
-cmake --build . --target server
+cmake --build . --target llama-server
 ```
 2. Start the test: `./tests.sh`
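
Putting the updated README steps together, the end-to-end flow starting from `examples/server/tests` looks roughly like this (a sketch that simply chains the commands shown above):

```bash
# Build the server with curl support from the repository root
cd ../../..
mkdir build
cd build
cmake -DLLAMA_CURL=ON ../
cmake --build . --target llama-server

# Return to the test directory and run the suite
cd ../examples/server/tests
./tests.sh
```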