mirror of
https://github.com/ggerganov/llama.cpp.git
synced 2024-12-26 11:24:35 +00:00
f3f65429c4
* scripts : update sync [no ci] * files : relocate [no ci] * ci : disable kompute build [no ci] * cmake : fixes [no ci] * server : fix mingw build ggml-ci * cmake : minor [no ci] * cmake : link math library [no ci] * cmake : build normal ggml library (not object library) [no ci] * cmake : fix kompute build ggml-ci * make,cmake : fix LLAMA_CUDA + replace GGML_CDEF_PRIVATE ggml-ci * move public backend headers to the public include directory (#8122) * move public backend headers to the public include directory * nix test * spm : fix metal header --------- Co-authored-by: Georgi Gerganov <ggerganov@gmail.com> * scripts : fix sync paths [no ci] * scripts : sync ggml-blas.h [no ci] --------- Co-authored-by: slaren <slarengh@gmail.com>
24 lines
582 B
Bash
Executable File
24 lines
582 B
Bash
Executable File
|
|
# MIT license

# Copyright (C) 2024 Intel Corporation

# SPDX-License-Identifier: MIT

# Build llama.cpp with the SYCL backend using the Intel oneAPI toolchain.
# Requires the oneAPI base toolkit installed at /opt/intel/oneapi.

mkdir -p build
# Guard the cd: if it fails, the cmake calls below would otherwise run in the
# source directory and pollute it.
cd build || exit 1

# Bring icx/icpx and the SYCL runtime into the environment.
source /opt/intel/oneapi/setvars.sh

# for FP16 — faster for long-prompt inference
#cmake .. -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_SYCL_F16=ON # faster for long-prompt inference

# for FP32 (default)
cmake .. -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx

# build only example/main
#cmake --build . --config Release --target main

# build only example/llama-bench
#cmake --build . --config Release --target llama-bench

# build all binaries (parallel, verbose)
cmake --build . --config Release -j -v