diff --git a/Makefile b/Makefile
index a4cab1bb2..6f8de62ca 100644
--- a/Makefile
+++ b/Makefile
@@ -106,7 +106,7 @@ ifeq ($(UNAME_S),Darwin)
 endif
 
 ifdef LLAMA_RPC
-	BUILD_TARGETS += rpc-server
+	BUILD_TARGETS += llama-rpc-server
 endif
 
 default: $(BUILD_TARGETS)
@@ -699,7 +699,7 @@ ggml-rpc.o: ggml-rpc.cpp ggml-rpc.h
 rpc-server.o: examples/rpc/rpc-server.cpp ggml-rpc.h
 	$(CXX) $(CXXFLAGS) -c $< -o $@
 
-rpc-server: rpc-server.o ggml.o llama.o $(COMMON_DEPS) $(OBJS)
+llama-rpc-server: rpc-server.o ggml.o llama.o $(COMMON_DEPS) $(OBJS)
 	$(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)
 endif # LLAMA_RPC
 
diff --git a/examples/rpc/CMakeLists.txt b/examples/rpc/CMakeLists.txt
index ae48fb98d..68fb5c4e7 100644
--- a/examples/rpc/CMakeLists.txt
+++ b/examples/rpc/CMakeLists.txt
@@ -1,2 +1,3 @@
-add_executable(rpc-server rpc-server.cpp)
-target_link_libraries(rpc-server PRIVATE ggml llama)
+set(TARGET llama-rpc-server)
+add_executable(${TARGET} rpc-server.cpp)
+target_link_libraries(${TARGET} PRIVATE ggml llama)
diff --git a/examples/rpc/README.md b/examples/rpc/README.md
index 86544e3fe..61bfd5847 100644
--- a/examples/rpc/README.md
+++ b/examples/rpc/README.md
@@ -1,7 +1,7 @@
 ## Overview
 
-The `rpc-server` allows running `ggml` backend on a remote host.
-The RPC backend communicates with one or several instances of `rpc-server` and offloads computations to them.
+`llama-rpc-server` allows running `ggml` backend on a remote host.
+The RPC backend communicates with one or several instances of `llama-rpc-server` and offloads computations to them.
 This can be used for distributed LLM inference with `llama.cpp` in the following way:
 
 ```mermaid
@@ -10,13 +10,13 @@ flowchart TD
     rpcb---|TCP|srvb
     rpcb-.-|TCP|srvn
     subgraph hostn[Host N]
-    srvn[rpc-server]-.-backend3["Backend (CUDA,Metal,etc.)"]
+    srvn[llama-rpc-server]-.-backend3["Backend (CUDA,Metal,etc.)"]
     end
     subgraph hostb[Host B]
-    srvb[rpc-server]---backend2["Backend (CUDA,Metal,etc.)"]
+    srvb[llama-rpc-server]---backend2["Backend (CUDA,Metal,etc.)"]
     end
     subgraph hosta[Host A]
-    srva[rpc-server]---backend["Backend (CUDA,Metal,etc.)"]
+    srva[llama-rpc-server]---backend["Backend (CUDA,Metal,etc.)"]
     end
     subgraph host[Main Host]
     ggml[llama.cpp]---rpcb[RPC backend]
@@ -25,7 +25,7 @@ flowchart TD
 ```
 
 Each host can run a different backend, e.g. one with CUDA and another with Metal.
-You can also run multiple `rpc-server` instances on the same host, each with a different backend.
+You can also run multiple `llama-rpc-server` instances on the same host, each with a different backend.
 
 ## Usage
 
@@ -33,16 +33,14 @@ On each host, build the corresponding backend with `cmake` and add `-DLLAMA_RPC=
 For example, to build the CUDA backend with RPC support:
 
 ```bash
-mkdir build-rpc-cuda
-cd build-rpc-cuda
-cmake .. -DLLAMA_CUDA=ON -DLLAMA_RPC=ON
-cmake --build . --config Release
+cmake -B build-rpc-cuda -DLLAMA_CUDA=ON -DLLAMA_RPC=ON
+cmake --build build-rpc-cuda --config Release
 ```
 
-Then, start the `rpc-server` with the backend:
+Then, start `llama-rpc-server` with the backend:
 
 ```bash
-$ bin/rpc-server -p 50052
+$ bin/llama-rpc-server -p 50052
 create_backend: using CUDA backend
 ggml_cuda_init: GGML_CUDA_FORCE_MMQ:   no
 ggml_cuda_init: CUDA_USE_TENSOR_CORES: yes
 ggml_cuda_init: found 1 CUDA devices:
   Device 0: NVIDIA T1200 Laptop GPU, compute capability 7.5, VMM: yes
@@ -53,21 +51,19 @@ Starting RPC server on 0.0.0.0:50052
 
 When using the CUDA backend, you can specify the device with the `CUDA_VISIBLE_DEVICES` environment variable, e.g.:
 ```bash
-$ CUDA_VISIBLE_DEVICES=0 bin/rpc-server -p 50052
+$ CUDA_VISIBLE_DEVICES=0 bin/llama-rpc-server -p 50052
 ```
-This way you can run multiple `rpc-server` instances on the same host, each with a different CUDA device.
+This way you can run multiple `llama-rpc-server` instances on the same host, each with a different CUDA device.
 
 On the main host build `llama.cpp` only with `-DLLAMA_RPC=ON`:
 
 ```bash
-mkdir build-rpc
-cd build-rpc
-cmake .. -DLLAMA_RPC=ON
-cmake --build . --config Release
+cmake -B build-rpc -DLLAMA_RPC=ON
+cmake --build build-rpc --config Release -t -j
 ```
 
-Finally, use the `--rpc` option to specify the host and port of each `rpc-server`:
+Finally, use the `--rpc` option to specify the host and port of each `llama-rpc-server`:
 
 ```bash
 $ bin/llama-cli -m ../models/tinyllama-1b/ggml-model-f16.gguf -p "Hello, my name is" --repeat-penalty 1.0 -n 64 --rpc 192.168.88.10:50052,192.168.88.11:50052 -ngl 99