rpc : backend refactoring (#9912)

* rpc : refactor backend

Use structs for RPC request/response messages

* rpc : refactor server
Author: Radoslav Gerganov, 2024-10-18 14:33:58 +03:00 (committed by GitHub)
parent 87421a23e8
commit afd9909a64

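This change replaces the ad-hoc `std::vector<uint8_t>` serialization on both the client and the server with fixed-size, packed request/response structs (`rpc_msg_*_req` / `rpc_msg_*_rsp`) that are written to the socket verbatim, so the size of every message is known at compile time and the per-command length checks go away. The sketch below illustrates the framing this implies; the struct name and the `| rpc_cmd (1 byte) | size (8 bytes) | payload |` layout are taken from the diff, while the `send_all()` helper and the numeric command value are hypothetical stand-ins for the real `send_data()` helper and `RPC_CMD_ALLOC_BUFFER` enumerator in ggml-rpc.cpp.

```cpp
// Minimal sketch (not the actual implementation): sending one fixed-size,
// packed request over a POSIX socket using the framing described above.
#include <cstdint>
#include <unistd.h>

#pragma pack(1)
struct rpc_msg_alloc_buffer_req {
    uint64_t size;   // requested buffer size
};

// Hypothetical stand-in for the send_data() helper in ggml-rpc.cpp.
static bool send_all(int fd, const void * buf, size_t len) {
    const char * p = static_cast<const char *>(buf);
    while (len > 0) {
        ssize_t n = write(fd, p, len);
        if (n <= 0) {
            return false;
        }
        p   += n;
        len -= static_cast<size_t>(n);
    }
    return true;
}

// Mirrors the shape of the new send_rpc_cmd(): the payload is just the raw
// struct bytes, so both peers agree on the message size at compile time.
static bool send_alloc_buffer_req(int fd, uint64_t size) {
    const uint8_t cmd = 0; // hypothetical value; the real code sends RPC_CMD_ALLOC_BUFFER
    rpc_msg_alloc_buffer_req req = {size};
    const uint64_t msg_size = sizeof(req);
    return send_all(fd, &cmd, sizeof(cmd)) &&
           send_all(fd, &msg_size, sizeof(msg_size)) &&
           send_all(fd, &req, sizeof(req));
}
```

On the receiving side the server can read `sizeof(rpc_msg_alloc_buffer_req)` bytes directly into the struct and reject the message if the advertised size differs, which is what the new `recv_msg()` helper in the diff does.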

@@ -58,7 +58,7 @@ struct socket_t {
 };

 // ggml_tensor is serialized into rpc_tensor
-#pragma pack(push, 1)
+#pragma pack(1)
 struct rpc_tensor {
     uint64_t id;
     uint32_t type;
@@ -76,7 +76,6 @@ struct rpc_tensor {
     char padding[4];
 };
-#pragma pack(pop)

 static_assert(sizeof(rpc_tensor) % 8 == 0, "rpc_tensor size must be multiple of 8");
@@ -96,6 +95,77 @@ enum rpc_cmd {
     RPC_CMD_COUNT,
 };

+#pragma pack(1)
+struct rpc_msg_alloc_buffer_req {
+    uint64_t size;
+};
+
+#pragma pack(1)
+struct rpc_msg_alloc_buffer_rsp {
+    uint64_t remote_ptr;
+    uint64_t remote_size;
+};
+
+#pragma pack(1)
+struct rpc_msg_get_alignment_rsp {
+    uint64_t alignment;
+};
+
+#pragma pack(1)
+struct rpc_msg_get_max_size_rsp {
+    uint64_t max_size;
+};
+
+#pragma pack(1)
+struct rpc_msg_buffer_get_base_req {
+    uint64_t remote_ptr;
+};
+
+#pragma pack(1)
+struct rpc_msg_buffer_get_base_rsp {
+    uint64_t base_ptr;
+};
+
+#pragma pack(1)
+struct rpc_msg_free_buffer_req {
+    uint64_t remote_ptr;
+};
+
+#pragma pack(1)
+struct rpc_msg_buffer_clear_req {
+    uint64_t remote_ptr;
+    uint8_t value;
+};
+
+#pragma pack(1)
+struct rpc_msg_get_tensor_req {
+    rpc_tensor tensor;
+    uint64_t offset;
+    uint64_t size;
+};
+
+#pragma pack(1)
+struct rpc_msg_copy_tensor_req {
+    rpc_tensor src;
+    rpc_tensor dst;
+};
+
+#pragma pack(1)
+struct rpc_msg_copy_tensor_rsp {
+    uint8_t result;
+};
+
+#pragma pack(1)
+struct rpc_msg_graph_compute_rsp {
+    uint8_t result;
+};
+
+#pragma pack(1)
+struct rpc_msg_get_device_memory_rsp {
+    uint64_t free_mem;
+    uint64_t total_mem;
+};
+
 // RPC data structures

 static ggml_guid_t ggml_backend_rpc_guid() {
@@ -240,6 +310,38 @@ static bool recv_data(sockfd_t sockfd, void * data, size_t size) {
     return true;
 }

+static bool send_msg(sockfd_t sockfd, const void * msg, size_t msg_size) {
+    if (!send_data(sockfd, &msg_size, sizeof(msg_size))) {
+        return false;
+    }
+    return send_data(sockfd, msg, msg_size);
+}
+
+static bool recv_msg(sockfd_t sockfd, void * msg, size_t msg_size) {
+    uint64_t size;
+    if (!recv_data(sockfd, &size, sizeof(size))) {
+        return false;
+    }
+    if (size != msg_size) {
+        return false;
+    }
+    return recv_data(sockfd, msg, msg_size);
+}
+
+static bool recv_msg(sockfd_t sockfd, std::vector<uint8_t> & input) {
+    uint64_t size;
+    if (!recv_data(sockfd, &size, sizeof(size))) {
+        return false;
+    }
+    try {
+        input.resize(size);
+    } catch (const std::bad_alloc & e) {
+        fprintf(stderr, "Failed to allocate input buffer of size %" PRIu64 "\n", size);
+        return false;
+    }
+    return recv_data(sockfd, input.data(), size);
+}
+
 static bool parse_endpoint(const std::string & endpoint, std::string & host, int & port) {
     size_t pos = endpoint.find(':');
     if (pos == std::string::npos) {
@@ -252,28 +354,27 @@ static bool parse_endpoint(const std::string & endpoint, std::string & host, int & port) {

 // RPC request : | rpc_cmd (1 byte) | request_size (8 bytes) | request_data (request_size bytes) |
 // RPC response: | response_size (8 bytes) | response_data (response_size bytes) |
-static bool send_rpc_cmd(const std::shared_ptr<socket_t> & sock, enum rpc_cmd cmd, const std::vector<uint8_t> & input, std::vector<uint8_t> & output) {
+static bool send_rpc_cmd(const std::shared_ptr<socket_t> & sock, enum rpc_cmd cmd, const void * input, size_t input_size, void * output, size_t output_size) {
     uint8_t cmd_byte = cmd;
     if (!send_data(sock->fd, &cmd_byte, sizeof(cmd_byte))) {
         return false;
     }
-    uint64_t input_size = input.size();
     if (!send_data(sock->fd, &input_size, sizeof(input_size))) {
         return false;
     }
-    if (!send_data(sock->fd, input.data(), input.size())) {
+    if (!send_data(sock->fd, input, input_size)) {
         return false;
     }
-    uint64_t output_size;
-    if (!recv_data(sock->fd, &output_size, sizeof(output_size))) {
+    // TODO: currently the output_size is always known, do we need support for commands with variable output size?
+    // even if we do, we can skip sending output_size from the server for commands with known output size
+    uint64_t out_size;
+    if (!recv_data(sock->fd, &out_size, sizeof(out_size))) {
         return false;
     }
-    if (output_size == 0) {
-        output.clear();
-        return true;
+    if (out_size != output_size) {
+        return false;
     }
-    output.resize(output_size);
-    if (!recv_data(sock->fd, output.data(), output_size)) {
+    if (!recv_data(sock->fd, output, output_size)) {
         return false;
     }
     return true;
@@ -326,14 +427,9 @@ static const char * ggml_backend_rpc_buffer_get_name(ggml_backend_buffer_t buffer) {

 static void ggml_backend_rpc_buffer_free_buffer(ggml_backend_buffer_t buffer) {
     ggml_backend_rpc_buffer_context * ctx = (ggml_backend_rpc_buffer_context *)buffer->context;
-    // input serialization format: | remote_ptr (8 bytes) |
-    std::vector<uint8_t> input(sizeof(uint64_t), 0);
-    uint64_t remote_ptr = ctx->remote_ptr;
-    memcpy(input.data(), &remote_ptr, sizeof(remote_ptr));
-    std::vector<uint8_t> output;
-    bool status = send_rpc_cmd(ctx->sock, RPC_CMD_FREE_BUFFER, input, output);
+    rpc_msg_free_buffer_req request = {ctx->remote_ptr};
+    bool status = send_rpc_cmd(ctx->sock, RPC_CMD_FREE_BUFFER, &request, sizeof(request), nullptr, 0);
     GGML_ASSERT(status);
-    GGML_ASSERT(output.empty());
     delete ctx;
 }
@@ -342,20 +438,13 @@ static void * ggml_backend_rpc_buffer_get_base(ggml_backend_buffer_t buffer) {
     if (ctx->base_cache.find(buffer) != ctx->base_cache.end()) {
         return ctx->base_cache[buffer];
     }
-    // input serialization format: | remote_ptr (8 bytes) |
-    std::vector<uint8_t> input(sizeof(uint64_t), 0);
-    uint64_t remote_ptr = ctx->remote_ptr;
-    memcpy(input.data(), &remote_ptr, sizeof(remote_ptr));
-    std::vector<uint8_t> output;
-    bool status = send_rpc_cmd(ctx->sock, RPC_CMD_BUFFER_GET_BASE, input, output);
+    rpc_msg_buffer_get_base_req request = {ctx->remote_ptr};
+    rpc_msg_buffer_get_base_rsp response;
+    bool status = send_rpc_cmd(ctx->sock, RPC_CMD_BUFFER_GET_BASE, &request, sizeof(request), &response, sizeof(response));
     GGML_ASSERT(status);
-    GGML_ASSERT(output.size() == sizeof(uint64_t));
-    // output serialization format: | base_ptr (8 bytes) |
-    uint64_t base_ptr;
-    memcpy(&base_ptr, output.data(), sizeof(base_ptr));
-    void * base = reinterpret_cast<void *>(base_ptr);
-    ctx->base_cache[buffer] = base;
-    return base;
+    void * base_ptr = reinterpret_cast<void *>(response.base_ptr);
+    ctx->base_cache[buffer] = base_ptr;
+    return base_ptr;
 }

 static rpc_tensor serialize_tensor(const ggml_tensor * tensor) {
@@ -405,26 +494,18 @@ static void ggml_backend_rpc_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
     memcpy(input.data(), &rpc_tensor, sizeof(rpc_tensor));
     memcpy(input.data() + sizeof(rpc_tensor), &offset, sizeof(offset));
     memcpy(input.data() + sizeof(rpc_tensor) + sizeof(offset), data, size);
-    std::vector<uint8_t> output;
-    bool status = send_rpc_cmd(ctx->sock, RPC_CMD_SET_TENSOR, input, output);
+    bool status = send_rpc_cmd(ctx->sock, RPC_CMD_SET_TENSOR, input.data(), input.size(), nullptr, 0);
     GGML_ASSERT(status);
 }

 static void ggml_backend_rpc_buffer_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) {
     ggml_backend_rpc_buffer_context * ctx = (ggml_backend_rpc_buffer_context *)buffer->context;
-    // input serialization format: | rpc_tensor | offset (8 bytes) | size (8 bytes) |
-    int input_size = sizeof(rpc_tensor) + 2*sizeof(uint64_t);
-    std::vector<uint8_t> input(input_size, 0);
-    rpc_tensor rpc_tensor = serialize_tensor(tensor);
-    memcpy(input.data(), &rpc_tensor, sizeof(rpc_tensor));
-    memcpy(input.data() + sizeof(rpc_tensor), &offset, sizeof(offset));
-    memcpy(input.data() + sizeof(rpc_tensor) + sizeof(offset), &size, sizeof(size));
-    std::vector<uint8_t> output;
-    bool status = send_rpc_cmd(ctx->sock, RPC_CMD_GET_TENSOR, input, output);
+    rpc_msg_get_tensor_req request;
+    request.tensor = serialize_tensor(tensor);
+    request.offset = offset;
+    request.size = size;
+    bool status = send_rpc_cmd(ctx->sock, RPC_CMD_GET_TENSOR, &request, sizeof(request), data, size);
     GGML_ASSERT(status);
-    GGML_ASSERT(output.size() == size);
-    // output serialization format: | data (size bytes) |
-    memcpy(data, output.data(), size);
 }

 static bool ggml_backend_rpc_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * src, ggml_tensor * dst) {
@@ -437,30 +518,19 @@ static bool ggml_backend_rpc_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * src, ggml_tensor * dst) {
         return false;
     }
     ggml_backend_rpc_buffer_context * ctx = (ggml_backend_rpc_buffer_context *)buffer->context;
-    // input serialization format: | rpc_tensor src | rpc_tensor dst |
-    int input_size = 2*sizeof(rpc_tensor);
-    std::vector<uint8_t> input(input_size, 0);
-    rpc_tensor rpc_src = serialize_tensor(src);
-    rpc_tensor rpc_dst = serialize_tensor(dst);
-    memcpy(input.data(), &rpc_src, sizeof(rpc_src));
-    memcpy(input.data() + sizeof(rpc_src), &rpc_dst, sizeof(rpc_dst));
-    std::vector<uint8_t> output;
-    bool status = send_rpc_cmd(ctx->sock, RPC_CMD_COPY_TENSOR, input, output);
+    rpc_msg_copy_tensor_req request;
+    request.src = serialize_tensor(src);
+    request.dst = serialize_tensor(dst);
+    rpc_msg_copy_tensor_rsp response;
+    bool status = send_rpc_cmd(ctx->sock, RPC_CMD_COPY_TENSOR, &request, sizeof(request), &response, sizeof(response));
     GGML_ASSERT(status);
-    // output serialization format: | result (1 byte) |
-    GGML_ASSERT(output.size() == 1);
-    return output[0];
+    return response.result;
 }

 static void ggml_backend_rpc_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
     ggml_backend_rpc_buffer_context * ctx = (ggml_backend_rpc_buffer_context *)buffer->context;
-    // serialization format: | bufptr (8 bytes) | value (1 byte) |
-    int input_size = sizeof(uint64_t) + sizeof(uint8_t);
-    std::vector<uint8_t> input(input_size, 0);
-    memcpy(input.data(), &ctx->remote_ptr, sizeof(ctx->remote_ptr));
-    memcpy(input.data() + sizeof(ctx->remote_ptr), &value, sizeof(value));
-    std::vector<uint8_t> output;
-    bool status = send_rpc_cmd(ctx->sock, RPC_CMD_BUFFER_CLEAR, input, output);
+    rpc_msg_buffer_clear_req request = {ctx->remote_ptr, value};
+    bool status = send_rpc_cmd(ctx->sock, RPC_CMD_BUFFER_CLEAR, &request, sizeof(request), nullptr, 0);
     GGML_ASSERT(status);
 }
@@ -484,25 +554,16 @@ static const char * ggml_backend_rpc_buffer_type_name(ggml_backend_buffer_type_t buft) {

 static ggml_backend_buffer_t ggml_backend_rpc_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
     ggml_backend_rpc_buffer_type_context * buft_ctx = (ggml_backend_rpc_buffer_type_context *)buft->context;
-    // input serialization format: | size (8 bytes) |
-    int input_size = sizeof(uint64_t);
-    std::vector<uint8_t> input(input_size, 0);
-    memcpy(input.data(), &size, sizeof(size));
-    std::vector<uint8_t> output;
+    rpc_msg_alloc_buffer_req request = {size};
+    rpc_msg_alloc_buffer_rsp response;
     auto sock = get_socket(buft_ctx->endpoint);
-    bool status = send_rpc_cmd(sock, RPC_CMD_ALLOC_BUFFER, input, output);
+    bool status = send_rpc_cmd(sock, RPC_CMD_ALLOC_BUFFER, &request, sizeof(request), &response, sizeof(response));
     GGML_ASSERT(status);
-    GGML_ASSERT(output.size() == 2*sizeof(uint64_t));
-    // output serialization format: | remote_ptr (8 bytes) | remote_size (8 bytes) |
-    uint64_t remote_ptr;
-    memcpy(&remote_ptr, output.data(), sizeof(remote_ptr));
-    size_t remote_size;
-    memcpy(&remote_size, output.data() + sizeof(uint64_t), sizeof(remote_size));
-    if (remote_ptr != 0) {
+    if (response.remote_ptr != 0) {
         ggml_backend_buffer_t buffer = ggml_backend_buffer_init(buft,
             ggml_backend_rpc_buffer_interface,
-            new ggml_backend_rpc_buffer_context{sock, {}, remote_ptr, "RPC[" + std::string(buft_ctx->endpoint) + "]"},
-            remote_size);
+            new ggml_backend_rpc_buffer_context{sock, {}, response.remote_ptr, "RPC[" + std::string(buft_ctx->endpoint) + "]"},
+            response.remote_size);
         return buffer;
     } else {
         return nullptr;
@@ -510,16 +571,10 @@ static ggml_backend_buffer_t ggml_backend_rpc_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
 }

 static size_t get_alignment(const std::shared_ptr<socket_t> & sock) {
-    // input serialization format: | 0 bytes |
-    std::vector<uint8_t> input;
-    std::vector<uint8_t> output;
-    bool status = send_rpc_cmd(sock, RPC_CMD_GET_ALIGNMENT, input, output);
+    rpc_msg_get_alignment_rsp response;
+    bool status = send_rpc_cmd(sock, RPC_CMD_GET_ALIGNMENT, nullptr, 0, &response, sizeof(response));
     GGML_ASSERT(status);
-    GGML_ASSERT(output.size() == sizeof(uint64_t));
-    // output serialization format: | alignment (8 bytes) |
-    uint64_t alignment;
-    memcpy(&alignment, output.data(), sizeof(alignment));
-    return alignment;
+    return response.alignment;
 }

 static size_t ggml_backend_rpc_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
@@ -528,16 +583,10 @@ static size_t ggml_backend_rpc_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
 }

 static size_t get_max_size(const std::shared_ptr<socket_t> & sock) {
-    // input serialization format: | 0 bytes |
-    std::vector<uint8_t> input;
-    std::vector<uint8_t> output;
-    bool status = send_rpc_cmd(sock, RPC_CMD_GET_MAX_SIZE, input, output);
+    rpc_msg_get_max_size_rsp response;
+    bool status = send_rpc_cmd(sock, RPC_CMD_GET_MAX_SIZE, nullptr, 0, &response, sizeof(response));
     GGML_ASSERT(status);
-    GGML_ASSERT(output.size() == sizeof(uint64_t));
-    // output serialization format: | max_size (8 bytes) |
-    uint64_t max_size;
-    memcpy(&max_size, output.data(), sizeof(max_size));
-    return max_size;
+    return response.max_size;
 }

 static size_t ggml_backend_rpc_get_max_size(ggml_backend_buffer_type_t buft) {
@@ -622,12 +671,11 @@ static enum ggml_status ggml_backend_rpc_graph_compute(ggml_backend_t backend, ggml_cgraph * cgraph) {
     ggml_backend_rpc_context * rpc_ctx = (ggml_backend_rpc_context *)backend->context;
     std::vector<uint8_t> input;
     serialize_graph(cgraph, input);
-    std::vector<uint8_t> output;
+    rpc_msg_graph_compute_rsp response;
     auto sock = get_socket(rpc_ctx->endpoint);
-    bool status = send_rpc_cmd(sock, RPC_CMD_GRAPH_COMPUTE, input, output);
+    bool status = send_rpc_cmd(sock, RPC_CMD_GRAPH_COMPUTE, input.data(), input.size(), &response, sizeof(response));
     GGML_ASSERT(status);
-    GGML_ASSERT(output.size() == 1);
-    return (enum ggml_status)output[0];
+    return (enum ggml_status)response.result;
 }

 static ggml_backend_i ggml_backend_rpc_interface = {
@@ -702,19 +750,11 @@ GGML_API bool ggml_backend_is_rpc(ggml_backend_t backend) {
 }

 static void get_device_memory(const std::shared_ptr<socket_t> & sock, size_t * free, size_t * total) {
-    // input serialization format: | 0 bytes |
-    std::vector<uint8_t> input;
-    std::vector<uint8_t> output;
-    bool status = send_rpc_cmd(sock, RPC_CMD_GET_DEVICE_MEMORY, input, output);
+    rpc_msg_get_device_memory_rsp response;
+    bool status = send_rpc_cmd(sock, RPC_CMD_GET_DEVICE_MEMORY, nullptr, 0, &response, sizeof(response));
     GGML_ASSERT(status);
-    GGML_ASSERT(output.size() == 2*sizeof(uint64_t));
-    // output serialization format: | free (8 bytes) | total (8 bytes) |
-    uint64_t free_mem;
-    memcpy(&free_mem, output.data(), sizeof(free_mem));
-    uint64_t total_mem;
-    memcpy(&total_mem, output.data() + sizeof(uint64_t), sizeof(total_mem));
-    *free = free_mem;
-    *total = total_mem;
+    *free = response.free_mem;
+    *total = response.total_mem;
 }

 GGML_API void ggml_backend_rpc_get_device_memory(const char * endpoint, size_t * free, size_t * total) {
@@ -734,16 +774,16 @@ public:
     rpc_server(ggml_backend_t backend) : backend(backend) {}
     ~rpc_server();

-    bool alloc_buffer(const std::vector<uint8_t> & input, std::vector<uint8_t> & output);
-    void get_alignment(std::vector<uint8_t> & output);
-    void get_max_size(std::vector<uint8_t> & output);
-    bool buffer_get_base(const std::vector<uint8_t> & input, std::vector<uint8_t> & output);
-    bool free_buffer(const std::vector<uint8_t> & input);
-    bool buffer_clear(const std::vector<uint8_t> & input);
+    void alloc_buffer(const rpc_msg_alloc_buffer_req & request, rpc_msg_alloc_buffer_rsp & response);
+    void get_alignment(rpc_msg_get_alignment_rsp & response);
+    void get_max_size(rpc_msg_get_max_size_rsp & response);
+    bool buffer_get_base(const rpc_msg_buffer_get_base_req & request, rpc_msg_buffer_get_base_rsp & response);
+    bool free_buffer(const rpc_msg_free_buffer_req & request);
+    bool buffer_clear(const rpc_msg_buffer_clear_req & request);
     bool set_tensor(const std::vector<uint8_t> & input);
-    bool get_tensor(const std::vector<uint8_t> & input, std::vector<uint8_t> & output);
-    bool copy_tensor(const std::vector<uint8_t> & input, std::vector<uint8_t> & output);
-    bool graph_compute(const std::vector<uint8_t> & input, std::vector<uint8_t> & output);
+    bool get_tensor(const rpc_msg_get_tensor_req & request, std::vector<uint8_t> & response);
+    bool copy_tensor(const rpc_msg_copy_tensor_req & request, rpc_msg_copy_tensor_rsp & response);
+    bool graph_compute(const std::vector<uint8_t> & input, rpc_msg_graph_compute_rsp & response);

 private:
     ggml_tensor * deserialize_tensor(struct ggml_context * ctx, const rpc_tensor * tensor);
@@ -757,80 +797,50 @@ private:
     std::unordered_set<ggml_backend_buffer_t> buffers;
 };

-bool rpc_server::alloc_buffer(const std::vector<uint8_t> & input, std::vector<uint8_t> & output) {
-    // input serialization format: | size (8 bytes) |
-    if (input.size() != sizeof(uint64_t)) {
-        return false;
-    }
-    uint64_t size;
-    memcpy(&size, input.data(), sizeof(size));
+void rpc_server::alloc_buffer(const rpc_msg_alloc_buffer_req & request, rpc_msg_alloc_buffer_rsp & response) {
     ggml_backend_buffer_type_t buft = ggml_backend_get_default_buffer_type(backend);
-    ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(buft, size);
-    uint64_t remote_ptr = 0;
-    uint64_t remote_size = 0;
+    ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(buft, request.size);
+    response.remote_ptr = 0;
+    response.remote_size = 0;
     if (buffer != nullptr) {
-        remote_ptr = reinterpret_cast<uint64_t>(buffer);
-        remote_size = buffer->size;
-        GGML_PRINT_DEBUG("[%s] size: %" PRIu64 " -> remote_ptr: %" PRIx64 ", remote_size: %" PRIu64 "\n", __func__, size, remote_ptr, remote_size);
+        response.remote_ptr = reinterpret_cast<uint64_t>(buffer);
+        response.remote_size = buffer->size;
+        GGML_PRINT_DEBUG("[%s] size: %" PRIu64 " -> remote_ptr: %" PRIx64 ", remote_size: %" PRIu64 "\n", __func__, request.size, response.remote_ptr, response.remote_size);
         buffers.insert(buffer);
     } else {
-        GGML_PRINT_DEBUG("[%s] size: %" PRIu64 " -> failed\n", __func__, size);
+        GGML_PRINT_DEBUG("[%s] size: %" PRIu64 " -> failed\n", __func__, request.size);
     }
-    // output serialization format: | remote_ptr (8 bytes) | remote_size (8 bytes) |
-    output.resize(2*sizeof(uint64_t), 0);
-    memcpy(output.data(), &remote_ptr, sizeof(remote_ptr));
-    memcpy(output.data() + sizeof(uint64_t), &remote_size, sizeof(remote_size));
-    return true;
 }

-void rpc_server::get_alignment(std::vector<uint8_t> & output) {
+void rpc_server::get_alignment(rpc_msg_get_alignment_rsp & response) {
     ggml_backend_buffer_type_t buft = ggml_backend_get_default_buffer_type(backend);
     size_t alignment = ggml_backend_buft_get_alignment(buft);
     GGML_PRINT_DEBUG("[%s] alignment: %lu\n", __func__, alignment);
-    // output serialization format: | alignment (8 bytes) |
-    output.resize(sizeof(uint64_t), 0);
-    memcpy(output.data(), &alignment, sizeof(alignment));
+    response.alignment = alignment;
 }

-void rpc_server::get_max_size(std::vector<uint8_t> & output) {
+void rpc_server::get_max_size(rpc_msg_get_max_size_rsp & response) {
     ggml_backend_buffer_type_t buft = ggml_backend_get_default_buffer_type(backend);
     size_t max_size = ggml_backend_buft_get_max_size(buft);
     GGML_PRINT_DEBUG("[%s] max_size: %lu\n", __func__, max_size);
-    // output serialization format: | max_size (8 bytes) |
-    output.resize(sizeof(uint64_t), 0);
-    memcpy(output.data(), &max_size, sizeof(max_size));
+    response.max_size = max_size;
 }

-bool rpc_server::buffer_get_base(const std::vector<uint8_t> & input, std::vector<uint8_t> & output) {
-    // input serialization format: | remote_ptr (8 bytes) |
-    if (input.size() != sizeof(uint64_t)) {
-        return false;
-    }
-    uint64_t remote_ptr;
-    memcpy(&remote_ptr, input.data(), sizeof(remote_ptr));
-    GGML_PRINT_DEBUG("[%s] remote_ptr: %" PRIx64 "\n", __func__, remote_ptr);
-    ggml_backend_buffer_t buffer = reinterpret_cast<ggml_backend_buffer_t>(remote_ptr);
+bool rpc_server::buffer_get_base(const rpc_msg_buffer_get_base_req & request, rpc_msg_buffer_get_base_rsp & response) {
+    GGML_PRINT_DEBUG("[%s] remote_ptr: %" PRIx64 "\n", __func__, request.remote_ptr);
+    ggml_backend_buffer_t buffer = reinterpret_cast<ggml_backend_buffer_t>(request.remote_ptr);
     if (buffers.find(buffer) == buffers.end()) {
         GGML_PRINT_DEBUG("[%s] buffer not found\n", __func__);
         return false;
     }
     void * base = ggml_backend_buffer_get_base(buffer);
-    // output serialization format: | base_ptr (8 bytes) |
-    uint64_t base_ptr = reinterpret_cast<uint64_t>(base);
-    output.resize(sizeof(uint64_t), 0);
-    memcpy(output.data(), &base_ptr, sizeof(base_ptr));
+    response.base_ptr = reinterpret_cast<uint64_t>(base);
     return true;
 }

-bool rpc_server::free_buffer(const std::vector<uint8_t> & input) {
-    // input serialization format: | remote_ptr (8 bytes) |
-    if (input.size() != sizeof(uint64_t)) {
-        return false;
-    }
-    uint64_t remote_ptr;
-    memcpy(&remote_ptr, input.data(), sizeof(remote_ptr));
-    GGML_PRINT_DEBUG("[%s] remote_ptr: %" PRIx64 "\n", __func__, remote_ptr);
-    ggml_backend_buffer_t buffer = reinterpret_cast<ggml_backend_buffer_t>(remote_ptr);
+bool rpc_server::free_buffer(const rpc_msg_free_buffer_req & request) {
+    GGML_PRINT_DEBUG("[%s] remote_ptr: %" PRIx64 "\n", __func__, request.remote_ptr);
+    ggml_backend_buffer_t buffer = reinterpret_cast<ggml_backend_buffer_t>(request.remote_ptr);
     if (buffers.find(buffer) == buffers.end()) {
         GGML_PRINT_DEBUG("[%s] buffer not found\n", __func__);
         return false;
@@ -840,22 +850,14 @@ bool rpc_server::free_buffer(const std::vector<uint8_t> & input) {
     return true;
 }

-bool rpc_server::buffer_clear(const std::vector<uint8_t> & input) {
-    // input serialization format: | remote_ptr (8 bytes) | value (1 byte) |
-    if (input.size() != sizeof(uint64_t) + sizeof(uint8_t)) {
-        return false;
-    }
-    uint64_t remote_ptr;
-    memcpy(&remote_ptr, input.data(), sizeof(remote_ptr));
-    uint8_t value;
-    memcpy(&value, input.data() + sizeof(uint64_t), sizeof(value));
-    GGML_PRINT_DEBUG("[%s] remote_ptr: %" PRIx64 ", value: %u\n", __func__, remote_ptr, value);
-    ggml_backend_buffer_t buffer = reinterpret_cast<ggml_backend_buffer_t>(remote_ptr);
+bool rpc_server::buffer_clear(const rpc_msg_buffer_clear_req & request) {
+    GGML_PRINT_DEBUG("[%s] remote_ptr: %" PRIx64 ", value: %u\n", __func__, request.remote_ptr, request.value);
+    ggml_backend_buffer_t buffer = reinterpret_cast<ggml_backend_buffer_t>(request.remote_ptr);
     if (buffers.find(buffer) == buffers.end()) {
         GGML_PRINT_DEBUG("[%s] buffer not found\n", __func__);
         return false;
     }
-    ggml_backend_buffer_clear(buffer, value);
+    ggml_backend_buffer_clear(buffer, request.value);
     return true;
 }
@@ -930,74 +932,55 @@ bool rpc_server::set_tensor(const std::vector<uint8_t> & input) {
     return true;
 }

-bool rpc_server::get_tensor(const std::vector<uint8_t> & input, std::vector<uint8_t> & output) {
-    // serialization format: | rpc_tensor | offset (8 bytes) | size (8 bytes) |
-    if (input.size() != sizeof(rpc_tensor) + 2*sizeof(uint64_t)) {
-        return false;
-    }
-    const rpc_tensor * in_tensor = (const rpc_tensor *)input.data();
-    uint64_t offset;
-    memcpy(&offset, input.data() + sizeof(rpc_tensor), sizeof(offset));
-    uint64_t size;
-    memcpy(&size, input.data() + sizeof(rpc_tensor) + sizeof(offset), sizeof(size));
-
+bool rpc_server::get_tensor(const rpc_msg_get_tensor_req & request, std::vector<uint8_t> & response) {
     struct ggml_init_params params {
         /*.mem_size =*/ ggml_tensor_overhead(),
         /*.mem_buffer =*/ NULL,
         /*.no_alloc =*/ true,
     };
     struct ggml_context * ctx = ggml_init(params);
-    ggml_tensor * tensor = deserialize_tensor(ctx, in_tensor);
+    ggml_tensor * tensor = deserialize_tensor(ctx, &request.tensor);
     if (tensor == nullptr) {
         GGML_PRINT_DEBUG("[%s] error deserializing tensor\n", __func__);
         ggml_free(ctx);
         return false;
     }
-    GGML_PRINT_DEBUG("[%s] buffer: %p, data: %p, offset: %" PRIu64 ", size: %" PRIu64 "\n", __func__, (void*)tensor->buffer, tensor->data, offset, size);
+    GGML_PRINT_DEBUG("[%s] buffer: %p, data: %p, offset: %" PRIu64 ", size: %" PRIu64 "\n", __func__, (void*)tensor->buffer, tensor->data, request.offset, request.size);

     // sanitize tensor->data
     {
         const size_t p0 = (size_t) ggml_backend_buffer_get_base(tensor->buffer);
         const size_t p1 = p0 + ggml_backend_buffer_get_size(tensor->buffer);

-        if (in_tensor->data + offset < p0 || in_tensor->data + offset >= p1 || size > (p1 - in_tensor->data - offset)) {
+        if (request.tensor.data + request.offset < p0 ||
+            request.tensor.data + request.offset >= p1 ||
+            request.size > (p1 - request.tensor.data - request.offset)) {
             GGML_ABORT("[%s] tensor->data out of bounds\n", __func__);
         }
     }

-    // output serialization format: | data (size bytes) |
-    output.resize(size, 0);
-    ggml_backend_tensor_get(tensor, output.data(), offset, size);
+    response.resize(request.size, 0);
+    ggml_backend_tensor_get(tensor, response.data(), request.offset, request.size);
     ggml_free(ctx);
     return true;
 }

-bool rpc_server::copy_tensor(const std::vector<uint8_t> & input, std::vector<uint8_t> & output) {
-    // serialization format: | rpc_tensor src | rpc_tensor dst |
-    if (input.size() != 2*sizeof(rpc_tensor)) {
-        return false;
-    }
-    const rpc_tensor * rpc_src = (const rpc_tensor *)input.data();
-    const rpc_tensor * rpc_dst = (const rpc_tensor *)(input.data() + sizeof(rpc_src));
-
+bool rpc_server::copy_tensor(const rpc_msg_copy_tensor_req & request, rpc_msg_copy_tensor_rsp & response) {
     struct ggml_init_params params {
         /*.mem_size =*/ 2*ggml_tensor_overhead(),
         /*.mem_buffer =*/ NULL,
         /*.no_alloc =*/ true,
     };
     struct ggml_context * ctx = ggml_init(params);
-    ggml_tensor * src = deserialize_tensor(ctx, rpc_src);
-    ggml_tensor * dst = deserialize_tensor(ctx, rpc_dst);
+    ggml_tensor * src = deserialize_tensor(ctx, &request.src);
+    ggml_tensor * dst = deserialize_tensor(ctx, &request.dst);
     if (src == nullptr || dst == nullptr) {
         GGML_PRINT_DEBUG("[%s] error deserializing tensors\n", __func__);
         ggml_free(ctx);
         return false;
     }
     GGML_PRINT_DEBUG("[%s] src->buffer: %p, dst->buffer: %p\n", __func__, (void*)src->buffer, (void*)dst->buffer);
-    bool result = ggml_backend_buffer_copy_tensor(src, dst);
-    // output serialization format: | result (1 byte) |
-    output.resize(1, 0);
-    output[0] = result;
+    response.result = ggml_backend_buffer_copy_tensor(src, dst);
     ggml_free(ctx);
     return true;
 }
@@ -1026,7 +1009,7 @@ ggml_tensor * rpc_server::create_node(uint64_t id,
     return result;
 }

-bool rpc_server::graph_compute(const std::vector<uint8_t> & input, std::vector<uint8_t> & output) {
+bool rpc_server::graph_compute(const std::vector<uint8_t> & input, rpc_msg_graph_compute_rsp & response) {
     // serialization format:
     // | n_nodes (4 bytes) | nodes (n_nodes * sizeof(uint64_t) | n_tensors (4 bytes) | tensors (n_tensors * sizeof(rpc_tensor)) |
     if (input.size() < sizeof(uint32_t)) {
@@ -1066,9 +1049,7 @@ bool rpc_server::graph_compute(const std::vector<uint8_t> & input, std::vector<uint8_t> & output) {
         graph->nodes[i] = create_node(id, ctx, tensor_ptrs, tensor_map);
     }
     ggml_status status = ggml_backend_graph_compute(backend, graph);
-    // output serialization format: | status (1 byte) |
-    output.resize(1, 0);
-    output[0] = status;
+    response.result = status;
     ggml_free(ctx);
     return true;
 }
@@ -1091,85 +1072,153 @@ static void rpc_serve_client(ggml_backend_t backend, sockfd_t sockfd, size_t free_mem, size_t total_mem) {
             fprintf(stderr, "Unknown command: %d\n", cmd);
             break;
         }
-        std::vector<uint8_t> input;
-        std::vector<uint8_t> output;
-        uint64_t input_size;
-        if (!recv_data(sockfd, &input_size, sizeof(input_size))) {
-            break;
-        }
-        try {
-            input.resize(input_size);
-        } catch (const std::bad_alloc & e) {
-            fprintf(stderr, "Failed to allocate input buffer of size %" PRIu64 "\n", input_size);
-            break;
-        }
-        if (!recv_data(sockfd, input.data(), input_size)) {
-            break;
-        }
-        bool ok = true;
         switch (cmd) {
             case RPC_CMD_ALLOC_BUFFER: {
-                ok = server.alloc_buffer(input, output);
+                rpc_msg_alloc_buffer_req request;
+                if (!recv_msg(sockfd, &request, sizeof(request))) {
+                    return;
+                }
+                rpc_msg_alloc_buffer_rsp response;
+                server.alloc_buffer(request, response);
+                if (!send_msg(sockfd, &response, sizeof(response))) {
+                    return;
+                }
                 break;
             }
             case RPC_CMD_GET_ALIGNMENT: {
-                server.get_alignment(output);
+                if (!recv_msg(sockfd, nullptr, 0)) {
+                    return;
+                }
+                rpc_msg_get_alignment_rsp response;
+                server.get_alignment(response);
+                if (!send_msg(sockfd, &response, sizeof(response))) {
+                    return;
+                }
                 break;
             }
             case RPC_CMD_GET_MAX_SIZE: {
-                server.get_max_size(output);
+                if (!recv_msg(sockfd, nullptr, 0)) {
+                    return;
+                }
+                rpc_msg_get_max_size_rsp response;
+                server.get_max_size(response);
+                if (!send_msg(sockfd, &response, sizeof(response))) {
+                    return;
+                }
                 break;
             }
             case RPC_CMD_BUFFER_GET_BASE: {
-                ok = server.buffer_get_base(input, output);
+                rpc_msg_buffer_get_base_req request;
+                if (!recv_msg(sockfd, &request, sizeof(request))) {
+                    return;
+                }
+                rpc_msg_buffer_get_base_rsp response;
+                if (!server.buffer_get_base(request, response)) {
+                    return;
+                }
+                if (!send_msg(sockfd, &response, sizeof(response))) {
+                    return;
+                }
                 break;
             }
             case RPC_CMD_FREE_BUFFER: {
-                ok = server.free_buffer(input);
+                rpc_msg_free_buffer_req request;
+                if (!recv_msg(sockfd, &request, sizeof(request))) {
+                    return;
+                }
+                if (!server.free_buffer(request)) {
+                    return;
+                }
+                if (!send_msg(sockfd, nullptr, 0)) {
+                    return;
+                }
                 break;
             }
             case RPC_CMD_BUFFER_CLEAR: {
-                ok = server.buffer_clear(input);
+                rpc_msg_buffer_clear_req request;
+                if (!recv_msg(sockfd, &request, sizeof(request))) {
+                    return;
+                }
+                if (!server.buffer_clear(request)) {
+                    return;
+                }
+                if (!send_msg(sockfd, nullptr, 0)) {
+                    return;
+                }
                 break;
             }
             case RPC_CMD_SET_TENSOR: {
-                ok = server.set_tensor(input);
+                std::vector<uint8_t> input;
+                if (!recv_msg(sockfd, input)) {
+                    return;
+                }
+                if (!server.set_tensor(input)) {
+                    return;
+                }
+                if (!send_msg(sockfd, nullptr, 0)) {
+                    return;
+                }
                 break;
             }
             case RPC_CMD_GET_TENSOR: {
-                ok = server.get_tensor(input, output);
+                rpc_msg_get_tensor_req request;
+                if (!recv_msg(sockfd, &request, sizeof(request))) {
+                    return;
+                }
+                std::vector<uint8_t> response;
+                if (!server.get_tensor(request, response)) {
+                    return;
+                }
+                if (!send_msg(sockfd, response.data(), response.size())) {
+                    return;
+                }
                 break;
             }
             case RPC_CMD_COPY_TENSOR: {
-                ok = server.copy_tensor(input, output);
+                rpc_msg_copy_tensor_req request;
+                if (!recv_msg(sockfd, &request, sizeof(request))) {
+                    return;
+                }
+                rpc_msg_copy_tensor_rsp response;
+                if (!server.copy_tensor(request, response)) {
+                    return;
+                }
+                if (!send_msg(sockfd, &response, sizeof(response))) {
+                    return;
+                }
                 break;
             }
             case RPC_CMD_GRAPH_COMPUTE: {
-                ok = server.graph_compute(input, output);
+                std::vector<uint8_t> input;
+                if (!recv_msg(sockfd, input)) {
+                    return;
+                }
+                rpc_msg_graph_compute_rsp response;
+                if (!server.graph_compute(input, response)) {
+                    return;
+                }
+                if (!send_msg(sockfd, &response, sizeof(response))) {
+                    return;
+                }
                 break;
             }
             case RPC_CMD_GET_DEVICE_MEMORY: {
-                // output serialization format: | free (8 bytes) | total (8 bytes) |
-                output.resize(2*sizeof(uint64_t), 0);
-                memcpy(output.data(), &free_mem, sizeof(free_mem));
-                memcpy(output.data() + sizeof(uint64_t), &total_mem, sizeof(total_mem));
+                if (!recv_msg(sockfd, nullptr, 0)) {
+                    return;
+                }
+                rpc_msg_get_device_memory_rsp response;
+                response.free_mem = free_mem;
+                response.total_mem = total_mem;
+                if (!send_msg(sockfd, &response, sizeof(response))) {
+                    return;
+                }
                 break;
             }
             default: {
                 fprintf(stderr, "Unknown command: %d\n", cmd);
-                ok = false;
+                return;
             }
         }
-        if (!ok) {
-            break;
-        }
-        uint64_t output_size = output.size();
-        if (!send_data(sockfd, &output_size, sizeof(output_size))) {
-            break;
-        }
-        if (!send_data(sockfd, output.data(), output_size)) {
-            break;
-        }
     }
 }