Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2025-01-12 03:31:46 +00:00)
rpc : code cleanup (#11107)

Remove duplicated macros, use GGML_LOG_ERROR for errors

parent c0d6f790d0
commit a4dd490069
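Note: the duplicated macros removed in the first hunk below already have shared definitions in ggml's common headers (ggml.h / ggml-impl.h), which this file now relies on instead. A minimal, compilable sketch of what those shared definitions look like — the macro bodies here are assumptions for illustration, not verbatim copies of the headers:

#include <cstdio>

// Assumed shape of the shared ggml macros this cleanup defers to.
#define GGML_UNUSED(x) (void)(x)            // silence unused-variable warnings

#define GGML_DEBUG 0
#if (GGML_DEBUG >= 1)
#define GGML_PRINT_DEBUG(...) printf(__VA_ARGS__)
#else
#define GGML_PRINT_DEBUG(...)               // compiled out in normal builds
#endif

// Stand-in for ggml's logging hook so this sketch compiles on its own.
#define GGML_LOG_ERROR(...) fprintf(stderr, __VA_ARGS__)

int main() {
    int unused_value = 0;
    GGML_UNUSED(unused_value);
    GGML_PRINT_DEBUG("debug: only emitted with GGML_DEBUG >= 1\n");
    GGML_LOG_ERROR("error: always emitted\n");
    return 0;
}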
@@ -27,15 +27,6 @@
 #endif
 #include <cstring>
 
-#define UNUSED GGML_UNUSED
-
-#define GGML_DEBUG 0
-#if (GGML_DEBUG >= 1)
-#define GGML_PRINT_DEBUG(...) printf(__VA_ARGS__)
-#else
-#define GGML_PRINT_DEBUG(...)
-#endif
-
 #ifdef _WIN32
 typedef SOCKET sockfd_t;
 using ssize_t = __int64;
@@ -411,7 +402,7 @@ static std::shared_ptr<socket_t> get_socket(const std::string & endpoint) {
         initialized = true;
     }
 #else
-    UNUSED(initialized);
+    GGML_UNUSED(initialized);
 #endif
     auto sock = socket_connect(host.c_str(), port);
     if (sock == nullptr) {
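This hunk is representative of every UNUSED -> GGML_UNUSED change below. On non-Windows builds the Winsock init path above it is compiled out, so `initialized` would otherwise trigger an unused-variable warning; the macro references the name without generating code. A small self-contained illustration of the pattern (helper names are hypothetical):

#include <cstdio>

#define GGML_UNUSED(x) (void)(x)   // assumed body, as sketched earlier

// Hypothetical connect helper mirroring get_socket's structure:
// the one-time init only exists on Windows builds.
static bool demo_connect() {
    static bool initialized = false;
#ifdef _WIN32
    if (!initialized) {
        // WSAStartup(...) would go here
        initialized = true;
    }
#else
    GGML_UNUSED(initialized);      // this branch never reads it; keep -Wall quiet
#endif
    printf("connecting...\n");
    return true;
}

int main() { return demo_connect() ? 0 : 1; }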
@@ -640,7 +631,7 @@ static void ggml_backend_rpc_free(ggml_backend_t backend) {
 }
 
 static void ggml_backend_rpc_synchronize(ggml_backend_t backend) {
-    UNUSED(backend);
+    GGML_UNUSED(backend);
     // this is no-op because we don't have any async operations
 }
 
@@ -850,7 +841,7 @@ void rpc_server::alloc_buffer(const rpc_msg_alloc_buffer_req & request, rpc_msg_
         GGML_PRINT_DEBUG("[%s] size: %" PRIu64 " -> remote_ptr: %" PRIx64 ", remote_size: %" PRIu64 "\n", __func__, request.size, response.remote_ptr, response.remote_size);
         buffers.insert(buffer);
     } else {
-        GGML_PRINT_DEBUG("[%s] size: %" PRIu64 " -> failed\n", __func__, request.size);
+        GGML_LOG_ERROR("[%s] size: %" PRIu64 " -> failed\n", __func__, request.size);
     }
 }
 
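The substantive change in this hunk and the ones that follow: failure branches used GGML_PRINT_DEBUG, which expands to nothing unless the backend is built with GGML_DEBUG >= 1, so errors such as a failed allocation were silent in normal builds. GGML_LOG_ERROR is always compiled in. A compact demo of the difference, reusing the assumed macro bodies from the sketch at the top of this page:

#include <cstdio>
#include <cstdint>
#include <cinttypes>

#define GGML_DEBUG 0                         // typical release configuration
#if (GGML_DEBUG >= 1)
#define GGML_PRINT_DEBUG(...) printf(__VA_ARGS__)
#else
#define GGML_PRINT_DEBUG(...)                // success-path traces vanish
#endif
#define GGML_LOG_ERROR(...) fprintf(stderr, __VA_ARGS__)  // stand-in logger

// Models alloc_buffer's two branches: with GGML_DEBUG == 0 the success
// trace disappears, but the failure message still reaches stderr.
static void demo_alloc(uint64_t size, bool ok) {
    if (ok) {
        GGML_PRINT_DEBUG("[%s] size: %" PRIu64 " -> ok\n", __func__, size);
    } else {
        GGML_LOG_ERROR("[%s] size: %" PRIu64 " -> failed\n", __func__, size);
    }
}

int main() {
    demo_alloc(1024, true);   // silent in this configuration
    demo_alloc(1024, false);  // always prints the error
    return 0;
}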
@@ -872,7 +863,7 @@ bool rpc_server::buffer_get_base(const rpc_msg_buffer_get_base_req & request, rp
     GGML_PRINT_DEBUG("[%s] remote_ptr: %" PRIx64 "\n", __func__, request.remote_ptr);
     ggml_backend_buffer_t buffer = reinterpret_cast<ggml_backend_buffer_t>(request.remote_ptr);
     if (buffers.find(buffer) == buffers.end()) {
-        GGML_PRINT_DEBUG("[%s] buffer not found\n", __func__);
+        GGML_LOG_ERROR("[%s] buffer not found\n", __func__);
         return false;
     }
     void * base = ggml_backend_buffer_get_base(buffer);
@@ -884,7 +875,7 @@ bool rpc_server::free_buffer(const rpc_msg_free_buffer_req & request) {
     GGML_PRINT_DEBUG("[%s] remote_ptr: %" PRIx64 "\n", __func__, request.remote_ptr);
     ggml_backend_buffer_t buffer = reinterpret_cast<ggml_backend_buffer_t>(request.remote_ptr);
     if (buffers.find(buffer) == buffers.end()) {
-        GGML_PRINT_DEBUG("[%s] buffer not found\n", __func__);
+        GGML_LOG_ERROR("[%s] buffer not found\n", __func__);
         return false;
     }
     ggml_backend_buffer_free(buffer);
@@ -896,7 +887,7 @@ bool rpc_server::buffer_clear(const rpc_msg_buffer_clear_req & request) {
     GGML_PRINT_DEBUG("[%s] remote_ptr: %" PRIx64 ", value: %u\n", __func__, request.remote_ptr, request.value);
     ggml_backend_buffer_t buffer = reinterpret_cast<ggml_backend_buffer_t>(request.remote_ptr);
     if (buffers.find(buffer) == buffers.end()) {
-        GGML_PRINT_DEBUG("[%s] buffer not found\n", __func__);
+        GGML_LOG_ERROR("[%s] buffer not found\n", __func__);
         return false;
     }
     ggml_backend_buffer_clear(buffer, request.value);
@@ -952,7 +943,7 @@ bool rpc_server::set_tensor(const std::vector<uint8_t> & input) {
     struct ggml_context * ctx = ggml_init(params);
     ggml_tensor * tensor = deserialize_tensor(ctx, in_tensor);
     if (tensor == nullptr) {
-        GGML_PRINT_DEBUG("[%s] error deserializing tensor\n", __func__);
+        GGML_LOG_ERROR("[%s] error deserializing tensor\n", __func__);
         ggml_free(ctx);
         return false;
     }
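set_tensor, get_tensor, and copy_tensor (this hunk and the next two) share the same guard shape: create a scratch ggml context, attempt to deserialize, and free the context on every early exit so nothing leaks. A simplified, compilable sketch of that shape using stand-in types (not the real ggml API):

#include <cstdio>
#include <cstdint>
#include <cstddef>

// Stand-ins for ggml_context / ggml_tensor, for illustration only.
struct demo_tensor { uint32_t value; };
struct demo_ctx    { demo_tensor tensor; };   // tensors live inside the context

static demo_ctx * demo_init()               { return new demo_ctx(); }
static void       demo_free(demo_ctx * ctx) { delete ctx; }

// Returns nullptr on malformed input, like deserialize_tensor does.
static demo_tensor * demo_deserialize(demo_ctx * ctx, const uint8_t * buf, size_t len) {
    if (buf == nullptr || len == 0) {
        return nullptr;
    }
    ctx->tensor.value = buf[0];               // trivial stand-in "decode"
    return &ctx->tensor;
}

static bool handle_set_tensor(const uint8_t * buf, size_t len) {
    demo_ctx * ctx = demo_init();
    demo_tensor * tensor = demo_deserialize(ctx, buf, len);
    if (tensor == nullptr) {
        fprintf(stderr, "[%s] error deserializing tensor\n", __func__);
        demo_free(ctx);                       // every early exit frees the context
        return false;
    }
    // ... apply the tensor data here ...
    demo_free(ctx);
    return true;
}

int main() {
    uint8_t payload[] = { 42 };
    bool ok   = handle_set_tensor(payload, sizeof(payload));
    bool fail = handle_set_tensor(nullptr, 0);   // exercises the error path
    return (ok && !fail) ? 0 : 1;
}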
@@ -1017,7 +1008,7 @@ bool rpc_server::get_tensor(const rpc_msg_get_tensor_req & request, std::vector<
     struct ggml_context * ctx = ggml_init(params);
     ggml_tensor * tensor = deserialize_tensor(ctx, &request.tensor);
     if (tensor == nullptr) {
-        GGML_PRINT_DEBUG("[%s] error deserializing tensor\n", __func__);
+        GGML_LOG_ERROR("[%s] error deserializing tensor\n", __func__);
         ggml_free(ctx);
         return false;
     }
@@ -1051,7 +1042,7 @@ bool rpc_server::copy_tensor(const rpc_msg_copy_tensor_req & request, rpc_msg_co
     ggml_tensor * src = deserialize_tensor(ctx, &request.src);
     ggml_tensor * dst = deserialize_tensor(ctx, &request.dst);
     if (src == nullptr || dst == nullptr) {
-        GGML_PRINT_DEBUG("[%s] error deserializing tensors\n", __func__);
+        GGML_LOG_ERROR("[%s] error deserializing tensors\n", __func__);
         ggml_free(ctx);
         return false;
     }
@@ -1385,14 +1376,14 @@ static void ggml_backend_rpc_device_get_memory(ggml_backend_dev_t dev, size_t *
 
     ggml_backend_rpc_get_device_memory(ctx->endpoint.c_str(), free, total);
 
-    UNUSED(dev);
+    GGML_UNUSED(dev);
 }
 
 static enum ggml_backend_dev_type ggml_backend_rpc_device_get_type(ggml_backend_dev_t dev) {
     // TODO: obtain value from the server
     return GGML_BACKEND_DEVICE_TYPE_GPU;
 
-    UNUSED(dev);
+    GGML_UNUSED(dev);
 }
 
 static void ggml_backend_rpc_device_get_props(ggml_backend_dev_t dev, struct ggml_backend_dev_props * props) {
@@ -1413,7 +1404,7 @@ static ggml_backend_t ggml_backend_rpc_device_init(ggml_backend_dev_t dev, const
 
     return ggml_backend_rpc_init(ctx->endpoint.c_str());
 
-    UNUSED(params);
+    GGML_UNUSED(params);
 }
 
 static ggml_backend_buffer_type_t ggml_backend_rpc_device_get_buffer_type(ggml_backend_dev_t dev) {
@@ -1421,12 +1412,12 @@ static ggml_backend_buffer_type_t ggml_backend_rpc_device_get_buffer_type(ggml_b
 
     return ggml_backend_rpc_buffer_type(ctx->endpoint.c_str());
 
-    UNUSED(dev);
+    GGML_UNUSED(dev);
 }
 
 static bool ggml_backend_rpc_device_supports_op(ggml_backend_dev_t dev, const struct ggml_tensor * op) {
-    UNUSED(dev);
-    UNUSED(op);
+    GGML_UNUSED(dev);
+    GGML_UNUSED(op);
     //TODO: call the remote backend and cache the results
     return true;
 }
@@ -1463,20 +1454,20 @@ static const struct ggml_backend_device_i ggml_backend_rpc_device_i = {
 static const char * ggml_backend_rpc_reg_get_name(ggml_backend_reg_t reg) {
     return "RPC";
 
-    UNUSED(reg);
+    GGML_UNUSED(reg);
 }
 
 static size_t ggml_backend_rpc_reg_get_device_count(ggml_backend_reg_t reg) {
     return 0;
 
-    UNUSED(reg);
+    GGML_UNUSED(reg);
 }
 
 static ggml_backend_dev_t ggml_backend_rpc_reg_get_device(ggml_backend_reg_t reg, size_t index) {
     GGML_ABORT("The RPC backend does not have enumerated devices - use ggml_backend_add_device instead");
 
-    UNUSED(reg);
-    UNUSED(index);
+    GGML_UNUSED(reg);
+    GGML_UNUSED(index);
 }
 
 static void * ggml_backend_rpc_get_proc_address(ggml_backend_reg_t reg, const char * name) {
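One detail worth noting in these registry functions: the GGML_UNUSED calls sit after an unconditional return (or GGML_ABORT), so they are never executed. They still work, because unused-parameter warnings are syntactic — referencing the parameter anywhere in the body suppresses them. A compilable illustration (assumed macro body as before):

#define GGML_UNUSED(x) (void)(x)

// 'reg' is never used at runtime, yet no -Wunused-parameter fires:
// the unreachable (void)(reg) still references the name syntactically.
static const char * demo_reg_get_name(int reg) {
    return "RPC";

    GGML_UNUSED(reg);
}

int main() {
    return demo_reg_get_name(0)[0] == 'R' ? 0 : 1;
}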
@@ -1485,7 +1476,7 @@ static void * ggml_backend_rpc_get_proc_address(ggml_backend_reg_t reg, const ch
     }
     return NULL;
 
-    UNUSED(reg);
+    GGML_UNUSED(reg);
 }
 
 static const struct ggml_backend_reg_i ggml_backend_rpc_reg_i = {