llama : print devices used on model load
Some checks failed
flake8 Lint / Lint (push) Has been cancelled

This commit is contained in:
slaren 2024-10-07 22:45:30 +02:00
parent 5f4e30ddba
commit 20ca856ab1
2 changed files with 6 additions and 2 deletions

View File

@@ -555,7 +555,6 @@ struct ggml_backend_registry {
register_backend(ggml_backend_cuda_reg());
#endif
#ifdef GGML_USE_METAL
register_backend(ggml_backend_metal_reg());
#endif
#ifdef GGML_USE_VULKAN
@@ -565,7 +564,7 @@ struct ggml_backend_registry {
register_backend(ggml_backend_blas_reg());
#endif
// TODO: sycl, vulkan, kompute, cann
// TODO: sycl, kompute, cann
register_backend(ggml_backend_cpu_reg());
}

View File

@@ -19100,8 +19100,13 @@ struct llama_model * llama_load_model_from_file(
case GGML_BACKEND_DEVICE_TYPE_GPU:
case GGML_BACKEND_DEVICE_TYPE_GPU_FULL:
{
size_t free, total; // NOLINT
ggml_backend_dev_memory(dev, &free, &total);
LLAMA_LOG_INFO("%s: using device %s (%s) - %zu MiB free\n", __func__, ggml_backend_dev_name(dev), ggml_backend_dev_description(dev), free/1024/1024);
model->devices.push_back(dev);
break;
}
}
}