Mirror of https://github.com/ggerganov/llama.cpp.git
CUDA/HIP: fix tests/test-backend-ops (#8896)
commit a8dbc6f753
parent 506122d854
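The commit tightens ggml_backend_cuda_supports_op in two places. For GGML_OP_MUL_MAT and GGML_OP_MUL_MAT_ID it now rejects mixed-precision inputs where src[1] is F16 but src[0] is not, and for GGML_OP_FLASH_ATTN_EXT on HIP/AMD builds a head size of 64 is only reported as supported when src[1] is F16. Per the commit title, this brings the predicate in line with what tests/test-backend-ops can actually exercise on these backends.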
@@ -2742,11 +2742,12 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, cons
         case GGML_OP_MUL_MAT_ID:
             {
                 struct ggml_tensor * a = op->src[0];
-                if (op->op == GGML_OP_MUL_MAT) {
-                    struct ggml_tensor * b = op->src[1];
-                    if (a->ne[3] != b->ne[3]) {
-                        return false;
-                    }
+                struct ggml_tensor * b = op->src[1];
+                if (b->type == GGML_TYPE_F16 && a->type != GGML_TYPE_F16) {
+                    return false;
+                }
+                if (op->op == GGML_OP_MUL_MAT && a->ne[3] != b->ne[3]) {
+                    return false;
                 }
                 switch (a->type) {
                     case GGML_TYPE_F32:
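Previously src[1] was only dereferenced for GGML_OP_MUL_MAT. The restructured code fetches it unconditionally, applies the new F16 mixed-type rejection to both GGML_OP_MUL_MAT and GGML_OP_MUL_MAT_ID, and keeps the a->ne[3] != b->ne[3] batch-dimension check limited to GGML_OP_MUL_MAT.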
@@ -2877,7 +2878,7 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, cons
             return true;
         case GGML_OP_FLASH_ATTN_EXT:
 #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
-            return op->src[0]->ne[0] == 64 || op->src[0]->ne[0] == 128;
+            return (op->src[0]->ne[0] == 64 && op->src[1]->type == GGML_TYPE_F16) || op->src[0]->ne[0] == 128;
 #else
             if (op->src[0]->ne[0] == 128) {
                 return true;
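For context, here is a minimal sketch of how a harness in the spirit of tests/test-backend-ops can query this predicate through the public ggml-backend API. It is not part of the commit; the shapes, types, and the q8_0 x f16 combination are illustrative assumptions chosen to trip the new check.

// sketch: probe whether the CUDA backend reports an op as supported
// (assumes the public ggml headers; not code from this commit)
#include <stdio.h>

#include "ggml.h"
#include "ggml-backend.h"
#include "ggml-cuda.h"

int main(void) {
    struct ggml_init_params params = {
        /*.mem_size   =*/ 16*1024*1024,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ true, // supports_op only inspects tensor metadata
    };
    struct ggml_context * ctx = ggml_init(params);

    // src[0] quantized, src[1] F16: after this commit the CUDA backend
    // reports such a MUL_MAT as unsupported (b is F16 but a is not)
    struct ggml_tensor * a  = ggml_new_tensor_2d(ctx, GGML_TYPE_Q8_0, 64, 64);
    struct ggml_tensor * b  = ggml_new_tensor_2d(ctx, GGML_TYPE_F16,  64, 64);
    struct ggml_tensor * mm = ggml_mul_mat(ctx, a, b);

    ggml_backend_t backend = ggml_backend_cuda_init(0);
    if (backend != NULL) {
        printf("MUL_MAT q8_0 x f16 supported: %d\n",
               (int) ggml_backend_supports_op(backend, mm));
        ggml_backend_free(backend);
    }

    ggml_free(ctx);
    return 0;
}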