From 1928967874104d451c4075eb0d38654e700a8187 Mon Sep 17 00:00:00 2001
From: pidack
Date: Tue, 27 Aug 2024 17:31:40 +0800
Subject: [PATCH] resolve test-backend-ops conflicts

---
 tests/test-backend-ops.cpp | 64 ++------------------------------------
 1 file changed, 2 insertions(+), 62 deletions(-)

diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp
index 5dd8fbfcc..3955ef332 100644
--- a/tests/test-backend-ops.cpp
+++ b/tests/test-backend-ops.cpp
@@ -474,8 +474,8 @@ struct test_case {
 
                 if (memcmp(t1_data.data(), t2_data.data(), ggml_nbytes(t1)) != 0) {
                     printf("sentinel mismatch: %s ", t1->name);
-//                    ud->ok = false;
-//                    return true;
+                    ud->ok = false;
+                    return true;
                 }
             }
 
@@ -1694,64 +1694,6 @@ struct test_leaky_relu : public test_case {
     }
 };
 
-// GGML_OP_SSM_CONV
-struct test_ssm_conv : public test_case {
-    const ggml_type type;
-    const int64_t d_conv;
-    const int64_t d_inner;
-    const int64_t n_seq_tokens;
-    const int64_t n_seqs;
-
-    std::string vars() override {
-        return VARS_TO_STR5(type, d_conv, d_inner, n_seq_tokens, n_seqs);
-    }
-
-    test_ssm_conv(ggml_type type = GGML_TYPE_F32,
-            int64_t d_conv = 4,
-            int64_t d_inner = 1536,
-            int64_t n_seq_tokens = 7,
-            int64_t n_seqs = 2)
-        : type(type), d_conv(d_conv), d_inner(d_inner), n_seq_tokens(n_seq_tokens), n_seqs(n_seqs) {}
-
-    ggml_tensor * build_graph(ggml_context * ctx) override {
-        ggml_tensor * sx = ggml_new_tensor_3d(ctx, type, d_conv - 1 + n_seq_tokens, d_inner, n_seqs);
-        ggml_tensor * c = ggml_new_tensor_2d(ctx, type, d_conv, d_inner);
-        ggml_tensor * out = ggml_ssm_conv(ctx, sx, c);
-        return out;
-    }
-};
-
-// GGML_OP_SSM_SCAN
-struct test_ssm_scan : public test_case {
-    const ggml_type type;
-    const int64_t d_state;
-    const int64_t d_inner;
-    const int64_t n_seq_tokens;
-    const int64_t n_seqs;
-
-    std::string vars() override {
-        return VARS_TO_STR5(type, d_state, d_inner, n_seq_tokens, n_seqs);
-    }
-
-    test_ssm_scan(ggml_type type = GGML_TYPE_F32,
-            int64_t d_state = 16,
-            int64_t d_inner = 1536,
-            int64_t n_seq_tokens = 7,
-            int64_t n_seqs = 2)
-        : type(type), d_state(d_state), d_inner(d_inner), n_seq_tokens(n_seq_tokens), n_seqs(n_seqs) {}
-
-    ggml_tensor * build_graph(ggml_context * ctx) override {
-        ggml_tensor * s = ggml_new_tensor_3d(ctx, type, d_state, d_inner, n_seqs);
-        ggml_tensor * x = ggml_new_tensor_3d(ctx, type, d_inner, n_seq_tokens, n_seqs);
-        ggml_tensor * dt = ggml_new_tensor_3d(ctx, type, d_inner, n_seq_tokens, n_seqs);
-        ggml_tensor * A = ggml_new_tensor_2d(ctx, type, d_state, d_inner);
-        ggml_tensor * B = ggml_new_tensor_3d(ctx, type, d_state, n_seq_tokens, n_seqs);
-        ggml_tensor * C = ggml_new_tensor_3d(ctx, type, d_state, n_seq_tokens, n_seqs);
-        ggml_tensor * out = ggml_ssm_scan(ctx, s, x, dt, A, B, C);
-        return out;
-    }
-};
-
 // GGML_OP_FLASH_ATTN_EXT
 struct test_flash_attn_ext : public test_case {
     const int64_t hs; // head size
@@ -2549,8 +2491,6 @@ static bool test_backend(ggml_backend_t backend, test_mode mode, const char * op
     test_cases.emplace_back(new test_arange());
     test_cases.emplace_back(new test_timestep_embedding());
     test_cases.emplace_back(new test_leaky_relu());
-    test_cases.emplace_back(new test_ssm_conv());
-    test_cases.emplace_back(new test_ssm_scan());
 
     for (int hs : { 64, 80, 128, 256, }) {
         for (bool mask : { true, false } ) {