From 9517fceeaafbc6f9517f7502096bac51f628b022 Mon Sep 17 00:00:00 2001
From: JohannesGaessler
Date: Fri, 29 Dec 2023 11:38:56 +0100
Subject: [PATCH] CUDA: fix tensor core logic for Pascal and HIP

---
 ggml-cuda.cu | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/ggml-cuda.cu b/ggml-cuda.cu
index abad9cc39e2cf0..f2333d475f6f51 100644
--- a/ggml-cuda.cu
+++ b/ggml-cuda.cu
@@ -134,7 +134,7 @@
 // TODO: improve this to be correct for more hardware
 // for example, currently fails for GeForce GTX 1660 which is TURING arch (> VOLTA) but does not have tensor cores
 // probably other such cases, and not sure what happens on AMD hardware
-#if !defined(GGML_CUDA_FORCE_MMQ)
+#if !defined(GGML_CUDA_FORCE_MMQ) && !defined(GGML_USE_HIPBLAS)
 #define CUDA_USE_TENSOR_CORES
 #endif
 
@@ -8663,7 +8663,7 @@ static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1
     }
 
 #ifdef CUDA_USE_TENSOR_CORES
-    const bool use_tensor_cores = true;
+    const bool use_tensor_cores = min_compute_capability >= CC_VOLTA;
 #else
     const bool use_tensor_cores = false;
 #endif
@@ -8706,7 +8706,7 @@ static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1
 
         // when tensor cores are available, use them for large batch size
         // ref: https://github.com/ggerganov/llama.cpp/pull/3776
-        if (use_tensor_cores && min_compute_capability >= CC_VOLTA && src1->ne[1] > MMQ_MAX_BATCH_SIZE) {
+        if (use_tensor_cores && src1->ne[1] > MMQ_MAX_BATCH_SIZE) {
             use_mul_mat_q = false;
         }
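
The net effect of the patch: tensor cores are now disabled at compile time for HIP builds (and when GGML_CUDA_FORCE_MMQ is set), and disabled at run time on pre-Volta devices such as Pascal, which lack them. The min_compute_capability >= CC_VOLTA check is folded into use_tensor_cores itself, so every consumer of that flag sees the corrected value and the check can be dropped from the batch-size condition. Below is a minimal, self-contained sketch of the resulting gating logic, not the ggml-cuda.cu implementation: should_use_mul_mat_q is a hypothetical helper, and the CC_VOLTA / MMQ_MAX_BATCH_SIZE values are assumed to match the defines elsewhere in ggml-cuda.cu.

#include <cstdint>

// Assumed to mirror the constants defined in ggml-cuda.cu.
#define CC_VOLTA           700
#define MMQ_MAX_BATCH_SIZE  32

// Compile-time gate: HIP builds and GGML_CUDA_FORCE_MMQ never use tensor cores.
#if !defined(GGML_CUDA_FORCE_MMQ) && !defined(GGML_USE_HIPBLAS)
#define CUDA_USE_TENSOR_CORES
#endif

// Hypothetical helper illustrating the decision after this patch.
static bool should_use_mul_mat_q(int min_compute_capability, int64_t batch_size) {
#ifdef CUDA_USE_TENSOR_CORES
    // Run-time gate: even when compiled in, tensor cores require at least
    // Volta (compute capability 7.0), so Pascal falls through to MMQ.
    const bool use_tensor_cores = min_compute_capability >= CC_VOLTA;
#else
    const bool use_tensor_cores = false;
#endif

    bool use_mul_mat_q = true; // assume the quantized MMQ path is eligible
    // For large batches, prefer cuBLAS with tensor cores over MMQ
    // (see https://github.com/ggerganov/llama.cpp/pull/3776).
    if (use_tensor_cores && batch_size > MMQ_MAX_BATCH_SIZE) {
        use_mul_mat_q = false;
    }
    return use_mul_mat_q;
}

Under this sketch, a Pascal device (compute capability 6.1) keeps use_tensor_cores == false and stays on MMQ even for large batches, while a HIP build compiles the tensor core path out entirely instead of taking it by accident.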