diff --git a/ggml-cuda/common.cuh b/ggml-cuda/common.cuh
index 1adc6b5b809d1..582bc1be75dfb 100644
--- a/ggml-cuda/common.cuh
+++ b/ggml-cuda/common.cuh
@@ -294,30 +294,45 @@ static __device__ __forceinline__ float warp_reduce_max(float x) {
     return x;
 }
 
-#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && CUDART_VERSION < CUDART_HMAX
-static __device__ __forceinline__ half hmax(const half a, const half b) {
+static __device__ __forceinline__ half ggml_cuda_hmax(const half a, const half b) {
+#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
+
+#if CUDART_VERSION >= CUDART_HMAX
+    return __hmax(a, b);
+#else
     return __half2float(a) > __half2float(b) ? a : b;
+#endif // CUDART_VERSION >= CUDART_HMAX
+
+#else
+    GGML_UNUSED(a);
+    GGML_UNUSED(b);
+    NO_DEVICE_CODE;
+#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && CUDART_VERSION < CUDART_HMAX
 }
-static __device__ __forceinline__ half2 hmax2(const half2 a, const half2 b) {
+static __device__ __forceinline__ half2 ggml_cuda_hmax2(const half2 a, const half2 b) {
+#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
+
+#if CUDART_VERSION >= CUDART_HMAX
+    return __hmax2(a, b);
+#else
     half2 ret;
     reinterpret_cast<half&>(ret.x) = __low2float(a) > __low2float(b) ? __low2half(a) : __low2half(b);
     reinterpret_cast<half&>(ret.y) = __high2float(a) > __high2float(b) ? __high2half(a) : __high2half(b);
     return ret;
-}
+#endif // CUDART_VERSION >= CUDART_HMAX
+
 #else
-static __device__ __inline__ __half hmax(const __half a, const __half b) {
-    return __hmax(a,b);
-}
-static __device__ __inline__ __half2 hmax2(const __half2 a, const __half2 b) {
-    return __hmax2(a,b);
-}
+    GGML_UNUSED(a);
+    GGML_UNUSED(b);
+    NO_DEVICE_CODE;
 #endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && CUDART_VERSION < CUDART_HMAX
+}
 
 static __device__ __forceinline__ half2 warp_reduce_max(half2 x) {
 #if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL
 #pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
-       x = hmax2(x, __shfl_xor_sync(0xffffffff, x, mask, 32));
+       x = ggml_cuda_hmax2(x, __shfl_xor_sync(0xffffffff, x, mask, 32));
    }
    return x;
 #else
diff --git a/ggml-cuda/fattn.cu b/ggml-cuda/fattn.cu
index f9418495b8ab6..c8a11d1733464 100644
--- a/ggml-cuda/fattn.cu
+++ b/ggml-cuda/fattn.cu
@@ -116,7 +116,7 @@ static __global__ void flash_attn_vec_ext_f16(
         sum2 = warp_reduce_sum(sum2);
         half sum = __low2half(sum2) + __high2half(sum2);
         sum += mask ? maskh[k_VKQ_0 + i_KQ] : __float2half(0.0f);
-        kqmax_new = hmax(kqmax_new, sum);
+        kqmax_new = ggml_cuda_hmax(kqmax_new, sum);
         if (threadIdx.x == 0) {
             KQ[i_KQ] = sum;
         }
@@ -416,9 +416,9 @@ static __global__ void flash_attn_ext_f16(
                 const int k = k0 + threadIdx.x;
 
                 KQ2_tmp[k0/WARP_SIZE] += mask ? mask2[(j*ne11 + k_VKQ_0)/2 + k] : make_half2(0.0f, 0.0f);
-                KQ_max_new = hmax2(KQ_max_new, KQ2_tmp[k0/WARP_SIZE]);
+                KQ_max_new = ggml_cuda_hmax2(KQ_max_new, KQ2_tmp[k0/WARP_SIZE]);
             }
-            KQ_max_new = __half2half2(warp_reduce_max(hmax(__low2half(KQ_max_new), __high2half(KQ_max_new))));
+            KQ_max_new = __half2half2(warp_reduce_max(ggml_cuda_hmax(__low2half(KQ_max_new), __high2half(KQ_max_new))));
             const half2 diff = KQ_max_h2[j0/nwarps] - KQ_max_new;
             KQ_max_scale_h2[j0/nwarps] = h2exp(diff);
             const uint32_t ftz_mask = __hgt2_mask(diff, make_half2(SOFTMAX_FTZ_THRESHOLD, SOFTMAX_FTZ_THRESHOLD));