From b641d986f7bec7dbfcf5b3f148f758b5d0935f58 Mon Sep 17 00:00:00 2001
From: Concedo <39025047+LostRuins@users.noreply.github.com>
Date: Wed, 1 May 2024 18:29:07 +0800
Subject: [PATCH] use johannes implementation instead (+1 squashed commits)

Squashed commits:

[f5e6709d] use johannes implementation instead
---
 ggml-cuda/common.cuh | 30 +++++++++++++++++++++++++-----
 ggml-cuda/fattn.cu   | 23 -----------------------
 2 files changed, 25 insertions(+), 28 deletions(-)

diff --git a/ggml-cuda/common.cuh b/ggml-cuda/common.cuh
index 276c3fa23799b..1adc6b5b809d1 100644
--- a/ggml-cuda/common.cuh
+++ b/ggml-cuda/common.cuh
@@ -137,7 +137,8 @@
 #define STRINGIZE(...) STRINGIZE_IMPL(__VA_ARGS__)
 
 #define WARP_SIZE 32
-#define CUDART_HMAX 11070 // CUDA 11.7, min. ver. for which __hmax and __hmax2 are known to work (may be higher than needed)
+#define CUDART_HMAX  11070 // CUDA 11.7, min. ver. for which __hmax and __hmax2 are known to work (may be higher than needed)
+#define CUDART_HMASK 12000 // CUDA 12.0, min. ver. for half2 -> uint mask comparisons
 
 #define CC_PASCAL     600
 #define MIN_CC_DP4A   610 // minimum compute capability for __dp4a, an intrinsic for byte-wise dot products
@@ -293,20 +294,39 @@ static __device__ __forceinline__ float warp_reduce_max(float x) {
     return x;
 }
 
+#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && CUDART_VERSION < CUDART_HMAX
+static __device__ __forceinline__ half hmax(const half a, const half b) {
+    return __half2float(a) > __half2float(b) ? a : b;
+}
+static __device__ __forceinline__ half2 hmax2(const half2 a, const half2 b) {
+    half2 ret;
+    reinterpret_cast<half&>(ret.x) =  __low2float(a) >  __low2float(b) ?  __low2half(a) :  __low2half(b);
+    reinterpret_cast<half&>(ret.y) = __high2float(a) > __high2float(b) ? __high2half(a) : __high2half(b);
+    return ret;
+}
+#else
+static __device__ __inline__ __half hmax(const __half a, const __half b) {
+    return __hmax(a,b);
+}
+static __device__ __inline__ __half2 hmax2(const __half2 a, const __half2 b) {
+    return __hmax2(a,b);
+}
+#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && CUDART_VERSION < CUDART_HMAX
+
 static __device__ __forceinline__ half2 warp_reduce_max(half2 x) {
-#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL && CUDART_VERSION >= CUDART_HMAX
+#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL
 #pragma unroll
     for (int mask = 16; mask > 0; mask >>= 1) {
-        x = __hmax2(x, __shfl_xor_sync(0xffffffff, x, mask, 32));
+        x = hmax2(x, __shfl_xor_sync(0xffffffff, x, mask, 32));
     }
     return x;
 #else
     GGML_UNUSED(x);
     NO_DEVICE_CODE;
-#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL && CUDART_VERSION >= CUDART_HMAX
+#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL
 }
 
-#if CUDART_VERSION < 12000
+#if CUDART_VERSION < CUDART_HMASK
 static __device__ __forceinline__ uint32_t __hgt2_mask(const half2 a, const half2 b) {
     const uint32_t mask_low  = 0x0000FFFF * (float( __low2half(a)) > float( __low2half(b)));
     const uint32_t mask_high = 0xFFFF0000 * (float(__high2half(a)) > float(__high2half(b)));
diff --git a/ggml-cuda/fattn.cu b/ggml-cuda/fattn.cu
index a023c9a176c69..f9418495b8ab6 100644
--- a/ggml-cuda/fattn.cu
+++ b/ggml-cuda/fattn.cu
@@ -11,29 +11,6 @@
 #define HALF_MAX_HALF         __float2half(65504.0f/2) // Use neg. of this instead of -INFINITY to initialize KQ max vals to avoid NaN upon subtraction.
 #define SOFTMAX_FTZ_THRESHOLD -20.0f                   // Softmax exp. of values smaller than this are flushed to zero to avoid NaNs.
 
-//hack: polyfill hmax and hmax2 for older cuda version
-#if CUDART_VERSION < CUDART_HMAX
-__device__ __inline__ __half hmax(const __half a, const __half b) {
-    const float fa = __half2float(a);
-    const float fb = __half2float(b);
-    return __float2half(fa > fb ? fa : fb);
-}
-__device__ __inline__ __half2 hmax2(const __half2 a, const __half2 b) {
-    __half2 result;
-    result.x = hmax(a.x, b.x);
-    result.y = hmax(a.y, b.y);
-    return result;
-}
-#else
-__device__ __inline__ __half hmax(const __half a, const __half b) {
-    return __hmax(a,b);
-}
-__device__ __inline__ __half2 hmax2(const __half2 a, const __half2 b) {
-    return __hmax2(a,b);
-}
-#endif
-
-
 template<int D, int parallel_blocks> // D == head size
 __launch_bounds__(((D + WARP_SIZE - 1) / WARP_SIZE)*WARP_SIZE, 1)
 static __global__ void flash_attn_vec_ext_f16(
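
Note on the polyfill moved into common.cuh: on CUDA older than 11.7 there is no
__hmax/__hmax2, so the fallback hmax2 compares the two half2 lanes in float and
returns the winning half operands. Below is a minimal standalone sketch, not
part of the patch, that checks this fallback against plain float math; the file
name hmax2_check.cu, the kernel name, and the test values are invented for
illustration.

// hmax2_check.cu -- hypothetical harness; build with: nvcc hmax2_check.cu
#include <cuda_fp16.h>
#include <cstdio>

// Same fallback logic as the patched common.cuh: compare each lane in float,
// pick the corresponding half operand, no __hmax2 intrinsic required.
static __device__ __forceinline__ half2 hmax2_polyfill(const half2 a, const half2 b) {
    half2 ret;
    reinterpret_cast<half&>(ret.x) =  __low2float(a) >  __low2float(b) ?  __low2half(a) :  __low2half(b);
    reinterpret_cast<half&>(ret.y) = __high2float(a) > __high2float(b) ? __high2half(a) : __high2half(b);
    return ret;
}

__global__ void hmax2_check(float2 a, float2 b, float2 *out) {
    const half2 m = hmax2_polyfill(__floats2half2_rn(a.x, a.y), __floats2half2_rn(b.x, b.y));
    out->x =  __low2float(m); // expected: fmaxf(a.x, b.x)
    out->y = __high2float(m); // expected: fmaxf(a.y, b.y)
}

int main() {
    float2 *out;
    cudaMallocManaged(&out, sizeof(*out));
    hmax2_check<<<1, 1>>>(make_float2(1.0f, -2.0f), make_float2(0.5f, 3.0f), out);
    cudaDeviceSynchronize();
    printf("%g %g\n", out->x, out->y); // prints "1 3"
    cudaFree(out);
    return 0;
}

Comparing in float rather than calling __hmax2 is what keeps the fallback valid
on toolkits older than CUDA 11.7, at the cost of a few extra conversions per call.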
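Note also the new CUDART_HMASK gate: CUDA 12.0 added __hgt2_mask, which returns
a 32-bit value holding 0xFFFF in each 16-bit lane where a > b, and the guarded
fallback in common.cuh rebuilds that mask from two float comparisons. A tiny
host-side sketch of the same bit trick, with hypothetical names and plain floats
standing in for the two half2 lanes (compiles as ordinary C++ or CUDA host code):

// hgt2_mask_demo.cu -- hypothetical demo of the fallback's mask construction
#include <cstdint>
#include <cstdio>

static uint32_t hgt2_mask_fallback(float a_low, float a_high, float b_low, float b_high) {
    const uint32_t mask_low  = 0x0000FFFFu * (a_low  > b_low);  // low lane:  all ones iff a > b
    const uint32_t mask_high = 0xFFFF0000u * (a_high > b_high); // high lane: all ones iff a > b
    return mask_low | mask_high;
}

int main() {
    printf("%08x\n", hgt2_mask_fallback(2.0f, 0.0f, 1.0f, 1.0f)); // 0000ffff: only low lane greater
    printf("%08x\n", hgt2_mask_fallback(0.0f, 2.0f, 1.0f, 1.0f)); // ffff0000: only high lane greater
    return 0;
}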