Commit

use johannes implementation instead
LostRuins committed May 1, 2024
1 parent e9978bf commit f5e6709
Showing 2 changed files with 24 additions and 27 deletions.
ggml-cuda/common.cuh (28 changes: 24 additions & 4 deletions)
@@ -137,7 +137,8 @@
 #define STRINGIZE(...) STRINGIZE_IMPL(__VA_ARGS__)
 
 #define WARP_SIZE 32
-#define CUDART_HMAX 11070 // CUDA 11.7, min. ver. for which __hmax and __hmax2 are known to work (may be higher than needed)
+#define CUDART_HMAX  11070 // CUDA 11.7, min. ver. for which __hmax and __hmax2 are known to work (may be higher than needed)
+#define CUDART_HMASK 12000 // CUDA 12.0, min. ver. for half2 -> uint mask comparisons
 
 #define CC_PASCAL 600
 #define MIN_CC_DP4A 610 // minimum compute capability for __dp4a, an intrinsic for byte-wise dot products
@@ -293,8 +294,27 @@ static __device__ __forceinline__ float warp_reduce_max(float x) {
     return x;
 }
 
+#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && CUDART_VERSION < CUDART_HMAX
+static __device__ __forceinline__ half hmax(const half a, const half b) {
+    return __half2float(a) > __half2float(b) ? a : b;
+}
+static __device__ __forceinline__ half2 hmax2(const half2 a, const half2 b) {
+    half2 ret;
+    reinterpret_cast<half&>(ret.x) = __low2float(a) > __low2float(b) ? __low2half(a) : __low2half(b);
+    reinterpret_cast<half&>(ret.y) = __high2float(a) > __high2float(b) ? __high2half(a) : __high2half(b);
+    return ret;
+}
+#else
+static __device__ __inline__ __half hmax(const __half a, const __half b) {
+    return __hmax(a,b);
+}
+static __device__ __inline__ __half2 hmax2(const __half2 a, const __half2 b) {
+    return __hmax2(a,b);
+}
+#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && CUDART_VERSION < CUDART_HMAX
+
 static __device__ __forceinline__ half2 warp_reduce_max(half2 x) {
-#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL && CUDART_VERSION >= CUDART_HMAX
+#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL
 #pragma unroll
     for (int mask = 16; mask > 0; mask >>= 1) {
         x = __hmax2(x, __shfl_xor_sync(0xffffffff, x, mask, 32));
@@ -303,10 +323,10 @@ static __device__ __forceinline__ half2 warp_reduce_max(half2 x) {
 #else
     GGML_UNUSED(x);
     NO_DEVICE_CODE;
-#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL && CUDART_VERSION >= CUDART_HMAX
+#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL
 }
 
-#if CUDART_VERSION < 12000
+#if CUDART_VERSION < CUDART_HMASK
 static __device__ __forceinline__ uint32_t __hgt2_mask(const half2 a, const half2 b) {
     const uint32_t mask_low = 0x0000FFFF * (float( __low2half(a)) > float( __low2half(b)));
     const uint32_t mask_high = 0xFFFF0000 * (float(__high2half(a)) > float(__high2half(b)));
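
Note (not part of the commit): a minimal sketch of how the hmax2 wrapper and the half2 warp_reduce_max above are typically exercised, assuming ggml-cuda/common.cuh is included. The kernel name, arguments, and launch shape here are hypothetical.

// Illustrative only: one warp (32 threads) reduces a row of half2 values.
// hmax2 works even on CUDA < 11.7 via the float-comparison fallback, and
// warp_reduce_max shares the per-lane maxima across the warp via __shfl_xor_sync.
static __global__ void rowwise_hmax2(const half2 * x, half2 * dst, const int ncols2) {
    const int row = blockIdx.x;

    half2 maxval = __float2half2_rn(-65504.0f/2); // start from a large negative value, as fattn.cu does with -HALF_MAX_HALF
    for (int col = threadIdx.x; col < ncols2; col += WARP_SIZE) {
        maxval = hmax2(maxval, x[row*ncols2 + col]);
    }
    maxval = warp_reduce_max(maxval);

    if (threadIdx.x == 0) {
        dst[row] = maxval;
    }
}

Such a kernel would be launched with one warp per row, e.g. rowwise_hmax2<<<nrows, WARP_SIZE>>>(x, dst, ncols2).
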
ggml-cuda/fattn.cu (23 changes: 0 additions & 23 deletions)
@@ -11,29 +11,6 @@
 #define HALF_MAX_HALF __float2half(65504.0f/2) // Use neg. of this instead of -INFINITY to initialize KQ max vals to avoid NaN upon subtraction.
 #define SOFTMAX_FTZ_THRESHOLD -20.0f // Softmax exp. of values smaller than this are flushed to zero to avoid NaNs.
 
-//hack: polyfill hmax and hmax2 for older cuda version
-#if CUDART_VERSION < CUDART_HMAX
-__device__ __inline__ __half hmax(const __half a, const __half b) {
-    const float fa = __half2float(a);
-    const float fb = __half2float(b);
-    return __float2half(fa > fb ? fa : fb);
-}
-__device__ __inline__ __half2 hmax2(const __half2 a, const __half2 b) {
-    __half2 result;
-    result.x = hmax(a.x, b.x);
-    result.y = hmax(a.y, b.y);
-    return result;
-}
-#else
-__device__ __inline__ __half hmax(const __half a, const __half b) {
-    return __hmax(a,b);
-}
-__device__ __inline__ __half2 hmax2(const __half2 a, const __half2 b) {
-    return __hmax2(a,b);
-}
-#endif
-
-
 template<int D, int parallel_blocks> // D == head size
 __launch_bounds__(((D + WARP_SIZE - 1) / WARP_SIZE)*WARP_SIZE, 1)
 static __global__ void flash_attn_vec_ext_f16(
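
Note (illustrative, not the kernel's actual code): SOFTMAX_FTZ_THRESHOLD, which stays in fattn.cu, is applied through __hgt2_mask, the half2 -> uint comparison that the new CUDART_HMASK guard in common.cuh polyfills for CUDA < 12.0. A minimal sketch of that flush-to-zero pattern, with a hypothetical helper name:

// diff = KQ - KQ_max is <= 0 in each half lane; exponentials of very negative
// lanes are forced to exact zero by masking their bit patterns, avoiding NaNs
// in later half-precision accumulation.
static __device__ __forceinline__ half2 softmax_exp_ftz(const half2 diff) {
    half2 val = h2exp(diff);                                      // exp() on both lanes
    const half2 thresh = __float2half2_rn(SOFTMAX_FTZ_THRESHOLD); // -20.0f in both lanes
    *((uint32_t *) &val) &= __hgt2_mask(diff, thresh);            // keep only lanes where diff > threshold
    return val;
}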
