@@ -137,7 +137,8 @@
#define STRINGIZE(...) STRINGIZE_IMPL(__VA_ARGS__)
 
#define WARP_SIZE 32
-#define CUDART_HMAX 11070 // CUDA 11.7, min. ver. for which __hmax and __hmax2 are known to work (may be higher than needed)
+#define CUDART_HMAX   11070 // CUDA 11.7, min. ver. for which __hmax and __hmax2 are known to work (may be higher than needed)
+#define CUDART_HMASK  12000 // CUDA 12.0, min. ver. for half2 -> uint mask comparisons
 
#define CC_PASCAL 600
#define MIN_CC_DP4A 610 // minimum compute capability for __dp4a, an intrinsic for byte-wise dot products
@@ -293,20 +294,54 @@ static __device__ __forceinline__ float warp_reduce_max(float x) {
    return x;
}
 
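+// __hmax and __hmax2 are only guaranteed from CUDART 11.7 (CUDART_HMAX); these
+// wrappers fall back to float comparisons on older toolkits. Not supported on
+// the HIP/AMD path, which hits NO_DEVICE_CODE instead.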
+static __device__ __forceinline__ half ggml_cuda_hmax(const half a, const half b) {
+#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
+
+#if CUDART_VERSION >= CUDART_HMAX
+    return __hmax(a, b);
+#else
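+    // CUDA < 11.7: compare via float (NaN handling may differ from __hmax).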
+    return __half2float(a) > __half2float(b) ? a : b;
+#endif // CUDART_VERSION >= CUDART_HMAX
+
+#else
+    GGML_UNUSED(a);
+    GGML_UNUSED(b);
+    NO_DEVICE_CODE;
+#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
+}
+static __device__ __forceinline__ half2 ggml_cuda_hmax2(const half2 a, const half2 b) {
+#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
+
+#if CUDART_VERSION >= CUDART_HMAX
+    return __hmax2(a, b);
+#else
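+    // CUDA < 11.7: take the per-lane maximum via float compares.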
+    half2 ret;
+    reinterpret_cast<half&>(ret.x) = __low2float(a) > __low2float(b) ? __low2half(a) : __low2half(b);
+    reinterpret_cast<half&>(ret.y) = __high2float(a) > __high2float(b) ? __high2half(a) : __high2half(b);
+    return ret;
+#endif // CUDART_VERSION >= CUDART_HMAX
+
+#else
+    GGML_UNUSED(a);
+    GGML_UNUSED(b);
+    NO_DEVICE_CODE;
+#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
+}
+
static __device__ __forceinline__ half2 warp_reduce_max(half2 x) {
-#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL && CUDART_VERSION >= CUDART_HMAX
+#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL
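+// The CUDART_VERSION check is no longer needed here: ggml_cuda_hmax2 provides
+// its own fallback for CUDART < CUDART_HMAX.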
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
-        x = __hmax2(x, __shfl_xor_sync(0xffffffff, x, mask, 32));
+        x = ggml_cuda_hmax2(x, __shfl_xor_sync(0xffffffff, x, mask, 32));
    }
    return x;
#else
    GGML_UNUSED(x);
    NO_DEVICE_CODE;
-#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL && CUDART_VERSION >= CUDART_HMAX
+#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL
}
 
-#if CUDART_VERSION < 12000
+#if CUDART_VERSION < CUDART_HMASK
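+// CUDA 12.0 added __hgt2_mask; define the equivalent here for older toolkits.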
static __device__ __forceinline__ uint32_t __hgt2_mask(const half2 a, const half2 b) {
    const uint32_t mask_low = 0x0000FFFF * (float( __low2half(a)) > float( __low2half(b)));
    const uint32_t mask_high = 0xFFFF0000 * (float(__high2half(a)) > float(__high2half(b)));