Parcourir la source

CUDA: remove unnecessary warp reduce in FA (ggml/1032)

* kqmax_new_j is already identical across every thread in the warp after the operation at line 199, so this warp reduction is redundant and can be omitted

* the same redundant reduction exists in the f32 vector kernel (fattn-vec-f32.cuh) and is removed as well

---------

Co-authored-by: ZhaoXiaoYu <zhao.xiaoyu@zte.com.cn>
mahorozte il y a 1 an
Parent
commit
e9e661bd59
2 fichiers modifiés avec 0 ajouts et 2 suppressions
  1. 0 1
      ggml/src/ggml-cuda/fattn-vec-f16.cuh
  2. 0 1
      ggml/src/ggml-cuda/fattn-vec-f32.cuh

+ 0 - 1
ggml/src/ggml-cuda/fattn-vec-f16.cuh

@@ -220,7 +220,6 @@ static __global__ void flash_attn_vec_ext_f16(
         for (int j = 0; j < ncols; ++j) {
             half kqmax_new_j = ncols == 1 ? kqmax_new : kqmax_new_arr[j];
 
-            kqmax_new_j = warp_reduce_max(kqmax_new_j);
             if (threadIdx.x == 0) {
                 kqmax_shared[j][threadIdx.y] = kqmax_new_j;
             }

+ 0 - 1
ggml/src/ggml-cuda/fattn-vec-f32.cuh

@@ -206,7 +206,6 @@ static __global__ void flash_attn_vec_ext_f32(
         for (int j = 0; j < ncols; ++j) {
             float kqmax_new_j = kqmax_new_arr[j];
 
-            kqmax_new_j = warp_reduce_max(kqmax_new_j);
             if (threadIdx.x == 0) {
                 kqmax_shared[j][threadIdx.y] = kqmax_new_j;
             }