@@ -283,8 +283,7 @@ void launch_fattn_tile_f32_64_128(ggml_backend_cuda_context & ctx, ggml_tensor *
 }
 
 void ggml_cuda_flash_attn_ext_tile_f32(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
-    const ggml_tensor * KQV = dst;
-    const ggml_tensor * Q   = dst->src[0];
+    const ggml_tensor * Q = dst->src[0];
 
     if (Q->ne[1] <= 16) {
         constexpr int cols_per_block = 16;