sum.cu

#if !defined(GGML_USE_HIPBLAS) && !defined(GGML_USE_MUSA) && CUDART_VERSION >= 11700
#define USE_CUB
#endif // !defined(GGML_USE_HIPBLAS) && !defined(GGML_USE_MUSA) && CUDART_VERSION >= 11700

#ifdef USE_CUB
// On Windows CUB uses libraries with variables called CC_PASCAL which conflict with the define in common.cuh.
// For this reason CUB must be included BEFORE anything else.
#include <cub/cub.cuh>
using namespace cub;
#endif // USE_CUB

#include "sumrows.cuh"
#include "sum.cuh"

#include <cstdint>
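
// Sums the ne contiguous float values in x into a single float written to dst.
// With USE_CUB this is a single device-wide CUB reduction; otherwise it falls back to sum_rows_f32_cuda.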
void sum_f32_cuda(ggml_cuda_pool & pool, const float * x, float * dst, const int64_t ne, cudaStream_t stream) {
#ifdef USE_CUB
    size_t tmp_size = 0;
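    // CUB two-phase pattern: the first call with a nullptr temporary-storage pointer only
    // writes the required scratch size into tmp_size; the second call performs the reduction.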
    DeviceReduce::Sum(nullptr, tmp_size, x, dst, ne, stream);
    ggml_cuda_pool_alloc<uint8_t> tmp_alloc(pool, tmp_size);
    DeviceReduce::Sum(tmp_alloc.ptr, tmp_size, x, dst, ne, stream);
#else
    // Use the (inefficient) sum_rows implementation as a fallback.
    // For AMD there is rocPRIM, which could be used as a drop-in replacement via hipcub, but this would require moving from C++11 to C++14.
    sum_rows_f32_cuda(x, dst, ne, 1, stream);
    GGML_UNUSED(pool);
#endif // USE_CUB
}
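
// Backend implementation of the ggml sum op: checks that src0 and dst are F32 and that src0
// is contiguous, then reduces all elements of src0 into the single value stored in dst.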
void ggml_cuda_op_sum(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
    const ggml_tensor * src0 = dst->src[0];

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);
    GGML_ASSERT(ggml_is_contiguous(src0));

    const float * src0_d = (const float *) src0->data;
    float * dst_d = (float *) dst->data;

    const int64_t ne = ggml_nelements(src0);

    ggml_cuda_pool & pool = ctx.pool();
    cudaStream_t stream = ctx.stream();

    sum_f32_cuda(pool, src0_d, dst_d, ne, stream);
}