argmax.cu

#include "common.cuh"
#include "argmax.cuh"
#include "sum.cuh"

#include <cfloat>   // FLT_MAX
#include <cstdint>

// Row-wise argmax: each block of WARP_SIZE threads handles WARP_SIZE consecutive rows.
// For every row, the threads of the warp stride over the columns, keep a per-thread
// running maximum, and then combine their partial results with a butterfly shuffle
// reduction. Thread t of the block finally writes the result for row row0 + t.
static __global__ void argmax_f32(
    const float * x, int32_t * dst, const int64_t ncols, const int64_t nrows) {

    int argmax_thread = 0;
    const int64_t row0 = (int64_t)blockIdx.x*WARP_SIZE;

#pragma unroll
    for (int64_t row1 = 0; row1 < WARP_SIZE; ++row1) {
        const int64_t row = row0 + row1;

        if (row >= nrows) {
            break;
        }

        float maxval = -FLT_MAX;
        int   argmax = -1;

        // Each thread scans the columns threadIdx.x, threadIdx.x + WARP_SIZE, ...
        // The bigger/not_bigger blend is a branchless select: it keeps
        // (maxval, argmax) if val <= maxval and replaces them otherwise.
        for (int32_t col = threadIdx.x; col < ncols; col += WARP_SIZE) {
            const float val        = x[row*ncols + col];
            const int   bigger     = val > maxval;
            const int   not_bigger = bigger ^ 0x00000001;

            maxval = maxval*not_bigger + val*bigger;
            argmax = argmax*not_bigger + col*bigger;
        }

        // Butterfly reduction across the warp: after log2(WARP_SIZE) steps every
        // thread holds the row's maximum and the corresponding column index.
#pragma unroll
        for (int mask = 16; mask > 0; mask >>= 1) {
            const float val        = __shfl_xor_sync(0xFFFFFFFF, maxval, mask, WARP_SIZE);
            const int   col        = __shfl_xor_sync(0xFFFFFFFF, argmax, mask, WARP_SIZE);
            const int   bigger     = val > maxval;
            const int   not_bigger = bigger ^ 0x00000001;

            maxval = maxval*not_bigger + val*bigger;
            argmax = argmax*not_bigger + col*bigger;
        }

        // Only the thread whose index matches the row offset keeps this row's result.
        const int store = row1 == threadIdx.x;
        argmax_thread += store*argmax;
    }

    const int row = row0 + threadIdx.x;

    if (row >= nrows) {
        return;
    }

    dst[row] = argmax_thread;
}

void ggml_cuda_argmax(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
    const ggml_tensor * src0 = dst->src[0];

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_I32);

    GGML_ASSERT(ggml_is_contiguous(src0));

    const int64_t ne00  = src0->ne[0];
    const int64_t nrows = ggml_nrows(src0);

    const float * src0_d = (const float *) src0->data;
    int32_t     * dst_d  = (int32_t     *) dst->data;

    cudaStream_t stream = ctx.stream();

    // One warp-sized block per WARP_SIZE rows.
    const int64_t num_blocks = (nrows + WARP_SIZE - 1) / WARP_SIZE;

    const dim3 blocks_dim(WARP_SIZE, 1, 1);
    const dim3 blocks_num(num_blocks, 1, 1);

    argmax_f32<<<blocks_num, blocks_dim, 0, stream>>>(src0_d, dst_d, ne00, nrows);
}
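// ---------------------------------------------------------------------------
// Illustrative sketch only, not part of the upstream file: a minimal
// standalone check of argmax_f32, assuming a plain CUDA toolchain and
// WARP_SIZE == 32 as defined in common.cuh. The guard macro
// GGML_CUDA_ARGMAX_STANDALONE_TEST is hypothetical and never defined by the
// normal build, so this block compiles to nothing in practice.
// ---------------------------------------------------------------------------
#ifdef GGML_CUDA_ARGMAX_STANDALONE_TEST
#include <cstdio>
#include <vector>

int main() {
    const int64_t nrows = 2;
    const int64_t ncols = 70; // more than one warp-sized stride per row

    // Host input: row 0 peaks at column 3, row 1 peaks at column 69.
    std::vector<float> h_x(nrows*ncols, 0.0f);
    h_x[0*ncols +  3] = 5.0f;
    h_x[1*ncols + 69] = 7.0f;

    float   * d_x   = nullptr;
    int32_t * d_dst = nullptr;
    cudaMalloc(&d_x,   nrows*ncols*sizeof(float));
    cudaMalloc(&d_dst, nrows*sizeof(int32_t));
    cudaMemcpy(d_x, h_x.data(), nrows*ncols*sizeof(float), cudaMemcpyHostToDevice);

    // Same launch shape as ggml_cuda_argmax: one warp-sized block per WARP_SIZE rows.
    const int64_t num_blocks = (nrows + WARP_SIZE - 1) / WARP_SIZE;
    argmax_f32<<<num_blocks, WARP_SIZE>>>(d_x, d_dst, ncols, nrows);

    std::vector<int32_t> h_dst(nrows);
    cudaMemcpy(h_dst.data(), d_dst, nrows*sizeof(int32_t), cudaMemcpyDeviceToHost);

    printf("row 0 argmax = %d (expected 3)\n",  h_dst[0]);
    printf("row 1 argmax = %d (expected 69)\n", h_dst[1]);

    cudaFree(d_x);
    cudaFree(d_dst);
    return 0;
}
#endif // GGML_CUDA_ARGMAX_STANDALONE_TEST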