get_row_f16.cpp

#include "kernel_operator.h"

// Optimize me: use a template to avoid duplicating this code across dtypes.
using namespace AscendC;

#define BUFFER_NUM 2

class GET_ROW_F16 {
  public:
    __aicore__ inline GET_ROW_F16() {}
    __aicore__ inline void init(GM_ADDR input, GM_ADDR indices, GM_ADDR output,
                                int64_t *input_ne_ub, size_t *input_nb_ub,
                                int64_t *indices_ne_ub, size_t *indices_nb_ub,
                                int64_t *output_ne_ub, size_t *output_nb_ub) {
        // TODO: use a template for F16/F32.
        int64_t op_block_num = GetBlockNum();
        op_block_idx = GetBlockIdx();

        // Copy shapes (ne) and convert byte strides (nb) into element strides.
        for (int i = 0; i < 4; i++) {
            input_ne[i] = input_ne_ub[i];
            input_stride[i] = input_nb_ub[i] / input_nb_ub[0];

            indices_ne[i] = indices_ne_ub[i];
            indices_stride[i] = indices_nb_ub[i] / indices_nb_ub[0];

            output_ne[i] = output_ne_ub[i];
            output_stride[i] = output_nb_ub[i] / output_nb_ub[0];
        }

        // n_elements is the total number of rows to gather. Each block handles
        // dr consecutive rows starting at row index ir; the first `tails`
        // blocks take one extra row so the remainder is spread evenly.
        uint64_t n_elements =
            indices_ne[0] * indices_ne[1] * indices_ne[2] * indices_ne[3];
        dr = n_elements / op_block_num;

        uint64_t tails = n_elements % op_block_num;
        if (op_block_idx < tails) {
            dr += 1;
            ir = dr * op_block_idx;
        } else {
            ir = dr * op_block_idx + tails;
        }

        input_gm.SetGlobalBuffer((__gm__ half *)input);
        indices_gm.SetGlobalBuffer((__gm__ int32_t *)indices);
        output_gm.SetGlobalBuffer((__gm__ float *)output);

        // Round the local (UB) buffer sizes up to a multiple of 32 bytes,
        // the DataCopy block granularity.
        uint64_t input_local_buffer_size = ((input_ne[0] * sizeof(half) + 31)
                                            & ~31);
        uint64_t output_local_buffer_size = ((input_ne[0] * sizeof(float) + 31)
                                             & ~31);

        local_buffer_elems = input_local_buffer_size / sizeof(half);

        // TODO: handle rows that are too long to fit in UB.
        // All copies are done in 32-byte blocks; this is safe because the
        // buffers above are padded to 32 bytes.
        pipe.InitBuffer(input_queue, BUFFER_NUM, input_local_buffer_size);
        pipe.InitBuffer(output_queue, BUFFER_NUM, output_local_buffer_size);
    }
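
    // copy_in moves one input row from global memory into the UB staging
    // buffer. DataCopy works on 32-byte blocks, so the length is rounded up
    // to a whole number of blocks. With half elements (2 bytes each),
    // elem_per_block is 16; e.g. a row of 100 halves is copied as
    // 112 elements (7 blocks), and the padding is not written back out.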
    __aicore__ inline void copy_in(uint32_t offset, size_t len) {
        LocalTensor<half> input_local = input_queue.AllocTensor<half>();
        const size_t elem_per_block = 32 / sizeof(half);
        size_t tail = len % elem_per_block;
        // Round the copy length up to whole 32-byte blocks.
        len = len & ~(elem_per_block - 1);
        if (tail != 0) {
            len += elem_per_block;
        }
        DataCopy(input_local, input_gm[offset], len);
        input_queue.EnQue(input_local);
    }
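
    // copy_out writes one converted row back to global memory. Whole 32-byte
    // blocks go out with a plain DataCopy; the tail (len % 8 floats) is
    // written with DataCopyPad. On Ascend 310P, presumably because
    // DataCopyPad cannot be used for this case, the padding is zeroed and a
    // full block is written under atomic add so the elements past the row
    // end are not clobbered.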
    __aicore__ inline void copy_out(uint32_t offset, size_t len) {
        LocalTensor<float> output_local = output_queue.DeQue<float>();
        const size_t elem_per_block = 32 / sizeof(float);
        size_t tail = len % elem_per_block;
        len = len & ~(elem_per_block - 1);
        // Copy the whole 32-byte blocks first.
        if (len > 0) {
            DataCopy(output_gm[offset], output_local, len);
        }

        // Handle the remaining tail elements.
        if (tail != 0) {
#ifdef ASCEND_310P
            for (size_t i = tail; i < elem_per_block; i++) {
                output_local[len + i].SetValue(0, 0);
            }
            SetAtomicAdd<float>();
            DataCopy(output_gm[offset + len], output_local[len],
                     elem_per_block);
            SetAtomicNone();
#else
            DataCopyExtParams dataCopyParams;
            dataCopyParams.blockCount = 1;
            dataCopyParams.blockLen = tail * sizeof(float);
            DataCopyPad(output_gm[offset + len], output_local[len],
                        dataCopyParams);
#endif
        }
        output_queue.FreeTensor(output_local);
    }
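
    // calculate_row gathers one output row. The flat row index idx is
    // decomposed into (ne0, ne1, ne2) coordinates of the indices tensor,
    // the selected source row number is read from indices_gm, and the
    // matching F16 input row is staged in, cast to F32, and written out.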
    __aicore__ inline void calculate_row(int64_t idx) {
        const int64_t indices_ne2_idx = idx / (indices_ne[0] * indices_ne[1]);
        const int64_t indices_ne1_idx =
            (idx - indices_ne2_idx * indices_ne[0] * indices_ne[1]) /
            indices_ne[0];
        const int64_t indices_ne0_idx =
            (idx - indices_ne2_idx * indices_ne[0] * indices_ne[1] -
             indices_ne1_idx * indices_ne[0]);

        const int64_t indices_offset = indices_ne0_idx * indices_stride[0] +
                                       indices_ne1_idx * indices_stride[1] +
                                       indices_ne2_idx * indices_stride[2];
        const int32_t selected_row_idx = indices_gm.GetValue(indices_offset);

        const int64_t input_offset = selected_row_idx * input_stride[1] +
                                     indices_ne1_idx * input_stride[2] +
                                     indices_ne2_idx * input_stride[3];

        const int64_t output_offset = indices_ne0_idx * output_stride[1] +
                                      indices_ne1_idx * output_stride[2] +
                                      indices_ne2_idx * output_stride[3];

        copy_in(input_offset, input_ne[0]);
        LocalTensor<half> input_local = input_queue.DeQue<half>();
        LocalTensor<float> output_local = output_queue.AllocTensor<float>();

        Cast(output_local, input_local, RoundMode::CAST_NONE,
             local_buffer_elems);
        output_queue.EnQue(output_local);
        copy_out(output_offset, input_ne[0]);

        input_queue.FreeTensor(input_local);
    }
    __aicore__ inline void calculate() {
        for (int64_t i = ir; i < ir + dr; i++) {
            calculate_row(i);
        }
    }

  private:
    int64_t input_ne[4];
    size_t input_stride[4];

    int64_t indices_ne[4];
    size_t indices_stride[4];

    int64_t output_ne[4];
    size_t output_stride[4];

    size_t local_buffer_elems;

    int64_t ir;
    int64_t dr;

    TPipe pipe;
    GlobalTensor<half> input_gm;
    GlobalTensor<int32_t> indices_gm;
    GlobalTensor<float> output_gm;
    TQue<QuePosition::VECIN, BUFFER_NUM> input_queue;
    TQue<QuePosition::VECOUT, BUFFER_NUM> output_queue;
    int64_t op_block_idx;
};
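
// copy_to_ub does a simple byte-by-byte copy from global memory into a local
// array. It is only used for the small shape (ne) and stride (nb) arrays
// below, so the scalar loop is not a performance concern.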
template <typename T>
__aicore__ inline void copy_to_ub(GM_ADDR gm, T *ub, size_t size) {
    auto gm_ptr = (__gm__ uint8_t *)gm;
    auto ub_ptr = (uint8_t *)(ub);
    for (int32_t i = 0; i < size; ++i, ++ub_ptr, ++gm_ptr) {
        *ub_ptr = *gm_ptr;
    }
}
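
// Kernel entry point. Each ne/nb array holds four 8-byte values
// (4 * sizeof(int64_t), and size_t is 8 bytes on this target), which is why
// every copy_to_ub call below copies exactly 32 bytes.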
extern "C" __global__ __aicore__ void ascendc_get_row_f16(
    GM_ADDR input_gm, GM_ADDR indices_gm, GM_ADDR output_gm,
    GM_ADDR input_ne_gm, GM_ADDR input_nb_gm, GM_ADDR indices_ne_gm,
    GM_ADDR indices_nb_gm, GM_ADDR output_ne_gm, GM_ADDR output_nb_gm) {
    int64_t input_ne_ub[4];
    size_t input_nb_ub[4];
    int64_t indices_ne_ub[4];
    size_t indices_nb_ub[4];
    int64_t output_ne_ub[4];
    size_t output_nb_ub[4];

    copy_to_ub(input_ne_gm, input_ne_ub, 32);
    copy_to_ub(input_nb_gm, input_nb_ub, 32);
    copy_to_ub(indices_ne_gm, indices_ne_ub, 32);
    copy_to_ub(indices_nb_gm, indices_nb_ub, 32);
    copy_to_ub(output_ne_gm, output_ne_ub, 32);
    copy_to_ub(output_nb_gm, output_nb_ub, 32);

    GET_ROW_F16 op;
    op.init(input_gm, indices_gm, output_gm, input_ne_ub, input_nb_ub,
            indices_ne_ub, indices_nb_ub, output_ne_ub, output_nb_ub);
    op.calculate();
}