getrows.cpp

//
// MIT license
// Copyright (C) 2024 Intel Corporation
// SPDX-License-Identifier: MIT
//

//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//

#include "ggml-impl.h"
#include "common.hpp"
#include "dequantize.hpp"
#include "getrows.hpp"
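
// Dequantizing row-gather kernel for block-quantized src0. Each work-item
// produces two consecutive destination elements (i00 is scaled by 2) because
// one dequantize_kernel call yields a dfloat2 pair. i10 walks the row indices
// in src1; i11/i12 are the outer batch coordinates recovered from the
// flattened slowest grid dimension.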
template<int qk, int qr, dequantize_kernel_t dequantize_kernel, typename dst_t>
static void k_get_rows(
            const void * src0, const int32_t * src1, dst_t * dst,
            int64_t ne00, /*int64_t ne01, int64_t ne02, int64_t ne03,*/
            /*int64_t ne10, int64_t ne11,*/ int64_t ne12, /*int64_t ne13,*/
            /*size_t s0,*/ size_t s1, size_t s2, size_t s3,
            /*size_t nb00,*/ size_t nb01, size_t nb02, size_t nb03,
            size_t s10, size_t s11, size_t s12,
            const sycl::nd_item<3> &item_ct1/*, size_t s13*/) {

    const int i00 = (item_ct1.get_group(2) * item_ct1.get_local_range(2) +
                     item_ct1.get_local_id(2)) * 2;
    const int i10 = item_ct1.get_local_range(1) * item_ct1.get_group(1) +
                    item_ct1.get_local_id(1);
    const int i11 = (item_ct1.get_group(0) * item_ct1.get_local_range(0) +
                     item_ct1.get_local_id(0)) / ne12;
    const int i12 = (item_ct1.get_group(0) * item_ct1.get_local_range(0) +
                     item_ct1.get_local_id(0)) % ne12;

    if (i00 >= ne00) {
        return;
    }

    const int i01 = src1[i10*s10 + i11*s11 + i12*s12];

    dst_t * dst_row = dst + i10*s1 + i11*s2 + i12*s3;
    const void * src0_row = (const char *) src0 + i01*nb01 + i11*nb02 + i12*nb03;

    const int ib       = i00/qk;        // block index
    const int iqs      = (i00%qk)/qr;   // quant index
    const int iybs     = i00 - i00%qk;  // dst block start index
    const int y_offset = qr == 1 ? 1 : qk/2;

    // dequantize
    dfloat2 v;
    dequantize_kernel(src0_row, ib, iqs, v);

    dst_row[iybs + iqs + 0]        = v.x();
    dst_row[iybs + iqs + y_offset] = v.y();
}
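
// Variant of k_get_rows for the reordered Q4_0 layout, where the quantized
// nibbles are stored contiguously and the per-block scales live in a separate
// half-precision array (src0_dq). The source position is computed from a flat
// element offset (i01 * ncols + i00) rather than from the row strides
// nb01..nb03, which are unused here.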
template<int qk, int qr, dequantize_kernel_t_reorder dequantize_kernel_reorder, typename dst_t>
static void k_get_rows_reorder(
            const void * src0, const void * src0_dq, const int32_t * src1, dst_t * dst,
            int64_t ne00, /*int64_t ne01, int64_t ne02, int64_t ne03,*/
            /*int64_t ne10, int64_t ne11,*/ int64_t ne12, /*int64_t ne13,*/
            /*size_t s0,*/ size_t s1, size_t s2, size_t s3,
            /*size_t nb00,*/ size_t nb01, size_t nb02, size_t nb03,
            size_t s10, size_t s11, size_t s12,
            const sycl::nd_item<3> &item_ct1/*, size_t s13*/) {

    const int i00 = (item_ct1.get_group(2) * item_ct1.get_local_range(2) +
                     item_ct1.get_local_id(2)) * 2;
    const int i10 = item_ct1.get_local_range(1) * item_ct1.get_group(1) +
                    item_ct1.get_local_id(1);
    const int i11 = (item_ct1.get_group(0) * item_ct1.get_local_range(0) +
                     item_ct1.get_local_id(0)) / ne12;
    const int i12 = (item_ct1.get_group(0) * item_ct1.get_local_range(0) +
                     item_ct1.get_local_id(0)) % ne12;

    if (i00 >= ne00) {
        return;
    }

    auto ncols = ne00;
    const int i01 = src1[i10*s10 + i11*s11 + i12*s12];

    dst_t * dst_row = dst + i10*s1 + i11*s2 + i12*s3;

    const int src0_off = i01 * ncols + i00;
    const int ib       = src0_off / QK4_0;  // block index
    const int iqs      = (i00%qk)/qr;       // x quant index
    const int iybs     = i00 - i00%qk;      // dst block start index
    const int y_offset = qr == 1 ? 1 : qk/2;

    // dequantize
    dfloat2 v;
    dequantize_kernel_reorder((const void *)src0_dq, ib, (const void *)src0, src0_off/2, v);

    dst_row[iybs + iqs + 0]        = v.x();
    dst_row[iybs + iqs + y_offset] = v.y();

    GGML_UNUSED(nb01);
    GGML_UNUSED(nb02);
    GGML_UNUSED(nb03);
}
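
// Plain copy kernel for f32/f16 src0: no dequantization, so each work-item
// copies exactly one element of the selected row.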
template<typename src0_t, typename dst_t>
static void k_get_rows_float(
            const src0_t * src0, const int32_t * src1, dst_t * dst,
            int64_t ne00, /*int64_t ne01, int64_t ne02, int64_t ne03,*/
            /*int64_t ne10, int64_t ne11,*/ int64_t ne12, /*int64_t ne13,*/
            /*size_t s0,*/ size_t s1, size_t s2, size_t s3,
            /*size_t nb00,*/ size_t nb01, size_t nb02, size_t nb03,
            size_t s10, size_t s11, size_t s12,
            const sycl::nd_item<3> &item_ct1/*, size_t s13*/) {

    const int i00 = item_ct1.get_group(2) * item_ct1.get_local_range(2) +
                    item_ct1.get_local_id(2);
    const int i10 = item_ct1.get_local_range(1) * item_ct1.get_group(1) +
                    item_ct1.get_local_id(1);
    const int i11 = (item_ct1.get_group(0) * item_ct1.get_local_range(0) +
                     item_ct1.get_local_id(0)) / ne12;
    const int i12 = (item_ct1.get_group(0) * item_ct1.get_local_range(0) +
                     item_ct1.get_local_id(0)) % ne12;

    if (i00 >= ne00) {
        return;
    }

    const int i01 = src1[i10*s10 + i11*s11 + i12*s12];

    dst_t * dst_row = dst + i10*s1 + i11*s2 + i12*s3;
    const src0_t * src0_row = (const src0_t *)((const char *)src0 + i01*nb01 + i11*nb02 + i12*nb03);

    dst_row[i00] = src0_row[i00];
}
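
// Host-side launcher for the quantized path. The grid packs (ne11 * ne12)
// into the slowest dimension, the ne10 row indices into the middle one, and
// the column blocks into the fastest one. ne00 must be even because the
// kernel writes two values per work-item.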
template <int qk, int qr, dequantize_kernel_t dq>
static void get_rows_sycl(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1,
                          ggml_tensor *dst, const void *src0_dd,
                          const int32_t *src1_dd, float *dst_dd,
                          queue_ptr stream) {

    GGML_TENSOR_BINARY_OP_LOCALS

    const sycl::range<3> block_dims(1, 1, SYCL_GET_ROWS_BLOCK_SIZE);
    const int block_num_x = (ne00 + 2*SYCL_GET_ROWS_BLOCK_SIZE - 1) / (2*SYCL_GET_ROWS_BLOCK_SIZE);
    const sycl::range<3> block_nums(ne11 * ne12, ne10, block_num_x);

    // strides in elements
    //const size_t s0 = nb0 / ggml_element_size(dst);
    const size_t s1 = nb1 / ggml_element_size(dst);
    const size_t s2 = nb2 / ggml_element_size(dst);
    const size_t s3 = nb3 / ggml_element_size(dst);

    const size_t s10 = nb10 / ggml_element_size(src1);
    const size_t s11 = nb11 / ggml_element_size(src1);
    const size_t s12 = nb12 / ggml_element_size(src1);
    //const size_t s13 = nb13 / ggml_element_size(src1);

    GGML_ASSERT(ne00 % 2 == 0);

    stream->parallel_for(sycl::nd_range<3>(block_nums * block_dims, block_dims),
                         [=](sycl::nd_item<3> item_ct1) {
                             k_get_rows<qk, qr, dq>(
                                 src0_dd, src1_dd, dst_dd, ne00, ne12, s1, s2,
                                 s3, nb01, nb02, nb03, s10, s11, s12, item_ct1);
                         });

    GGML_UNUSED(dst);
    GGML_UNUSED(ctx);
}
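
// Launcher for the reordered Q4_0 layout: the device buffer holds
// nrows * ncols / 2 bytes of packed nibbles followed by one sycl::half scale
// per block, so the scale array is found by offsetting past the quantized
// data.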
template <int qk, int qr, dequantize_kernel_t_reorder dq_reorder>
static void get_rows_sycl_reorder(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1,
                                  ggml_tensor *dst, const void *src0_dd,
                                  const int32_t *src1_dd, float *dst_dd,
                                  queue_ptr stream) {

    GGML_TENSOR_BINARY_OP_LOCALS

    const sycl::range<3> block_dims(1, 1, SYCL_GET_ROWS_BLOCK_SIZE);
    const int block_num_x = (ne00 + 2*SYCL_GET_ROWS_BLOCK_SIZE - 1) / (2*SYCL_GET_ROWS_BLOCK_SIZE);
    const sycl::range<3> block_nums(ne11 * ne12, ne10, block_num_x);

    // strides in elements
    //const size_t s0 = nb0 / ggml_element_size(dst);
    const size_t s1 = nb1 / ggml_element_size(dst);
    const size_t s2 = nb2 / ggml_element_size(dst);
    const size_t s3 = nb3 / ggml_element_size(dst);

    const size_t s10 = nb10 / ggml_element_size(src1);
    const size_t s11 = nb11 / ggml_element_size(src1);
    const size_t s12 = nb12 / ggml_element_size(src1);
    //const size_t s13 = nb13 / ggml_element_size(src1);

    GGML_ASSERT(ne00 % 2 == 0);

    const uint8_t * src0_q = (const uint8_t *)src0_dd;
    const size_t ncols = ne00;
    const size_t nrows = ne01;
    const sycl::half * src0_dq = (const sycl::half *)(src0_q + nrows * ncols / 2);

    stream->parallel_for(sycl::nd_range<3>(block_nums * block_dims, block_dims),
                         [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
                             k_get_rows_reorder<qk, qr, dq_reorder>(
                                 src0_dd, src0_dq, src1_dd, dst_dd, ne00, ne12, s1, s2,
                                 s3, nb01, nb02, nb03, s10, s11, s12, item_ct1);
                         });

    GGML_UNUSED(dst);
    GGML_UNUSED(ctx);
}
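
// Launcher for the f32/f16 copy path. One element per work-item, so there is
// no even-column requirement; the fp16 aspect check guards the sycl::half
// instantiation of the kernel.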
template <typename src0_t>
static void get_rows_sycl_float(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
                                const ggml_tensor *src1, ggml_tensor *dst,
                                const src0_t *src0_dd, const int32_t *src1_dd,
                                float *dst_dd, queue_ptr stream) {

    GGML_TENSOR_BINARY_OP_LOCALS

    const sycl::range<3> block_dims(1, 1, SYCL_GET_ROWS_BLOCK_SIZE);
    const int block_num_x = (ne00 + SYCL_GET_ROWS_BLOCK_SIZE - 1) / SYCL_GET_ROWS_BLOCK_SIZE;
    const sycl::range<3> block_nums(ne11 * ne12, ne10, block_num_x);

    // strides in elements
    //const size_t s0 = nb0 / ggml_element_size(dst);
    const size_t s1 = nb1 / ggml_element_size(dst);
    const size_t s2 = nb2 / ggml_element_size(dst);
    const size_t s3 = nb3 / ggml_element_size(dst);

    const size_t s10 = nb10 / ggml_element_size(src1);
    const size_t s11 = nb11 / ggml_element_size(src1);
    const size_t s12 = nb12 / ggml_element_size(src1);
    //const size_t s13 = nb13 / ggml_element_size(src1);

    {
        dpct::has_capability_or_fail(stream->get_device(),
                                     {sycl::aspect::fp16});

        stream->parallel_for(
            sycl::nd_range<3>(block_nums * block_dims, block_dims),
            [=](sycl::nd_item<3> item_ct1) {
                k_get_rows_float(src0_dd, src1_dd, dst_dd, ne00, ne12, s1, s2,
                                 s3, nb01, nb02, nb03, s10, s11, s12, item_ct1);
            });
    }

    GGML_UNUSED(dst);
    GGML_UNUSED(ctx);
}
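
// Entry point: asserts that the row-index tensor is i32, the output is f32,
// and all first-dimension strides are contiguous, then dispatches on the
// src0 type. Q4_0 takes the reorder path when ctx.opt_feature.reorder is set
// and the op qualifies; k-quants are not handled yet.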
void ggml_sycl_op_get_rows(ggml_backend_sycl_context & ctx, ggml_tensor *dst) {

    GGML_ASSERT(dst->src[1]->type == GGML_TYPE_I32);
    GGML_ASSERT(dst->type == GGML_TYPE_F32);

    GGML_ASSERT(dst->src[0]->nb[0] == ggml_type_size(dst->src[0]->type));
    GGML_ASSERT(dst->src[1]->nb[0] == ggml_type_size(dst->src[1]->type));
    GGML_ASSERT(dst->nb[0] == ggml_type_size(dst->type));

    const int32_t * src1_i32 = (const int32_t *) dst->src[1]->data;

    /* TODO: Refactor and remove duplicates */
    switch (dst->src[0]->type) {
        case GGML_TYPE_F16:
            get_rows_sycl_float(ctx, dst->src[0], dst->src[1], dst, (const sycl::half *)dst->src[0]->data,
                                src1_i32, (float *)dst->data, ctx.stream());
            break;
        case GGML_TYPE_F32:
            get_rows_sycl_float(ctx, dst->src[0], dst->src[1], dst, (const float *)dst->src[0]->data,
                                src1_i32, (float *)dst->data, ctx.stream());
            break;
        case GGML_TYPE_Q4_0:
            if (ctx.opt_feature.reorder && dst->op == GGML_OP_MUL_MAT) {
                get_rows_sycl_reorder<QK4_0, QR4_0, dequantize_q4_0_reorder>(ctx, dst->src[0], dst->src[1], dst,
                                (const float *)dst->src[0]->data, src1_i32, (float *)dst->data, ctx.stream());
            } else {
                get_rows_sycl<QK4_0, QR4_0, dequantize_q4_0>(ctx, dst->src[0], dst->src[1], dst,
                                (const float *)dst->src[0]->data, src1_i32, (float *)dst->data, ctx.stream());
            }
            break;
        case GGML_TYPE_Q4_1:
            get_rows_sycl<QK4_1, QR4_1, dequantize_q4_1>(ctx, dst->src[0], dst->src[1], dst,
                                (const float *)dst->src[0]->data, src1_i32, (float *)dst->data, ctx.stream());
            break;
        case GGML_TYPE_Q5_0:
            get_rows_sycl<QK5_0, QR5_0, dequantize_q5_0>(ctx, dst->src[0], dst->src[1], dst,
                                (const float *)dst->src[0]->data, src1_i32, (float *)dst->data, ctx.stream());
            break;
        case GGML_TYPE_Q5_1:
            get_rows_sycl<QK5_1, QR5_1, dequantize_q5_1>(ctx, dst->src[0], dst->src[1], dst,
                                (const float *)dst->src[0]->data, src1_i32, (float *)dst->data, ctx.stream());
            break;
        case GGML_TYPE_Q8_0:
            get_rows_sycl<QK8_0, QR8_0, dequantize_q8_0>(ctx, dst->src[0], dst->src[1], dst,
                                (const float *)dst->src[0]->data, src1_i32, (float *)dst->data, ctx.stream());
            break;
        default:
            // TODO: k-quants
            GGML_LOG_ERROR("%s: unsupported type: %s\n", __func__, ggml_type_name(dst->src[0]->type));
            GGML_ABORT("fatal error");
    }
}