concat.cpp

//
// MIT license
// Copyright (C) 2024 Intel Corporation
// SPDX-License-Identifier: MIT
//

//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//

#include "concat.hpp"
#include "common.hpp"
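
// Kernel for concatenation along dim 0 (the innermost dimension, ne0): each
// work-item computes one output element; row indices below ne00 are copied
// from src0, the remainder from src1.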
static void concat_f32_dim0(const float *x, const float *y, float *dst,
                            const int ne0, const int ne00,
                            const sycl::nd_item<3> &item_ct1) {
  int nidx = item_ct1.get_local_id(2) +
             item_ct1.get_group(2) * item_ct1.get_local_range(2);
  if (nidx >= ne0) {
    return;
  }
  // operation
  int offset_dst = nidx + item_ct1.get_group(1) * ne0 +
                   item_ct1.get_group(0) * ne0 * item_ct1.get_group_range(1);
  if (nidx < ne00) { // src0
    int offset_src = nidx + item_ct1.get_group(1) * ne00 +
                     item_ct1.get_group(0) * ne00 * item_ct1.get_group_range(1);
    dst[offset_dst] = x[offset_src];
  } else {
    int offset_src =
        nidx - ne00 + item_ct1.get_group(1) * (ne0 - ne00) +
        item_ct1.get_group(0) * (ne0 - ne00) * item_ct1.get_group_range(1);
    dst[offset_dst] = y[offset_src];
  }
}
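
// Concatenation along dim 1 (ne1): the work-group's index in grid dimension 1
// decides the source; groups below ne01 read from src0, the rest from src1.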
static void concat_f32_dim1(const float *x, const float *y, float *dst,
                            const int ne0, const int ne01,
                            const sycl::nd_item<3> &item_ct1) {
  int nidx = item_ct1.get_local_id(2) +
             item_ct1.get_group(2) * item_ct1.get_local_range(2);
  if (nidx >= ne0) {
    return;
  }
  // operation
  int offset_dst = nidx + item_ct1.get_group(1) * ne0 +
                   item_ct1.get_group(0) * ne0 * item_ct1.get_group_range(1);
  if (item_ct1.get_group(1) < ne01) { // src0
    int offset_src =
        nidx + item_ct1.get_group(1) * ne0 + item_ct1.get_group(0) * ne0 * ne01;
    dst[offset_dst] = x[offset_src];
  } else {
    int offset_src =
        nidx + (item_ct1.get_group(1) - ne01) * ne0 +
        item_ct1.get_group(0) * ne0 * (item_ct1.get_group_range(1) - ne01);
    dst[offset_dst] = y[offset_src];
  }
}
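
// Concatenation along dim 2 (ne2): the work-group's index in grid dimension 0
// (which maps to ne2) selects src0 below ne02, src1 otherwise.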
static void concat_f32_dim2(const float *x, const float *y, float *dst,
                            const int ne0, const int ne02,
                            const sycl::nd_item<3> &item_ct1) {
  int nidx = item_ct1.get_local_id(2) +
             item_ct1.get_group(2) * item_ct1.get_local_range(2);
  if (nidx >= ne0) {
    return;
  }
  // operation
  int offset_dst = nidx + item_ct1.get_group(1) * ne0 +
                   item_ct1.get_group(0) * ne0 * item_ct1.get_group_range(1);
  if (item_ct1.get_group(0) < ne02) { // src0
    int offset_src = nidx + item_ct1.get_group(1) * ne0 +
                     item_ct1.get_group(0) * ne0 * item_ct1.get_group_range(1);
    dst[offset_dst] = x[offset_src];
  } else {
    int offset_src =
        nidx + item_ct1.get_group(1) * ne0 +
        (item_ct1.get_group(0) - ne02) * ne0 * item_ct1.get_group_range(1);
    dst[offset_dst] = y[offset_src];
  }
}
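
// Launcher for the contiguous kernels: the grid is (ne2, ne1, num_blocks)
// work-groups, each SYCL_CONCAT_BLOCK_SIZE work-items wide along the
// innermost dimension. The caller handles dim 3 separately, so the default
// path only ever sees dim 2.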
static void concat_f32_sycl(const float *x, const float *y, float *dst,
                            int ne00, int ne01, int ne02, int ne0, int ne1,
                            int ne2, int dim, queue_ptr stream) {
  int num_blocks = (ne0 + SYCL_CONCAT_BLOCK_SIZE - 1) / SYCL_CONCAT_BLOCK_SIZE;
  sycl::range<3> gridDim(ne2, ne1, num_blocks);
  switch (dim) {
  case 0:
    stream->parallel_for(
        sycl::nd_range<3>(gridDim *
                              sycl::range<3>(1, 1, SYCL_CONCAT_BLOCK_SIZE),
                          sycl::range<3>(1, 1, SYCL_CONCAT_BLOCK_SIZE)),
        [=](sycl::nd_item<3> item_ct1) {
          concat_f32_dim0(x, y, dst, ne0, ne00, item_ct1);
        });
    break;
  case 1:
    stream->parallel_for(
        sycl::nd_range<3>(gridDim *
                              sycl::range<3>(1, 1, SYCL_CONCAT_BLOCK_SIZE),
                          sycl::range<3>(1, 1, SYCL_CONCAT_BLOCK_SIZE)),
        [=](sycl::nd_item<3> item_ct1) {
          concat_f32_dim1(x, y, dst, ne0, ne01, item_ct1);
        });
    break;
  // dim >= 2 will be dispatched to the default path
  default:
    stream->parallel_for(
        sycl::nd_range<3>(gridDim *
                              sycl::range<3>(1, 1, SYCL_CONCAT_BLOCK_SIZE),
                          sycl::range<3>(1, 1, SYCL_CONCAT_BLOCK_SIZE)),
        [=](sycl::nd_item<3> item_ct1) {
          concat_f32_dim2(x, y, dst, ne0, ne02, item_ct1);
        });
    break;
  }
}
// non-contiguous kernel (slow)
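// Each work-group walks one output row, recomputing byte offsets from the
// nb* strides for every element, so arbitrary layouts work at the cost of
// coalescing. o[dim] shifts indices that fall past src0's extent back into
// src1's own coordinate system.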
static void concat_f32_sycl_non_cont(
    queue_ptr stream, const char *src0, const char *src1, char *dst,
    int64_t ne00, int64_t ne01, int64_t ne02, int64_t ne03, uint64_t nb00,
    uint64_t nb01, uint64_t nb02, uint64_t nb03, int64_t /*ne10*/,
    int64_t /*ne11*/, int64_t /*ne12*/, int64_t /*ne13*/, uint64_t nb10,
    uint64_t nb11, uint64_t nb12, uint64_t nb13, int64_t ne0, int64_t ne1,
    int64_t ne2, int64_t ne3, uint64_t nb0, uint64_t nb1, uint64_t nb2,
    uint64_t nb3, int32_t dim) {
  sycl::range<3> gridDim(ne3, ne2, ne1);
  stream->parallel_for(
      sycl::nd_range<3>(gridDim, sycl::range<3>(1, 1, 1)),
      [=](sycl::nd_item<3> item_ct1) {
        int64_t i3 = item_ct1.get_group(0);
        int64_t i2 = item_ct1.get_group(1);
        int64_t i1 = item_ct1.get_group(2);

        int64_t o[4] = {0, 0, 0, 0};
        o[dim] = dim == 0 ? ne00 : (dim == 1 ? ne01 : (dim == 2 ? ne02 : ne03));

        const float *x;

        for (int i0 = item_ct1.get_local_id(2); i0 < ne0;
             i0 += item_ct1.get_local_range(2)) {
          if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) {
            x = (const float *)(src0 + (i3)*nb03 + (i2)*nb02 + (i1)*nb01 +
                                (i0)*nb00);
          } else {
            x = (const float *)(src1 + (i3 - o[3]) * nb13 + (i2 - o[2]) * nb12 +
                                (i1 - o[1]) * nb11 + (i0 - o[0]) * nb10);
          }

          float *y = (float *)(dst + i3 * nb3 + i2 * nb2 + i1 * nb1 + i0 * nb0);

          *y = *x;
        }
      });
}
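
// Backend entry point for the concat op (f32 only). Contiguous inputs use
// the fast per-i3 launcher above, except for dim 3 where the result is just
// src0's bytes followed by src1's (two memcpys); anything non-contiguous
// falls back to the slow kernel.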
void ggml_sycl_op_concat(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
                         const ggml_tensor *src1, ggml_tensor *dst) {
  queue_ptr stream = ctx.stream();

  const int32_t dim = ((int32_t *)dst->op_params)[0];

  if (ggml_is_contiguous(src0) && ggml_is_contiguous(src1)) {
    const float *src0_d = (const float *)src0->data;
    const float *src1_d = (const float *)src1->data;

    float *dst_d = (float *)dst->data;

    if (dim != 3) {
      for (int i3 = 0; i3 < dst->ne[3]; i3++) {
        concat_f32_sycl(
            src0_d + i3 * (src0->nb[3] / 4), src1_d + i3 * (src1->nb[3] / 4),
            dst_d + i3 * (dst->nb[3] / 4), src0->ne[0], src0->ne[1],
            src0->ne[2], dst->ne[0], dst->ne[1], dst->ne[2], dim, stream);
      }
    } else {
      const size_t size0 = ggml_nbytes(src0);
      const size_t size1 = ggml_nbytes(src1);

      SYCL_CHECK(CHECK_TRY_ERROR(stream->memcpy(dst_d, src0_d, size0).wait()));
      SYCL_CHECK(CHECK_TRY_ERROR(
          stream->memcpy(dst_d + size0 / 4, src1_d, size1).wait()));
    }
  } else
    concat_f32_sycl_non_cont(
        stream, (const char *)src0->data, (const char *)src1->data,
        (char *)dst->data, src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3],
        src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3], src1->ne[0],
        src1->ne[1], src1->ne[2], src1->ne[3], src1->nb[0], src1->nb[1],
        src1->nb[2], src1->nb[3], dst->ne[0], dst->ne[1], dst->ne[2],
        dst->ne[3], dst->nb[0], dst->nb[1], dst->nb[2], dst->nb[3], dim);
}