/*
 * Copyright (c) 2023-2024 The ggml authors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#ifndef CANN_ACL_TENSOR_H
#define CANN_ACL_TENSOR_H

#include <algorithm>
#include <cstring>

#include <aclnn/aclnn_base.h>

#include "common.h"
/**
 * @brief Maps a ggml_type to its corresponding aclDataType.
 *
 * @details This function takes a ggml_type as input and returns the
 *          corresponding aclDataType. It supports mapping for various
 *          ggml_types. If the input type does not match any of the
 *          predefined ggml_types, the function returns ACL_DT_UNDEFINED.
 *
 * @param type The ggml_type to be mapped.
 * @return The corresponding aclDataType. If the input type is not recognized,
 *         ACL_DT_UNDEFINED is returned.
 */
aclDataType ggml_cann_type_mapping(ggml_type type);
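
// Usage sketch (illustrative only; `src` is a hypothetical ggml_tensor*):
//
//   aclDataType dtype = ggml_cann_type_mapping(src->type);
//   if (dtype == ACL_DT_UNDEFINED) {
//       // the type has no ACL equivalent; callers should reject the op here
//   }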

/**
 * @brief Creates an ACL tensor from a ggml_tensor with an optional shape.
 *
 * @details This function creates an ACL tensor based on the properties of the
 *          provided ggml_tensor. It supports a custom shape by adjusting
 *          dimensions and strides accordingly. If a custom shape is applied,
 *          additional dimensions and strides are calculated based on the
 *          provided parameters.
 *
 * @param tensor Pointer to the ggml_tensor to be converted to an ACL tensor.
 * @param ne     Pointer to an array containing dimensions. Defaults to nullptr
 *               if no custom shape is applied.
 * @param nb     Pointer to an array containing strides. Defaults to nullptr
 *               if no custom shape is applied.
 * @param dims   Number of dimensions in the tensor. Defaults to 0 if no
 *               custom shape is applied.
 * @param format ACL tensor format. Defaults to ACL_FORMAT_ND.
 * @param offset Offset in bytes for the ACL tensor data. Defaults to 0.
 * @return Pointer to the created ACL tensor.
 */
aclTensor * ggml_cann_create_tensor(const ggml_tensor * tensor,
                                    int64_t * ne = nullptr,
                                    size_t * nb = nullptr,
                                    int64_t dims = 0,
                                    aclFormat format = ACL_FORMAT_ND,
                                    size_t offset = 0);
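
// Usage sketch (illustrative only; `src` is a hypothetical ggml_tensor*).
// With no extra arguments the ACL tensor mirrors the ggml_tensor's own shape
// and strides; release it once the operator has been queued:
//
//   aclTensor * acl_src = ggml_cann_create_tensor(src);
//   // ... launch aclnn operator using acl_src ...
//   aclDestroyTensor(acl_src);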

/**
 * @brief Template for creating an ACL tensor from the provided parameters.
 *        typename TYPE should be size_t or float.
 *
 * @details This function creates an ACL tensor using the provided data
 *          pointer, data type, dimensions, strides, format, offset, and
 *          additional parameters. It calculates the necessary dimensions and
 *          strides based on the provided ne and nb arrays, adjusting them for
 *          the ACL tensor creation. The ACL storage length is also calculated
 *          based on the provided dimensions and strides.
 *
 * @param data_ptr  Pointer to the data buffer for the ACL tensor.
 * @param dtype     ACL data type of the tensor.
 * @param type_size Size of each element in the tensor data buffer.
 * @param ne        Pointer to an array containing tensor dimensions.
 * @param nb        Pointer to an array containing tensor strides.
 * @param dims      Number of dimensions of the tensor.
 * @param format    ACL tensor format. Defaults to ACL_FORMAT_ND.
 * @param offset    Offset in bytes for the ACL tensor data. Defaults to 0.
 * @return Pointer to the created ACL tensor.
 */
template <typename TYPE>
aclTensor * ggml_cann_create_tensor(void * data_ptr,
                                    aclDataType dtype,
                                    TYPE type_size,
                                    int64_t * ne,
                                    TYPE * nb,
                                    int64_t dims,
                                    aclFormat format = ACL_FORMAT_ND,
                                    size_t offset = 0) {
    int64_t tmp_ne[GGML_MAX_DIMS * 2];
    int64_t tmp_stride[GGML_MAX_DIMS * 2];

    memcpy(tmp_ne, ne, dims * sizeof(int64_t));
    // ggml strides are in bytes; ACL expects strides in elements.
    for (int i = 0; i < dims; i++) {
        tmp_stride[i] = nb[i] / type_size;
    }

    // Storage length (in elements) of the strided view: the index of the
    // last addressable element, plus one.
    int64_t acl_storage_len = 1;
    for (int i = 0; i < dims; i++) {
        acl_storage_len += (tmp_ne[i] - 1) * tmp_stride[i];
    }

    // ggml orders dimensions innermost-first; ACL expects outermost-first.
    std::reverse(tmp_ne, tmp_ne + dims);
    std::reverse(tmp_stride, tmp_stride + dims);

    aclTensor * acl_tensor =
        aclCreateTensor(tmp_ne, dims, dtype, tmp_stride, offset / type_size, format, &acl_storage_len, 1, data_ptr);

    return acl_tensor;
}
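
// Usage sketch (illustrative only; `buf` is a hypothetical device buffer
// holding 16 contiguous FP32 values viewed as a 4x4 tensor, in ggml order):
//
//   int64_t ne[] = {4, 4};                              // innermost dim first
//   size_t  nb[] = {sizeof(float), 4 * sizeof(float)};  // strides in bytes
//   aclTensor * t = ggml_cann_create_tensor(buf, ACL_FLOAT, sizeof(float), ne, nb, 2);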

/**
 * @brief Checks if tensors require broadcasting based on their shapes.
 *
 * @details This function determines whether two ggml_tensors need to be
 *          broadcast for element-wise operations. Broadcasting is necessary
 *          if the shapes of the tensors are not identical and the differing
 *          dimension of t1 is not equal to 1.
 *
 * @param t0 Pointer to the first ggml_tensor.
 * @param t1 Pointer to the second ggml_tensor.
 * @return True if broadcasting is needed, false otherwise.
 *
 * @remarks This function iterates over the dimensions of t0 and t1. It checks
 *          whether each dimension in t1 differs from t0's corresponding
 *          dimension and is not equal to 1. If such a dimension is found,
 *          broadcasting is required to align t1 with t0 for element-wise
 *          operations.
 */
bool ggml_cann_need_bcast(const ggml_tensor * t0, const ggml_tensor * t1);
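
// The documented rule, as a sketch (not the actual definition, which lives
// in the corresponding .cpp file):
//
//   for (int i = 0; i < GGML_MAX_DIMS; i++) {
//       if (t1->ne[i] != t0->ne[i] && t1->ne[i] != 1) {
//           return true;
//       }
//   }
//   return false;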

/**
 * @brief Computes broadcast shapes and strides for two ggml_tensors.
 *
 * @details This function calculates the broadcast shapes and strides for two
 *          ggml_tensors, following broadcasting rules similar to numpy's. It
 *          adjusts dimensions and strides to ensure compatibility for
 *          element-wise operations where one tensor can be broadcast to match
 *          the shape of the other.
 *
 * @param src0 Pointer to the first ggml_tensor.
 * @param src1 Pointer to the second ggml_tensor.
 * @param bcast_ne_src0 Output array to store broadcast dimensions for src0.
 * @param bcast_ne_src1 Output array to store broadcast dimensions for src1.
 * @param bcast_nb_src0 Output array to store broadcast strides for src0.
 * @param bcast_nb_src1 Output array to store broadcast strides for src1.
 * @return Number of dimensions in the broadcast shape.
 *
 * @pre ggml_can_repeat(src1, src0) must return true, indicating src1 can be
 *      broadcast to match src0.
 *
 * @remarks This function iterates over the dimensions of src0 and src1,
 *          calculating the necessary broadcast dimensions and strides. If a
 *          dimension requires broadcasting (i.e., its size in src1 is smaller
 *          than in src0), an additional dimension is inserted with its size
 *          chosen to match src0's dimension. This adjustment ensures that
 *          src1 can be element-wise broadcast to src0's shape.
 *
 * How it works:
 * \code
 * if dim0 has padding:
 * a -> (2, 2) padding = 2
 *  a: [[1, 2, *, *]
 *      [2, 3, *, *]]
 * nb = (8, 4, 2)
 *
 * if a should bcast with b -> (2, 4)
 * b' -> (2, 2, 2)
 *  b : [[1, 2, 3, 4, *, *]
 *       [5, 6, 7, 8, *, *]]
 * nb = (12, 6, 1)
 *
 * after bcast:
 *  a' -> (2, 1, 2)
 *  a': [[[1, 2], *, *]
 *       [[2, 3], *, *]]
 * nb = (8, 4, 2, 1)
 *
 *  b' : [[[1, 2], [3, 4], *, *]
 *        [[5, 6], [7, 8], *, *]]
 * nb = (12, 6, 2, 1)
 * \endcode
 *
 * dim1 in a is an inserted dim; an nb entry is added for dim1, and all other
 * nb entries shift to the next position in order.
 */
int64_t ggml_cann_get_bcast_shape(const ggml_tensor * src0,
                                  const ggml_tensor * src1,
                                  int64_t * bcast_ne_src0,
                                  int64_t * bcast_ne_src1,
                                  size_t * bcast_nb_src0,
                                  size_t * bcast_nb_src1);

// Bcast macro to avoid duplicate code.
#define BCAST_SHAPE(src0, src1)                                                                      \
    int64_t bcast_##src0##_ne[GGML_MAX_DIMS * 2];                                                    \
    int64_t bcast_##src1##_ne[GGML_MAX_DIMS * 2];                                                    \
    size_t bcast_##src0##_nb[GGML_MAX_DIMS * 2];                                                     \
    size_t bcast_##src1##_nb[GGML_MAX_DIMS * 2];                                                     \
    int64_t bcast_dims = ggml_cann_get_bcast_shape(src0, src1, bcast_##src0##_ne, bcast_##src1##_ne, \
                                                   bcast_##src0##_nb, bcast_##src1##_nb);

#define BCAST_PARAM(tensor) bcast_##tensor##_ne, bcast_##tensor##_nb, bcast_dims
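
// Usage sketch (illustrative only; `src0`/`src1` are hypothetical
// ggml_tensor pointers inside an op implementation). BCAST_SHAPE declares the
// bcast_* arrays in the current scope, and BCAST_PARAM forwards the matching
// ne/nb/dims triple to ggml_cann_create_tensor:
//
//   BCAST_SHAPE(src0, src1)
//   aclTensor * acl_src0 = ggml_cann_create_tensor(src0, BCAST_PARAM(src0));
//   aclTensor * acl_src1 = ggml_cann_create_tensor(src1, BCAST_PARAM(src1));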

/**
 * @brief Calculates broadcast shapes for matrix multiplication.
 *
 * @details This function computes the broadcast shapes required for matrix
 *          multiplication based on the input, weight, and destination tensor
 *          shapes. It ensures that the dimensions of the weight tensor are
 *          expanded appropriately to satisfy matrix multiplication broadcast
 *          rules.
 *
 * @param input_ne  Array containing the dimensions of the input tensor.
 * @param weight_ne Array containing the dimensions of the weight tensor.
 * @param dst_ne    Array containing the dimensions of the destination tensor.
 * @param input_nb  Array containing the strides of the input tensor.
 * @param weight_nb Array containing the strides of the weight tensor.
 * @param dst_nb    Array containing the strides of the destination tensor.
 * @param bcast_input_ne  Output array for broadcast input tensor dimensions.
 * @param bcast_weight_ne Output array for broadcast weight tensor dimensions.
 * @param bcast_dst_ne    Output array for broadcast destination tensor dimensions.
 * @param bcast_input_nb  Output array for broadcast input tensor strides.
 * @param bcast_weight_nb Output array for broadcast weight tensor strides.
 * @param bcast_dst_nb    Output array for broadcast destination tensor strides.
 * @return The number of dimensions in the broadcast tensors.
 *
 * @remarks This function iterates over the tensor dimensions and calculates
 *          the broadcast shapes needed for matrix multiplication. It ensures
 *          that dimensions where the weight tensor requires expansion are
 *          handled appropriately to conform with broadcasting rules.
 * @note Compared with ggml_cann_get_bcast_shape, mul_mat broadcasting must
 *       insert the new dimension before the broadcast dimension.
 * @sa ggml_cann_get_bcast_shape
 */
int64_t ggml_cann_get_mulmat_bcast_shape(const int64_t * input_ne,
                                         const int64_t * weight_ne,
                                         const int64_t * dst_ne,
                                         const size_t * input_nb,
                                         const size_t * weight_nb,
                                         const size_t * dst_nb,
                                         int64_t * bcast_input_ne,
                                         int64_t * bcast_weight_ne,
                                         int64_t * bcast_dst_ne,
                                         size_t * bcast_input_nb,
                                         size_t * bcast_weight_nb,
                                         size_t * bcast_dst_nb);

// Bcast macro to avoid duplicate code.
#define BCAST_MUL_MAT_SHAPE(input, weight, dst)                                                           \
    int64_t bcast_##input##_ne[GGML_MAX_DIMS * 2];                                                        \
    int64_t bcast_##weight##_ne[GGML_MAX_DIMS * 2];                                                       \
    int64_t bcast_##dst##_ne[GGML_MAX_DIMS * 2];                                                          \
    size_t bcast_##input##_nb[GGML_MAX_DIMS * 2];                                                         \
    size_t bcast_##weight##_nb[GGML_MAX_DIMS * 2];                                                        \
    size_t bcast_##dst##_nb[GGML_MAX_DIMS * 2];                                                           \
    int64_t bcast_dims = ggml_cann_get_mulmat_bcast_shape(                                                \
        input->ne, weight->ne, dst->ne, input->nb, weight->nb, dst->nb, bcast_##input##_ne,               \
        bcast_##weight##_ne, bcast_##dst##_ne, bcast_##input##_nb, bcast_##weight##_nb, bcast_##dst##_nb);

#define BCAST_MUL_MAT_PARAM(tensor) bcast_##tensor##_ne, bcast_##tensor##_nb, bcast_dims
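
// Usage sketch (illustrative only; `input`, `weight`, and `dst` are
// hypothetical ggml_tensor pointers inside a mul_mat implementation):
//
//   BCAST_MUL_MAT_SHAPE(input, weight, dst)
//   aclTensor * acl_weight = ggml_cann_create_tensor(weight, BCAST_MUL_MAT_PARAM(weight));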

#endif  // CANN_ACL_TENSOR_H