// acl_tensor.h
  1. /*
  2. * Copyright (c) 2023-2024 The ggml authors
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a copy
  5. * of this software and associated documentation files (the "Software"), to
  6. * deal in the Software without restriction, including without limitation the
  7. * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
  8. * sell copies of the Software, and to permit persons to whom the Software is
  9. * furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  17. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  18. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  19. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  20. * IN THE SOFTWARE.
  21. */
  22. #ifndef CANN_ACL_TENSOR_H
  23. #define CANN_ACL_TENSOR_H
  24. #include <algorithm>
  25. #include <cstring>
  26. #include <aclnn/aclnn_base.h>
  27. #include "common.h"
  28. /**
  29. * @brief Maps a ggml_type to its corresponding aclDataType.
  30. *
  31. * @details This function takes a ggml_type as input and returns the corresponding
  32. * aclDataType. It supports mapping for various ggml_types. If the input type
  33. * does not match any of the predefined ggml_types, the function returns
  34. * ACL_DT_UNDEFINED.
  35. *
  36. * @param type The ggml_type to be mapped.
  37. * @return The corresponding aclDataType. If the input type is not recognized,
  38. * ACL_DT_UNDEFINED is returned.
  39. */
  40. aclDataType ggml_cann_type_mapping(ggml_type type);
  41. /**
  42. * @brief Creates an ACL tensor from a ggml_tensor with optional shape.
  43. *
  44. * @details This function creates an ACL tensor based on the properties of the
  45. * provided ggml_tensor. It supports customer shape by adjusting dimensions
  46. * and strides accordingly. If customer shape is applied, additional
  47. * dimensions and strides are calculated based on the provided parameters.
  48. *
  49. * @param tensor Pointer to the ggml_tensor to be converted to ACL tensor.
  50. * @param ne Pointer to an array containing dimensions. Defaults to nullptr
  51. * if no customer shape is applied.
  52. * @param nb Pointer to an array containing strides. Defaults to nullptr
  53. * if no customer shape is applied.
  54. * @param dims Number of dimensions in the tensor. Defaults to 0 if no customer
  55. * shape is applied.
  56. * @param format ACL tensor format. Defaults to ACL_FORMAT_ND.
  57. * @param offset Offset in bytes for the ACL tensor data. Defaults to 0.
  58. * @return Pointer to the created ACL tensor.
  59. */
  60. aclTensor* ggml_cann_create_tensor(const ggml_tensor* tensor, int64_t* ne = nullptr,
  61. size_t* nb = nullptr, int64_t dims = 0,
  62. aclFormat format = ACL_FORMAT_ND,
  63. size_t offset = 0);
  64. /**
  65. * @brief Template for creating an ACL tensor from provided parameters. typename TYPE
  66. * should be size_t or float.
  67. *
  68. * @details This function creates an ACL tensor using the provided data pointer,
  69. * data type, dimensions, strides, format, offset, and additional parameters.
  70. * It calculates necessary dimensions and strides based on the provided ne and nb
  71. * arrays, adjusting them for the ACL tensor creation. The ACL storage length
  72. * is also calculated based on the provided dimensions and strides.
  73. *
  74. * @param data_ptr Pointer to the data buffer for the ACL tensor.
  75. * @param dtype ACL data type of the tensor.
  76. * @param type_size Size of each element in the tensor data buffer.
  77. * @param ne Pointer to an array containing tensor dimensions.
  78. * @param nb Pointer to an array containing tensor strides.
  79. * @param dims Number of dimensions of the tensor.
  80. * @param format ACL tensor format. Defaults to ACL_FORMAT_ND.
  81. * @param offset Offset in bytes for the ACL tensor data. Defaults to 0.
  82. * @return Pointer to the created ACL tensor.
  83. */
  84. template<typename TYPE>
  85. aclTensor* ggml_cann_create_tensor(void* data_ptr, aclDataType dtype,
  86. TYPE type_size, int64_t* ne, TYPE* nb,
  87. int64_t dims,
  88. aclFormat format = ACL_FORMAT_ND,
  89. size_t offset = 0) {
  90. int64_t tmp_ne[GGML_MAX_DIMS * 2];
  91. int64_t tmp_stride[GGML_MAX_DIMS * 2];
  92. memcpy(tmp_ne, ne, dims * sizeof(int64_t));
  93. for (int i = 0; i < dims; i++) {
  94. tmp_stride[i] = nb[i] / type_size;
  95. }
  96. std::reverse(tmp_ne, tmp_ne + dims);
  97. std::reverse(tmp_stride, tmp_stride + dims);
  98. int64_t acl_storage_len = 0;
  99. for (int i = 0; i < dims; i++) {
  100. acl_storage_len += (ne[i] - 1) * nb[i];
  101. }
  102. aclTensor* acl_tensor =
  103. aclCreateTensor(tmp_ne, dims, dtype, tmp_stride, offset / type_size,
  104. format, &acl_storage_len, 1, data_ptr);
  105. return acl_tensor;
  106. }
  107. /**
  108. * @brief Checks if tensors require broadcasting based on their shapes.
  109. *
  110. * @details This function determines if two ggml_tensors need to be broadcasted for
  111. * element-wise operations. Broadcasting is necessary if the shapes of the
  112. * tensors are not identical and no dimension in either tensor equals 1.
  113. *
  114. * @param t0 Pointer to the first ggml_tensor.
  115. * @param t1 Pointer to the second ggml_tensor.
  116. * @return True if broadcasting is needed, False otherwise.
  117. *
  118. * @remarks This function iterates over the dimensions of t0 and t1. It checks if each
  119. * dimension in t1 differs from t0's corresponding dimension and is not equal
  120. * to 1. If such a dimension is found, broadcasting is required to align t1
  121. * with t0 for element-wise operations.
  122. */
  123. bool ggml_cann_need_bcast(const ggml_tensor* t0, const ggml_tensor* t1);
  124. /**
  125. * @brief Computes broadcast shapes and strides for two ggml_tensors.
  126. *
  127. * @details This function calculates the broadcast shapes and strides for two ggml_tensors,
  128. * following the broadcasting rules similar to numpy. It adjusts dimensions and
  129. * strides to ensure compatibility for element-wise operations where one tensor
  130. * can be broadcasted to match the shape of another tensor.
  131. *
  132. * @param src0 Pointer to the first ggml_tensor.
  133. * @param src1 Pointer to the second ggml_tensor.
  134. * @param bcast_ne_src0 Output array to store broadcasted dimensions for src0.
  135. * @param bcast_ne_src1 Output array to store broadcasted dimensions for src1.
  136. * @param bcast_nb_src0 Output array to store broadcasted strides for src0.
  137. * @param bcast_nb_src1 Output array to store broadcasted strides for src1.
  138. * @return Number of dimensions in the broadcasted shape.
  139. *
  140. * @pre ggml_can_repeat(src1, src0) must return true, indicating src1 can be broadcasted
  141. * to match src0.
  142. *
  143. * @remarks This function iterates over the dimensions of src0 and src1, calculating the
  144. * necessary broadcast dimensions and strides. If a dimension requires broadcasting
  145. * (i.e., its size in src1 is smaller than in src0), an additional dimension is
  146. * added with size calculated to match src0's dimension. This adjustment ensures
  147. * that src1 can be element-wise broadcasted to src0's shape.
  148. *
  149. * How it works:
  150. *
  151. * if dim0 has padding.
  152. * a -> (2, 2) padding = 2
  153. * a: [[1, 2, *, *]
  154. * [2, 3, *, *]]
  155. * nb = (8, 4, 2)
  156. *
  157. * if a should bcast with b -> (2, 4)
  158. * b' -> (2, 2, 2)
  159. * b : [[1, 2, 3, 4, *, *]
  160. * [5, 6, 7, 8, *, *]]
  161. * nb = (12, 6, 1)
  162. *
  163. * after bcast:
  164. * a' -> (2, 1, 2)
  165. * a': [[[1, 2], *, *]
  166. * [[2, 3], *, *]]
  167. * nb = (8, 4, 2, 1)
  168. *
  169. * b' : [[[1, 2], [3, 4], *, *]
  170. * [[5, 6], [7, 8], *, *]]
  171. * nb = (12, 6, 2, 1)
  172. * \endcode
  173. *
  174. * dim1 in a inserted dim, should add nb for dim1,
  175. * and all other nb moves to next in order.
  176. */
  177. int64_t ggml_cann_get_bcast_shape(const ggml_tensor* src0, const ggml_tensor* src1,
  178. int64_t* bcast_ne_src0, int64_t* bcast_ne_src1,
  179. size_t* bcast_nb_src0, size_t* bcast_nb_src1);
// Broadcast helper macros to avoid duplicate code.
//
// BCAST_SHAPE(src0, src1) declares, in the current scope, the arrays
// bcast_<src0>_ne / bcast_<src0>_nb / bcast_<src1>_ne / bcast_<src1>_nb and
// the variable bcast_dims, then fills them via ggml_cann_get_bcast_shape().
// It can therefore be used at most once per scope.
#define BCAST_SHAPE(src0, src1) \
int64_t bcast_##src0##_ne[GGML_MAX_DIMS * 2]; \
int64_t bcast_##src1##_ne[GGML_MAX_DIMS * 2]; \
size_t bcast_##src0##_nb[GGML_MAX_DIMS * 2]; \
size_t bcast_##src1##_nb[GGML_MAX_DIMS * 2]; \
int64_t bcast_dims = ggml_cann_get_bcast_shape( \
src0, src1, bcast_##src0##_ne, bcast_##src1##_ne, bcast_##src0##_nb, \
bcast_##src1##_nb);
// Expands to the (ne, nb, dims) argument triple for a tensor previously
// prepared with BCAST_SHAPE in the same scope.
#define BCAST_PARAM(tensor) bcast_##tensor##_ne, bcast_##tensor##_nb, bcast_dims
  190. /**
  191. * @brief Calculates broadcast shapes for matrix multiplication.
  192. *
  193. * @details This function computes the broadcast shapes required for matrix multiplication
  194. * based on the input, weight, and destination tensor shapes. It ensures that the
  195. * dimensions of weight tensors are expanded appropriately to satisfy matrix
  196. * multiplication broadcast rules.
  197. *
  198. * @param input_ne Array containing the dimensions of the input tensor.
  199. * @param weight_ne Array containing the dimensions of the weight tensor.
  200. * @param dst_ne Array containing the dimensions of the destination tensor.
  201. * @param input_nb Array containing the strides of the input tensor.
  202. * @param weight_nb Array containing the strides of the weight tensor.
  203. * @param dst_nb Array containing the strides of the destination tensor.
  204. * @param bcast_input_ne Output array for broadcasted input tensor dimensions.
  205. * @param bcast_weight_ne Output array for broadcasted weight tensor dimensions.
  206. * @param bcast_dst_ne Output array for broadcasted destination tensor dimensions.
  207. * @param bcast_input_nb Output array for broadcasted input tensor strides.
  208. * @param bcast_weight_nb Output array for broadcasted weight tensor strides.
  209. * @param bcast_dst_nb Output array for broadcasted destination tensor strides.
  210. * @return The number of dimensions in the broadcasted tensors.
  211. *
  212. * @remarks This function iterates over the tensor dimensions and calculates the broadcast
  213. * shapes needed for matrix multiplication. It ensures that dimensions where
  214. * weight tensor requires expansion are appropriately handled to conform with
  215. * broadcasting rules.
  216. * @note compare with ggml_cann_get_bcast_shape, mul_mat broadcast need add this new dim
  217. * before cast dim.
  218. * @sa ggml_cann_get_bcast_shape
  219. */
  220. int64_t ggml_cann_get_mulmat_bcast_shape(
  221. const int64_t* input_ne, const int64_t* weight_ne, const int64_t* dst_ne,
  222. const size_t* input_nb, const size_t* weight_nb, const size_t* dst_nb,
  223. int64_t* bcast_input_ne, int64_t* bcast_weight_ne, int64_t* bcast_dst_ne,
  224. size_t* bcast_input_nb, size_t* bcast_weight_nb, size_t* bcast_dst_nb);
// Matrix-multiplication broadcast helper macros to avoid duplicate code.
//
// BCAST_MUL_MAT_SHAPE(input, weight, dst) declares, in the current scope, the
// ne/nb arrays for all three tensors plus the variable bcast_dims, then fills
// them via ggml_cann_get_mulmat_bcast_shape(). It can therefore be used at
// most once per scope (and not together with BCAST_SHAPE, which also declares
// bcast_dims).
#define BCAST_MUL_MAT_SHAPE(input, weight, dst) \
int64_t bcast_##input##_ne[GGML_MAX_DIMS * 2]; \
int64_t bcast_##weight##_ne[GGML_MAX_DIMS * 2]; \
int64_t bcast_##dst##_ne[GGML_MAX_DIMS * 2]; \
size_t bcast_##input##_nb[GGML_MAX_DIMS * 2]; \
size_t bcast_##weight##_nb[GGML_MAX_DIMS * 2]; \
size_t bcast_##dst##_nb[GGML_MAX_DIMS * 2]; \
int64_t bcast_dims = ggml_cann_get_mulmat_bcast_shape( \
input->ne, weight->ne, dst->ne, input->nb, weight->nb, dst->nb, \
bcast_##input##_ne, bcast_##weight##_ne, bcast_##dst##_ne, \
bcast_##input##_nb, bcast_##weight##_nb, bcast_##dst##_nb);
// Expands to the (ne, nb, dims) argument triple for a tensor previously
// prepared with BCAST_MUL_MAT_SHAPE in the same scope.
#define BCAST_MUL_MAT_PARAM(tensor) \
bcast_##tensor##_ne, bcast_##tensor##_nb, bcast_dims
  239. #endif // CANN_ACL_TENSOR_H