ggml-cann.cpp

/*
 * Copyright (c) 2023-2024 The ggml authors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "ggml-cann.h"

#include <acl/acl.h>
#include <stdarg.h>

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <memory>
#include <mutex>
#include <string>
#include <vector>

#include "ggml-backend-impl.h"
#include "ggml-cann/aclnn_ops.h"
#include "ggml-cann/common.h"

#define GGML_COMMON_DECL_C

#include "ggml-common.h"
/**
 * @brief Default logging callback for GGML.
 *
 * This function is the default logging callback that logs messages to stderr.
 *
 * @param level The log level.
 * @param msg The log message.
 * @param user_data User data passed to the callback.
 */
static void ggml_cann_default_log_callback(enum ggml_log_level level,
                                           const char* msg, void* user_data) {
    GGML_UNUSED(level);
    GGML_UNUSED(user_data);
    fprintf(stderr, "%s", msg);
}
ggml_log_callback ggml_cann_log_callback = ggml_cann_default_log_callback;
void* ggml_cann_log_user_data = NULL;

GGML_API void ggml_backend_cann_log_set_callback(ggml_log_callback log_callback,
                                                 void* user_data) {
    ggml_cann_log_callback = log_callback;
    ggml_cann_log_user_data = user_data;
}
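/**
 * Usage sketch (illustrative only): route CANN log messages to a custom sink
 * through the setter above.
 * @code
 * static void my_logger(enum ggml_log_level level, const char* msg, void* ud) {
 *     fprintf((FILE*)ud, "[cann %d] %s", (int)level, msg);
 * }
 *
 * ggml_backend_cann_log_set_callback(my_logger, stderr);
 * @endcode
 */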
#define GGML_CANN_LOG_INFO(...) ggml_cann_log(GGML_LOG_LEVEL_INFO, __VA_ARGS__)
#define GGML_CANN_LOG_WARN(...) ggml_cann_log(GGML_LOG_LEVEL_WARN, __VA_ARGS__)
#define GGML_CANN_LOG_ERROR(...) \
    ggml_cann_log(GGML_LOG_LEVEL_ERROR, __VA_ARGS__)

GGML_ATTRIBUTE_FORMAT(2, 3)
/**
 * @brief Log a message using the current logging callback.
 *
 * This function formats a log message and passes it to the current logging
 * callback.
 *
 * @param level The log level.
 * @param format The format string for the log message.
 * @param ... The arguments for the format string.
 */
static void ggml_cann_log(enum ggml_log_level level, const char* format, ...) {
    if (ggml_cann_log_callback != NULL) {
        va_list args;
        va_start(args, format);
        char buffer[128];
        int len = vsnprintf(buffer, 128, format, args);
        if (len < 128) {
            ggml_cann_log_callback(level, buffer, ggml_cann_log_user_data);
        } else {
            // vsnprintf returned the full length; allocate len + 1 bytes so
            // the formatted string plus its null terminator fit, then format
            // again.
            std::vector<char> buffer2(len + 1);
            va_end(args);
            va_start(args, format);
            vsnprintf(&buffer2[0], buffer2.size(), format, args);
            ggml_cann_log_callback(level, buffer2.data(),
                                   ggml_cann_log_user_data);
        }
        va_end(args);
    }
}
/**
 * @brief Handles CANN errors by printing an error message and aborting.
 *
 * @param stmt The statement that caused the error.
 * @param func The function in which the error occurred.
 * @param file The file in which the error occurred.
 * @param line The line number where the error occurred.
 * @param msg The error message.
 */
[[noreturn]] void ggml_cann_error(const char* stmt, const char* func,
                                  const char* file, int line, const char* msg) {
    int32_t id = -1;
    aclrtGetDevice(&id);

    GGML_CANN_LOG_ERROR("CANN error: %s\n", msg);
    GGML_CANN_LOG_ERROR("  current device: %d, in function %s at %s:%d\n", id, func,
                        file, line);
    GGML_CANN_LOG_ERROR("  %s\n", stmt);
    // abort with GGML_ABORT to get a stack trace
    GGML_ABORT("CANN error");
}
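/**
 * Error-handling sketch: ACL_CHECK (defined in ggml-cann/common.h, not in this
 * file) is expected to expand to roughly the pattern below, forwarding any
 * failing ACL call into ggml_cann_error() so a stack trace is produced.
 * @code
 * // illustrative expansion only, not the actual macro definition:
 * #define ACL_CHECK_SKETCH(stmt)                                       \
 *     do {                                                             \
 *         aclError err_ = (stmt);                                      \
 *         if (err_ != ACL_SUCCESS) {                                   \
 *             ggml_cann_error(#stmt, __func__, __FILE__, __LINE__,     \
 *                             aclGetRecentErrMsg());                   \
 *         }                                                            \
 *     } while (0)
 * @endcode
 */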
/**
 * @brief Sets the device to be used by CANN.
 *
 * @param device The device ID to set.
 */
void ggml_cann_set_device(const int32_t device) {
    // TODO: uncomment these lines once the empty-context issue is fixed.
    // int current_device;
    // ACL_CHECK(aclrtGetDevice(&current_device));
    // if (device == current_device) {
    //     return;
    // }
    ACL_CHECK(aclrtSetDevice(device));
}
/**
 * @brief Retrieves the current device ID.
 *
 * @return The current device ID.
 */
int32_t ggml_cann_get_device() {
    int32_t id;
    ACL_CHECK(aclrtGetDevice(&id));
    return id;
}
/**
 * @brief Initialize the CANN device information.
 *
 * This function initializes the CANN device information by obtaining the
 * device count and setting the memory allocation granularity for each device.
 *
 * @return A structure containing the device information.
 */
static ggml_cann_device_info ggml_cann_init() {
    ggml_cann_device_info info = {};

    aclError err = aclrtGetDeviceCount((uint32_t*)&info.device_count);

    if (err != ACL_SUCCESS) {
        GGML_CANN_LOG_ERROR("%s: failed to initialize CANN: %s\n",
                            __func__, aclGetRecentErrMsg());
        return info;
    }

    GGML_ASSERT(info.device_count <= GGML_CANN_MAX_DEVICES);

    for (int id = 0; id < info.device_count; ++id) {
        aclrtPhysicalMemProp prop = {};
        prop.handleType = ACL_MEM_HANDLE_TYPE_NONE;
        prop.allocationType = ACL_MEM_ALLOCATION_TYPE_PINNED;
        prop.memAttr = ACL_HBM_MEM_HUGE;
        prop.location.type = ACL_MEM_LOCATION_TYPE_DEVICE;
        prop.location.id = id;
        prop.reserve = 0;
        ACL_CHECK(aclrtMemGetAllocationGranularity(
            &prop, ACL_RT_MEM_ALLOC_GRANULARITY_RECOMMENDED,
            &info.devices[id].vmm_granularity));
    }

    // TODO: add more device info later.
    return info;
}
/**
 * @brief Retrieve the CANN device information.
 *
 * This function returns a reference to a structure containing the CANN device
 * information. The device information is initialized once and reused on
 * subsequent calls.
 *
 * @return A reference to the structure containing the device information.
 */
const ggml_cann_device_info& ggml_cann_info() {
    static ggml_cann_device_info info = ggml_cann_init();
    return info;
}
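/**
 * Usage sketch (illustrative only): query the cached device information.
 * @code
 * const ggml_cann_device_info& info = ggml_cann_info();
 * for (int id = 0; id < info.device_count; ++id) {
 *     GGML_CANN_LOG_INFO("device %d: vmm granularity = %zu bytes\n",
 *                        id, info.devices[id].vmm_granularity);
 * }
 * @endcode
 */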
// #define DEBUG_CANN_MALLOC
/**
 * @brief A pool of CANN buffers (legacy).
 *
 * This class manages a pool of CANN buffers for a specific device.
 */
struct ggml_cann_pool_leg : public ggml_cann_pool {
    /**
     * @brief The maximum number of buffers in the pool.
     */
    static const int MAX_BUFFERS = 256;

    /**
     * @brief The device ID associated with this buffer pool.
     */
    int device;

    /**
     * @brief Structure representing a CANN buffer.
     */
    struct ggml_cann_buffer {
        void* ptr = nullptr;  ///< Pointer to the buffer memory.
        size_t size = 0;      ///< Size of the buffer.
    };

    /**
     * @brief Array of CANN buffers in the pool.
     */
    ggml_cann_buffer buffer_pool[MAX_BUFFERS] = {};

    /**
     * @brief Total size of all buffers in the pool.
     */
    size_t pool_size = 0;

    /**
     * @brief Constructor to initialize the buffer pool for a specific device.
     *
     * @param device The device ID to associate with this buffer pool.
     */
    explicit ggml_cann_pool_leg(int device) : device(device) {}

    /**
     * @brief Destructor to free all buffers in the pool.
     */
    ~ggml_cann_pool_leg() {
        ggml_cann_set_device(device);
        for (int i = 0; i < MAX_BUFFERS; ++i) {
            ggml_cann_buffer& b = buffer_pool[i];
            if (b.ptr != nullptr) {
                ACL_CHECK(aclrtFree(b.ptr));
                pool_size -= b.size;
            }
        }
        GGML_ASSERT(pool_size == 0);
    }

    /**
     * @brief Allocate a buffer of the given size.
     *
     * @param size The size of the buffer to allocate.
     * @param actual_size A pointer to a variable to receive the actual size of
     * the allocated buffer.
     * @return A pointer to the allocated buffer.
     */
    void* alloc(size_t size, size_t* actual_size) override {
#ifdef DEBUG_CANN_MALLOC
        int nnz = 0;
        size_t max_size = 0;
#endif
        size_t best_diff = 1ull << 36;
        int ibest = -1;
        for (int i = 0; i < MAX_BUFFERS; ++i) {
            ggml_cann_buffer& b = buffer_pool[i];
            if (b.ptr != nullptr) {
#ifdef DEBUG_CANN_MALLOC
                ++nnz;
                if (b.size > max_size) max_size = b.size;
#endif
                if (b.size >= size) {
                    size_t diff = b.size - size;
                    if (diff < best_diff) {
                        best_diff = diff;
                        ibest = i;
                        if (!best_diff) {
                            void* ptr = b.ptr;
                            *actual_size = b.size;
                            b.ptr = nullptr;
                            b.size = 0;
                            return ptr;
                        }
                    }
                }
            }
        }
        if (ibest >= 0) {
            ggml_cann_buffer& b = buffer_pool[ibest];
            void* ptr = b.ptr;
            *actual_size = b.size;
            b.ptr = nullptr;
            b.size = 0;
            return ptr;
        }
        void* ptr;
        size_t look_ahead_size = (size_t)(1.05 * size);
        look_ahead_size = 256 * ((look_ahead_size + 255) / 256);
        ggml_cann_set_device(device);
        ACL_CHECK(
            aclrtMalloc(&ptr, look_ahead_size, ACL_MEM_MALLOC_HUGE_FIRST));
        *actual_size = look_ahead_size;
        pool_size += look_ahead_size;
#ifdef DEBUG_CANN_MALLOC
        GGML_CANN_LOG_INFO(
            "%s[%d]: %d buffers, max_size = %u MB, pool_size = %u MB, "
            "requested %u MB\n",
            __func__, device, nnz, (uint32_t)(max_size / 1024 / 1024),
            (uint32_t)(pool_size / 1024 / 1024),
            (uint32_t)(size / 1024 / 1024));
#endif
        return ptr;
    }
    /**
     * @brief Free a buffer and return it to the pool.
     *
     * @param ptr Pointer to the buffer to free.
     * @param size Size of the buffer to free.
     */
    void free(void* ptr, size_t size) override {
        for (int i = 0; i < MAX_BUFFERS; ++i) {
            ggml_cann_buffer& b = buffer_pool[i];
            if (b.ptr == nullptr) {
                b.ptr = ptr;
                b.size = size;
                return;
            }
        }
        // Memory should always be buffered: these buffers may still be needed
        // by tasks in the stream.
        // TODO: fix me.
        GGML_ABORT("CANN buffer pool full, increase MAX_BUFFERS\n");
    }
};
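/**
 * Usage sketch (illustrative only): the pool returns at least `size` bytes and
 * reports the true block size through `actual_size`; free() recycles the block
 * into the pool instead of releasing it with aclrtFree().
 * @code
 * ggml_cann_pool_leg pool(0);               // pool for device 0
 * size_t actual = 0;
 * void* buf = pool.alloc(1 << 20, &actual); // >= 1 MiB, padded to 256 bytes
 * // ... use buf in queued kernels ...
 * pool.free(buf, actual);                   // returned to the pool, not freed
 * @endcode
 */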
/**
 * @brief A pool of CANN buffers with virtual memory.
 *
 * This class manages a pool of CANN buffers with virtual memory for a specific
 * device.
 */
struct ggml_cann_pool_vmm : public ggml_cann_pool {
    /**
     * @brief The maximum size of the virtual memory pool (32 GB).
     */
    static const size_t CANN_POOL_VMM_MAX_SIZE = 1ull << 35;  // 32 GB

    /**
     * @brief The device ID associated with this buffer pool.
     */
    int device;

    /**
     * @brief Pointer to the start of the virtual memory pool.
     */
    void* pool_addr = 0;

    /**
     * @brief Amount of virtual memory used in the pool.
     */
    size_t pool_used = 0;

    /**
     * @brief Total size of the virtual memory pool.
     */
    size_t pool_size = 0;

    /**
     * @brief Allocation granularity for the virtual memory pool.
     */
    size_t granularity;

    /**
     * @brief Handles for the physical memory allocated.
     */
    std::vector<aclrtDrvMemHandle> handles;

    /**
     * @brief Offsets for the mapped memory regions.
     */
    std::vector<void*> map_offsets;

    /**
     * @brief Constructor to initialize the buffer pool with virtual memory for
     * a specific device.
     *
     * @param device The device ID to associate with this buffer pool.
     */
    explicit ggml_cann_pool_vmm(int device)
        : device(device),
          granularity(ggml_cann_info().devices[device].vmm_granularity) {}
    /**
     * @brief Destructor to free all buffers in the virtual memory pool.
     */
    ~ggml_cann_pool_vmm() {
        if (pool_addr != 0) {
            for (auto& offset : map_offsets) {
                ACL_CHECK(aclrtUnmapMem(offset));
            }
            for (auto& handle : handles) {
                ACL_CHECK(aclrtFreePhysical(handle));
            }
            ACL_CHECK(aclrtReleaseMemAddress(pool_addr));
        }
    }
    /**
     * @brief Allocate a buffer of the given size in the virtual memory pool.
     *
     * @param size The size of the buffer to allocate.
     * @param actual_size A pointer to a variable to receive the actual size of
     * the allocated buffer.
     * @return A pointer to the allocated buffer.
     */
    void* alloc(size_t size, size_t* actual_size) override {
        // round up the allocation size to the alignment to ensure that all
        // allocations are aligned for all data types
        const size_t alignment = 128;
        size = alignment * ((size + alignment - 1) / alignment);

        size_t avail = pool_size - pool_used;

        if (size > avail) {
            // round up to the next multiple of the granularity
            size_t reserve_size = size - avail;
            reserve_size =
                granularity * ((reserve_size + granularity - 1) / granularity);

            GGML_ASSERT(pool_size + reserve_size <= CANN_POOL_VMM_MAX_SIZE);

            // allocate more physical memory
            aclrtPhysicalMemProp prop = {};
            prop.handleType = ACL_MEM_HANDLE_TYPE_NONE;
            prop.allocationType = ACL_MEM_ALLOCATION_TYPE_PINNED;
            prop.memAttr = ACL_HBM_MEM_HUGE;
            prop.location.type = ACL_MEM_LOCATION_TYPE_DEVICE;
            prop.location.id = device;
            prop.reserve = 0;
            aclrtDrvMemHandle handle;
            ACL_CHECK(aclrtMallocPhysical(&handle, reserve_size, &prop, 0));

            // reserve virtual address space (if not already reserved)
            if (pool_addr == 0) {
                ACL_CHECK(aclrtReserveMemAddress(
                    &pool_addr, CANN_POOL_VMM_MAX_SIZE, 0, NULL, 1));
            }

            // map at the end of the pool
            ACL_CHECK(aclrtMapMem((char*)pool_addr + pool_size, reserve_size, 0,
                                  handle, 0));

            handles.push_back(handle);
            map_offsets.push_back((char*)pool_addr + pool_size);

            // add to the pool
            pool_size += reserve_size;

            // GGML_CANN_LOG_INFO("cann pool[%d]: size increased to %llu MB (
            // reserved %llu MB)\n",
            //       device, (unsigned long long) (pool_size/1024/1024),
            //       (unsigned long long) (reserve_size/1024/1024));
        }

        GGML_ASSERT(pool_addr != 0);

        void* ptr = (void*)((char*)pool_addr + pool_used);
        *actual_size = size;
        pool_used += size;

#ifdef DEBUG_CANN_MALLOC
        GGML_CANN_LOG_INFO("cann pool[%d]: allocated %llu bytes at %llx\n", device,
                           (unsigned long long)size, (unsigned long long)ptr);
#endif
        return ptr;
    }
    /**
     * @brief Free a buffer and return it to the virtual memory pool.
     *
     * @param ptr Pointer to the buffer to free.
     * @param size Size of the buffer to free.
     */
    void free(void* ptr, size_t size) override {
#ifdef DEBUG_CANN_MALLOC
        GGML_CANN_LOG_INFO("cann pool[%d]: freed %llu bytes at %llx\n", device,
                           (unsigned long long)size, (unsigned long long)ptr);
#endif

        pool_used -= size;

        // all deallocations must be in reverse order of the allocations
        GGML_ASSERT(ptr == (void*)((char*)pool_addr + pool_used));
    }
};
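/**
 * Usage sketch (illustrative only): the vmm pool is a bump allocator over one
 * reserved virtual range, so blocks must be freed in reverse (LIFO) order of
 * allocation, as the assert in free() enforces.
 * @code
 * ggml_cann_pool_vmm pool(0);               // pool for device 0
 * size_t sa = 0, sb = 0;
 * void* a = pool.alloc(4096, &sa);
 * void* b = pool.alloc(8192, &sb);
 * pool.free(b, sb);                         // b must be freed before a
 * pool.free(a, sa);
 * @endcode
 */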
/**
 * @brief Create a new CANN pool for a specific device.
 *
 * Factory method to create a new CANN pool object based on the device type.
 *
 * @param device The device ID for which to create the pool.
 * @return A unique pointer to the created CANN pool.
 */
std::unique_ptr<ggml_cann_pool> ggml_backend_cann_context::new_pool_for_device(
    int device) {
    // return std::unique_ptr<ggml_cann_pool>(new ggml_cann_pool_leg(device));
    return std::unique_ptr<ggml_cann_pool>(new ggml_cann_pool_vmm(device));
}
// cann buffer
/**
 * @brief Context for managing a CANN buffer associated with a specific device.
 *
 * This structure holds information about a CANN buffer, including the device
 * ID, device pointer, and a name derived from GGML_CANN_NAME and the device ID.
 */
struct ggml_backend_cann_buffer_context {
    int32_t device;  ///< The device ID associated with this buffer context.
    void* dev_ptr =
        nullptr;  ///< Pointer to the device memory allocated for the buffer.

    /**
     * @brief Constructor to initialize the CANN buffer context.
     *
     * @param device The device ID associated with this buffer context.
     * @param dev_ptr Pointer to the device memory allocated for the buffer.
     */
    ggml_backend_cann_buffer_context(int32_t device, void* dev_ptr)
        : device(device),
          dev_ptr(dev_ptr) {}

    /**
     * @brief Destructor to free the device memory allocated for the buffer.
     */
    ~ggml_backend_cann_buffer_context() { ACL_CHECK(aclrtFree(dev_ptr)); }
};
/**
 * @brief Retrieve the name associated with a CANN buffer.
 *
 * This function returns the name of a CANN buffer, which is stored in the
 * context of the buffer.
 *
 * @param buffer The CANN buffer whose name is to be retrieved.
 * @return A pointer to a C-string containing the name of the buffer.
 */
GGML_CALL static const char* ggml_backend_cann_buffer_get_name(
    ggml_backend_buffer_t buffer) {
    return "CANN";

    GGML_UNUSED(buffer);
}

/**
 * @brief Check if a buffer is a CANN buffer.
 *
 * This function checks if a given buffer is a CANN buffer by comparing its
 * `get_name` function pointer to `ggml_backend_cann_buffer_get_name`.
 *
 * @param buffer The buffer to check.
 * @return true if the buffer is a CANN buffer, false otherwise.
 */
GGML_CALL static bool ggml_backend_buffer_is_cann(
    ggml_backend_buffer_t buffer) {
    return buffer->iface.get_name == ggml_backend_cann_buffer_get_name;
}
/**
 * @brief Free resources associated with a CANN buffer.
 *
 * This function frees the resources associated with a CANN buffer, including
 * its context.
 *
 * @param buffer The CANN buffer to free.
 */
GGML_CALL static void ggml_backend_cann_buffer_free_buffer(
    ggml_backend_buffer_t buffer) {
    ggml_backend_cann_buffer_context* ctx =
        (ggml_backend_cann_buffer_context*)buffer->context;
    delete ctx;
}

/**
 * @brief Retrieve the base pointer of a CANN buffer.
 *
 * This function returns the base pointer of a CANN buffer, which points to the
 * device memory allocated for the buffer.
 *
 * @param buffer The CANN buffer whose base pointer is to be retrieved.
 * @return A pointer to the base of the device memory allocated for the buffer.
 */
GGML_CALL static void* ggml_backend_cann_buffer_get_base(
    ggml_backend_buffer_t buffer) {
    ggml_backend_cann_buffer_context* ctx =
        (ggml_backend_cann_buffer_context*)buffer->context;
    return ctx->dev_ptr;
}
/**
 * @brief Transform quantized Q4.0 tensor data into a format suitable for CANN
 * processing.
 *
 * This function transforms quantized Q4.0 tensor data into a format suitable
 * for CANN processing. It extracts quantization values and scales from the
 * source data and prepares them in a format expected by CANN operations.
 *
 * @param tensor Pointer to the tensor information.
 * @param src Pointer to the source data in Q4.0 format.
 * @param dst Pointer to the destination buffer where transformed data will be
 * stored.
 */
GGML_CALL static void ggml_backend_cann_transform_q4_0(ggml_tensor* tensor,
                                                       const void* src,
                                                       void* dst) {
    int64_t n_elems = ggml_nelements(tensor);
    int64_t groups = n_elems / QK4_0;
    size_t quant_bytes = n_elems * sizeof(uint8_t) / 2;

    uint8_t* quant_offset = (uint8_t*)dst;
    uint16_t* scale_offset = (uint16_t*)((char*)dst + quant_bytes);

    for (int i = 0; i < groups; i++) {
        const block_q4_0* group =
            (const block_q4_0*)((const char*)src + i * sizeof(block_q4_0));
        *scale_offset = group->d;
        scale_offset++;

        // 0-15
        for (int j = 0; j < QK4_0 / 2; j += 2) {
            (*quant_offset) = (group->qs[j] & 0x0F);
            (*quant_offset) |= ((group->qs[j + 1] << 4));
            quant_offset++;
        }

        // 16-31
        for (int j = 0; j < QK4_0 / 2; j += 2) {
            (*quant_offset) = (group->qs[j] >> 4);
            (*quant_offset) |= (group->qs[j + 1] & 0xF0);
            quant_offset++;
        }
    }

    // put (uint4b_t - 8) into int4b_t: XOR with 0x88 flips the high bit of
    // each nibble, subtracting 8 from each unsigned 4-bit value.
    for (quant_offset = (uint8_t*)dst;
         quant_offset < (uint8_t*)dst + quant_bytes; quant_offset++) {
        (*quant_offset) ^= 0x88;
    }
}
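/**
 * Worked size example (illustrative): for a Q4.0 tensor with n_elems = 64,
 * groups = 64 / QK4_0 = 2 and quant_bytes = 64 / 2 = 32, so the transformed
 * layout is [32 bytes of packed 4-bit quants][2 x 2-byte scales], with the
 * scales starting at dst + 32. The final XOR with 0x88 maps an unsigned 4-bit
 * value q in [0, 15] to the signed value q - 8 that CANN expects.
 */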
/**
 * @brief Transform CANN processed data back into quantized Q4.0 format.
 *
 * This function transforms CANN processed data back into quantized Q4.0 format.
 * It reverses the transformation performed by
 * ggml_backend_cann_transform_q4_0(), converting the data back into its
 * original quantized form.
 *
 * @param tensor Pointer to the tensor information.
 * @param src Pointer to the source buffer containing transformed data.
 * @param dst Pointer to the destination buffer where the Q4.0 formatted data
 * will be stored.
 */
GGML_CALL static void ggml_backend_cann_transform_back_q4_0(
    const ggml_tensor* tensor, void* src, void* dst) {
    int64_t n_elems = ggml_nelements(tensor);
    int64_t groups = n_elems / QK4_0;
    size_t quant_bytes = n_elems * sizeof(uint8_t) / 2;

    uint8_t* quant_offset = (uint8_t*)src;
    uint16_t* scale_offset = (uint16_t*)((char*)src + quant_bytes);

    for (; quant_offset < (uint8_t*)src + quant_bytes; quant_offset++) {
        (*quant_offset) ^= 0x88;
    }
    quant_offset = (uint8_t*)src;

    for (int i = 0; i < groups; i++) {
        block_q4_0* group = (block_q4_0*)((char*)dst + i * sizeof(block_q4_0));
        group->d = *scale_offset;
        scale_offset++;

        // 0-15
        for (int j = 0; j < QK4_0 / 2; j += 2) {
            group->qs[j] = ((*quant_offset) & 0x0F);
            group->qs[j + 1] = ((*quant_offset) >> 4);
            quant_offset++;
        }

        // 16-31
        for (int j = 0; j < QK4_0 / 2; j += 2) {
            group->qs[j] |= ((*quant_offset) << 4);
            group->qs[j + 1] |= ((*quant_offset) & 0xF0);
            quant_offset++;
        }
    }
}
/**
 * @brief Transform quantized Q8.0 tensor data into a format suitable for CANN
 * processing.
 *
 * This function transforms quantized Q8.0 tensor data into a format suitable
 * for CANN processing. It extracts quantization values and scales from the
 * source data and prepares them in a format expected by CANN operations.
 *
 * @param tensor Pointer to the tensor information.
 * @param src Pointer to the source data in Q8.0 format.
 * @param dst Pointer to the destination buffer where transformed data will be
 * stored.
 */
GGML_CALL static void ggml_backend_cann_transform_q8_0(ggml_tensor* tensor,
                                                       const void* src,
                                                       void* dst) {
    int64_t n_elems = ggml_nelements(tensor);
    int64_t groups = n_elems / QK8_0;
    size_t quant_bytes = n_elems * sizeof(uint8_t);

    uint8_t* quant_offset = (uint8_t*)dst;
    uint16_t* scale_offset = (uint16_t*)((char*)dst + quant_bytes);

    for (int i = 0; i < groups; i++) {
        const block_q8_0* group =
            (const block_q8_0*)((const char*)src + i * sizeof(block_q8_0));
        *scale_offset = group->d;
        scale_offset++;
        size_t group_quant_size = QK8_0 * sizeof(uint8_t);
        memcpy(quant_offset, group->qs, group_quant_size);
        quant_offset += group_quant_size;
    }
}
/**
 * @brief Transform CANN processed data back into quantized Q8.0 format.
 *
 * This function transforms CANN processed data back into quantized Q8.0 format.
 * It reverses the transformation performed by
 * ggml_backend_cann_transform_q8_0(), converting the data back into its
 * original quantized form.
 *
 * @param tensor Pointer to the tensor information.
 * @param src Pointer to the source buffer containing transformed data.
 * @param dst Pointer to the destination buffer where the Q8.0 formatted data
 * will be stored.
 */
GGML_CALL static void ggml_backend_cann_transform_back_q8_0(
    const ggml_tensor* tensor, const void* src, void* dst) {
    int64_t n_elems = ggml_nelements(tensor);
    int64_t groups = n_elems / QK8_0;
    size_t quant_bytes = n_elems * sizeof(uint8_t);

    const uint8_t* quant_offset = (const uint8_t*)src;
    const uint16_t* scale_offset =
        (const uint16_t*)((const char*)src + quant_bytes);

    for (int i = 0; i < groups; i++) {
        block_q8_0* group = (block_q8_0*)((char*)dst + i * sizeof(block_q8_0));
        group->d = *scale_offset;
        scale_offset++;
        size_t group_quant_size = QK8_0 * sizeof(uint8_t);
        memcpy(group->qs, quant_offset, group_quant_size);
        quant_offset += group_quant_size;
    }
}
/**
 * @brief Transform tensor data based on its type for CANN processing.
 *
 * This function transforms tensor data based on its quantization type for CANN
 * processing. It dispatches the transformation based on the tensor's type to
 * specialized functions handling Q4.0 and Q8.0 formats.
 *
 * @param tensor Pointer to the tensor information.
 * @param src Pointer to the source data to be transformed.
 * @param dst Pointer to the destination buffer where transformed data will be
 * stored.
 */
GGML_CALL static void ggml_backend_cann_transform(ggml_tensor* tensor,
                                                  const void* src, void* dst) {
    switch (tensor->type) {
        case GGML_TYPE_Q4_0:
            ggml_backend_cann_transform_q4_0(tensor, src, dst);
            break;
        case GGML_TYPE_Q8_0:
            ggml_backend_cann_transform_q8_0(tensor, src, dst);
            break;
        default:
            break;
    }
}

/**
 * @brief Transform CANN processed data back into tensor data based on its type.
 *
 * This function transforms CANN processed data back into tensor data based on
 * its quantization type for Q4.0 and Q8.0 formats. It dispatches the
 * transformation based on the tensor's type to specialized functions.
 *
 * @param tensor Pointer to the tensor information.
 * @param src Pointer to the source data containing CANN processed data.
 * @param dst Pointer to the destination buffer where transformed tensor data
 * will be stored.
 */
GGML_CALL static void ggml_backend_cann_transform_back(
    const ggml_tensor* tensor, void* src, void* dst) {
    switch (tensor->type) {
        case GGML_TYPE_Q4_0:
            ggml_backend_cann_transform_back_q4_0(tensor, src, dst);
            break;
        case GGML_TYPE_Q8_0:
            ggml_backend_cann_transform_back_q8_0(tensor, src, dst);
            break;
        default:
            break;
    }
}
/**
 * @brief Check if transformation is needed for a given tensor type.
 *
 * This function checks if transformation is needed for a given tensor type
 * to prepare data for CANN processing.
 *
 * @param type The tensor type to check.
 * @return true if transformation is needed, false otherwise.
 */
GGML_CALL static bool need_transform(ggml_type type) {
    switch (type) {
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q8_0:
            return true;
        default:
            return false;
    }
}
/**
 * @brief Initialize a tensor using data from a CANN buffer.
 *
 * This function initializes a tensor using data from a CANN buffer.
 * It handles special cases such as views and quantization.
 *
 * @param buffer The CANN buffer from which to initialize the tensor.
 * @param tensor Pointer to the tensor to be initialized.
 */
GGML_CALL static void ggml_backend_cann_buffer_init_tensor(
    ggml_backend_buffer_t buffer, ggml_tensor* tensor) {
    if (tensor->view_src != NULL && tensor->view_offs == 0) {
        GGML_ASSERT(tensor->view_src->buffer->buft == buffer->buft);
        return;
    }

    // TODO: the CANN backend doesn't support quantized tensors yet. Just
    // leave the code here.
    if (ggml_is_quantized(tensor->type)) {
        // Initialize padding to 0 to avoid possible NaN values
        size_t original_size = ggml_nbytes(tensor);
        size_t padded_size =
            ggml_backend_buft_get_alloc_size(buffer->buft, tensor);

        if (padded_size > original_size && tensor->view_src == nullptr) {
            size_t memset_size = padded_size - original_size;
            ACL_CHECK(aclrtMemset((char*)tensor->data + original_size,
                                  memset_size, 0, memset_size));
        }
    }
}
// TODO: need to handle tensors that have padding.
/**
 * @brief Set tensor data in a CANN buffer.
 *
 * This function sets tensor data in a CANN buffer, handling transformations
 * if needed based on the tensor's type.
 *
 * @param buffer The CANN buffer where the tensor data will be set.
 * @param tensor Pointer to the tensor whose data will be set.
 * @param data Pointer to the source data to be copied into the tensor.
 * @param offset Offset in the source data from where to start copying.
 * @param size Size of the data to be copied, in bytes.
 */
GGML_CALL static void ggml_backend_cann_buffer_set_tensor(
    ggml_backend_buffer_t buffer, ggml_tensor *tensor, const void *data,
    size_t offset, size_t size) {
    ggml_backend_cann_buffer_context *ctx =
        (ggml_backend_cann_buffer_context *)buffer->context;

    ggml_cann_set_device(ctx->device);
    // TODO: refer to cann(#6017), it uses the thread's default stream.
    // For ACL, synchronous functions use this default stream.
    // Why aclrtSynchronizeDevice?

    if (!need_transform(tensor->type)) {
        ACL_CHECK(aclrtMemcpy((char *)tensor->data + offset, size, data, size,
                              ACL_MEMCPY_HOST_TO_DEVICE));
    } else {
        void *transform_buffer = malloc(size);
        ggml_backend_cann_transform(tensor, data, transform_buffer);

#ifndef NDEBUG
        void *check_buffer = malloc(size);
        ggml_backend_cann_transform_back(tensor, transform_buffer,
                                         check_buffer);
        GGML_ASSERT(memcmp(data, check_buffer, size) == 0);
        free(check_buffer);
#endif
        ACL_CHECK(aclrtMemcpy((char *)tensor->data + offset, size,
                              transform_buffer, size,
                              ACL_MEMCPY_HOST_TO_DEVICE));
        free(transform_buffer);
    }
}
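/**
 * Usage sketch (illustrative only): user code normally reaches this entry
 * point through the generic ggml-backend API rather than calling it directly.
 * @code
 * // `tensor` was allocated in a CANN buffer beforehand:
 * std::vector<float> host(ggml_nelements(tensor), 0.0f);
 * ggml_backend_tensor_set(tensor, host.data(), 0, ggml_nbytes(tensor));
 * @endcode
 */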
/**
 * @brief Get tensor data from a CANN buffer.
 *
 * This function retrieves tensor data from a CANN buffer, handling
 * transformations if needed based on the tensor's type.
 *
 * @param buffer The CANN buffer from which to retrieve tensor data.
 * @param tensor Pointer to the tensor whose data will be retrieved.
 * @param data Pointer to the destination buffer where the tensor data will be
 * copied.
 * @param offset Offset in the destination buffer where to start copying.
 * @param size Size of the data to be copied, in bytes.
 */
GGML_CALL static void ggml_backend_cann_buffer_get_tensor(
    ggml_backend_buffer_t buffer, const ggml_tensor* tensor, void* data,
    size_t offset, size_t size) {
    ggml_backend_cann_buffer_context* ctx =
        (ggml_backend_cann_buffer_context*)buffer->context;

    ggml_cann_set_device(ctx->device);

    if (!need_transform(tensor->type)) {
        ACL_CHECK(aclrtMemcpy(data, size, (char*)tensor->data + offset, size,
                              ACL_MEMCPY_DEVICE_TO_HOST));
    } else {
        void* transform_buffer = malloc(size);
        ACL_CHECK(aclrtMemcpy(transform_buffer, size,
                              (char*)tensor->data + offset, size,
                              ACL_MEMCPY_DEVICE_TO_HOST));
        ggml_backend_cann_transform_back(tensor, transform_buffer, data);
        free(transform_buffer);
    }
}
/**
 * @brief Copy tensor data between CANN buffers if possible.
 *
 * This function copies tensor data between CANN buffers if the source and
 * destination buffers are CANN buffers and they meet the necessary conditions
 * (same device or devices can access each other).
 *
 * @param buffer The destination CANN buffer where the tensor data will be
 * copied.
 * @param src Pointer to the source tensor whose data will be copied.
 * @param dst Pointer to the destination tensor where the data will be copied.
 * @return true if the copy operation succeeded, false otherwise.
 */
GGML_CALL static bool ggml_backend_cann_buffer_cpy_tensor(
    ggml_backend_buffer_t buffer, const ggml_tensor* src, ggml_tensor* dst) {
    if (ggml_backend_buffer_is_cann(src->buffer)) {
        ggml_backend_cann_buffer_context* src_ctx =
            (ggml_backend_cann_buffer_context*)src->buffer->context;
        ggml_backend_cann_buffer_context* dst_ctx =
            (ggml_backend_cann_buffer_context*)buffer->context;

        size_t memcpy_size = ggml_nbytes(src);
        // Same device.
        if (src_ctx->device == dst_ctx->device) {
            ACL_CHECK(aclrtMemcpy((char*)dst->data, memcpy_size,
                                  (const char*)src->data, memcpy_size,
                                  ACL_MEMCPY_DEVICE_TO_DEVICE));
            return true;
        } else {
            // Different devices that can reach each other via peer access.
            int32_t canAccessPeer = 0;
            ACL_CHECK(aclrtDeviceCanAccessPeer(&canAccessPeer, src_ctx->device,
                                               dst_ctx->device));
            if (canAccessPeer) {
                ggml_cann_set_device(src_ctx->device);
                ACL_CHECK(aclrtDeviceEnablePeerAccess(dst_ctx->device, 0));
                ACL_CHECK(aclrtMemcpy((char*)dst->data, memcpy_size,
                                      (const char*)src->data, memcpy_size,
                                      ACL_MEMCPY_DEVICE_TO_DEVICE));
                return true;
            }
        }
    }
    return false;
}
/**
 * @brief Clear a CANN buffer by setting all its memory to a specified value.
 *
 * This function clears a CANN buffer by setting all its memory to a specified
 * value.
 *
 * @param buffer The CANN buffer to be cleared.
 * @param value The value to which each byte in the buffer will be set.
 */
GGML_CALL static void ggml_backend_cann_buffer_clear(
    ggml_backend_buffer_t buffer, uint8_t value) {
    ggml_backend_cann_buffer_context* ctx =
        (ggml_backend_cann_buffer_context*)buffer->context;

    ggml_cann_set_device(ctx->device);
    ACL_CHECK(aclrtMemset(ctx->dev_ptr, buffer->size, value, buffer->size));
}
/**
 * @brief Interface for a CANN buffer in the backend.
 *
 * This structure defines function pointers to operations that can be performed
 * on a CANN buffer within the backend.
 */
static ggml_backend_buffer_i ggml_backend_cann_buffer_interface = {
    /* .get_name    = */ ggml_backend_cann_buffer_get_name,
    /* .free_buffer = */ ggml_backend_cann_buffer_free_buffer,
    /* .get_base    = */ ggml_backend_cann_buffer_get_base,
    /* .init_tensor = */ ggml_backend_cann_buffer_init_tensor,
    /* .set_tensor  = */ ggml_backend_cann_buffer_set_tensor,
    /* .get_tensor  = */ ggml_backend_cann_buffer_get_tensor,
    /* .cpy_tensor  = */ ggml_backend_cann_buffer_cpy_tensor,
    /* .clear       = */ ggml_backend_cann_buffer_clear,
    /* .reset       = */ NULL,
};
// cann buffer type
/**
 * @brief Structure representing context information for a specific backend
 * buffer type.
 */
struct ggml_backend_cann_buffer_type_context {
    int32_t device;   /**< Device identifier associated with the buffer context. */
    std::string name; /**< Name associated with the buffer context. */
};

/**
 * @brief Retrieves the name associated with a CANN buffer type.
 *
 * This function returns the descriptive name associated with the specified
 * CANN buffer type context.
 *
 * @param buft Pointer to the buffer type context.
 * @return Const pointer to the C-style string containing the name.
 */
GGML_CALL static const char* ggml_backend_cann_buffer_type_name(
    ggml_backend_buffer_type_t buft) {
    return "CANN";

    GGML_UNUSED(buft);
}
/**
 * @brief Allocates a new CANN buffer of the specified type and size.
 *
 * This function allocates a new CANN buffer on the specified device with the
 * given size.
 *
 * @param buft Pointer to the buffer type context.
 * @param size Size in bytes of the buffer to allocate.
 * @return Pointer to the allocated buffer, or nullptr if allocation fails.
 */
GGML_CALL static ggml_backend_buffer_t
ggml_backend_cann_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft,
                                           size_t size) {
    ggml_backend_cann_buffer_type_context* buft_ctx =
        (ggml_backend_cann_buffer_type_context*)buft->context;

    ggml_cann_set_device(buft_ctx->device);

    size = std::max(size, (size_t)1);

    void* dev_ptr;
    aclError err = aclrtMalloc(&dev_ptr, size, ACL_MEM_MALLOC_HUGE_FIRST);
    if (err != ACL_SUCCESS) {
        GGML_CANN_LOG_ERROR(
            "%s: allocating %.2f MiB on device %d: aclrtMalloc failed: %s\n",
            __func__, size / 1024.0 / 1024.0, buft_ctx->device,
            aclGetRecentErrMsg());
        return nullptr;
    }

    ggml_backend_cann_buffer_context* ctx =
        new ggml_backend_cann_buffer_context(buft_ctx->device, dev_ptr);

    return ggml_backend_buffer_init(buft, ggml_backend_cann_buffer_interface,
                                    ctx, size);
}
/**
 * @brief Retrieves the memory alignment requirement for CANN buffers of this
 * type.
 *
 * This function returns the alignment requirement in bytes for memory allocated
 * by the CANN buffer type.
 *
 * @param buft Pointer to the buffer type context (unused in this
 * implementation).
 * @return The alignment requirement in bytes (fixed at 128 bytes for CANN
 * buffers).
 */
GGML_CALL static size_t ggml_backend_cann_buffer_type_get_alignment(
    ggml_backend_buffer_type_t buft) {
    return 128;

    GGML_UNUSED(buft);
}
/**
 * @brief Calculates the allocation size required for a tensor in a CANN buffer.
 *
 * Computes the total allocation size needed for storing the tensor's data in a
 * CANN buffer, considering any necessary padding or adjustments for quantized
 * types.
 *
 * @param buft Pointer to the buffer type context (unused in this
 * implementation).
 * @param tensor Pointer to the tensor for which the allocation size is
 * calculated.
 * @return The total allocation size in bytes required for the tensor in the
 * CANN buffer.
 */
GGML_CALL static size_t ggml_backend_cann_buffer_type_get_alloc_size(
    ggml_backend_buffer_type_t buft, const ggml_tensor* tensor) {
    size_t size = ggml_nbytes(tensor);
    int64_t ne0 = tensor->ne[0];

    // The last row must be at least 32 bytes, because every op deals with at
    // least 32 bytes at a time.
    // TODO: quantized type?
    // int64_t line_size = ne0 * ggml_element_size(tensor);
    // int64_t line_size_align_32 = (line_size + 31) & ~31;
    // size += (line_size_align_32 - line_size);

    // TODO: not support quantized yet.
    // TODO: consider non-contiguous tensors.
    if (ggml_is_quantized(tensor->type)) {
        if (ne0 % MATRIX_ROW_PADDING != 0) {
            size += ggml_row_size(
                tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING);
        }
    }

    return size;

    GGML_UNUSED(buft);
}
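/**
 * Worked padding example (illustrative; the actual MATRIX_ROW_PADDING value is
 * defined in the CANN common headers): assuming MATRIX_ROW_PADDING = 512 and a
 * quantized tensor with ne0 = 100, ne0 % MATRIX_ROW_PADDING = 100, so
 * ggml_row_size(type, 412) extra bytes are reserved, padding the row out to a
 * full multiple of MATRIX_ROW_PADDING elements.
 */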
/**
 * @brief Interface for managing CANN buffer types in the GGML backend.
 *
 * Provides function pointers for allocating, querying properties, and managing
 * memory for CANN buffer types in the GGML backend.
 */
static ggml_backend_buffer_type_i ggml_backend_cann_buffer_type_interface = {
    /* .get_name       = */ ggml_backend_cann_buffer_type_name,
    /* .alloc_buffer   = */ ggml_backend_cann_buffer_type_alloc_buffer,
    /* .get_alignment  = */ ggml_backend_cann_buffer_type_get_alignment,
    /* .get_max_size   = */ NULL,  // defaults to SIZE_MAX
    /* .get_alloc_size = */ ggml_backend_cann_buffer_type_get_alloc_size,
    /* .is_host        = */ NULL,
};
/**
 * @brief Retrieves the CANN buffer type for a specified device.
 *
 * This function initializes and returns the buffer type interface associated
 * with the given device. It ensures thread-safe access using a mutex.
 *
 * @param device The device index for which to retrieve the buffer type.
 * @return A pointer to the buffer type interface for the specified device, or
 * nullptr if the device index is out of range.
 */
GGML_CALL ggml_backend_buffer_type_t
ggml_backend_cann_buffer_type(int32_t device) {
    static std::mutex mutex;
    std::lock_guard<std::mutex> lock(mutex);

    if (device >= ggml_backend_cann_get_device_count()) {
        return nullptr;
    }

    static ggml_backend_buffer_type
        ggml_backend_cann_buffer_types[GGML_CANN_MAX_DEVICES];

    static bool ggml_backend_cann_buffer_type_initialized = false;

    if (!ggml_backend_cann_buffer_type_initialized) {
        for (int32_t i = 0; i < GGML_CANN_MAX_DEVICES; i++) {
            ggml_backend_cann_buffer_types[i] = {
                /* .iface   = */ ggml_backend_cann_buffer_type_interface,
                /* .context = */
                new ggml_backend_cann_buffer_type_context{
                    i, "CANN" + std::to_string(i)},
            };
        }
        ggml_backend_cann_buffer_type_initialized = true;
    }
    return &ggml_backend_cann_buffer_types[device];
}
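/**
 * Usage sketch (illustrative only): allocate a device buffer through the
 * buffer type returned above, using the generic ggml-backend helpers.
 * @code
 * ggml_backend_buffer_type_t buft = ggml_backend_cann_buffer_type(0);
 * if (buft != nullptr) {
 *     ggml_backend_buffer_t buf =
 *         ggml_backend_buft_alloc_buffer(buft, 16 * 1024 * 1024);
 *     // ... place tensors in buf ...
 *     ggml_backend_buffer_free(buf);
 * }
 * @endcode
 */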
/**
 * @brief Computes the forward operation for a given tensor using CANN
 * operations.
 *
 * This function selects the appropriate CANN operation based on the type of
 * operation specified in the tensor and performs the computation.
 *
 * @param ctx The CANN context containing necessary resources and
 * configurations.
 * @param dst The destination tensor where the result of the computation will be
 * stored.
 * @return true if the computation was successful; false otherwise.
 */
static bool ggml_cann_compute_forward(ggml_backend_cann_context& ctx,
                                      struct ggml_tensor* dst) {
    switch (dst->op) {
        case GGML_OP_REPEAT:
            ggml_cann_repeat(ctx, dst);
            break;
        case GGML_OP_GET_ROWS:
            ggml_cann_get_rows(ctx, dst);
            break;
        case GGML_OP_DUP:
            ggml_cann_dup(ctx, dst);
            break;
        case GGML_OP_ADD:
            ggml_cann_add(ctx, dst);
            break;
        case GGML_OP_ACC:
            ggml_cann_acc(ctx, dst);
            break;
        case GGML_OP_MUL:
            ggml_cann_mul_div<aclnnMulGetWorkspaceSize, aclnnMul>(ctx, dst);
            break;
        case GGML_OP_DIV:
            ggml_cann_mul_div<aclnnDivGetWorkspaceSize, aclnnDiv>(ctx, dst);
            break;
        case GGML_OP_UNARY:
            switch (ggml_get_unary_op(dst)) {
                case GGML_UNARY_OP_GELU:
                    ggml_cann_activation<aclnnGeluGetWorkspaceSize, aclnnGelu>(
                        ctx, dst);
                    break;
                case GGML_UNARY_OP_SILU:
                    ggml_cann_activation<aclnnSiluGetWorkspaceSize, aclnnSilu>(
                        ctx, dst);
                    break;
                // TODO: Use faster gelu??
                case GGML_UNARY_OP_GELU_QUICK:
                    ggml_cann_activation<aclnnGeluGetWorkspaceSize, aclnnGelu>(
                        ctx, dst);
                    break;
                case GGML_UNARY_OP_TANH:
                    ggml_cann_activation<aclnnTanhGetWorkspaceSize, aclnnTanh>(
                        ctx, dst);
                    break;
                case GGML_UNARY_OP_RELU:
                    ggml_cann_activation<aclnnReluGetWorkspaceSize, aclnnRelu>(
                        ctx, dst);
                    break;
                case GGML_UNARY_OP_HARDSIGMOID:
                    ggml_cann_activation<aclnnHardsigmoidGetWorkspaceSize,
                                         aclnnHardsigmoid>(ctx, dst);
                    break;
                case GGML_UNARY_OP_HARDSWISH:
                    ggml_cann_activation<aclnnHardswishGetWorkspaceSize,
                                         aclnnHardswish>(ctx, dst);
                    break;
                default:
                    return false;
            }
            break;
        case GGML_OP_NORM:
            ggml_cann_norm(ctx, dst);
            break;
        case GGML_OP_GROUP_NORM:
            ggml_cann_group_norm(ctx, dst);
            break;
        case GGML_OP_CONCAT:
            ggml_cann_concat(ctx, dst);
            break;
        case GGML_OP_UPSCALE:
            ggml_cann_upsample_nearest2d(ctx, dst);
            break;
        case GGML_OP_PAD:
            ggml_cann_pad(ctx, dst);
            break;
        case GGML_OP_ARANGE:
            ggml_cann_arange(ctx, dst);
            break;
        case GGML_OP_TIMESTEP_EMBEDDING:
            ggml_cann_timestep_embedding(ctx, dst);
            break;
        case GGML_OP_LEAKY_RELU:
            ggml_cann_leaky_relu(ctx, dst);
            break;
        case GGML_OP_RMS_NORM:
            ggml_cann_rms_norm(ctx, dst);
            break;
        case GGML_OP_MUL_MAT:
            ggml_cann_mul_mat(ctx, dst);
            break;
        case GGML_OP_MUL_MAT_ID:
            return false;
        case GGML_OP_SCALE:
            ggml_cann_scale(ctx, dst);
            break;
        case GGML_OP_SQR:
            ggml_cann_sqr(ctx, dst);
            break;
        case GGML_OP_CLAMP:
            ggml_cann_clamp(ctx, dst);
            break;
        case GGML_OP_CPY:
            ggml_cann_cpy(ctx, dst);
            break;
        case GGML_OP_CONT:
            ggml_cann_dup(ctx, dst);
            break;
        case GGML_OP_NONE:
        case GGML_OP_RESHAPE:
        case GGML_OP_VIEW:
        case GGML_OP_PERMUTE:
        case GGML_OP_TRANSPOSE:
            break;
        case GGML_OP_DIAG_MASK_INF:
            ggml_cann_diag_mask(ctx, dst, -INFINITY);
            break;
        case GGML_OP_SOFT_MAX:
            ggml_cann_softmax(ctx, dst);
            break;
        case GGML_OP_ROPE:
            ggml_cann_rope(ctx, dst);
            break;
        case GGML_OP_IM2COL:
            ggml_cann_im2col(ctx, dst);
            break;
        case GGML_OP_POOL_2D:
            ggml_cann_pool2d(ctx, dst);
            break;
        case GGML_OP_SUM_ROWS:
            ggml_cann_sum_rows(ctx, dst);
            break;
        case GGML_OP_ARGSORT:
            ggml_cann_argsort(ctx, dst);
            break;
        default:
            return false;
    }

    return true;
}
// backend
/**
 * @brief Retrieves the name associated with the CANN backend.
 *
 * This function returns the name assigned to the CANN backend, which is stored
 * in the context of the provided backend structure.
 *
 * @param backend Pointer to the CANN backend structure.
 * @return A pointer to a constant string representing the backend name.
 */
GGML_CALL static const char* ggml_backend_cann_name(ggml_backend_t backend) {
    ggml_backend_cann_context* cann_ctx =
        (ggml_backend_cann_context*)backend->context;

    return cann_ctx->name.c_str();
}
/**
 * @brief Frees resources associated with the CANN backend.
 *
 * This function releases resources associated with the CANN backend context
 * and resets the device associated with the backend to its initial state.
 *
 * @param backend Pointer to the CANN backend structure to be freed.
 */
GGML_CALL static void ggml_backend_cann_free(ggml_backend_t backend) {
    ggml_backend_cann_context* cann_ctx =
        (ggml_backend_cann_context*)backend->context;
    ACL_CHECK(aclrtSynchronizeDevice());
    ACL_CHECK(aclrtResetDevice(cann_ctx->device));

    // Finalize the runtime when the last backend is freed.
    if (cann_ctx->device == ggml_backend_cann_get_device_count() - 1) {
        ACL_CHECK(aclFinalize());
    }

    delete cann_ctx;
    delete backend;
}
  1278. /**
  1279. * @brief Retrieves the default buffer type associated with the CANN backend.
  1280. *
  1281. * This function returns the buffer type specific to the device associated
  1282. * with the CANN backend. It is used to allocate buffers for computations
  1283. * performed by the backend.
  1284. *
  1285. * @param backend Pointer to the CANN backend structure.
  1286. * @return Pointer to the buffer type structure for the CANN backend.
  1287. */
  1288. GGML_CALL static ggml_backend_buffer_type_t
  1289. ggml_backend_cann_get_default_buffer_type(ggml_backend_t backend) {
  1290. ggml_backend_cann_context* cann_ctx =
  1291. (ggml_backend_cann_context*)backend->context;
  1292. return ggml_backend_cann_buffer_type(cann_ctx->device);
  1293. }
  1294. /**
  1295. * @brief Sets tensor data asynchronously in the CANN backend.
  1296. *
  1297. * This function asynchronously sets tensor data in the CANN backend. Depending
  1298. * on the tensor type, it may perform data transformations before copying data
  1299. * to the device.
  1300. *
  1301. * @param backend Pointer to the CANN backend structure.
  1302. * @param tensor Pointer to the tensor structure to set data for.
  1303. * @param data Pointer to the host data to copy to the tensor.
  1304. * @param offset Offset in bytes within the host data.
  1305. * @param size Size of the data to copy in bytes.
  1306. */
  1307. GGML_CALL static void ggml_backend_cann_set_tensor_async(ggml_backend_t backend,
  1308. ggml_tensor *tensor,
  1309. const void *data,
  1310. size_t offset,
  1311. size_t size) {
  1312. ggml_backend_cann_context *cann_ctx =
  1313. (ggml_backend_cann_context *)backend->context;
  1314. if (!need_transform(tensor->type)) {
  1315. ACL_CHECK(aclrtMemcpyAsync((char *)tensor->data + offset, size, data,
  1316. size, ACL_MEMCPY_HOST_TO_DEVICE,
  1317. cann_ctx->stream()));
  1318. } else {
  1319. void *transform_buffer = malloc(size);
  1320. ggml_backend_cann_transform(tensor, data, transform_buffer);
  1321. #ifndef NDEBUG
  1322. void *check_buffer = malloc(size);
  1323. ggml_backend_cann_transform_back(tensor, transform_buffer,
  1324. check_buffer);
  1325. GGML_ASSERT(memcmp(data, check_buffer, size));
  1326. free(check_buffer);
  1327. #endif
  1328. ACL_CHECK(aclrtMemcpyAsync(
  1329. (char *)tensor->data + offset, size, transform_buffer, size,
  1330. ACL_MEMCPY_HOST_TO_DEVICE, cann_ctx->stream()));
  1331. ACL_CHECK(aclrtSynchronizeStream(cann_ctx->stream()));
  1332. free(transform_buffer);
  1333. }
  1334. }
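/*
 * Usage sketch (illustrative): callers go through the generic ggml-backend
 * API, which dispatches to the function above. The copy is only guaranteed
 * to have landed after a synchronize:
 *
 *     ggml_backend_tensor_set_async(backend, tensor, host_data, 0,
 *                                   ggml_nbytes(tensor));
 *     ggml_backend_synchronize(backend);  // wait for the H2D copy
 */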
/**
 * @brief Retrieves tensor data asynchronously from the CANN backend.
 *
 * This function asynchronously copies tensor data from the device to host
 * memory. For tensor types stored in a transformed device layout, the raw
 * device data is first staged in a temporary buffer and then transformed back
 * into the host layout.
 *
 * @param backend Pointer to the CANN backend structure.
 * @param tensor Pointer to the tensor structure to read data from.
 * @param data Pointer to the host buffer that receives the data.
 * @param offset Offset in bytes within the tensor's data.
 * @param size Size of the data to copy in bytes.
 */
GGML_CALL static void ggml_backend_cann_get_tensor_async(
    ggml_backend_t backend, const ggml_tensor *tensor, void *data,
    size_t offset, size_t size) {
    ggml_backend_cann_context *cann_ctx =
        (ggml_backend_cann_context *)backend->context;
    ggml_backend_buffer_t buf =
        tensor->view_src ? tensor->view_src->buffer : tensor->buffer;

    GGML_ASSERT(buf->buft == ggml_backend_cann_buffer_type(cann_ctx->device) &&
                "unsupported buffer type");

    if (!need_transform(tensor->type)) {
        ACL_CHECK(aclrtMemcpyAsync(data, size, (char *)tensor->data + offset,
                                   size, ACL_MEMCPY_DEVICE_TO_HOST,
                                   cann_ctx->stream()));
    } else {
        void *transform_buffer = malloc(size);
        ACL_CHECK(aclrtMemcpyAsync(
            transform_buffer, size, (char *)tensor->data + offset, size,
            ACL_MEMCPY_DEVICE_TO_HOST, cann_ctx->stream()));
        ACL_CHECK(aclrtSynchronizeStream(cann_ctx->stream()));
        ggml_backend_cann_transform_back(tensor, transform_buffer, data);
        free(transform_buffer);
    }
}
/**
 * @brief Asynchronously copies tensor data between CANN backends.
 *
 * This function copies tensor data asynchronously between two CANN backends.
 * It checks that both tensors reside in CANN buffers and, for copies across
 * devices, that the devices support peer-to-peer access; otherwise it returns
 * false so the caller can fall back to a staged copy.
 *
 * @param backend_src Pointer to the source CANN backend structure.
 * @param backend_dst Pointer to the destination CANN backend structure.
 * @param src Pointer to the source tensor to copy data from.
 * @param dst Pointer to the destination tensor to copy data to.
 * @return true if the copy operation succeeds, false otherwise.
 */
GGML_CALL static bool ggml_backend_cann_cpy_tensor_async(
    ggml_backend_t backend_src, ggml_backend_t backend_dst,
    const ggml_tensor* src, ggml_tensor* dst) {
    GGML_ASSERT(ggml_backend_is_cann(backend_src) ||
                ggml_backend_is_cann(backend_dst));

    if (!ggml_backend_buffer_is_cann(src->buffer) ||
        !ggml_backend_buffer_is_cann(dst->buffer)) {
        return false;
    }

    ggml_backend_buffer_t buf_src =
        src->view_src ? src->view_src->buffer : src->buffer;
    ggml_backend_buffer_t buf_dst =
        dst->view_src ? dst->view_src->buffer : dst->buffer;

    ggml_backend_cann_context* cann_ctx_src =
        (ggml_backend_cann_context*)backend_src->context;
    ggml_backend_cann_context* cann_ctx_dst =
        (ggml_backend_cann_context*)backend_dst->context;

    size_t copy_size = ggml_nbytes(dst);
    if (backend_src != backend_dst) {
        ggml_backend_cann_buffer_context* buf_ctx_src =
            (ggml_backend_cann_buffer_context*)buf_src->context;
        ggml_backend_cann_buffer_context* buf_ctx_dst =
            (ggml_backend_cann_buffer_context*)buf_dst->context;

        GGML_ASSERT(cann_ctx_src->device == buf_ctx_src->device);
        GGML_ASSERT(cann_ctx_dst->device == buf_ctx_dst->device);

        int32_t canAccessPeer = 0;
        ACL_CHECK(aclrtDeviceCanAccessPeer(&canAccessPeer, cann_ctx_src->device,
                                           cann_ctx_dst->device));
        if (!canAccessPeer) {
            return false;
        }

        // Peer access must be enabled in both directions for
        // aclrtMemcpyAsync between devices.
        ggml_cann_set_device(cann_ctx_dst->device);
        ACL_CHECK(aclrtDeviceEnablePeerAccess(cann_ctx_src->device, 0));
        ggml_cann_set_device(cann_ctx_src->device);
        ACL_CHECK(aclrtDeviceEnablePeerAccess(cann_ctx_dst->device, 0));

        ACL_CHECK(aclrtMemcpyAsync(dst->data, copy_size, src->data, copy_size,
                                   ACL_MEMCPY_DEVICE_TO_DEVICE,
                                   cann_ctx_src->stream()));

        // TODO: workaround; events did not work here, so synchronize the
        // source stream instead.
        ACL_CHECK(aclrtSynchronizeStream(cann_ctx_src->stream()));
    } else {
        // src and dst are on the same backend
        ACL_CHECK(aclrtMemcpyAsync(dst->data, copy_size, src->data, copy_size,
                                   ACL_MEMCPY_DEVICE_TO_DEVICE,
                                   cann_ctx_dst->stream()));
    }

    return true;
}
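/*
 * Usage sketch (illustrative): this is normally reached via the generic copy
 * helper, which falls back to a host round trip when the function above
 * returns false (e.g. when peer access is unavailable). backend_cann0 and
 * backend_cann1 stand for backends created on devices 0 and 1:
 *
 *     // src lives on backend_cann0, dst on backend_cann1
 *     ggml_backend_tensor_copy_async(backend_cann0, backend_cann1, src, dst);
 */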
/**
 * @brief Synchronizes a CANN backend.
 *
 * This function synchronizes the specified CANN backend by waiting for all
 * operations in its associated stream to complete.
 *
 * @param backend Pointer to the CANN backend structure to synchronize.
 */
GGML_CALL static void ggml_backend_cann_synchronize(ggml_backend_t backend) {
    ggml_backend_cann_context* cann_ctx =
        (ggml_backend_cann_context*)backend->context;

    ggml_cann_set_device(cann_ctx->device);

    ACL_CHECK(aclrtSynchronizeStream(cann_ctx->stream()));
}

/**
 * @brief Computes a computational graph using the CANN backend.
 *
 * This function executes the operations defined in the computational graph on
 * the specified CANN backend, aborting if a node's operation is unsupported.
 *
 * @param backend Pointer to the CANN backend structure to use for computation.
 * @param cgraph Pointer to the computational graph structure containing nodes
 * representing operations to be computed.
 * @return enum ggml_status Returns GGML_STATUS_SUCCESS if computation
 * completes successfully, otherwise an appropriate error status.
 */
GGML_CALL static enum ggml_status ggml_backend_cann_graph_compute(
    ggml_backend_t backend, ggml_cgraph* cgraph) {
    ggml_backend_cann_context* cann_ctx =
        (ggml_backend_cann_context*)backend->context;

    ggml_cann_set_device(cann_ctx->device);

    for (int i = 0; i < cgraph->n_nodes; i++) {
        ggml_tensor* node = cgraph->nodes[i];

        // skip empty tensors and no-op nodes
        if (ggml_is_empty(node) || node->op == GGML_OP_NONE) {
            continue;
        }

        bool ok = ggml_cann_compute_forward(*cann_ctx, node);

        if (!ok) {
            GGML_CANN_LOG_ERROR("%s: error: op not supported %s (%s)\n",
                                __func__, node->name, ggml_op_name(node->op));
        }
        GGML_ASSERT(ok);
    }

    return GGML_STATUS_SUCCESS;
}
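/*
 * Usage sketch (illustrative): execution is driven through the generic entry
 * point, which dispatches to the function above:
 *
 *     // gf: a ggml_cgraph whose tensors live in CANN buffers
 *     enum ggml_status status = ggml_backend_graph_compute(backend, gf);
 *     GGML_ASSERT(status == GGML_STATUS_SUCCESS);
 */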
/**
 * @brief Checks if the CANN backend supports a specific operation.
 *
 * This function checks whether the specified operation is supported by the
 * CANN backend.
 *
 * @param backend Pointer to the CANN backend structure to check support for
 * the operation.
 * @param op Pointer to the tensor representing the operation to check.
 * @return bool Returns true if the operation is supported by the backend,
 * otherwise false.
 */
GGML_CALL static bool ggml_backend_cann_supports_op(ggml_backend_t backend,
                                                    const ggml_tensor* op) {
    switch (op->op) {
        case GGML_OP_UNARY:
            switch (ggml_get_unary_op(op)) {
                case GGML_UNARY_OP_GELU:
                case GGML_UNARY_OP_SILU:
                case GGML_UNARY_OP_RELU:
                case GGML_UNARY_OP_HARDSIGMOID:
                case GGML_UNARY_OP_HARDSWISH:
                case GGML_UNARY_OP_GELU_QUICK:
                case GGML_UNARY_OP_TANH:
                    return true;
                default:
                    return false;
            }
        case GGML_OP_MUL_MAT: {
            switch (op->src[0]->type) {
                case GGML_TYPE_F16:
                case GGML_TYPE_F32:
                case GGML_TYPE_Q8_0:
                // TODO: fix me
                // Current groupsize should not be greater than k-1 in
                // aclnnWeightQuantBatchMatmulV2GetWorkspaceSize().
                case GGML_TYPE_Q4_0:
                    return true;
                default:
                    return false;
            }
        }
        case GGML_OP_MUL_MAT_ID:
            return false;
        // embedding
        case GGML_OP_GET_ROWS: {
            switch (op->src[0]->type) {
                case GGML_TYPE_F32:
                case GGML_TYPE_F16:
                case GGML_TYPE_Q4_0:
                case GGML_TYPE_Q8_0:
                    return true;
                default:
                    return false;
            }
        } break;
        case GGML_OP_CPY: {
            switch (op->type) {
                case GGML_TYPE_F32:
                case GGML_TYPE_F16:
                case GGML_TYPE_Q8_0:
                case GGML_TYPE_Q4_0:
                    return true;
                default:
                    return false;
            }
        }
        case GGML_OP_DUP:
        case GGML_OP_REPEAT:
        case GGML_OP_CONCAT:
        case GGML_OP_NONE:
        case GGML_OP_RESHAPE:
        case GGML_OP_VIEW:
        case GGML_OP_PERMUTE:
        case GGML_OP_TRANSPOSE:
        case GGML_OP_NORM:
        case GGML_OP_ADD:
        case GGML_OP_MUL:
        case GGML_OP_DIV:
        case GGML_OP_RMS_NORM:
        case GGML_OP_SCALE:
        case GGML_OP_SQR:
        case GGML_OP_CLAMP:
        case GGML_OP_CONT:
        case GGML_OP_DIAG_MASK_INF:
        case GGML_OP_SOFT_MAX:
        case GGML_OP_ROPE:
        case GGML_OP_IM2COL:
        case GGML_OP_POOL_2D:
        case GGML_OP_SUM_ROWS:
        case GGML_OP_ARGSORT:
        case GGML_OP_ACC:
        case GGML_OP_GROUP_NORM:
        case GGML_OP_UPSCALE:
        case GGML_OP_PAD:
        case GGML_OP_ARANGE:
        case GGML_OP_TIMESTEP_EMBEDDING:
        case GGML_OP_LEAKY_RELU:
            return true;
        default:
            return false;
    }

    GGML_UNUSED(backend);
}
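/*
 * Usage sketch (illustrative): the graph scheduler queries this per node
 * through the generic wrapper when deciding placement:
 *
 *     if (ggml_backend_supports_op(backend, node)) {
 *         // node may be assigned to the CANN backend
 *     }
 */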
/**
 * @brief Checks if a backend buffer type belongs to the CANN backend.
 *
 * This function checks whether the provided backend buffer type is associated
 * with the CANN backend by comparing its name retrieval function pointer.
 *
 * @param buft Pointer to the backend buffer type to check.
 * @return bool Returns true if the buffer type is associated with the CANN
 * backend, otherwise false.
 */
static bool ggml_backend_buft_is_cann(ggml_backend_buffer_type_t buft) {
    return buft->iface.get_name == ggml_backend_cann_buffer_type_name;
}

/**
 * @brief Checks if the CANN backend supports a specific backend buffer type.
 *
 * This function determines whether the CANN backend supports the given backend
 * buffer type by comparing the device contexts of the backend and the buffer
 * type. It returns true if both refer to the same device.
 *
 * @param backend Pointer to the CANN backend.
 * @param buft Pointer to the backend buffer type to check.
 * @return bool Returns true if the CANN backend supports the buffer type,
 * otherwise false.
 */
GGML_CALL static bool ggml_backend_cann_supports_buft(
    ggml_backend_t backend, ggml_backend_buffer_type_t buft) {
    if (ggml_backend_buft_is_cann(buft)) {
        ggml_backend_cann_context * cann_ctx =
            (ggml_backend_cann_context *)backend->context;
        ggml_backend_cann_buffer_type_context * buft_ctx =
            (ggml_backend_cann_buffer_type_context *)buft->context;
        return buft_ctx->device == cann_ctx->device;
    }
    return false;
}

/**
 * @brief Determines if a tensor operation should be offloaded to the CANN
 * backend.
 *
 * This function checks if a given tensor operation should be offloaded to the
 * CANN backend based on the operation type and the size of the tensor. It
 * returns true if the second dimension (ne[1]) of the tensor is greater than
 * or equal to the minimum batch size and the operation is not
 * GGML_OP_GET_ROWS.
 *
 * @param backend Pointer to the CANN backend.
 * @param op Pointer to the tensor operation to check.
 * @return bool Returns true if the operation should be offloaded, otherwise
 * false.
 */
GGML_CALL static bool ggml_backend_cann_offload_op(ggml_backend_t backend,
                                                   const ggml_tensor* op) {
    const int min_batch_size = 32;
    GGML_UNUSED(backend);

    return op->ne[1] >= min_batch_size && op->op != GGML_OP_GET_ROWS;
}
/**
 * @brief Creates a new event for the CANN backend.
 *
 * This function initializes a new event for the CANN backend by setting the
 * device and creating an ACL runtime event. The created event is then wrapped
 * in a ggml_backend_event structure and returned.
 *
 * @param backend Pointer to the CANN backend.
 * @return ggml_backend_event_t Returns a pointer to the new event structure.
 */
static ggml_backend_event_t ggml_backend_cann_event_new(
    ggml_backend_t backend) {
    ggml_backend_cann_context* cann_ctx =
        (ggml_backend_cann_context*)backend->context;

    ggml_cann_set_device(cann_ctx->device);

    aclrtEvent event;
    ACL_CHECK(aclrtCreateEvent(&event));

    return new ggml_backend_event{
        /* .backend = */ backend,
        /* .context = */ event,
    };
}

/**
 * @brief Frees a CANN backend event.
 *
 * This function destroys the ACL runtime event associated with the given CANN
 * backend event and then deletes the event structure itself.
 *
 * @param event Pointer to the event structure to be freed.
 */
static void ggml_backend_cann_event_free(ggml_backend_event_t event) {
    ACL_CHECK(aclrtDestroyEvent((aclrtEvent)event->context));

    delete event;
}

/**
 * @brief Records an event on the CANN backend stream.
 *
 * This function records the given event on the ACL runtime stream associated
 * with the backend context.
 *
 * @param event Pointer to the event structure to be recorded.
 */
static void ggml_backend_cann_event_record(ggml_backend_event_t event) {
    ggml_backend_cann_context* cann_ctx =
        (ggml_backend_cann_context*)event->backend->context;

    ACL_CHECK(aclrtRecordEvent((aclrtEvent)event->context, cann_ctx->stream()));
}

/**
 * @brief Waits for a recorded event to complete on the CANN backend stream.
 *
 * This function makes the given backend wait for the event to complete on its
 * ACL runtime stream.
 *
 * @param backend Pointer to the backend structure.
 * @param event Pointer to the event structure that the backend needs to wait
 * for.
 */
static void ggml_backend_cann_event_wait(ggml_backend_t backend,
                                         ggml_backend_event_t event) {
    ggml_backend_cann_context* cann_ctx =
        (ggml_backend_cann_context*)backend->context;

    if (ggml_backend_is_cann(event->backend)) {
        ACL_CHECK(aclrtStreamWaitEvent(cann_ctx->stream(),
                                       (aclrtEvent)event->context));
    } else {
        GGML_ABORT("fatal error");
    }
}

/**
 * @brief Synchronizes the given event on the CANN backend.
 *
 * This function waits for the specified event to complete on the ACL runtime.
 *
 * @param event Pointer to the event structure to be synchronized.
 */
static void ggml_backend_cann_event_synchronize(ggml_backend_event_t event) {
    ACL_CHECK(aclrtSynchronizeEvent((aclrtEvent)event->context));
}
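/*
 * Lifecycle sketch (illustrative): in practice these helpers are reached
 * through the generic ggml_backend_event_* API. A typical cross-stream
 * ordering between two backends looks like:
 *
 *     ggml_backend_event_t ev = ggml_backend_event_new(backend_a);
 *     ggml_backend_event_record(ev);           // mark a point in backend_a's stream
 *     ggml_backend_event_wait(backend_b, ev);  // backend_b waits on the device
 *     ggml_backend_event_synchronize(ev);      // or block the host instead
 *     ggml_backend_event_free(ev);
 */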
/**
 * @brief Structure defining the interface for the CANN backend.
 *
 * This structure contains function pointers for various operations
 * supported by the CANN backend, including name retrieval, memory
 * management, tensor operations, synchronization, and event handling.
 */
static ggml_backend_i ggml_backend_cann_interface = {
    /* .get_name                = */ ggml_backend_cann_name,
    /* .free                    = */ ggml_backend_cann_free,
    /* .get_default_buffer_type = */ ggml_backend_cann_get_default_buffer_type,
    /* .set_tensor_async        = */ ggml_backend_cann_set_tensor_async,
    /* .get_tensor_async        = */ ggml_backend_cann_get_tensor_async,
    /* .cpy_tensor_async        = */ ggml_backend_cann_cpy_tensor_async,
    /* .synchronize             = */ ggml_backend_cann_synchronize,
    /* .graph_plan_create       = */ NULL,
    /* .graph_plan_free         = */ NULL,
    /* .graph_plan_update       = */ NULL,
    /* .graph_plan_compute      = */ NULL,
    /* .graph_compute           = */ ggml_backend_cann_graph_compute,
    /* .supports_op             = */ ggml_backend_cann_supports_op,
    /* .supports_buft           = */ ggml_backend_cann_supports_buft,
    /* .offload_op              = */ ggml_backend_cann_offload_op,
    /* .event_new               = */ ggml_backend_cann_event_new,
    /* .event_free              = */ ggml_backend_cann_event_free,
    /* .event_record            = */ ggml_backend_cann_event_record,
    /* .event_wait              = */ ggml_backend_cann_event_wait,
    /* .event_synchronize       = */ ggml_backend_cann_event_synchronize,
};
/**
 * @brief Return the hardcoded GUID for the CANN backend.
 *
 * This function returns a static GUID which uniquely identifies the CANN
 * backend.
 *
 * @return A pointer to the static GUID.
 */
static ggml_guid_t ggml_backend_cann_guid() {
    static ggml_guid guid = {0xa1, 0x94, 0xaf, 0xac, 0xbd, 0x4f, 0x47, 0x34,
                             0xbe, 0x1a, 0x9e, 0x71, 0x1f, 0x9e, 0xed, 0x64};
    return &guid;
}

/**
 * @brief Initializes the CANN backend for a given device.
 *
 * Initializes ACL, validates the device index, and wraps a new device context
 * in a ggml_backend structure. Returns nullptr on an invalid device index.
 *
 * @param device Index of the device to create the backend for.
 * @return ggml_backend_t The initialized backend, or nullptr on failure.
 */
GGML_CALL ggml_backend_t ggml_backend_cann_init(int32_t device) {
    aclInit(nullptr);
    if (device < 0 || device >= ggml_backend_cann_get_device_count()) {
        GGML_CANN_LOG_ERROR("%s: error: invalid device %d\n", __func__, device);
        return nullptr;
    }

    ggml_backend_cann_context* ctx = new ggml_backend_cann_context(device);
    if (ctx == nullptr) {
        GGML_CANN_LOG_ERROR("%s: error: failed to allocate context\n", __func__);
        return nullptr;
    }

    ggml_backend_t cann_backend =
        new ggml_backend{/* .guid      = */ ggml_backend_cann_guid(),
                         /* .interface = */ ggml_backend_cann_interface,
                         /* .context   = */ ctx};

    return cann_backend;
}
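/*
 * Usage sketch (illustrative): creating and releasing a backend on device 0.
 *
 *     ggml_backend_t backend = ggml_backend_cann_init(0);
 *     if (backend != nullptr) {
 *         // ... allocate buffers, compute graphs ...
 *         ggml_backend_free(backend);  // ends up in ggml_backend_cann_free()
 *     }
 */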
/**
 * @brief Checks whether a backend is a CANN backend by comparing GUIDs.
 */
GGML_CALL bool ggml_backend_is_cann(ggml_backend_t backend) {
    return backend != NULL &&
           ggml_guid_matches(backend->guid, ggml_backend_cann_guid());
}

/**
 * @brief Returns the number of CANN devices visible to the runtime.
 */
GGML_CALL int32_t ggml_backend_cann_get_device_count() {
    return ggml_cann_info().device_count;
}

/**
 * @brief Writes the SoC name of the given device into the description buffer.
 */
GGML_CALL void ggml_backend_cann_get_device_description(
    int32_t device, char* description, size_t description_size) {
    ggml_cann_set_device(device);
    const char* soc_name = aclrtGetSocName();
    snprintf(description, description_size, "%s", soc_name);
}

/**
 * @brief Queries free and total HBM memory of the given device.
 */
GGML_CALL void ggml_backend_cann_get_device_memory(int32_t device, size_t* free,
                                                   size_t* total) {
    ggml_cann_set_device(device);
    ACL_CHECK(aclrtGetMemInfo(ACL_HBM_MEM, free, total));
}
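/*
 * Usage sketch (illustrative): enumerating devices and reporting free memory.
 *
 *     int32_t n_dev = ggml_backend_cann_get_device_count();
 *     for (int32_t d = 0; d < n_dev; d++) {
 *         char desc[64];
 *         ggml_backend_cann_get_device_description(d, desc, sizeof(desc));
 *         size_t free, total;
 *         ggml_backend_cann_get_device_memory(d, &free, &total);
 *         printf("CANN%d (%s): %zu of %zu bytes free\n", d, desc, free, total);
 *     }
 */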
// backend registry

/**
 * @brief Initializes a CANN backend based on the provided parameters.
 *
 * This function creates a CANN backend for the device index passed through
 * `user_data` by calling `ggml_backend_cann_init`.
 *
 * @param params Parameters for initialization (unused in this implementation).
 * @param user_data User data containing the device index to initialize the
 * backend.
 * @return ggml_backend_t The initialized CANN backend.
 */
GGML_CALL static ggml_backend_t ggml_backend_reg_cann_init(const char* params,
                                                           void* user_data) {
    ggml_backend_t cann_backend =
        ggml_backend_cann_init((int)(intptr_t)user_data);
    return cann_backend;

    GGML_UNUSED(params);
}

extern "C" GGML_CALL int ggml_backend_cann_reg_devices();

/**
 * @brief Registers CANN (Ascend) devices as backend options.
 *
 * This function retrieves the number of available CANN devices and registers
 * each device as a backend option using `ggml_backend_register`. Each device
 * is given a unique name based on `GGML_CANN_NAME` followed by its index.
 *
 * @return int The number of CANN devices registered.
 */
GGML_CALL int ggml_backend_cann_reg_devices() {
    uint32_t device_count = ggml_backend_cann_get_device_count();
    // register one backend per visible device
    for (uint32_t i = 0; i < device_count; i++) {
        char name[128];
        snprintf(name, sizeof(name), GGML_CANN_NAME "%u", i);
        ggml_backend_register(name, ggml_backend_reg_cann_init,
                              ggml_backend_cann_buffer_type(i),
                              (void*)(intptr_t)i);
    }
    return device_count;
}