/*
 * Copyright (c) 2023-2024 The ggml authors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "ggml-cann.h"

#include <acl/acl.h>
#include <stdarg.h>

#include <cmath>
#include <cstdio>
#include <cstring>
#include <mutex>

#include "ggml-impl.h"
#include "ggml-backend-impl.h"
#include "ggml-cann/aclnn_ops.h"
#include "ggml-cann/common.h"

#define GGML_COMMON_DECL_C

#include "ggml-common.h"

#define GGML_CANN_NAME "CANN"

/**
 * @brief Handles CANN errors by printing an error message and aborting.
 *
 * @param stmt The statement that caused the error.
 * @param func The function in which the error occurred.
 * @param file The file in which the error occurred.
 * @param line The line number where the error occurred.
 * @param msg  The error message.
 */
[[noreturn]] void ggml_cann_error(const char* stmt, const char* func,
                                  const char* file, int line, const char* msg) {
    int32_t id = -1;
    aclrtGetDevice(&id);

    GGML_LOG_ERROR("CANN error: %s\n", msg);
    GGML_LOG_ERROR("  current device: %d, in function %s at %s:%d\n", id, func,
                   file, line);
    GGML_LOG_ERROR("  %s\n", stmt);
    // abort with GGML_ASSERT to get a stack trace
    GGML_ABORT("CANN error");
}
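
// Note (illustrative sketch, not part of this file): callers do not invoke
// ggml_cann_error() directly. The ACL_CHECK macro from ggml-cann/common.h
// wraps each ACL runtime call and routes failures here; its shape is
// roughly the following (an assumption for illustration — the exact body
// lives in common.h):
//
//     #define ACL_CHECK(stmt)                                       \
//         do {                                                      \
//             int err = (stmt);                                     \
//             if (err != ACL_SUCCESS) {                             \
//                 ggml_cann_error(#stmt, __func__, __FILE__,        \
//                                 __LINE__, aclGetRecentErrMsg());  \
//             }                                                     \
//         } while (0)
//
// so that e.g. ACL_CHECK(aclrtSetDevice(0)) aborts with a stack trace on
// any non-ACL_SUCCESS return.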

/**
 * @brief Sets the device to be used by CANN.
 *
 * @param device The device ID to set.
 */
void ggml_cann_set_device(const int32_t device) {
    // TODO: uncomment these lines after the empty-context issue is fixed.
    // int current_device;
    // ACL_CHECK(aclrtGetDevice(&current_device));
    // if (device == current_device) {
    //     return;
    // }
    ACL_CHECK(aclrtSetDevice(device));
}

/**
 * @brief Retrieves the current device ID.
 *
 * @return The current device ID.
 */
int32_t ggml_cann_get_device() {
    int32_t id;
    ACL_CHECK(aclrtGetDevice(&id));
    return id;
}
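
// Illustrative usage (sketch; assumes the ACL runtime is initialized and
// device 1 exists):
//
//     ggml_cann_set_device(1);               // make device 1 current
//     int32_t dev = ggml_cann_get_device();  // dev == 1
//     GGML_ASSERT(dev == 1);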

/**
 * @brief Initialize the CANN device information.
 *
 * This function initializes the CANN device information by obtaining the
 * device count and setting the memory allocation granularity for each device.
 *
 * @return A structure containing the device information.
 */
static ggml_cann_device_info ggml_cann_init() {
    ggml_cann_device_info info = {};
    aclError err = aclrtGetDeviceCount((uint32_t*)&info.device_count);

    if (err != ACL_SUCCESS) {
        GGML_LOG_ERROR("%s: failed to initialize CANN: %s\n",
                       __func__, aclGetRecentErrMsg());
        return info;
    }

    GGML_ASSERT(info.device_count <= GGML_CANN_MAX_DEVICES);

    for (int id = 0; id < info.device_count; ++id) {
        aclrtPhysicalMemProp prop = {};
        prop.handleType = ACL_MEM_HANDLE_TYPE_NONE;
        prop.allocationType = ACL_MEM_ALLOCATION_TYPE_PINNED;
        prop.memAttr = ACL_HBM_MEM_HUGE;
        prop.location.type = ACL_MEM_LOCATION_TYPE_DEVICE;
        prop.location.id = id;
        prop.reserve = 0;
        ACL_CHECK(aclrtMemGetAllocationGranularity(
            &prop, ACL_RT_MEM_ALLOC_GRANULARITY_RECOMMENDED,
            &info.devices[id].vmm_granularity));

        size_t free, total;
        ggml_backend_cann_get_device_memory(id, &free, &total);
        info.devices[id].total_vram = free;
    }

    // TODO: add more device info later.
    return info;
}

/**
 * @brief Retrieve the CANN device information.
 *
 * This function returns a reference to a structure containing the CANN device
 * information. The device information is initialized once and reused on
 * subsequent calls.
 *
 * @return A reference to the structure containing the device information.
 */
const ggml_cann_device_info& ggml_cann_info() {
    static ggml_cann_device_info info = ggml_cann_init();
    return info;
}
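
// Illustrative usage (sketch): thanks to the function-local static above,
// the first caller pays the initialization cost and subsequent callers get
// the cached result, so it is cheap to query repeatedly:
//
//     const ggml_cann_device_info& info = ggml_cann_info();
//     for (int id = 0; id < info.device_count; ++id) {
//         GGML_LOG_INFO("device %d: granularity %zu, usable vram %zu\n",
//                       id, info.devices[id].vmm_granularity,
//                       info.devices[id].total_vram);
//     }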

//#define DEBUG_CANN_MALLOC
/**
 * @brief A pool of CANN buffers (legacy).
 *
 * This class manages a pool of CANN buffers for a specific device.
 */
struct ggml_cann_pool_leg : public ggml_cann_pool {
    /**
     * @brief The maximum number of buffers in the pool.
     */
    static const int MAX_BUFFERS = 256;

    /**
     * @brief The device ID associated with this buffer pool.
     */
    int device;

    /**
     * @brief Structure representing a CANN buffer.
     */
    struct ggml_cann_buffer {
        void* ptr = nullptr;  ///< Pointer to the buffer memory.
        size_t size = 0;      ///< Size of the buffer.
    };

    /**
     * @brief Array of CANN buffers in the pool.
     */
    ggml_cann_buffer buffer_pool[MAX_BUFFERS] = {};

    /**
     * @brief Total size of all buffers in the pool.
     */
    size_t pool_size = 0;

    /**
     * @brief Constructor to initialize the buffer pool for a specific device.
     *
     * @param device The device ID to associate with this buffer pool.
     */
    explicit ggml_cann_pool_leg(int device) : device(device) {}

    /**
     * @brief Destructor to free all buffers in the pool.
     */
    ~ggml_cann_pool_leg() {
        ggml_cann_set_device(device);
        for (int i = 0; i < MAX_BUFFERS; ++i) {
            ggml_cann_buffer& b = buffer_pool[i];
            if (b.ptr != nullptr) {
                ACL_CHECK(aclrtFree(b.ptr));
                pool_size -= b.size;
            }
        }
        GGML_ASSERT(pool_size == 0);
    }

    /**
     * @brief Allocate a buffer of the given size.
     *
     * @param size The size of the buffer to allocate.
     * @param actual_size A pointer to a variable to receive the actual size of
     * the allocated buffer.
     * @return A pointer to the allocated buffer.
     */
    void* alloc(size_t size, size_t* actual_size) override {
        const size_t alignment = 128;
        size = GGML_PAD(size, alignment);
        if (size == 0) {
            size = alignment;
        }
#ifdef DEBUG_CANN_MALLOC
        int nnz = 0;
        size_t max_size = 0;
#endif
        size_t best_diff = 1ull << 36;
        int ibest = -1;
        for (int i = 0; i < MAX_BUFFERS; ++i) {
            ggml_cann_buffer& b = buffer_pool[i];
            if (b.ptr != nullptr) {
#ifdef DEBUG_CANN_MALLOC
                ++nnz;
                if (b.size > max_size) max_size = b.size;
#endif
                if (b.size >= size) {
                    size_t diff = b.size - size;
                    if (diff < best_diff) {
                        best_diff = diff;
                        ibest = i;
                        if (!best_diff) {
                            void* ptr = b.ptr;
                            *actual_size = b.size;
                            b.ptr = nullptr;
                            b.size = 0;
                            return ptr;
                        }
                    }
                }
            }
        }
        if (ibest >= 0) {
            ggml_cann_buffer& b = buffer_pool[ibest];
            void* ptr = b.ptr;
            *actual_size = b.size;
            b.ptr = nullptr;
            b.size = 0;
            return ptr;
        }
        void* ptr;
        ggml_cann_set_device(device);
        ACL_CHECK(
            aclrtMalloc(&ptr, size, ACL_MEM_MALLOC_HUGE_FIRST));
        *actual_size = size;
        pool_size += size;
#ifdef DEBUG_CANN_MALLOC
        GGML_LOG_INFO(
            "%s[%d]: %d buffers, max_size = %u MB, pool_size = %u MB, "
            "requested %u MB\n",
            __func__, device, nnz, (uint32_t)(max_size / 1024 / 1024),
            (uint32_t)(pool_size / 1024 / 1024),
            (uint32_t)(size / 1024 / 1024));
#endif
        return ptr;
    }

    /**
     * @brief Free a buffer and return it to the pool.
     *
     * @param ptr Pointer to the buffer to free.
     * @param size Size of the buffer to free.
     */
    void free(void* ptr, size_t size) override {
        for (int i = 0; i < MAX_BUFFERS; ++i) {
            ggml_cann_buffer& b = buffer_pool[i];
            if (b.ptr == nullptr) {
                b.ptr = ptr;
                b.size = size;
                return;
            }
        }
        // Freed memory must always be returned to the pool rather than
        // released immediately: it may still be in use by tasks queued on
        // the stream.
        // TODO: fix me.
        GGML_ABORT("CANN buffer pool full, increase MAX_BUFFERS\n");
    }
};
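
// Illustrative usage (sketch): the pool hands back best-fit recycled
// buffers, so the returned capacity may exceed the request. Callers must
// record the actual size and pass it back to free():
//
//     ggml_cann_pool_leg pool(/*device=*/0);
//     size_t actual = 0;
//     void*  buf    = pool.alloc(1 << 20, &actual);  // >= 1 MiB, 128-aligned
//     // ... use buf on the device ...
//     pool.free(buf, actual);  // returned to the pool, not released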

/**
 * @brief A pool of CANN buffers with virtual memory.
 *
 * This class manages a pool of CANN buffers with virtual memory for a specific
 * device.
 */
struct ggml_cann_pool_vmm : public ggml_cann_pool {
    /**
     * @brief The maximum size of the virtual memory pool, set to the usable
     * VRAM of the device.
     */
    size_t max_size;

    /**
     * @brief The device ID associated with this buffer pool.
     */
    int device;

    /**
     * @brief Pointer to the start of the virtual memory pool.
     */
    void* pool_addr = 0;

    /**
     * @brief Amount of virtual memory used in the pool.
     */
    size_t pool_used = 0;

    /**
     * @brief Total size of the virtual memory pool.
     */
    size_t pool_size = 0;

    /**
     * @brief Allocation granularity for the virtual memory pool.
     */
    size_t granularity;

    /**
     * @brief Handles for the physical memory allocated.
     */
    std::vector<aclrtDrvMemHandle> handles;

    /**
     * @brief Offsets for the mapped memory regions.
     */
    std::vector<void*> map_offsets;

    /**
     * @brief Constructor to initialize the buffer pool with virtual memory for
     * a specific device.
     *
     * @param device The device ID to associate with this buffer pool.
     */
    explicit ggml_cann_pool_vmm(int device) : device(device) {
        auto dev = ggml_cann_info().devices[device];
        granularity = dev.vmm_granularity;
        max_size = dev.total_vram;
    }

    /**
     * @brief Destructor to free all buffers in the virtual memory pool.
     */
    ~ggml_cann_pool_vmm() {
        if (pool_addr != 0) {
            for (auto& offset : map_offsets) {
                ACL_CHECK(aclrtUnmapMem(offset));
            }
            for (auto& handle : handles) {
                ACL_CHECK(aclrtFreePhysical(handle));
            }
            ACL_CHECK(aclrtReleaseMemAddress(pool_addr));
        }
    }

    /**
     * @brief Allocate a buffer of the given size in the virtual memory pool.
     *
     * @param size The size of the buffer to allocate.
     * @param actual_size A pointer to a variable to receive the actual size of
     * the allocated buffer.
     * @return A pointer to the allocated buffer.
     */
    void* alloc(size_t size, size_t* actual_size) override {
        // round up the allocation size to the alignment to ensure that all
        // allocations are aligned for all data types
        const size_t alignment = 128;
        size = GGML_PAD(size, alignment);
        if (size == 0) {
            size = alignment;
        }

        size_t avail = pool_size - pool_used;

        if (size > avail) {
            // round up to the next multiple of the granularity
            size_t reserve_size = size - avail;
            reserve_size = GGML_PAD(reserve_size, granularity);

            GGML_ASSERT(pool_size + reserve_size <= max_size);

            // allocate more physical memory
            aclrtPhysicalMemProp prop = {};
            prop.handleType = ACL_MEM_HANDLE_TYPE_NONE;
            prop.allocationType = ACL_MEM_ALLOCATION_TYPE_PINNED;
            prop.memAttr = ACL_HBM_MEM_HUGE;
            prop.location.type = ACL_MEM_LOCATION_TYPE_DEVICE;
            prop.location.id = device;
            prop.reserve = 0;
            aclrtDrvMemHandle handle;
            ACL_CHECK(aclrtMallocPhysical(&handle, reserve_size, &prop, 0));

            // reserve virtual address space (if not already reserved)
            if (pool_addr == 0) {
                ACL_CHECK(aclrtReserveMemAddress(
                    &pool_addr, max_size, 0, NULL, 1));
            }

            // map at the end of the pool
            ACL_CHECK(aclrtMapMem((char*)pool_addr + pool_size, reserve_size, 0,
                                  handle, 0));

            handles.push_back(handle);
            map_offsets.push_back((char*)pool_addr + pool_size);

            // add to the pool
            pool_size += reserve_size;

#ifdef DEBUG_CANN_MALLOC
            GGML_LOG_INFO("cann pool[%d]: size increased to %llu MB (reserved %llu MB)\n",
                          device, (unsigned long long) (pool_size/1024/1024),
                          (unsigned long long) (reserve_size/1024/1024));
#endif
        }

        GGML_ASSERT(pool_addr != 0);

        void* ptr = (void*)((char*)pool_addr + pool_used);
        *actual_size = size;
        pool_used += size;

#ifdef DEBUG_CANN_MALLOC
        GGML_LOG_INFO("cann pool[%d]: allocated %llu bytes at %llx\n", device,
                      (unsigned long long)size, (unsigned long long)ptr);
#endif
        return ptr;
    }

    /**
     * @brief Free a buffer and return it to the virtual memory pool.
     *
     * @param ptr Pointer to the buffer to free.
     * @param size Size of the buffer to free.
     */
    void free(void* ptr, size_t size) override {
#ifdef DEBUG_CANN_MALLOC
        GGML_LOG_INFO("cann pool[%d]: freed %llu bytes at %llx\n", device,
                      (unsigned long long)size, (unsigned long long)ptr);
#endif
        pool_used -= size;

        // all deallocations must be in reverse order of the allocations
        GGML_ASSERT(ptr == (void*)((char*)pool_addr + pool_used));
    }
};
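
// Illustrative usage (sketch): the VMM pool is a bump allocator over one
// reserved virtual range, so frees must be strictly LIFO; freeing out of
// order would trip the assertion in free():
//
//     ggml_cann_pool_vmm pool(/*device=*/0);
//     size_t sa, sb;
//     void* a = pool.alloc(4096, &sa);
//     void* b = pool.alloc(8192, &sb);  // b == (char*)a + sa
//     pool.free(b, sb);                 // most recent allocation first
//     pool.free(a, sa);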

/**
 * @brief Create a new CANN pool for a specific device.
 *
 * Factory method to create a new CANN pool object based on the device type.
 *
 * @param device The device ID for which to create the pool.
 * @return A unique pointer to the created CANN pool.
 */
std::unique_ptr<ggml_cann_pool> ggml_backend_cann_context::new_pool_for_device(
    int device) {
    return std::unique_ptr<ggml_cann_pool>(new ggml_cann_pool_vmm(device));
}

// cann buffer
/**
 * @brief Context for managing a CANN buffer associated with a specific device.
 *
 * This structure holds information about a CANN buffer, including the device
 * ID, device pointer, and a name derived from GGML_CANN_NAME and the device ID.
 */
struct ggml_backend_cann_buffer_context {
    int32_t device;  ///< The device ID associated with this buffer context.
    void* dev_ptr =
        nullptr;  ///< Pointer to the device memory allocated for the buffer.

    /**
     * @brief Constructor to initialize the CANN buffer context.
     *
     * @param device The device ID associated with this buffer context.
     * @param dev_ptr Pointer to the device memory allocated for the buffer.
     */
    ggml_backend_cann_buffer_context(int32_t device, void* dev_ptr)
        : device(device),
          dev_ptr(dev_ptr) {}

    /**
     * @brief Destructor to free the device memory allocated for the buffer.
     */
    ~ggml_backend_cann_buffer_context() { ACL_CHECK(aclrtFree(dev_ptr)); }
};

/**
 * @brief Check if a buffer is a CANN buffer.
 *
 * This function checks if a given buffer is a CANN buffer by checking whether
 * its buffer type is a CANN buffer type.
 *
 * @param buffer The buffer to check.
 * @return true if the buffer is a CANN buffer, false otherwise.
 */
static bool ggml_backend_buft_is_cann(ggml_backend_buffer_type_t buft);
static bool ggml_backend_buffer_is_cann(
    ggml_backend_buffer_t buffer) {
    return ggml_backend_buft_is_cann(buffer->buft);
}

/**
 * @brief Free resources associated with a CANN buffer.
 *
 * This function frees the resources associated with a CANN buffer, including
 * its context.
 *
 * @param buffer The CANN buffer to free.
 */
static void ggml_backend_cann_buffer_free_buffer(
    ggml_backend_buffer_t buffer) {
    ggml_backend_cann_buffer_context* ctx =
        (ggml_backend_cann_buffer_context*)buffer->context;
    delete ctx;
}

/**
 * @brief Retrieve the base pointer of a CANN buffer.
 *
 * This function returns the base pointer of a CANN buffer, which points to the
 * device memory allocated for the buffer.
 *
 * @param buffer The CANN buffer whose base pointer is to be retrieved.
 * @return A pointer to the base of the device memory allocated for the buffer.
 */
static void* ggml_backend_cann_buffer_get_base(
    ggml_backend_buffer_t buffer) {
    ggml_backend_cann_buffer_context* ctx =
        (ggml_backend_cann_buffer_context*)buffer->context;
    return ctx->dev_ptr;
}

/**
 * @brief Transform quantized Q4.0 tensor data into a format suitable for CANN
 * processing.
 *
 * This function transforms quantized Q4.0 tensor data into a format suitable
 * for CANN processing. It extracts quantization values and scales from the
 * source data and prepares them in a format expected by CANN operations.
 *
 * @param tensor Pointer to the tensor information.
 * @param src Pointer to the source data in Q4.0 format.
 * @param dst Pointer to the destination buffer where transformed data will be
 * stored.
 */
static void ggml_backend_cann_transform_q4_0(ggml_tensor* tensor,
                                             const void* src,
                                             void* dst) {
    int64_t n_elems = ggml_nelements(tensor);
    int64_t groups = n_elems / QK4_0;
    size_t quant_bytes = n_elems * sizeof(uint8_t) / 2;

    uint8_t* quant_offset = (uint8_t*)dst;
    uint16_t* scale_offset = (uint16_t*)((char*)dst + quant_bytes);

    for (int i = 0; i < groups; i++) {
        const block_q4_0* group =
            (const block_q4_0*)((const char*)src + i * sizeof(block_q4_0));
        *scale_offset = group->d;
        scale_offset++;

        // 0-15
        for (int j = 0; j < QK4_0 / 2; j += 2) {
            (*quant_offset) = (group->qs[j] & 0x0F);
            (*quant_offset) |= ((group->qs[j + 1] << 4));
            quant_offset++;
        }

        // 16-31
        for (int j = 0; j < QK4_0 / 2; j += 2) {
            (*quant_offset) = (group->qs[j] >> 4);
            (*quant_offset) |= (group->qs[j + 1] & 0xF0);
            quant_offset++;
        }
    }

    // put (uint4b_t - 8) into int4b_t
    for (quant_offset = (uint8_t*)dst;
         quant_offset < (uint8_t*)dst + quant_bytes; quant_offset++) {
        (*quant_offset) ^= 0x88;
    }
}
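
// Worked example (illustrative) of the "^= 0x88" step: Q4_0 stores 4-bit
// codes as unsigned values q in [0, 15] that represent q - 8. Flipping
// bit 3 of each nibble is exactly that subtraction in two's complement,
// applied to both lanes of a packed byte at once:
//
//     code 0x3 (= 3, representing 3 - 8 = -5): 0x3 ^ 0x8 = 0xB = -5 as int4
//     packed byte 0x3A (high lane 3, low lane 10):
//         0x3A ^ 0x88 = 0xB2  ->  lanes (-5, +2) = (3 - 8, 10 - 8)
//
// so a single pass over the quant area converts biased uint4 to signed
// int4 without unpacking.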

/**
 * @brief Transform CANN processed data back into quantized Q4.0 format.
 *
 * This function transforms CANN processed data back into quantized Q4.0 format.
 * It reverses the transformation performed by
 * ggml_backend_cann_transform_q4_0(), converting the data back into its
 * original quantized form.
 *
 * @param tensor Pointer to the tensor information.
 * @param src Pointer to the source buffer containing transformed data.
 * @param dst Pointer to the destination buffer where the Q4.0 formatted data
 * will be stored.
 */
static void ggml_backend_cann_transform_back_q4_0(
    const ggml_tensor* tensor, void* src, void* dst) {
    int64_t n_elems = ggml_nelements(tensor);
    int64_t groups = n_elems / QK4_0;
    size_t quant_bytes = n_elems * sizeof(uint8_t) / 2;

    uint8_t* quant_offset = (uint8_t*)src;
    uint16_t* scale_offset = (uint16_t*)((char*)src + quant_bytes);

    for (; quant_offset < (uint8_t*)src + quant_bytes; quant_offset++) {
        (*quant_offset) ^= 0x88;
    }
    quant_offset = (uint8_t*)src;

    for (int i = 0; i < groups; i++) {
        block_q4_0* group = (block_q4_0*)((char*)dst + i * sizeof(block_q4_0));
        group->d = *scale_offset;
        scale_offset++;

        // 0-15
        for (int j = 0; j < QK4_0 / 2; j += 2) {
            group->qs[j] = ((*quant_offset) & 0x0F);
            group->qs[j + 1] = ((*quant_offset) >> 4);
            quant_offset++;
        }

        // 16-31
        for (int j = 0; j < QK4_0 / 2; j += 2) {
            group->qs[j] |= ((*quant_offset) << 4);
            group->qs[j + 1] |= ((*quant_offset) & 0xF0);
            quant_offset++;
        }
    }
}

/**
 * @brief Transform quantized Q8.0 tensor data into a format suitable for CANN
 * processing.
 *
 * This function transforms quantized Q8.0 tensor data into a format suitable
 * for CANN processing. It extracts quantization values and scales from the
 * source data and prepares them in a format expected by CANN operations.
 *
 * @param tensor Pointer to the tensor information.
 * @param src Pointer to the source data in Q8.0 format.
 * @param dst Pointer to the destination buffer where transformed data will be
 * stored.
 */
static void ggml_backend_cann_transform_q8_0(ggml_tensor* tensor,
                                             const void* src,
                                             void* dst) {
    int64_t n_elems = ggml_nelements(tensor);
    int64_t groups = n_elems / QK8_0;
    size_t quant_bytes = n_elems * sizeof(uint8_t);

    uint8_t* quant_offset = (uint8_t*)dst;
    uint16_t* scale_offset = (uint16_t*)((char*)dst + quant_bytes);

    for (int i = 0; i < groups; i++) {
        const block_q8_0* group =
            (const block_q8_0*)((const char*)src + i * sizeof(block_q8_0));
        *scale_offset = group->d;
        scale_offset++;
        size_t group_quant_size = QK8_0 * sizeof(uint8_t);
        memcpy(quant_offset, group->qs, group_quant_size);
        quant_offset += group_quant_size;
    }
}
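
// Layout sketch (illustrative): both the Q4_0 and Q8_0 transforms split the
// interleaved ggml block format into two contiguous planes, which is the
// layout the CANN kernels consume:
//
//     ggml (blocks):  [d0|qs0] [d1|qs1] ... [dN|qsN]
//     CANN (planes):  [qs0 qs1 ... qsN][d0 d1 ... dN]
//                      '- quant_bytes -''-- scales --'
//
// For Q8_0 the quant plane is n_elems bytes; for Q4_0 it is n_elems / 2.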

/**
 * @brief Transform CANN processed data back into quantized Q8.0 format.
 *
 * This function transforms CANN processed data back into quantized Q8.0 format.
 * It reverses the transformation performed by
 * ggml_backend_cann_transform_q8_0(), converting the data back into its
 * original quantized form.
 *
 * @param tensor Pointer to the tensor information.
 * @param src Pointer to the source buffer containing transformed data.
 * @param dst Pointer to the destination buffer where the Q8.0 formatted data
 * will be stored.
 */
static void ggml_backend_cann_transform_back_q8_0(
    const ggml_tensor* tensor, const void* src, void* dst) {
    int64_t n_elems = ggml_nelements(tensor);
    int64_t groups = n_elems / QK8_0;
    size_t quant_bytes = n_elems * sizeof(uint8_t);

    const uint8_t* quant_offset = (const uint8_t*)src;
    const uint16_t* scale_offset =
        (const uint16_t*)((const char*)src + quant_bytes);

    for (int i = 0; i < groups; i++) {
        block_q8_0* group = (block_q8_0*)((char*)dst + i * sizeof(block_q8_0));
        group->d = *scale_offset;
        scale_offset++;
        size_t group_quant_size = QK8_0 * sizeof(uint8_t);
        memcpy(group->qs, quant_offset, group_quant_size);
        quant_offset += group_quant_size;
    }
}

/**
 * @brief Transform tensor data based on its type for CANN processing.
 *
 * This function transforms tensor data based on its quantization type for CANN
 * processing. It dispatches the transformation based on the tensor's type to
 * specialized functions handling Q4.0 and Q8.0 formats.
 *
 * @param tensor Pointer to the tensor information.
 * @param src Pointer to the source data to be transformed.
 * @param dst Pointer to the destination buffer where transformed data will be
 * stored.
 */
static void ggml_backend_cann_transform(ggml_tensor* tensor,
                                        const void* src, void* dst) {
    switch (tensor->type) {
        case GGML_TYPE_Q4_0:
            ggml_backend_cann_transform_q4_0(tensor, src, dst);
            break;
        case GGML_TYPE_Q8_0:
            ggml_backend_cann_transform_q8_0(tensor, src, dst);
            break;
        default:
            break;
    }
}

/**
 * @brief Transform CANN processed data back into tensor data based on its type.
 *
 * This function transforms CANN processed data back into tensor data based on
 * its quantization type for Q4.0 and Q8.0 formats. It dispatches the
 * transformation based on the tensor's type to specialized functions.
 *
 * @param tensor Pointer to the tensor information.
 * @param src Pointer to the source data containing CANN processed data.
 * @param dst Pointer to the destination buffer where transformed tensor data
 * will be stored.
 */
static void ggml_backend_cann_transform_back(
    const ggml_tensor* tensor, void* src, void* dst) {
    switch (tensor->type) {
        case GGML_TYPE_Q4_0:
            ggml_backend_cann_transform_back_q4_0(tensor, src, dst);
            break;
        case GGML_TYPE_Q8_0:
            ggml_backend_cann_transform_back_q8_0(tensor, src, dst);
            break;
        default:
            break;
    }
}

/**
 * @brief Check if transformation is needed for a given tensor type.
 *
 * This function checks if transformation is needed for a given tensor type
 * to prepare data for CANN processing.
 *
 * @param type The tensor type to check.
 * @return true if transformation is needed, false otherwise.
 */
static bool need_transform(ggml_type type) {
    switch (type) {
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q8_0:
            return true;
        default:
            return false;
    }
}

/**
 * @brief Initialize a tensor using data from a CANN buffer.
 *
 * This function initializes a tensor using data from a CANN buffer.
 * It handles special cases such as views and quantization.
 *
 * @param buffer The CANN buffer from which to initialize the tensor.
 * @param tensor Pointer to the tensor to be initialized.
 */
static void ggml_backend_cann_buffer_init_tensor(
    ggml_backend_buffer_t buffer, ggml_tensor* tensor) {
    if (tensor->view_src != NULL && tensor->view_offs == 0) {
        GGML_ASSERT(tensor->view_src->buffer->buft == buffer->buft);
        return;
    }

    // TODO: the CANN backend doesn't support quantized tensors yet. Just
    // leave the code here.
    if (ggml_is_quantized(tensor->type)) {
        // Initialize padding to 0 to avoid possible NaN values
        size_t original_size = ggml_nbytes(tensor);
        size_t padded_size =
            ggml_backend_buft_get_alloc_size(buffer->buft, tensor);

        if (padded_size > original_size && tensor->view_src == nullptr) {
            size_t memset_size = padded_size - original_size;
            ACL_CHECK(aclrtMemset((char*)tensor->data + original_size,
                                  memset_size, 0, memset_size));
        }
    }
}

// TODO: need to handle tensors which have padding.
/**
 * @brief Set tensor data in a CANN buffer.
 *
 * This function sets tensor data in a CANN buffer, handling transformations
 * if needed based on the tensor's type.
 *
 * @param buffer The CANN buffer where the tensor data will be set.
 * @param tensor Pointer to the tensor whose data will be set.
 * @param data Pointer to the source data to be copied into the tensor.
 * @param offset Offset in the source data from where to start copying.
 * @param size Size of the data to be copied, in bytes.
 */
static void ggml_backend_cann_buffer_set_tensor(
    ggml_backend_buffer_t buffer, ggml_tensor *tensor, const void *data,
    size_t offset, size_t size) {
    ggml_backend_cann_buffer_context *ctx =
        (ggml_backend_cann_buffer_context *)buffer->context;

    ggml_cann_set_device(ctx->device);
    // TODO: refer to cann(#6017), it uses the thread's default stream.
    // For ACL, synchronous functions use this default stream.
    // Why aclrtSynchronizeDevice?

    if (!need_transform(tensor->type)) {
        ACL_CHECK(aclrtMemcpy((char *)tensor->data + offset, size, data, size,
                              ACL_MEMCPY_HOST_TO_DEVICE));
    } else {
        void *transform_buffer = malloc(size);
        ggml_backend_cann_transform(tensor, data, transform_buffer);

        ACL_CHECK(aclrtMemcpy((char *)tensor->data + offset, size,
                              transform_buffer, size,
                              ACL_MEMCPY_HOST_TO_DEVICE));
        free(transform_buffer);
    }
}

/**
 * @brief Get tensor data from a CANN buffer.
 *
 * This function retrieves tensor data from a CANN buffer, handling
 * transformations if needed based on the tensor's type.
 *
 * @param buffer The CANN buffer from which to retrieve tensor data.
 * @param tensor Pointer to the tensor whose data will be retrieved.
 * @param data Pointer to the destination buffer where the tensor data will be
 * copied.
 * @param offset Offset in the destination buffer where to start copying.
 * @param size Size of the data to be copied, in bytes.
 */
static void ggml_backend_cann_buffer_get_tensor(
    ggml_backend_buffer_t buffer, const ggml_tensor* tensor, void* data,
    size_t offset, size_t size) {
    ggml_backend_cann_buffer_context* ctx =
        (ggml_backend_cann_buffer_context*)buffer->context;

    ggml_cann_set_device(ctx->device);

    if (!need_transform(tensor->type)) {
        ACL_CHECK(aclrtMemcpy(data, size, (char*)tensor->data + offset, size,
                              ACL_MEMCPY_DEVICE_TO_HOST));
    } else {
        void* transform_buffer = malloc(size);
        ACL_CHECK(aclrtMemcpy(transform_buffer, size,
                              (char*)tensor->data + offset, size,
                              ACL_MEMCPY_DEVICE_TO_HOST));
        ggml_backend_cann_transform_back(tensor, transform_buffer, data);
        free(transform_buffer);
    }
}
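
// Illustrative round trip (a sketch using the public ggml-backend API):
// for Q4_0/Q8_0 tensors, set_tensor transforms host data into the CANN
// layout on upload and get_tensor reverses it on download, so callers
// always see standard ggml block data:
//
//     std::vector<uint8_t> host(ggml_nbytes(t)), back(ggml_nbytes(t));
//     ggml_backend_tensor_set(t, host.data(), 0, host.size());  // upload
//     ggml_backend_tensor_get(t, back.data(), 0, back.size());  // download
//     // back now equals host, whatever the on-device layout was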

/**
 * @brief Copy tensor data between CANN buffers if possible.
 *
 * This function copies tensor data between CANN buffers if the source and
 * destination buffers are CANN buffers and they meet the necessary conditions
 * (same device or devices can access each other).
 *
 * @param buffer The destination CANN buffer where the tensor data will be
 * copied.
 * @param src Pointer to the source tensor whose data will be copied.
 * @param dst Pointer to the destination tensor where the data will be copied.
 * @return true if the copy operation succeeded, false otherwise.
 */
static bool ggml_backend_cann_buffer_cpy_tensor(
    ggml_backend_buffer_t buffer, const ggml_tensor* src, ggml_tensor* dst) {
    if (ggml_backend_buffer_is_cann(src->buffer)) {
        ggml_backend_cann_buffer_context* src_ctx =
            (ggml_backend_cann_buffer_context*)src->buffer->context;
        ggml_backend_cann_buffer_context* dst_ctx =
            (ggml_backend_cann_buffer_context*)buffer->context;

        size_t memcpy_size = ggml_nbytes(src);
        // Same device.
        if (src_ctx->device == dst_ctx->device) {
            ACL_CHECK(aclrtMemcpy((char*)dst->data, memcpy_size,
                                  (const char*)src->data, memcpy_size,
                                  ACL_MEMCPY_DEVICE_TO_DEVICE));
            return true;
        } else {
            // Different devices, but peer access is available.
            int32_t canAccessPeer = 0;
            ACL_CHECK(aclrtDeviceCanAccessPeer(&canAccessPeer, src_ctx->device,
                                               dst_ctx->device));
            if (canAccessPeer) {
                ggml_cann_set_device(src_ctx->device);
                ACL_CHECK(aclrtDeviceEnablePeerAccess(dst_ctx->device, 0));
                ACL_CHECK(aclrtMemcpy((char*)dst->data, memcpy_size,
                                      (const char*)src->data, memcpy_size,
                                      ACL_MEMCPY_DEVICE_TO_DEVICE));
                return true;
            }
        }
    }
    return false;
}

/**
 * @brief Clear a CANN buffer by setting all its memory to a specified value.
 *
 * This function clears a CANN buffer by setting all its memory to a specified
 * value.
 *
 * @param buffer The CANN buffer to be cleared.
 * @param value The value to which each byte in the buffer will be set.
 */
static void ggml_backend_cann_buffer_clear(
    ggml_backend_buffer_t buffer, uint8_t value) {
    ggml_backend_cann_buffer_context* ctx =
        (ggml_backend_cann_buffer_context*)buffer->context;

    ggml_cann_set_device(ctx->device);
    ACL_CHECK(aclrtMemset(ctx->dev_ptr, buffer->size, value, buffer->size));
}

/**
 * @brief Interface for a CANN buffer in the backend.
 *
 * This structure defines function pointers to operations that can be performed
 * on a CANN buffer within the backend.
 */
static const ggml_backend_buffer_i ggml_backend_cann_buffer_interface = {
    /* .free_buffer   = */ ggml_backend_cann_buffer_free_buffer,
    /* .get_base      = */ ggml_backend_cann_buffer_get_base,
    /* .init_tensor   = */ ggml_backend_cann_buffer_init_tensor,
    /* .memset_tensor = */ NULL,
    /* .set_tensor    = */ ggml_backend_cann_buffer_set_tensor,
    /* .get_tensor    = */ ggml_backend_cann_buffer_get_tensor,
    /* .cpy_tensor    = */ ggml_backend_cann_buffer_cpy_tensor,
    /* .clear         = */ ggml_backend_cann_buffer_clear,
    /* .reset         = */ NULL,
};

// cann buffer type
/**
 * @brief Structure representing context information for a specific backend
 * buffer type.
 */
struct ggml_backend_cann_buffer_type_context {
    int32_t
        device; /**< Device identifier associated with the buffer context. */
    std::string name; /**< Name associated with the buffer context. */
};

/**
 * @brief Retrieves the name associated with a CANN buffer type.
 *
 * This function returns the descriptive name associated with the specified
 * CANN buffer type context.
 *
 * @param buft Pointer to the buffer type context.
 * @return Const pointer to the C-style string containing the name.
 */
static const char* ggml_backend_cann_buffer_type_name(
    ggml_backend_buffer_type_t buft) {
    ggml_backend_cann_buffer_type_context* buft_ctx =
        (ggml_backend_cann_buffer_type_context*)buft->context;

    return buft_ctx->name.c_str();
}

/**
 * @brief Allocates a new CANN buffer of the specified type and size.
 *
 * This function allocates a new CANN buffer on the specified device with the
 * given size.
 *
 * @param buft Pointer to the buffer type context.
 * @param size Size in bytes of the buffer to allocate.
 * @return Pointer to the allocated buffer, or nullptr if allocation fails.
 */
static ggml_backend_buffer_t
ggml_backend_cann_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft,
                                           size_t size) {
    ggml_backend_cann_buffer_type_context* buft_ctx =
        (ggml_backend_cann_buffer_type_context*)buft->context;

    ggml_cann_set_device(buft_ctx->device);

    size = std::max(size, (size_t)1);

    void* dev_ptr;
    aclError err = aclrtMalloc(&dev_ptr, size, ACL_MEM_MALLOC_HUGE_FIRST);
    if (err != ACL_SUCCESS) {
        GGML_LOG_ERROR(
            "%s: allocating %.2f MiB on device %d: aclrtMalloc failed: %s\n",
            __func__, size / 1024.0 / 1024.0, buft_ctx->device,
            aclGetRecentErrMsg());
        return nullptr;
    }

    ggml_backend_cann_buffer_context* ctx =
        new ggml_backend_cann_buffer_context(buft_ctx->device, dev_ptr);

    return ggml_backend_buffer_init(buft, ggml_backend_cann_buffer_interface,
                                    ctx, size);
}

/**
 * @brief Retrieves the memory alignment requirement for CANN buffers of this
 * type.
 *
 * This function returns the alignment requirement in bytes for memory allocated
 * by the CANN buffer type.
 *
 * @param buft Pointer to the buffer type context (unused in this
 * implementation).
 * @return The alignment requirement in bytes (fixed at 128 bytes for CANN
 * buffers).
 */
static size_t ggml_backend_cann_buffer_type_get_alignment(
    ggml_backend_buffer_type_t buft) {
    return 128;

    GGML_UNUSED(buft);
}

/**
 * @brief Calculates the allocation size required for a tensor in a CANN buffer.
 *
 * Computes the total allocation size needed for storing the tensor's data in a
 * CANN buffer, considering any necessary padding or adjustments for quantized
 * types.
 *
 * @param buft Pointer to the buffer type context (unused in this
 * implementation).
 * @param tensor Pointer to the tensor for which the allocation size is
 * calculated.
 * @return The total allocation size in bytes required for the tensor in the
 * CANN buffer.
 */
static size_t ggml_backend_cann_buffer_type_get_alloc_size(
    ggml_backend_buffer_type_t buft, const ggml_tensor* tensor) {
    size_t size = ggml_nbytes(tensor);
    int64_t ne0 = tensor->ne[0];

    // The last line must be larger than 32 bytes, because every single op
    // deals with at least 32 bytes.
    // TODO: quantized type?
    // int64_t line_size = ne0 * ggml_element_size(tensor);
    // int64_t line_size_align_32 = (line_size + 31) & ~31;
    // size += (line_size_align_32 - line_size);

    // TODO: quantized types are not supported yet.
    // TODO: consider non-contiguous tensors.
    if (ggml_is_quantized(tensor->type)) {
        if (ne0 % MATRIX_ROW_PADDING != 0) {
            size += ggml_row_size(
                tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING);
        }
    }

    return size;

    GGML_UNUSED(buft);
}

static bool ggml_backend_cann_buffer_type_is_host(ggml_backend_buffer_type_t buft) {
    return false;

    GGML_UNUSED(buft);
}

/**
 * @brief Interface for managing CANN buffer types in the GGML backend.
 *
 * Provides function pointers for allocating, querying properties, and managing
 * memory for CANN buffer types in the GGML backend.
 */
static const ggml_backend_buffer_type_i ggml_backend_cann_buffer_type_interface = {
    /* .get_name       = */ ggml_backend_cann_buffer_type_name,
    /* .alloc_buffer   = */ ggml_backend_cann_buffer_type_alloc_buffer,
    /* .get_alignment  = */ ggml_backend_cann_buffer_type_get_alignment,
    /* .get_max_size   = */ NULL,  // defaults to SIZE_MAX
    /* .get_alloc_size = */ ggml_backend_cann_buffer_type_get_alloc_size,
    /* .is_host        = */ ggml_backend_cann_buffer_type_is_host,
};

/**
 * @brief Retrieves the CANN buffer type for a specified device.
 *
 * This function initializes and returns the buffer type interface associated
 * with the given device. It ensures thread-safe access using a mutex.
 *
 * @param device The device index for which to retrieve the buffer type.
 * @return A pointer to the buffer type interface for the specified device, or
 * nullptr if the device index is out of range.
 */
ggml_backend_buffer_type_t
ggml_backend_cann_buffer_type(int32_t device) {
    static std::mutex mutex;
    std::lock_guard<std::mutex> lock(mutex);

    if (device >= ggml_backend_cann_get_device_count()) {
        return nullptr;
    }

    static ggml_backend_buffer_type
        ggml_backend_cann_buffer_types[GGML_CANN_MAX_DEVICES];

    static bool ggml_backend_cann_buffer_type_initialized = false;

    if (!ggml_backend_cann_buffer_type_initialized) {
        for (int32_t i = 0; i < ggml_cann_info().device_count; i++) {
            ggml_backend_cann_buffer_types[i] = {
                /* .iface   = */ ggml_backend_cann_buffer_type_interface,
                /* .device  = */ ggml_backend_reg_dev_get(ggml_backend_cann_reg(), i),
                /* .context = */
                new ggml_backend_cann_buffer_type_context{
                    i, "CANN" + std::to_string(i)},
            };
        }
        ggml_backend_cann_buffer_type_initialized = true;
    }

    return &ggml_backend_cann_buffer_types[device];
}
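
// Illustrative usage (sketch, via the generic ggml-backend API): allocate a
// device buffer of this type and place tensors in it; the allocation size
// already accounts for the quantization row padding computed above:
//
//     ggml_backend_buffer_type_t buft = ggml_backend_cann_buffer_type(0);
//     ggml_backend_buffer_t buf =
//         ggml_backend_buft_alloc_buffer(buft, 64 * 1024 * 1024);  // 64 MiB
//     // ... sub-allocate tensors from buf ...
//     ggml_backend_buffer_free(buf);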

/**
 * @brief Retrieves the name associated with a CANN host buffer type.
 *
 * This function returns the descriptive name associated with the specified
 * CANN host buffer type context.
 *
 * @param buft Pointer to the host buffer type context.
 * @return Const pointer to the C-style string containing the name.
 */
static const char * ggml_backend_cann_host_buffer_type_name(ggml_backend_buffer_type_t buft) {
    return "CANN_Host";

    GGML_UNUSED(buft);
}

/**
 * @brief Retrieves the name associated with a CANN host buffer.
 *
 * This function returns the descriptive name associated with the specified
 * CANN host buffer context.
 *
 * @param buffer Pointer to the host buffer context.
 * @return Const pointer to the C-style string containing the name.
 */
static const char * ggml_backend_cann_host_buffer_name(ggml_backend_buffer_t buffer) {
    return "CANN_Host";

    GGML_UNUSED(buffer);
}

/**
 * @brief Free resources associated with a CANN host buffer.
 *
 * This function frees the resources associated with a CANN host buffer,
 * including its context.
 *
 * @param buffer The CANN host buffer to free.
 */
static void ggml_backend_cann_host_buffer_free(ggml_backend_buffer_t buffer) {
    ACL_CHECK(aclrtFreeHost(buffer->context));
}

/**
 * @brief Allocates a new CANN host buffer of the specified size.
 *
 * This function allocates a new CANN host buffer with the given size.
 *
 * @param size Size in bytes of the host buffer to allocate.
 * @return Pointer to the allocated host buffer, or nullptr if allocation fails.
 */
static void * ggml_cann_host_malloc(size_t size) {
    if (getenv("GGML_CANN_NO_PINNED") != nullptr) {
        return nullptr;
    }

    const size_t alignment = 128;
    size = GGML_PAD(size, alignment);
    if (size == 0) {
        size = alignment;
    }

    void * hostPtr = nullptr;
    aclError err = aclrtMallocHost((void **) &hostPtr, size);
    if (err != ACL_SUCCESS) {
        GGML_LOG_WARN("%s: failed to allocate %.2f MiB of pinned memory: %s\n", __func__,
                      size / 1024.0 / 1024.0, aclGetRecentErrMsg());
        return nullptr;
    }
    return hostPtr;
}

/**
 * @brief Allocates a new CANN host buffer of the specified type and size.
 *
 * @param buft Pointer to the host buffer type context.
 * @param size Size in bytes of the host buffer to allocate.
 * @return Pointer to the allocated host buffer, or a CPU buffer pointer if
 * allocation fails.
 */
static ggml_backend_buffer_t ggml_backend_cann_host_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
    void * hostPtr = ggml_cann_host_malloc(size);

    if (hostPtr == nullptr) {
        // fallback to cpu buffer
        return ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size);
    }

    ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(hostPtr, size);
    buffer->buft = buft;
    buffer->iface.free_buffer = ggml_backend_cann_host_buffer_free;

    return buffer;
}

/**
 * @brief Interface for managing CANN host buffer types in the GGML backend.
 *
 * Provides function pointers for allocating, querying properties, and managing
 * memory for CANN host buffer types in the GGML backend.
 */
ggml_backend_buffer_type_t ggml_backend_cann_host_buffer_type() {
    static struct ggml_backend_buffer_type ggml_backend_cann_buffer_type_host = {
        /* .iface    = */ {
            /* .get_name       = */ ggml_backend_cann_host_buffer_type_name,
            /* .alloc_buffer   = */ ggml_backend_cann_host_buffer_type_alloc_buffer,
            /* .get_alignment  = */ ggml_backend_cpu_buffer_type()->iface.get_alignment,
            /* .get_max_size   = */ NULL,  // defaults to SIZE_MAX
            /* .get_alloc_size = */ ggml_backend_cpu_buffer_type()->iface.get_alloc_size,
            /* .is_host        = */ ggml_backend_cpu_buffer_type()->iface.is_host,
        },
        /* .device   = */ ggml_backend_reg_dev_get(ggml_backend_cann_reg(), 0),
        /* .context  = */ nullptr,
    };

    return &ggml_backend_cann_buffer_type_host;
}
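
// Illustrative usage (sketch): pinned host memory speeds up host<->device
// copies; this type transparently falls back to a plain CPU buffer when
// pinning fails or the GGML_CANN_NO_PINNED environment variable is set:
//
//     ggml_backend_buffer_t staging = ggml_backend_buft_alloc_buffer(
//         ggml_backend_cann_host_buffer_type(), 16 * 1024 * 1024);
//     // fill the staging buffer, then use it as a source for uploads
//     ggml_backend_buffer_free(staging);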
  1136. /**
  1137. * @brief Computes the forward operation for a given tensor using CANN
  1138. * operations.
  1139. *
  1140. * This function selects the appropriate CANN operation based on the type of
  1141. * operation specified in the tensor and performs the computation.
  1142. *
  1143. * @param ctx The CANN context containing necessary resources and
  1144. * configurations.
  1145. * @param dst The destination tensor where the result of the computation will be
  1146. * stored.
  1147. * @return true if the computation was successful; false otherwise.
  1148. */
  1149. static bool ggml_cann_compute_forward(ggml_backend_cann_context& ctx,
  1150. struct ggml_tensor* dst) {
  1151. switch (dst->op) {
  1152. case GGML_OP_REPEAT:
  1153. ggml_cann_repeat(ctx, dst);
  1154. break;
  1155. case GGML_OP_GET_ROWS:
  1156. ggml_cann_get_rows(ctx, dst);
  1157. break;
  1158. case GGML_OP_DUP:
  1159. ggml_cann_dup(ctx, dst);
  1160. break;
  1161. case GGML_OP_ADD:
  1162. ggml_cann_add(ctx, dst);
  1163. break;
  1164. case GGML_OP_ACC:
  1165. ggml_cann_acc(ctx, dst);
  1166. break;
  1167. case GGML_OP_MUL:
  1168. ggml_cann_mul_div<aclnnMulGetWorkspaceSize, aclnnMul>(ctx, dst);
  1169. break;
  1170. case GGML_OP_DIV:
  1171. ggml_cann_mul_div<aclnnDivGetWorkspaceSize, aclnnDiv>(ctx, dst);
  1172. break;
  1173. case GGML_OP_UNARY:
  1174. switch (ggml_get_unary_op(dst)) {
  1175. case GGML_UNARY_OP_GELU:
  1176. ggml_cann_activation<aclnnGeluGetWorkspaceSize, aclnnGelu>(
  1177. ctx, dst);
  1178. break;
  1179. case GGML_UNARY_OP_SILU:
  1180. ggml_cann_activation<aclnnSiluGetWorkspaceSize, aclnnSilu>(
  1181. ctx, dst);
  1182. break;
  1183. // TODO: Use faster gelu??
  1184. case GGML_UNARY_OP_GELU_QUICK:
  1185. ggml_cann_activation<aclnnGeluGetWorkspaceSize, aclnnGelu>(
  1186. ctx, dst);
  1187. break;
  1188. case GGML_UNARY_OP_TANH:
  1189. ggml_cann_activation<aclnnTanhGetWorkspaceSize, aclnnTanh>(
  1190. ctx, dst);
  1191. break;
  1192. case GGML_UNARY_OP_RELU:
  1193. ggml_cann_activation<aclnnReluGetWorkspaceSize, aclnnRelu>(
  1194. ctx, dst);
  1195. break;
  1196. case GGML_UNARY_OP_HARDSIGMOID:
  1197. ggml_cann_activation<aclnnHardsigmoidGetWorkspaceSize,
  1198. aclnnHardsigmoid>(ctx, dst);
  1199. break;
  1200. case GGML_UNARY_OP_HARDSWISH:
  1201. ggml_cann_activation<aclnnHardswishGetWorkspaceSize,
  1202. aclnnHardswish>(ctx, dst);
  1203. break;
  1204. default:
  1205. return false;
  1206. }
  1207. break;
  1208. case GGML_OP_NORM:
  1209. ggml_cann_norm(ctx, dst);
  1210. break;
  1211. case GGML_OP_GROUP_NORM:
  1212. ggml_cann_group_norm(ctx, dst);
  1213. break;
  1214. case GGML_OP_CONCAT:
  1215. ggml_cann_concat(ctx, dst);
  1216. break;
  1217. case GGML_OP_UPSCALE:
  1218. ggml_cann_upsample_nearest2d(ctx, dst);
  1219. break;
  1220. case GGML_OP_PAD:
  1221. ggml_cann_pad(ctx, dst);
  1222. break;
  1223. case GGML_OP_ARANGE:
  1224. ggml_cann_arange(ctx, dst);
  1225. break;
  1226. case GGML_OP_TIMESTEP_EMBEDDING:
  1227. ggml_cann_timestep_embedding(ctx, dst);
  1228. break;
  1229. case GGML_OP_LEAKY_RELU:
  1230. ggml_cann_leaky_relu(ctx, dst);
  1231. break;
  1232. case GGML_OP_RMS_NORM:
  1233. ggml_cann_rms_norm(ctx, dst);
  1234. break;
  1235. case GGML_OP_MUL_MAT:
  1236. ggml_cann_mul_mat(ctx, dst);
  1237. break;
  1238. case GGML_OP_MUL_MAT_ID:
  1239. return false;
  1240. case GGML_OP_SCALE:
  1241. ggml_cann_scale(ctx, dst);
  1242. break;
  1243. case GGML_OP_SQR:
  1244. ggml_cann_sqr(ctx, dst);
  1245. break;
  1246. case GGML_OP_CLAMP:
  1247. ggml_cann_clamp(ctx, dst);
  1248. break;
  1249. case GGML_OP_CPY:
  1250. ggml_cann_cpy(ctx, dst);
  1251. break;
  1252. case GGML_OP_CONT:
  1253. ggml_cann_dup(ctx, dst);
  1254. break;
  1255. case GGML_OP_NONE:
  1256. case GGML_OP_RESHAPE:
  1257. case GGML_OP_VIEW:
  1258. case GGML_OP_PERMUTE:
  1259. case GGML_OP_TRANSPOSE:
  1260. break;
  1261. case GGML_OP_DIAG_MASK_INF:
  1262. ggml_cann_diag_mask(ctx, dst, -INFINITY);
  1263. break;
  1264. case GGML_OP_SOFT_MAX:
  1265. ggml_cann_softmax(ctx, dst);
  1266. break;
  1267. case GGML_OP_ROPE:
  1268. ggml_cann_rope(ctx, dst);
  1269. break;
  1270. case GGML_OP_IM2COL:
  1271. ggml_cann_im2col(ctx, dst);
  1272. break;
  1273. case GGML_OP_POOL_2D:
  1274. ggml_cann_pool2d(ctx, dst);
  1275. break;
  1276. case GGML_OP_SUM_ROWS:
  1277. ggml_cann_sum_rows(ctx, dst);
  1278. break;
  1279. case GGML_OP_ARGSORT:
  1280. ggml_cann_argsort(ctx, dst);
  1281. break;
  1282. default:
  1283. return false;
  1284. }
  1285. return true;
  1286. }
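
// Extension sketch (illustrative only, not part of this file): adding a new
// operator to the dispatch above only needs one more case that forwards to a
// kernel wrapper with the same (ctx, dst) signature. GGML_OP_MY_OP and
// ggml_cann_my_op below are hypothetical placeholder names.
//
//     case GGML_OP_MY_OP:               // hypothetical op enum value
//         ggml_cann_my_op(ctx, dst);    // hypothetical wrapper over aclnn calls
//         break;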
// backend

/**
 * @brief Retrieves the name associated with the CANN backend.
 *
 * This function returns the name assigned to the CANN backend, which is stored
 * in the context of the provided backend structure.
 *
 * @param backend Pointer to the CANN backend structure.
 * @return A pointer to a constant string representing the backend name.
 */
static const char* ggml_backend_cann_name(ggml_backend_t backend) {
    ggml_backend_cann_context* cann_ctx =
        (ggml_backend_cann_context*)backend->context;

    return cann_ctx->name.c_str();
}

/**
 * @brief Frees resources associated with the CANN backend.
 *
 * This function releases resources associated with the CANN backend context
 * and resets the device associated with the backend to its initial state.
 *
 * @param backend Pointer to the CANN backend structure to be freed.
 */
static void ggml_backend_cann_free(ggml_backend_t backend) {
    ggml_backend_cann_context* cann_ctx =
        (ggml_backend_cann_context*)backend->context;
    ACL_CHECK(aclrtSynchronizeDevice());
    ACL_CHECK(aclrtResetDevice(cann_ctx->device));

    // finalize ACL when the last backend is freed.
    if (cann_ctx->device == ggml_backend_cann_get_device_count() - 1) {
        ACL_CHECK(aclFinalize());
    }

    delete cann_ctx;
    delete backend;
}

/**
 * @brief Sets tensor data asynchronously in the CANN backend.
 *
 * This function asynchronously sets tensor data in the CANN backend. Depending
 * on the tensor type, it may perform data transformations before copying data
 * to the device.
 *
 * @param backend Pointer to the CANN backend structure.
 * @param tensor Pointer to the tensor structure to set data for.
 * @param data Pointer to the host data to copy to the tensor.
 * @param offset Offset in bytes within the tensor data.
 * @param size Size of the data to copy in bytes.
 */
static void ggml_backend_cann_set_tensor_async(ggml_backend_t backend,
                                               ggml_tensor *tensor,
                                               const void *data,
                                               size_t offset,
                                               size_t size) {
    ggml_backend_cann_context *cann_ctx =
        (ggml_backend_cann_context *)backend->context;

    if (!need_transform(tensor->type)) {
        ACL_CHECK(aclrtMemcpyAsync((char *)tensor->data + offset, size, data,
                                   size, ACL_MEMCPY_HOST_TO_DEVICE,
                                   cann_ctx->stream()));
    } else {
        void *transform_buffer = malloc(size);
        ggml_backend_cann_transform(tensor, data, transform_buffer);

        ACL_CHECK(aclrtMemcpyAsync(
            (char *)tensor->data + offset, size, transform_buffer, size,
            ACL_MEMCPY_HOST_TO_DEVICE, cann_ctx->stream()));
        // the intermediate buffer cannot be freed until the copy completes.
        ACL_CHECK(aclrtSynchronizeStream(cann_ctx->stream()));
        free(transform_buffer);
    }
}

/**
 * @brief Gets tensor data asynchronously from the CANN backend.
 *
 * This function asynchronously copies tensor data from the device to the given
 * host buffer. Depending on the tensor type, it may transform the data back to
 * the original layout after copying it from the device.
 *
 * @param backend Pointer to the CANN backend structure.
 * @param tensor Pointer to the tensor structure to read data from.
 * @param data Pointer to the host buffer that receives the data.
 * @param offset Offset in bytes within the tensor data.
 * @param size Size of the data to copy in bytes.
 */
static void ggml_backend_cann_get_tensor_async(
    ggml_backend_t backend, const ggml_tensor *tensor, void *data,
    size_t offset, size_t size) {
    ggml_backend_cann_context *cann_ctx =
        (ggml_backend_cann_context *)backend->context;
    ggml_backend_buffer_t buf =
        tensor->view_src ? tensor->view_src->buffer : tensor->buffer;

    GGML_ASSERT(buf->buft == ggml_backend_cann_buffer_type(cann_ctx->device) &&
                "unsupported buffer type");

    if (!need_transform(tensor->type)) {
        ACL_CHECK(aclrtMemcpyAsync(data, size, (char *)tensor->data + offset,
                                   size, ACL_MEMCPY_DEVICE_TO_HOST,
                                   cann_ctx->stream()));
    } else {
        void *transform_buffer = malloc(size);
        ACL_CHECK(aclrtMemcpyAsync(
            transform_buffer, size, (char *)tensor->data + offset, size,
            ACL_MEMCPY_DEVICE_TO_HOST, cann_ctx->stream()));
        ACL_CHECK(aclrtSynchronizeStream(cann_ctx->stream()));
        ggml_backend_cann_transform_back(tensor, transform_buffer, data);
        free(transform_buffer);
    }
}
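
// Usage sketch (illustrative only): the two async transfer paths above are
// normally reached through the generic API in "ggml-backend.h"; `backend` and
// `t` below are placeholders for a CANN backend and an F32 tensor already
// allocated in a CANN buffer. The host buffers must stay valid until the
// stream is synchronized.
//
//     float in[16] = {/* ... */}, out[16];
//     ggml_backend_tensor_set_async(backend, t, in,  0, sizeof(in));
//     ggml_backend_tensor_get_async(backend, t, out, 0, sizeof(out));
//     ggml_backend_synchronize(backend);  // wait for both copies to complete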
/**
 * @brief Asynchronously copies tensor data between CANN backends.
 *
 * This function copies tensor data asynchronously between two CANN backends. It
 * checks that both tensors reside in CANN buffers and whether the devices
 * support peer-to-peer access for direct copying. If not, it returns false.
 *
 * @param backend_src Pointer to the source CANN backend structure.
 * @param backend_dst Pointer to the destination CANN backend structure.
 * @param src Pointer to the source tensor to copy data from.
 * @param dst Pointer to the destination tensor to copy data to.
 * @return true if the copy operation succeeds, false otherwise.
 */
static bool ggml_backend_cann_cpy_tensor_async(
    ggml_backend_t backend_src, ggml_backend_t backend_dst,
    const ggml_tensor* src, ggml_tensor* dst) {
    GGML_ASSERT(ggml_backend_is_cann(backend_src) ||
                ggml_backend_is_cann(backend_dst));

    if (!ggml_backend_buffer_is_cann(src->buffer) ||
        !ggml_backend_buffer_is_cann(dst->buffer)) {
        return false;
    }

    ggml_backend_buffer_t buf_src =
        src->view_src ? src->view_src->buffer : src->buffer;
    ggml_backend_buffer_t buf_dst =
        dst->view_src ? dst->view_src->buffer : dst->buffer;

    ggml_backend_cann_context* cann_ctx_src =
        (ggml_backend_cann_context*)backend_src->context;
    ggml_backend_cann_context* cann_ctx_dst =
        (ggml_backend_cann_context*)backend_dst->context;

    size_t copy_size = ggml_nbytes(dst);
    if (backend_src != backend_dst) {
        ggml_backend_cann_buffer_context* buf_ctx_src =
            (ggml_backend_cann_buffer_context*)buf_src->context;
        ggml_backend_cann_buffer_context* buf_ctx_dst =
            (ggml_backend_cann_buffer_context*)buf_dst->context;

        GGML_ASSERT(cann_ctx_src->device == buf_ctx_src->device);
        GGML_ASSERT(cann_ctx_dst->device == buf_ctx_dst->device);

        int32_t canAccessPeer = 0;
        ACL_CHECK(aclrtDeviceCanAccessPeer(&canAccessPeer, cann_ctx_src->device,
                                           cann_ctx_dst->device));
        if (!canAccessPeer) {
            return false;
        }

        // peer access must be enabled in both directions for
        // aclrtMemcpyAsync between devices.
        ggml_cann_set_device(cann_ctx_dst->device);
        ACL_CHECK(aclrtDeviceEnablePeerAccess(cann_ctx_src->device, 0));
        ggml_cann_set_device(cann_ctx_src->device);
        ACL_CHECK(aclrtDeviceEnablePeerAccess(cann_ctx_dst->device, 0));

        ACL_CHECK(aclrtMemcpyAsync(dst->data, copy_size, src->data, copy_size,
                                   ACL_MEMCPY_DEVICE_TO_DEVICE,
                                   cann_ctx_src->stream()));

        // TODO: workaround: events don't work here, so synchronize the
        // stream instead.
        ACL_CHECK(aclrtSynchronizeStream(cann_ctx_src->stream()));
    } else {
        // src and dst are on the same backend
        ACL_CHECK(aclrtMemcpyAsync(dst->data, copy_size, src->data, copy_size,
                                   ACL_MEMCPY_DEVICE_TO_DEVICE,
                                   cann_ctx_dst->stream()));
    }

    return true;
}
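
// Usage sketch (illustrative only): device-to-device copies are requested
// through ggml_backend_tensor_copy_async from "ggml-backend.h"; when this
// backend returns false (e.g. no peer access), the caller is expected to fall
// back to staging the copy through host memory. `backend_src`, `backend_dst`,
// `src` and `dst` are placeholders for two CANN backends and two same-shape
// tensors that live on them.
//
//     ggml_backend_tensor_copy_async(backend_src, backend_dst, src, dst);
//     ggml_backend_synchronize(backend_dst);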
/**
 * @brief Synchronizes a CANN backend.
 *
 * This function synchronizes the specified CANN backend by waiting for all
 * operations in its associated stream to complete.
 *
 * @param backend Pointer to the CANN backend structure to synchronize.
 */
static void ggml_backend_cann_synchronize(ggml_backend_t backend) {
    ggml_backend_cann_context* cann_ctx =
        (ggml_backend_cann_context*)backend->context;

    ggml_cann_set_device(cann_ctx->device);

    ACL_CHECK(aclrtSynchronizeStream(cann_ctx->stream()));
}

/**
 * @brief Computes a computational graph using a CANN backend.
 *
 * This function computes the operations defined in the computational graph
 * using the specified CANN backend.
 *
 * @param backend Pointer to the CANN backend structure to use for computation.
 * @param cgraph Pointer to the computational graph structure containing nodes
 * representing operations to be computed.
 * @return enum ggml_status Returns GGML_STATUS_SUCCESS if computation
 * completes successfully, otherwise an appropriate error status.
 */
static enum ggml_status ggml_backend_cann_graph_compute(
    ggml_backend_t backend, ggml_cgraph* cgraph) {
    ggml_backend_cann_context* cann_ctx =
        (ggml_backend_cann_context*)backend->context;

    ggml_cann_set_device(cann_ctx->device);

    for (int i = 0; i < cgraph->n_nodes; i++) {
        ggml_tensor* node = cgraph->nodes[i];

        if (ggml_is_empty(node) || node->op == GGML_OP_NONE) {
            continue;
        }

        bool ok = ggml_cann_compute_forward(*cann_ctx, node);

        if (!ok) {
            GGML_LOG_ERROR("%s: error: op not supported %s (%s)\n", __func__,
                           node->name, ggml_op_name(node->op));
        }
        GGML_ASSERT(ok);
    }

    return GGML_STATUS_SUCCESS;
}
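
// End-to-end sketch (illustrative only, assuming "ggml.h", "ggml-alloc.h" and
// "ggml-backend.h"): build a one-node graph, allocate its tensors on the CANN
// device, and run it through the graph_compute entry point above. `backend` is
// a placeholder for a backend returned by ggml_backend_cann_init().
//
//     ggml_init_params params = {
//         /* .mem_size   = */ ggml_tensor_overhead() * 8 + ggml_graph_overhead(),
//         /* .mem_buffer = */ nullptr,
//         /* .no_alloc   = */ true,  // tensor data lives in the backend buffer
//     };
//     ggml_context * ctx = ggml_init(params);
//     ggml_tensor  * a   = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 16);
//     ggml_tensor  * b   = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 16);
//     ggml_tensor  * c   = ggml_add(ctx, a, b);
//     ggml_cgraph  * gf  = ggml_new_graph(ctx);
//     ggml_build_forward_expand(gf, c);
//     ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend);
//     // ... upload a and b with ggml_backend_tensor_set(), then:
//     ggml_backend_graph_compute(backend, gf);  // GGML_STATUS_SUCCESS on success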
/**
 * @brief Checks if the CANN backend supports a specific operation.
 *
 * This function checks whether the specified operation is supported by the
 * CANN backend.
 *
 * @param dev Pointer to the CANN device to check support for the operation.
 * @param op Pointer to the tensor representing the operation to check.
 * @return bool Returns true if the operation is supported by the backend,
 * otherwise false.
 */
static bool ggml_backend_cann_supports_op(ggml_backend_dev_t dev,
                                          const ggml_tensor* op) {
    switch (op->op) {
        case GGML_OP_UNARY:
            switch (ggml_get_unary_op(op)) {
                case GGML_UNARY_OP_GELU:
                case GGML_UNARY_OP_SILU:
                case GGML_UNARY_OP_RELU:
                case GGML_UNARY_OP_HARDSIGMOID:
                case GGML_UNARY_OP_HARDSWISH:
                case GGML_UNARY_OP_GELU_QUICK:
                case GGML_UNARY_OP_TANH:
                    return true;
                default:
                    return false;
            }
        case GGML_OP_MUL_MAT: {
            switch (op->src[0]->type) {
                case GGML_TYPE_Q8_0:
                    // The current group size must not be greater than k-1 in
                    // aclnnWeightQuantBatchMatmulV2GetWorkspaceSize().
                    if (op->src[0]->ne[0] <= QK8_0) {
                        return false;
                    }
                    // fall through
                case GGML_TYPE_F16:
                case GGML_TYPE_F32:
                case GGML_TYPE_Q4_0:
                    return true;
                default:
                    return false;
            }
        }
        case GGML_OP_MUL_MAT_ID:
            return false;
        // embedding
        case GGML_OP_GET_ROWS: {
            switch (op->src[0]->type) {
                case GGML_TYPE_F32:
                case GGML_TYPE_F16:
                case GGML_TYPE_Q4_0:
                case GGML_TYPE_Q8_0:
                    return true;
                default:
                    return false;
            }
        } break;
        case GGML_OP_CPY: {
            switch (op->type) {
                case GGML_TYPE_F32:
                case GGML_TYPE_F16:
                case GGML_TYPE_Q8_0:
                case GGML_TYPE_Q4_0:
                    return true;
                default:
                    return false;
            }
        }
        case GGML_OP_CONT: {
            // TODO: support GGML_TYPE_BF16
            switch (op->src[0]->type) {
                case GGML_TYPE_F32:
                case GGML_TYPE_F16:
                    return true;
                default:
                    return false;
            }
        }
        case GGML_OP_ROPE: {
            // TODO: with ops-test v == 1
            float * ext_factor = (float*)((int32_t*)op->op_params + 7);
            // TODO: support n_dims < ne0; currently only n_dims == ne0 works
            if (op->src[0]->ne[0] != op->op_params[1]) {
                return false;
            }
            // TODO: support ext_factor != 0
            if (*ext_factor != 0) {
                return false;
            }

            const int mode = ((const int32_t *) op->op_params)[2];
            if (mode & GGML_ROPE_TYPE_MROPE) {
                return false;
            }
            if (mode & GGML_ROPE_TYPE_VISION) {
                return false;
            }

            return true;
        }
        case GGML_OP_UPSCALE: {
            // aclnnUpsampleNearest2dGetWorkspaceSize does not support cases
            // where selfDimN[2]/outDimN[2] or selfDimC[3]/outDimC[3] differ.
            if (op->src[0]->ne[2] * op->ne[3] != op->src[0]->ne[3] * op->ne[2]) {
                return false;
            }
            return true;
        }
        case GGML_OP_IM2COL:
        case GGML_OP_CONCAT:
        case GGML_OP_DUP:
        case GGML_OP_REPEAT:
        case GGML_OP_NONE:
        case GGML_OP_RESHAPE:
        case GGML_OP_VIEW:
        case GGML_OP_PERMUTE:
        case GGML_OP_TRANSPOSE:
        case GGML_OP_NORM:
        case GGML_OP_ADD:
        case GGML_OP_MUL:
        case GGML_OP_DIV:
        case GGML_OP_RMS_NORM:
        case GGML_OP_SCALE:
        case GGML_OP_SQR:
        case GGML_OP_CLAMP:
        case GGML_OP_DIAG_MASK_INF:
        case GGML_OP_SOFT_MAX:
        case GGML_OP_POOL_2D:
        case GGML_OP_SUM_ROWS:
        case GGML_OP_ARGSORT:
        case GGML_OP_ACC:
        case GGML_OP_GROUP_NORM:
        case GGML_OP_PAD:
        case GGML_OP_ARANGE:
        case GGML_OP_TIMESTEP_EMBEDDING:
        case GGML_OP_LEAKY_RELU:
            return true;
        default:
            return false;
    }

    GGML_UNUSED(dev);
}
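
// Query sketch (illustrative only): the scheduler reaches this hook through
// the device API in "ggml-backend.h"; `dev` and `node` are placeholders for a
// CANN device and a tensor from a graph under construction.
//
//     if (!ggml_backend_dev_supports_op(dev, node)) {
//         // assign this node to another backend
//     }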
/**
 * @brief Checks if the backend buffer type is associated with the CANN backend.
 *
 * This function checks whether the provided backend buffer type is associated
 * with the CANN backend based on the comparison of its name retrieval function
 * pointer.
 *
 * @param buft Pointer to the backend buffer type to check.
 * @return bool Returns true if the buffer type is associated with the CANN
 * backend, otherwise false.
 */
static bool ggml_backend_buft_is_cann(ggml_backend_buffer_type_t buft) {
    return buft->iface.get_name == ggml_backend_cann_buffer_type_name;
}

/**
 * @brief Determines if a tensor operation should be offloaded to the CANN
 * backend.
 *
 * This function checks if a given tensor operation should be offloaded to the
 * CANN backend based on the operation type and the size of the tensor. It
 * returns true if the second dimension (ne[1]) of the tensor is greater than or
 * equal to the minimum batch size and the operation is not GGML_OP_GET_ROWS.
 *
 * @param dev Pointer to the CANN device.
 * @param op Pointer to the tensor operation to check.
 * @return bool Returns true if the operation should be offloaded, otherwise
 * false.
 */
static bool ggml_backend_cann_offload_op(ggml_backend_dev_t dev,
                                         const ggml_tensor* op) {
    const int min_batch_size = 32;
    GGML_UNUSED(dev);

    return op->ne[1] >= min_batch_size && op->op != GGML_OP_GET_ROWS;
}

/**
 * @brief Records an event on the CANN backend stream.
 *
 * This function records the given event on the ACL runtime stream associated
 * with the backend context.
 *
 * @param backend Pointer to the backend structure whose stream the event is
 * recorded on.
 * @param event Pointer to the event structure to be recorded.
 */
static void ggml_backend_cann_event_record(ggml_backend_t backend, ggml_backend_event_t event) {
    ggml_backend_cann_context* cann_ctx =
        (ggml_backend_cann_context*)backend->context;
    ACL_CHECK(aclrtRecordEvent((aclrtEvent)event->context, cann_ctx->stream()));
}

/**
 * @brief Waits for a recorded event to complete on the CANN backend stream.
 *
 * This function makes the given backend wait for the event to complete on its
 * ACL runtime stream.
 *
 * @param backend Pointer to the backend structure.
 * @param event Pointer to the event structure that the backend needs to wait
 * for.
 */
static void ggml_backend_cann_event_wait(ggml_backend_t backend,
                                         ggml_backend_event_t event) {
    ggml_backend_cann_context* cann_ctx =
        (ggml_backend_cann_context*)backend->context;
    if (ggml_backend_is_cann(backend)) {
        ACL_CHECK(aclrtStreamWaitEvent(cann_ctx->stream(),
                                       (aclrtEvent)event->context));
    } else {
        GGML_ABORT("fatal error");
    }
}
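
// Synchronization sketch (illustrative only): the two hooks above back the
// event API in "ggml-backend.h", which lets one backend's stream wait on work
// submitted to another without blocking the host. `dev_cann`,
// `backend_producer` and `backend_consumer` are placeholders.
//
//     ggml_backend_event_t ev = ggml_backend_event_new(dev_cann);
//     ggml_backend_event_record(ev, backend_producer);  // -> aclrtRecordEvent
//     ggml_backend_event_wait(backend_consumer, ev);    // -> aclrtStreamWaitEvent
//     ggml_backend_event_free(ev);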
/**
 * @brief Structure defining the interface for the CANN backend.
 *
 * This structure contains function pointers for various operations
 * supported by the CANN backend, including name retrieval, memory
 * management, tensor operations, synchronization, and event handling.
 */
static const ggml_backend_i ggml_backend_cann_interface = {
    /* .get_name           = */ ggml_backend_cann_name,
    /* .free               = */ ggml_backend_cann_free,
    /* .set_tensor_async   = */ ggml_backend_cann_set_tensor_async,
    /* .get_tensor_async   = */ ggml_backend_cann_get_tensor_async,
    /* .cpy_tensor_async   = */ ggml_backend_cann_cpy_tensor_async,
    /* .synchronize        = */ ggml_backend_cann_synchronize,
    /* .graph_plan_create  = */ NULL,
    /* .graph_plan_free    = */ NULL,
    /* .graph_plan_update  = */ NULL,
    /* .graph_plan_compute = */ NULL,
    /* .graph_compute      = */ ggml_backend_cann_graph_compute,
    /* .event_record       = */ ggml_backend_cann_event_record,
    /* .event_wait         = */ ggml_backend_cann_event_wait,
};

/**
 * @brief Return the hardcoded GUID for the CANN backend.
 *
 * This function returns a static GUID which uniquely identifies the CANN
 * backend.
 *
 * @return A pointer to the static GUID.
 */
static ggml_guid_t ggml_backend_cann_guid() {
    static ggml_guid guid = {0xa1, 0x94, 0xaf, 0xac, 0xbd, 0x4f, 0x47, 0x34,
                             0xbe, 0x1a, 0x9e, 0x71, 0x1f, 0x9e, 0xed, 0x64};
    return &guid;
}

// backend device
struct ggml_backend_cann_device_context {
    int device;
    std::string name;
    std::string description;
};

static const char * ggml_backend_cann_device_get_name(ggml_backend_dev_t dev) {
    ggml_backend_cann_device_context * ctx = (ggml_backend_cann_device_context *)dev->context;
    return ctx->name.c_str();
}

static const char* ggml_backend_cann_device_get_description(ggml_backend_dev_t dev) {
    ggml_backend_cann_device_context * ctx = (ggml_backend_cann_device_context *)dev->context;
    return ctx->description.c_str();
}

static void ggml_backend_cann_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) {
    ggml_backend_cann_device_context * ctx = (ggml_backend_cann_device_context *)dev->context;
    ggml_backend_cann_get_device_memory(ctx->device, free, total);
}

static enum ggml_backend_dev_type ggml_backend_cann_device_get_type(ggml_backend_dev_t dev) {
    GGML_UNUSED(dev);
    return GGML_BACKEND_DEVICE_TYPE_GPU;
}

static void ggml_backend_cann_device_get_props(ggml_backend_dev_t dev, ggml_backend_dev_props * props) {
    props->name        = ggml_backend_cann_device_get_name(dev);
    props->description = ggml_backend_cann_device_get_description(dev);
    props->type        = ggml_backend_cann_device_get_type(dev);
    ggml_backend_cann_device_get_memory(dev, &props->memory_free, &props->memory_total);

    bool host_buffer = getenv("GGML_CANN_NO_PINNED") == nullptr;

    props->caps = {
        /* .async                = */ false,
        /* .host_buffer          = */ host_buffer,
        /* .buffer_from_host_ptr = */ false,
        /* .events               = */ true,
    };
}
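
// Query sketch (illustrative only): callers normally read these properties
// through ggml_backend_dev_get_props from "ggml-backend.h"; `dev` is a
// placeholder for a CANN device handle.
//
//     ggml_backend_dev_props props;
//     ggml_backend_dev_get_props(dev, &props);
//     printf("%s: %zu of %zu bytes free\n", props.name,
//            props.memory_free, props.memory_total);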
static ggml_backend_t ggml_backend_cann_device_init(ggml_backend_dev_t dev, const char * params) {
    GGML_UNUSED(params);
    ggml_backend_cann_device_context * ctx = (ggml_backend_cann_device_context *)dev->context;
    return ggml_backend_cann_init(ctx->device);
}

/**
 * @brief Checks if the CANN backend supports a specific backend buffer type.
 *
 * This function determines whether the CANN backend supports the given backend
 * buffer type by comparing the device context of the backend and buffer type.
 * It returns true if the backend context and the buffer type context refer to
 * the same device.
 *
 * @param dev Pointer to the CANN device.
 * @param buft Pointer to the backend buffer type to check.
 * @return bool Returns true if the CANN backend supports the buffer type,
 * otherwise false.
 */
static bool ggml_backend_cann_supports_buft(
    ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) {
    if (ggml_backend_buft_is_cann(buft)) {
        ggml_backend_cann_device_context * dev_ctx = (ggml_backend_cann_device_context *)dev->context;
        ggml_backend_cann_buffer_type_context * buft_ctx =
            (ggml_backend_cann_buffer_type_context *)buft->context;
        return buft_ctx->device == dev_ctx->device;
    }
    return false;
}

static ggml_backend_buffer_type_t ggml_backend_cann_device_get_buffer_type(ggml_backend_dev_t dev) {
    ggml_backend_cann_device_context * ctx = (ggml_backend_cann_device_context *)dev->context;
    return ggml_backend_cann_buffer_type(ctx->device);
}

static ggml_backend_buffer_type_t ggml_backend_cann_device_get_host_buffer_type(ggml_backend_dev_t dev) {
    GGML_UNUSED(dev);
    return ggml_backend_cann_host_buffer_type();
}

/**
 * @brief Creates a new event for the CANN backend device.
 *
 * This function initializes a new event for the CANN backend by setting the
 * device and creating an ACL runtime event. The created event is then wrapped
 * in a ggml_backend_event structure and returned.
 *
 * @param dev Pointer to the CANN device.
 * @return ggml_backend_event_t Returns a pointer to the new event structure.
 */
static ggml_backend_event_t ggml_backend_cann_device_event_new(
    ggml_backend_dev_t dev) {
    ggml_backend_cann_device_context * dev_ctx = (ggml_backend_cann_device_context *)dev->context;

    ggml_cann_set_device(dev_ctx->device);

    aclrtEvent event;
    ACL_CHECK(aclrtCreateEvent(&event));

    return new ggml_backend_event{
        /* .device  = */ ggml_backend_reg_dev_get(ggml_backend_cann_reg(), dev_ctx->device),
        /* .context = */ event,
    };
}

/**
 * @brief Frees a CANN backend event.
 *
 * This function destroys the ACL runtime event associated with the given CANN
 * backend event and then deletes the event structure itself.
 *
 * @param dev Pointer to the device (unused).
 * @param event Pointer to the event structure to be freed.
 */
static void ggml_backend_cann_device_event_free(ggml_backend_dev_t dev, ggml_backend_event_t event) {
    ACL_CHECK(aclrtDestroyEvent((aclrtEvent)event->context));

    delete event;
    GGML_UNUSED(dev);
}

/**
 * @brief Synchronizes the given event on the CANN backend.
 *
 * This function waits for the specified event to complete on the ACL runtime.
 *
 * @param dev Pointer to the device (unused).
 * @param event Pointer to the event structure to be synchronized.
 */
static void ggml_backend_cann_device_event_synchronize(ggml_backend_dev_t dev, ggml_backend_event_t event) {
    ACL_CHECK(aclrtSynchronizeEvent((aclrtEvent)event->context));

    GGML_UNUSED(dev);
}

static const ggml_backend_device_i ggml_backend_cann_device_interface = {
    /* .get_name             = */ ggml_backend_cann_device_get_name,
    /* .get_description      = */ ggml_backend_cann_device_get_description,
    /* .get_memory           = */ ggml_backend_cann_device_get_memory,
    /* .get_type             = */ ggml_backend_cann_device_get_type,
    /* .get_props            = */ ggml_backend_cann_device_get_props,
    /* .init_backend         = */ ggml_backend_cann_device_init, // called for every card
    /* .get_buffer_type      = */ ggml_backend_cann_device_get_buffer_type,
    /* .get_host_buffer_type = */ ggml_backend_cann_device_get_host_buffer_type,
    /* .buffer_from_host_ptr = */ NULL, // not supported for CANN
    /* .supports_op          = */ ggml_backend_cann_supports_op,
    /* .supports_buft        = */ ggml_backend_cann_supports_buft,
    /* .offload_op           = */ ggml_backend_cann_offload_op,
    /* .event_new            = */ ggml_backend_cann_device_event_new,
    /* .event_free           = */ ggml_backend_cann_device_event_free,
    /* .event_synchronize    = */ ggml_backend_cann_device_event_synchronize,
};

// backend reg
struct ggml_backend_cann_reg_context {
    std::vector<ggml_backend_dev_t> devices;
};

static const char * ggml_backend_cann_reg_get_name(ggml_backend_reg_t reg) {
    GGML_UNUSED(reg);
    return GGML_CANN_NAME;
}

static size_t ggml_backend_cann_reg_get_device_count(ggml_backend_reg_t reg) {
    ggml_backend_cann_reg_context * ctx = (ggml_backend_cann_reg_context *)reg->context;
    return ctx->devices.size();
}

static ggml_backend_dev_t ggml_backend_cann_reg_get_device(ggml_backend_reg_t reg, size_t index) {
    ggml_backend_cann_reg_context * ctx = (ggml_backend_cann_reg_context *)reg->context;
    GGML_ASSERT(index < ctx->devices.size());
    return ctx->devices[index];
}

static void * ggml_backend_cann_reg_get_proc_address(ggml_backend_reg_t reg, const char * name) {
    GGML_UNUSED(reg);
    GGML_UNUSED(name);
    // reserved for future use
    return nullptr;
}

static const ggml_backend_reg_i ggml_backend_cann_reg_interface = {
    /* .get_name         = */ ggml_backend_cann_reg_get_name,
    /* .get_device_count = */ ggml_backend_cann_reg_get_device_count,
    /* .get_device       = */ ggml_backend_cann_reg_get_device,
    /* .get_proc_address = */ ggml_backend_cann_reg_get_proc_address,
};

// backend registry, called only once for the CANN backend
ggml_backend_reg_t ggml_backend_cann_reg() {
    static ggml_backend_reg reg;
    static bool initialized = false;

    {
        static std::mutex mutex;
        std::lock_guard<std::mutex> lock(mutex);
        if (!initialized) {
            aclInit(nullptr);
            ggml_backend_cann_reg_context * ctx = new ggml_backend_cann_reg_context;

            for (int i = 0; i < ggml_cann_info().device_count; i++) {
                ggml_backend_cann_device_context* dev_ctx = new ggml_backend_cann_device_context();
                dev_ctx->description = aclrtGetSocName();
                dev_ctx->device = i;
                dev_ctx->name = GGML_CANN_NAME + std::to_string(i);
                ggml_cann_set_device(i);
                ggml_backend_dev_t dev = new ggml_backend_device {
                    /* .iface   = */ ggml_backend_cann_device_interface,
                    /* .reg     = */ &reg,
                    /* .context = */ dev_ctx
                };
                ctx->devices.push_back(dev);
            }

            reg = ggml_backend_reg {
                /* .api_version = */ GGML_BACKEND_API_VERSION,
                /* .iface       = */ ggml_backend_cann_reg_interface,
                /* .context     = */ ctx
            };
        }

        initialized = true;
    }

    return &reg;
}
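
// Enumeration sketch (illustrative only): once registered, the devices can be
// listed through the generic registry API in "ggml-backend.h".
//
//     ggml_backend_reg_t reg = ggml_backend_cann_reg();
//     for (size_t i = 0; i < ggml_backend_reg_dev_count(reg); i++) {
//         ggml_backend_dev_t dev = ggml_backend_reg_dev_get(reg, i);
//         printf("%s: %s\n", ggml_backend_dev_name(dev),
//                ggml_backend_dev_description(dev));  // e.g. "CANN0: <soc name>"
//     }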
ggml_backend_t ggml_backend_cann_init(int32_t device) {
    aclInit(nullptr);
    if (device < 0 || device >= ggml_backend_cann_get_device_count()) {
        GGML_LOG_ERROR("%s: error: invalid device %d\n", __func__, device);
        return nullptr;
    }

    ggml_backend_cann_context* ctx = new ggml_backend_cann_context(device);
    if (ctx == nullptr) {
        GGML_LOG_ERROR("%s: error: failed to allocate context\n", __func__);
        return nullptr;
    }
    ggml_cann_set_device(ctx->device);
    ggml_backend_t cann_backend =
        new ggml_backend{/* .guid      = */ ggml_backend_cann_guid(),
                         /* .interface = */ ggml_backend_cann_interface,
                         /* .device    = */ ggml_backend_reg_dev_get(ggml_backend_cann_reg(), device),
                         /* .context   = */ ctx};

    return cann_backend;
}
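
// Lifecycle sketch (illustrative only): the typical pattern is one backend per
// device, released through the generic free entry point, which ends up in
// ggml_backend_cann_free above.
//
//     ggml_backend_t backend = ggml_backend_cann_init(0);
//     if (backend != nullptr) {
//         // ... allocate buffers, compute graphs ...
//         ggml_backend_free(backend);  // synchronizes and resets the device
//     }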
bool ggml_backend_is_cann(ggml_backend_t backend) {
    return backend != NULL &&
           ggml_guid_matches(backend->guid, ggml_backend_cann_guid());
}

int32_t ggml_backend_cann_get_device_count() {
    return ggml_cann_info().device_count;
}

void ggml_backend_cann_get_device_description(
    int32_t device, char* description, size_t description_size) {
    ggml_cann_set_device(device);
    const char* soc_name = aclrtGetSocName();
    snprintf(description, description_size, "%s", soc_name);
}

void ggml_backend_cann_get_device_memory(int32_t device, size_t* free,
                                         size_t* total) {
    ggml_cann_set_device(device);
    ACL_CHECK(aclrtGetMemInfo(ACL_HBM_MEM, free, total));
}
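
// Query sketch (illustrative only): report per-device HBM usage with the two
// helpers above.
//
//     size_t free = 0, total = 0;
//     for (int32_t i = 0; i < ggml_backend_cann_get_device_count(); i++) {
//         ggml_backend_cann_get_device_memory(i, &free, &total);
//         printf("device %d: %zu of %zu bytes free\n", i, free, total);
//     }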
GGML_BACKEND_DL_IMPL(ggml_backend_cann_reg)