ggml-cann.cpp

/*
 * Copyright (c) 2023-2024 The ggml authors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "ggml-cann.h"

#include <acl/acl.h>
#include <stdarg.h>
#include <aclnnop/aclnn_trans_matmul_weight.h>

#include <algorithm>  // for std::transform in get_env()
#include <cmath>
#include <cstdio>
#include <cstring>
#include <mutex>
#include <queue>
#include <chrono>
#include <unordered_set>
#include <optional>

#include "ggml-impl.h"
#include "ggml-backend-impl.h"
#include "ggml-cann/aclnn_ops.h"
#include "ggml-cann/common.h"
#include "ggml.h"

#define GGML_COMMON_DECL_C
#include "ggml-common.h"

#define GGML_CANN_NAME "CANN"
/**
 * @brief Handles CANN errors by printing an error message and aborting.
 *
 * @param stmt The statement that caused the error.
 * @param func The function in which the error occurred.
 * @param file The file in which the error occurred.
 * @param line The line number where the error occurred.
 * @param msg  The error message.
 */
[[noreturn]] void ggml_cann_error(const char* stmt, const char* func,
                                  const char* file, int line, const char* msg) {
    int32_t id = -1;
    aclrtGetDevice(&id);

    GGML_LOG_ERROR("CANN error: %s\n", msg);
    GGML_LOG_ERROR("  current device: %d, in function %s at %s:%d\n", id, func,
                   file, line);
    GGML_LOG_ERROR("  %s\n", stmt);
    // abort with GGML_ASSERT to get a stack trace
    GGML_ABORT("CANN error");
}
/**
 * @brief Sets the device to be used by CANN.
 *
 * @param device The device ID to set.
 */
void ggml_cann_set_device(const int32_t device) {
    // TODO: uncomment these lines once the empty-context issue is fixed.
    // int current_device;
    // ACL_CHECK(aclrtGetDevice(&current_device));
    // if (device == current_device) {
    //     return;
    // }
    ACL_CHECK(aclrtSetDevice(device));
}
/**
 * @brief Retrieves the current device ID.
 *
 * @return The current device ID.
 */
int32_t ggml_cann_get_device() {
    int32_t id;
    ACL_CHECK(aclrtGetDevice(&id));
    return id;
}
/**
 * @brief Get the value of the specified environment variable.
 *
 * @param name Name of the environment variable to read.
 * @return The value, lower-cased, as a std::string if the variable is set;
 *         std::nullopt otherwise.
 */
std::optional<std::string> get_env(const std::string& name) {
    const char* val = std::getenv(name.c_str());
    if (!val) return std::nullopt;
    std::string res = std::string(val);
    std::transform(res.begin(), res.end(), res.begin(), ::tolower);
    return res;
}
/**
 * @brief Check whether a (lower-cased) environment variable value represents
 *        an enabled/true setting.
 */
bool parse_bool(const std::string& value) {
    std::unordered_set<std::string> valid_values = {"on", "1", "yes", "y", "enable", "true"};
    return valid_values.find(value) != valid_values.end();
}
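
// A minimal usage sketch (comment only, not part of the backend API): the
// buffer pools below combine the two helpers above to read boolean feature
// flags. GGML_CANN_DISABLE_BUF_POOL_CLEAN is the real variable they read;
// any value outside the accepted set (case-insensitive) parses as false.
//
//     bool disable_clean =
//         parse_bool(get_env("GGML_CANN_DISABLE_BUF_POOL_CLEAN").value_or(""));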
/**
 * @brief Initialize the CANN device information.
 *
 * This function initializes the CANN device information by obtaining the
 * device count and setting the memory allocation granularity for each device.
 *
 * @return A structure containing the device information.
 */
static ggml_cann_device_info ggml_cann_init() {
    ggml_cann_device_info info = {};

    aclError err = aclrtGetDeviceCount((uint32_t*)&info.device_count);
    if (err != ACL_SUCCESS) {
        GGML_LOG_ERROR("%s: failed to initialize CANN: %s\n",
                       __func__, aclGetRecentErrMsg());
        return info;
    }

    GGML_ASSERT(info.device_count <= GGML_CANN_MAX_DEVICES);

    for (int id = 0; id < info.device_count; ++id) {
        aclrtPhysicalMemProp prop = {};
        prop.handleType = ACL_MEM_HANDLE_TYPE_NONE;
        prop.allocationType = ACL_MEM_ALLOCATION_TYPE_PINNED;
        prop.memAttr = ACL_HBM_MEM_HUGE;
        prop.location.type = ACL_MEM_LOCATION_TYPE_DEVICE;
        prop.location.id = id;
        prop.reserve = 0;
        err = aclrtMemGetAllocationGranularity(
            &prop, ACL_RT_MEM_ALLOC_GRANULARITY_RECOMMENDED,
            &info.devices[id].vmm_granularity);
        info.devices[id].vmm = err == ACL_SUCCESS;

        size_t free, total;
        ggml_backend_cann_get_device_memory(id, &free, &total);
        info.devices[id].total_vram = free;
    }

    // TODO: add more device info later.
    return info;
}
/**
 * @brief Retrieve the CANN device information.
 *
 * This function returns a reference to a structure containing the CANN device
 * information. The device information is initialized once and reused on
 * subsequent calls.
 *
 * @return A reference to the structure containing the device information.
 */
const ggml_cann_device_info& ggml_cann_info() {
    static ggml_cann_device_info info = ggml_cann_init();
    return info;
}
//#define DEBUG_CANN_MALLOC
/**
 * @brief A pool of CANN buffers (priority segment buffer).
 *
 * This class manages a pool of CANN buffers for a specific device.
 */
struct ggml_cann_pool_buf_prio : public ggml_cann_pool {
    /**
     * @brief The maximum reuse margin for a buffer.
     */
    static const size_t max_reuse_margin = 1ull << 22;  // 4MB

    /**
     * @brief The minimum free margin for a buffer.
     */
    static const size_t min_free_margin = 1ull << 20;  // 1MB

    /**
     * @brief The alignment for buffer allocation.
     */
    static const size_t alignment = 128;

    /**
     * @brief The device ID associated with this buffer pool.
     */
    int device;

    /**
     * @brief Whether to disable cleaning of stale buffers during allocation.
     */
    bool disable_clean = false;

    /**
     * @brief Structure representing a CANN buffer.
     */
    struct ggml_cann_buffer {
        void* ptr = nullptr;  ///< Pointer to the buffer.
        size_t size = 0;      ///< Size of the buffer.
        std::chrono::steady_clock::time_point last_used;  ///< Last used time.

        bool operator>(const ggml_cann_buffer& other) const {
            return size > other.size;
        }
    };

    /**
     * @brief All buffers owned by the pool (pointer -> size), plus a min-heap
     *        of the currently free buffers ordered by size.
     */
    std::unordered_map<void*, size_t> buffer_pool;
    std::priority_queue<ggml_cann_buffer,
                        std::vector<ggml_cann_buffer>,
                        std::greater<>> free_buffers;

    /**
     * @brief Total size of all buffers in the pool.
     */
    size_t pool_size = 0;

    /**
     * @brief Constructor to initialize the buffer pool for a specific device.
     *
     * @param device The device ID to associate with this buffer pool.
     */
    explicit ggml_cann_pool_buf_prio(int device) : device(device) {
        disable_clean = parse_bool(get_env("GGML_CANN_DISABLE_BUF_POOL_CLEAN").value_or(""));
    }
    /**
     * @brief Destructor to free all buffers in the pool.
     */
    ~ggml_cann_pool_buf_prio() {
        ggml_cann_set_device(device);
        for (auto& [b_ptr, b_size] : buffer_pool) {
            aclrtFree(b_ptr);
            pool_size -= b_size;
        }
        buffer_pool.clear();
        GGML_ASSERT(pool_size == 0);
    }
    /**
     * @brief Allocate a buffer of the given size.
     *
     * @param size The size of the buffer to allocate.
     * @param actual_size A pointer to a variable to receive the actual size of
     * the allocated buffer.
     * @return A pointer to the allocated buffer.
     */
    void* alloc(size_t size, size_t* actual_size) override {
        size = GGML_PAD(size, alignment);
        if (size == 0) {
            size = alignment;
        }

        void* ptr = nullptr;
        auto now = std::chrono::steady_clock::now();

        std::vector<ggml_cann_buffer> free_buffers_rest;
        free_buffers_rest.reserve(free_buffers.size());
        while (!free_buffers.empty()) {
            auto b = free_buffers.top();
            free_buffers.pop();

            if (b.size >= size) {
                // reuse the buffer if it is large enough
                const size_t margin = b.size - size;
                if (margin <= max_reuse_margin) {
                    *actual_size = b.size;
                    ptr = b.ptr;
#ifdef DEBUG_CANN_MALLOC
                    GGML_LOG_INFO(
                        "cann pool[%d]: reused %p, "
                        "pool_size = %5u MB, "
                        "size = %5u MB, "
                        "margin = %5u MB\n",
                        device, b.ptr,
                        (uint32_t)(GGML_PAD(pool_size, 1048576) / 1048576),
                        (uint32_t)(GGML_PAD(size, 1048576) / 1048576),
                        (uint32_t)(GGML_PAD(margin, 1048576) / 1048576));
#endif
                    break;
                }
            }

            bool should_clean = !disable_clean &&
                                b.size > min_free_margin &&
                                std::chrono::duration_cast<std::chrono::milliseconds>(now - b.last_used).count() > 100;
            if (should_clean) {
                // free the buffer if it is stale: large enough and unused for a while
                ACL_CHECK(aclrtFree(b.ptr));
                pool_size -= b.size;
                buffer_pool.erase(b.ptr);
#ifdef DEBUG_CANN_MALLOC
                GGML_LOG_INFO(
                    "cann pool[%d]: clean %p, "
                    "pool_size = %5u MB, "
                    "size = %5u MB\n",
                    device, b.ptr,
                    (uint32_t)(GGML_PAD(pool_size, 1048576) / 1048576),
                    (uint32_t)(GGML_PAD(b.size, 1048576) / 1048576));
#endif
                continue;
            }
            free_buffers_rest.push_back(b);
        }
        for (ggml_cann_buffer& b : free_buffers_rest) {
            free_buffers.push(std::move(b));
        }

#ifdef DEBUG_CANN_MALLOC
        GGML_LOG_INFO("cann pool[%d] free pool_size = %5u MB\n\n", device, (uint32_t)(GGML_PAD(pool_size, 1048576) / 1048576));
#endif
        if (ptr != nullptr) {
            return ptr;
        }

        // allocate a new buffer if no buffer can be reused
        ggml_cann_set_device(device);
        ACL_CHECK(aclrtMalloc(&ptr, size, ACL_MEM_MALLOC_HUGE_FIRST));
        *actual_size = size;
        pool_size += size;
#ifdef DEBUG_CANN_MALLOC
        GGML_LOG_INFO(
            "cann pool[%d]: allocate %p, "
            "pool_size = %5u MB, "
            "size = %5u MB\n",
            device, ptr, (uint32_t)(GGML_PAD(pool_size, 1048576) / 1048576),
            (uint32_t)(GGML_PAD(size, 1048576) / 1048576));
#endif
        buffer_pool.emplace(ptr, size);
        return ptr;
    }
    /**
     * @brief Free a buffer and return it to the pool.
     *
     * @param ptr Pointer to the buffer to free.
     * @param size Size of the buffer to free.
     */
    void free(void* ptr, size_t size) override {
        GGML_UNUSED(size);
        auto it = buffer_pool.find(ptr);
        if (it == buffer_pool.end()) {
            GGML_ABORT("cann pool[%d]: buffer %p not found in pool\n", device, ptr);
        }

        auto now = std::chrono::steady_clock::now();
        free_buffers.emplace(ggml_cann_buffer{ptr, it->second, now});
#ifdef DEBUG_CANN_MALLOC
        GGML_LOG_INFO(
            "cann pool[%d]: return %p, "
            "pool_size = %5u MB\n",
            device, ptr,
            (uint32_t)(GGML_PAD(pool_size, 1048576) / 1048576));
#endif
    }
};
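
// A minimal sketch of the ggml_cann_pool contract implemented above
// (hypothetical caller code; `pool` stands for any pool instance):
//
//     size_t actual = 0;
//     void*  buf    = pool.alloc(1 << 20, &actual);  // >= 1 MB, 128-byte aligned
//     // ... use buf as device memory ...
//     pool.free(buf, actual);  // buffer goes back to the free list for reuse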
/**
 * @brief A pool of CANN buffers (segment buffer).
 *
 * This class manages a pool of CANN buffers for a specific device.
 */
struct ggml_cann_pool_buf : public ggml_cann_pool {
    /**
     * @brief The maximum reuse margin for a buffer.
     */
    static const size_t max_reuse_margin = 1ull << 22;  // 4MB

    /**
     * @brief The minimum free margin for a buffer.
     */
    static const size_t min_free_margin = 1ull << 20;  // 1MB

    /**
     * @brief The alignment for buffer allocation.
     */
    static const size_t alignment = 128;

    /**
     * @brief The maximum number of buffers in the pool.
     */
    static const int MAX_BUFFERS = 256;

    /**
     * @brief The device ID associated with this buffer pool.
     */
    int device;

    /**
     * @brief Whether to disable cleaning of stale buffers during allocation.
     */
    bool disable_clean = false;

    /**
     * @brief Structure representing a CANN buffer.
     */
    struct ggml_cann_buffer {
        void* ptr = nullptr;  ///< Pointer to the buffer memory.
        size_t size = 0;      ///< Size of the buffer.
        bool used = false;    ///< Whether the buffer is currently in use.
        std::chrono::steady_clock::time_point last_used;  ///< Last used time.
    };

    /**
     * @brief Array of CANN buffers in the pool.
     */
    ggml_cann_buffer buffer_pool[MAX_BUFFERS] = {};

    /**
     * @brief Total size of all buffers in the pool.
     */
    size_t pool_size = 0;

    /**
     * @brief Constructor to initialize the buffer pool for a specific device.
     *
     * @param device The device ID to associate with this buffer pool.
     */
    explicit ggml_cann_pool_buf(int device) : device(device) {
        disable_clean = parse_bool(get_env("GGML_CANN_DISABLE_BUF_POOL_CLEAN").value_or(""));
    }
    /**
     * @brief Destructor to free all buffers in the pool.
     */
    ~ggml_cann_pool_buf() {
        ggml_cann_set_device(device);
        for (int i = 0; i < MAX_BUFFERS; ++i) {
            ggml_cann_buffer& b = buffer_pool[i];
            if (b.ptr != nullptr) {
                aclrtFree(b.ptr);
                pool_size -= b.size;
            }
        }
        GGML_ASSERT(pool_size == 0);
    }
    /**
     * @brief Allocate a buffer of the given size.
     *
     * @param size The size of the buffer to allocate.
     * @param actual_size A pointer to a variable to receive the actual size of
     * the allocated buffer.
     * @return A pointer to the allocated buffer.
     */
    void* alloc(size_t size, size_t* actual_size) override {
        size = GGML_PAD(size, alignment);
        if (size == 0) {
            size = alignment;
        }

        void* ptr = nullptr;
        auto now = std::chrono::steady_clock::now();

        int i = 0;
        for (; i < MAX_BUFFERS; ++i) {
            ggml_cann_buffer& b = buffer_pool[i];
            if (b.ptr == nullptr) {
                break;
            }
            if (b.used) {
                continue;
            }
            if (b.size >= size) {
                // reuse the buffer if it is large enough
                const size_t margin = b.size - size;
                if (margin <= max_reuse_margin) {
                    *actual_size = b.size;
                    b.used = true;
                    ptr = b.ptr;
#ifdef DEBUG_CANN_MALLOC
                    GGML_LOG_INFO(
                        "cann pool[%d]: reused %p, "
                        "pool_size = %5u MB, "
                        "size = %5u MB, "
                        "margin = %5u MB\n",
                        device, b.ptr,
                        (uint32_t)(GGML_PAD(pool_size, 1048576) / 1048576),
                        (uint32_t)(GGML_PAD(size, 1048576) / 1048576),
                        (uint32_t)(GGML_PAD(margin, 1048576) / 1048576));
#endif
                    break;
                }
            }

            bool should_clean = !disable_clean &&
                                b.size > min_free_margin &&
                                std::chrono::duration_cast<std::chrono::milliseconds>(now - b.last_used).count() > 100;
            if (should_clean) {
                // free the buffer if it is stale: large enough and unused for a while
                ACL_CHECK(aclrtFree(b.ptr));
                pool_size -= b.size;
#ifdef DEBUG_CANN_MALLOC
                GGML_LOG_INFO(
                    "cann pool[%d]: clean %p, "
                    "pool_size = %5u MB, "
                    "size = %5u MB\n",
                    device, b.ptr,
                    (uint32_t)(GGML_PAD(pool_size, 1048576) / 1048576),
                    (uint32_t)(GGML_PAD(b.size, 1048576) / 1048576));
#endif
                b.ptr = nullptr;
            }
        }
        if (ptr != nullptr) {
            return ptr;
        }

        if (i < MAX_BUFFERS) {
            // allocate a new buffer if no buffer can be reused
            ggml_cann_buffer& b = buffer_pool[i];
            ggml_cann_set_device(device);
            ACL_CHECK(aclrtMalloc(&b.ptr, size, ACL_MEM_MALLOC_HUGE_FIRST));
            pool_size += size;
            *actual_size = size;
            b.size = size;
            b.used = true;
            if (i >= MAX_BUFFERS - 8) {
                GGML_LOG_WARN("cann pool[%d]: slots almost full\n", device);
            }
#ifdef DEBUG_CANN_MALLOC
            GGML_LOG_INFO(
                "cann pool[%d]: allocate %p, "
                "pool_size = %5u MB, "
                "size = %5u MB\n",
                device, b.ptr,
                (uint32_t)(GGML_PAD(pool_size, 1048576) / 1048576),
                (uint32_t)(GGML_PAD(b.size, 1048576) / 1048576));
#endif
            return b.ptr;
        }

        GGML_ABORT("cann pool[%d]: slots full\n", device);
    }
    /**
     * @brief Free a buffer and return it to the pool.
     *
     * @param ptr Pointer to the buffer to free.
     * @param size Size of the buffer to free.
     */
    void free(void* ptr, size_t size) override {
        GGML_UNUSED(size);
        for (int i = 0; i < MAX_BUFFERS; ++i) {
            ggml_cann_buffer& b = buffer_pool[i];
            if (b.ptr != ptr) {
                continue;
            }
            b.used = false;
            b.last_used = std::chrono::steady_clock::now();
#ifdef DEBUG_CANN_MALLOC
            GGML_LOG_INFO(
                "cann pool[%d]: return %p, "
                "pool_size = %5u MB\n",
                device, b.ptr,
                (uint32_t)(GGML_PAD(pool_size, 1048576) / 1048576));
#endif
            return;
        }
        GGML_ABORT("cann pool[%d]: buffer %p not found in pool\n", device, ptr);
    }
};
/**
 * @brief A pool of CANN buffers with virtual memory.
 *
 * This class manages a pool of CANN buffers with virtual memory for a specific
 * device.
 */
struct ggml_cann_pool_vmm : public ggml_cann_pool {
    /**
     * @brief The maximum size of the virtual memory pool (the device's total
     *        VRAM as reported at initialization).
     */
    size_t max_size;

    /**
     * @brief The device ID associated with this buffer pool.
     */
    int device;

    /**
     * @brief Pointer to the start of the virtual memory pool.
     */
    void* pool_addr = 0;

    /**
     * @brief Amount of virtual memory used in the pool.
     */
    size_t pool_used = 0;

    /**
     * @brief Total size of the virtual memory pool.
     */
    size_t pool_size = 0;

    /**
     * @brief Allocation granularity for the virtual memory pool.
     */
    size_t granularity;

    /**
     * @brief Handles for the physical memory allocated.
     */
    std::vector<aclrtDrvMemHandle> handles;

    /**
     * @brief Offsets for the mapped memory regions.
     */
    std::vector<void*> map_offsets;

    /**
     * @brief Constructor to initialize the buffer pool with virtual memory for
     * a specific device.
     *
     * @param device The device ID to associate with this buffer pool.
     */
    explicit ggml_cann_pool_vmm(int device)
        : device(device) {
        auto dev = ggml_cann_info().devices[device];
        granularity = dev.vmm_granularity;
        max_size = dev.total_vram;
    }
    /**
     * @brief Destructor to free all buffers in the virtual memory pool.
     */
    ~ggml_cann_pool_vmm() {
        if (pool_addr != 0) {
            for (auto& offset : map_offsets) {
                ACL_CHECK(aclrtUnmapMem(offset));
            }
            for (auto& handle : handles) {
                ACL_CHECK(aclrtFreePhysical(handle));
            }
            ACL_CHECK(aclrtReleaseMemAddress(pool_addr));
        }
    }
    /**
     * @brief Allocate a buffer of the given size in the virtual memory pool.
     *
     * @param size The size of the buffer to allocate.
     * @param actual_size A pointer to a variable to receive the actual size of
     * the allocated buffer.
     * @return A pointer to the allocated buffer.
     */
    void* alloc(size_t size, size_t* actual_size) override {
        // round up the allocation size to the alignment to ensure that all
        // allocations are aligned for all data types
        const size_t alignment = 128;
        size = GGML_PAD(size, alignment);
        if (size == 0) {
            size = alignment;
        }

        size_t avail = pool_size - pool_used;

        if (size > avail) {
            // round up to the next multiple of the granularity
            size_t reserve_size = size - avail;
            reserve_size = GGML_PAD(reserve_size, granularity);

            GGML_ASSERT(pool_size + reserve_size <= max_size);

            // allocate more physical memory
            aclrtPhysicalMemProp prop = {};
            prop.handleType = ACL_MEM_HANDLE_TYPE_NONE;
            prop.allocationType = ACL_MEM_ALLOCATION_TYPE_PINNED;
            prop.memAttr = ACL_HBM_MEM_HUGE;
            prop.location.type = ACL_MEM_LOCATION_TYPE_DEVICE;
            prop.location.id = device;
            prop.reserve = 0;
            aclrtDrvMemHandle handle;
            ACL_CHECK(aclrtMallocPhysical(&handle, reserve_size, &prop, 0));

            // reserve virtual address space (if not already reserved)
            if (pool_addr == 0) {
                ACL_CHECK(aclrtReserveMemAddress(
                    &pool_addr, max_size, 0, NULL, 1));
            }

            // map at the end of the pool
            ACL_CHECK(aclrtMapMem((char*)pool_addr + pool_size, reserve_size, 0,
                                  handle, 0));

            handles.push_back(handle);
            map_offsets.push_back((char*)pool_addr + pool_size);

            // add to the pool
            pool_size += reserve_size;

#ifdef DEBUG_CANN_MALLOC
            GGML_LOG_INFO("cann pool[%d]: size increased to %llu MB (reserved %llu MB)\n",
                          device, (unsigned long long) (pool_size/1024/1024),
                          (unsigned long long) (reserve_size/1024/1024));
#endif
        }

        GGML_ASSERT(pool_addr != 0);

        void* ptr = (void*)((char*)pool_addr + pool_used);
        *actual_size = size;
        pool_used += size;

#ifdef DEBUG_CANN_MALLOC
        GGML_LOG_INFO("cann pool[%d]: allocated %llu bytes at %llx\n", device,
                      (unsigned long long)size, (unsigned long long)ptr);
#endif
        return ptr;
    }
    /**
     * @brief Free a buffer and return it to the virtual memory pool.
     *
     * @param ptr Pointer to the buffer to free.
     * @param size Size of the buffer to free.
     */
    void free(void* ptr, size_t size) override {
#ifdef DEBUG_CANN_MALLOC
        GGML_LOG_INFO("cann pool[%d]: freed %llu bytes at %llx\n", device,
                      (unsigned long long)size, (unsigned long long)ptr);
#endif

        pool_used -= size;

        // all deallocations must be in reverse order of the allocations
        GGML_ASSERT(ptr == (void*)((char*)pool_addr + pool_used));
    }
};
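
// The vmm pool behaves like a stack: frees must mirror allocations in reverse
// order, as enforced by the assert in free() above. A minimal sketch of a
// valid sequence (hypothetical sizes):
//
//     size_t a_sz, b_sz;
//     void* a = pool.alloc(1024, &a_sz);
//     void* b = pool.alloc(2048, &b_sz);
//     pool.free(b, b_sz);   // must free b first
//     pool.free(a, a_sz);   // then a; freeing a first would trip the assert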
/**
 * @brief Create a new CANN pool for a specific device.
 *
 * Factory method to create a new CANN pool object based on the device type.
 *
 * @param device The device ID for which to create the pool.
 * @return A unique pointer to the created CANN pool.
 */
std::unique_ptr<ggml_cann_pool> ggml_backend_cann_context::new_pool_for_device(
    int device) {
    std::string mem_pool_type = get_env("GGML_CANN_MEM_POOL").value_or("");

    if (mem_pool_type == "prio") {
        GGML_LOG_INFO("%s: device %d use buffer pool with priority queue\n", __func__, device);
        return std::unique_ptr<ggml_cann_pool>(new ggml_cann_pool_buf_prio(device));
    }

    if (ggml_cann_info().devices[device].vmm && mem_pool_type != "leg") {
        GGML_LOG_INFO("%s: device %d use vmm pool\n", __func__, device);
        return std::unique_ptr<ggml_cann_pool>(new ggml_cann_pool_vmm(device));
    }

    GGML_LOG_INFO("%s: device %d use buffer pool\n", __func__, device);
    return std::unique_ptr<ggml_cann_pool>(new ggml_cann_pool_buf(device));
}
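
// Pool selection summary (comment only): GGML_CANN_MEM_POOL="prio" forces the
// priority-queue pool, "leg" forces the legacy fixed-slot pool, and otherwise
// the vmm pool is chosen whenever the device supports virtual memory
// management. E.g., a hypothetical invocation:
//
//     GGML_CANN_MEM_POOL=leg ./my-ggml-app   # force the legacy buffer pool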
// cann buffer
/**
 * @brief Context for managing a CANN buffer associated with a specific device.
 *
 * This structure holds information about a CANN buffer, including the device
 * ID, device pointer, and a name derived from GGML_CANN_NAME and the device ID.
 */
struct ggml_backend_cann_buffer_context {
    int32_t device;  ///< The device ID associated with this buffer context.
    void* dev_ptr =
        nullptr;  ///< Pointer to the device memory allocated for the buffer.

    /**
     * @brief Constructor to initialize the CANN buffer context.
     *
     * @param device The device ID associated with this buffer context.
     * @param dev_ptr Pointer to the device memory allocated for the buffer.
     */
    ggml_backend_cann_buffer_context(int32_t device, void* dev_ptr)
        : device(device),
          dev_ptr(dev_ptr) {}

    /**
     * @brief Destructor to free the device memory allocated for the buffer.
     */
    ~ggml_backend_cann_buffer_context() { ACL_CHECK(aclrtFree(dev_ptr)); }
};
/**
 * @brief Check if a buffer is a CANN buffer.
 *
 * This function checks if a given buffer is a CANN buffer by checking whether
 * its buffer type is a CANN buffer type.
 *
 * @param buffer The buffer to check.
 * @return true if the buffer is a CANN buffer, false otherwise.
 */
static bool ggml_backend_buft_is_cann(ggml_backend_buffer_type_t buft);
static bool ggml_backend_buffer_is_cann(
    ggml_backend_buffer_t buffer) {
    return ggml_backend_buft_is_cann(buffer->buft);
}
/**
 * @brief Free resources associated with a CANN buffer.
 *
 * This function frees the resources associated with a CANN buffer, including
 * its context.
 *
 * @param buffer The CANN buffer to free.
 */
static void ggml_backend_cann_buffer_free_buffer(
    ggml_backend_buffer_t buffer) {
    ggml_backend_cann_buffer_context* ctx =
        (ggml_backend_cann_buffer_context*)buffer->context;
    delete ctx;
}

/**
 * @brief Retrieve the base pointer of a CANN buffer.
 *
 * This function returns the base pointer of a CANN buffer, which points to the
 * device memory allocated for the buffer.
 *
 * @param buffer The CANN buffer whose base pointer is to be retrieved.
 * @return A pointer to the base of the device memory allocated for the buffer.
 */
static void* ggml_backend_cann_buffer_get_base(
    ggml_backend_buffer_t buffer) {
    ggml_backend_cann_buffer_context* ctx =
        (ggml_backend_cann_buffer_context*)buffer->context;
    return ctx->dev_ptr;
}
/**
 * @brief Transform quantized Q4.0 tensor data into a format suitable for CANN
 * processing.
 *
 * This function transforms quantized Q4.0 tensor data into a format suitable
 * for CANN processing. It extracts quantization values and scales from the
 * source data and prepares them in a format expected by CANN operations.
 *
 * @param tensor Pointer to the tensor information.
 * @param src Pointer to the source data in Q4.0 format.
 * @param dst Pointer to the destination buffer where transformed data will be
 * stored.
 */
static void ggml_backend_cann_transform_q4_0(ggml_tensor* tensor,
                                             const void* src,
                                             void* dst) {
    int64_t n_elems = ggml_nelements(tensor);
    int64_t groups = n_elems / QK4_0;
    size_t quant_bytes = n_elems * sizeof(uint8_t) / 2;

    uint8_t* quant_offset = (uint8_t*)dst;
    uint16_t* scale_offset = (uint16_t*)((char*)dst + quant_bytes);

    for (int i = 0; i < groups; i++) {
        const block_q4_0* group =
            (const block_q4_0*)((const char*)src + i * sizeof(block_q4_0));
        *scale_offset = group->d;
        scale_offset++;

        // 0-15
        for (int j = 0; j < QK4_0 / 2; j += 2) {
            (*quant_offset) = (group->qs[j] & 0x0F);
            (*quant_offset) |= ((group->qs[j + 1] << 4));
            quant_offset++;
        }

        // 16-31
        for (int j = 0; j < QK4_0 / 2; j += 2) {
            (*quant_offset) = (group->qs[j] >> 4);
            (*quant_offset) |= (group->qs[j + 1] & 0xF0);
            quant_offset++;
        }
    }

    // put (uint4b_t -8) into int4b_t
    for (quant_offset = (uint8_t*)dst;
         quant_offset < (uint8_t*)dst + quant_bytes; quant_offset++) {
        (*quant_offset) ^= 0x88;
    }
}
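
// Worked example of the final XOR pass above (comment only): q4_0 stores
// unsigned nibbles q in [0, 15] encoding the value q - 8. XOR-ing each packed
// byte with 0x88 flips the high bit of both nibbles, which maps q to q - 8 in
// two's-complement int4: 0x0 -> -8, 0x7 -> -1, 0x8 -> 0, 0xF -> +7.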
/**
 * @brief Transform CANN processed data back into quantized Q4.0 format.
 *
 * This function transforms CANN processed data back into quantized Q4.0 format.
 * It reverses the transformation performed by
 * ggml_backend_cann_transform_q4_0(), converting the data back into its
 * original quantized form.
 *
 * @param tensor Pointer to the tensor information.
 * @param src Pointer to the source buffer containing transformed data.
 * @param dst Pointer to the destination buffer where the Q4.0 formatted data
 * will be stored.
 */
static void ggml_backend_cann_transform_back_q4_0(
    const ggml_tensor* tensor, void* src, void* dst) {
    int64_t n_elems = ggml_nelements(tensor);
    int64_t groups = n_elems / QK4_0;
    size_t quant_bytes = n_elems * sizeof(uint8_t) / 2;

    uint8_t* quant_offset = (uint8_t*)src;
    uint16_t* scale_offset = (uint16_t*)((char*)src + quant_bytes);

    for (; quant_offset < (uint8_t*)src + quant_bytes; quant_offset++) {
        (*quant_offset) ^= 0x88;
    }
    quant_offset = (uint8_t*)src;

    for (int i = 0; i < groups; i++) {
        block_q4_0* group = (block_q4_0*)((char*)dst + i * sizeof(block_q4_0));
        group->d = *scale_offset;
        scale_offset++;

        // 0-15
        for (int j = 0; j < QK4_0 / 2; j += 2) {
            group->qs[j] = ((*quant_offset) & 0x0F);
            group->qs[j + 1] = ((*quant_offset) >> 4);
            quant_offset++;
        }

        // 16-31
        for (int j = 0; j < QK4_0 / 2; j += 2) {
            group->qs[j] |= ((*quant_offset) << 4);
            group->qs[j + 1] |= ((*quant_offset) & 0xF0);
            quant_offset++;
        }
    }
}
/**
 * @brief Transform quantized Q8.0 tensor data into a format suitable for CANN
 * processing.
 *
 * This function transforms quantized Q8.0 tensor data into a format suitable
 * for CANN processing. It extracts quantization values and scales from the
 * source data and prepares them in a format expected by CANN operations.
 *
 * @param tensor Pointer to the tensor information.
 * @param src Pointer to the source data in Q8.0 format.
 * @param dst Pointer to the destination buffer where transformed data will be
 * stored.
 */
static void ggml_backend_cann_transform_q8_0(ggml_tensor* tensor,
                                             const void* src,
                                             void* dst) {
    int64_t n_elems = ggml_nelements(tensor);
    int64_t groups = n_elems / QK8_0;
    size_t quant_bytes = n_elems * sizeof(uint8_t);

    uint8_t* quant_offset = (uint8_t*)dst;
    uint16_t* scale_offset = (uint16_t*)((char*)dst + quant_bytes);

    for (int i = 0; i < groups; i++) {
        const block_q8_0* group =
            (const block_q8_0*)((const char*)src + i * sizeof(block_q8_0));
        *scale_offset = group->d;
        scale_offset++;
        size_t group_quant_size = QK8_0 * sizeof(uint8_t);
        memcpy(quant_offset, group->qs, group_quant_size);
        quant_offset += group_quant_size;
    }
}
/**
 * @brief Transform CANN processed data back into quantized Q8.0 format.
 *
 * This function transforms CANN processed data back into quantized Q8.0 format.
 * It reverses the transformation performed by
 * ggml_backend_cann_transform_q8_0(), converting the data back into its
 * original quantized form.
 *
 * @param tensor Pointer to the tensor information.
 * @param src Pointer to the source buffer containing transformed data.
 * @param dst Pointer to the destination buffer where the Q8.0 formatted data
 * will be stored.
 */
static void ggml_backend_cann_transform_back_q8_0(
    const ggml_tensor* tensor, const void* src, void* dst) {
    int64_t n_elems = ggml_nelements(tensor);
    int64_t groups = n_elems / QK8_0;
    size_t quant_bytes = n_elems * sizeof(uint8_t);

    const uint8_t* quant_offset = (const uint8_t*)src;
    const uint16_t* scale_offset =
        (const uint16_t*)((const char*)src + quant_bytes);

    for (int i = 0; i < groups; i++) {
        block_q8_0* group = (block_q8_0*)((char*)dst + i * sizeof(block_q8_0));
        group->d = *scale_offset;
        scale_offset++;
        size_t group_quant_size = QK8_0 * sizeof(uint8_t);
        memcpy(group->qs, quant_offset, group_quant_size);
        quant_offset += group_quant_size;
    }
}
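
// Layout produced by the transforms above (comment-only sketch): the
// quantized bytes for all groups come first, followed by all fp16 scales,
// instead of ggml's interleaved per-block layout. For q8_0:
//
//     ggml host layout:  [d0 | qs0][d1 | qs1] ...   (one block_q8_0 per 32 elems)
//     CANN layout:       [qs0 qs1 ...][d0 d1 ...]   (n_elems quant bytes, then scales)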
/**
 * @brief Transform tensor data based on its type for CANN processing.
 *
 * This function transforms tensor data based on its quantization type for CANN
 * processing. It dispatches the transformation based on the tensor's type to
 * specialized functions handling Q4.0 and Q8.0 formats.
 *
 * @param tensor Pointer to the tensor information.
 * @param src Pointer to the source data to be transformed.
 * @param dst Pointer to the destination buffer where transformed data will be
 * stored.
 */
static void ggml_backend_cann_transform(ggml_tensor* tensor,
                                        const void* src, void* dst) {
    switch (tensor->type) {
        case GGML_TYPE_Q4_0:
            ggml_backend_cann_transform_q4_0(tensor, src, dst);
            break;
        case GGML_TYPE_Q8_0:
            ggml_backend_cann_transform_q8_0(tensor, src, dst);
            break;
        default:
            break;
    }
}
/**
 * @brief Transform CANN processed data back into tensor data based on its type.
 *
 * This function transforms CANN processed data back into tensor data based on
 * its quantization type for Q4.0 and Q8.0 formats. It dispatches the
 * transformation based on the tensor's type to specialized functions.
 *
 * @param tensor Pointer to the tensor information.
 * @param src Pointer to the source data containing CANN processed data.
 * @param dst Pointer to the destination buffer where transformed tensor data
 * will be stored.
 */
static void ggml_backend_cann_transform_back(
    const ggml_tensor* tensor, void* src, void* dst) {
    switch (tensor->type) {
        case GGML_TYPE_Q4_0:
            ggml_backend_cann_transform_back_q4_0(tensor, src, dst);
            break;
        case GGML_TYPE_Q8_0:
            ggml_backend_cann_transform_back_q8_0(tensor, src, dst);
            break;
        default:
            break;
    }
}
/**
 * @brief Check if transformation is needed for a given tensor type.
 *
 * This function checks if transformation is needed for a given tensor type
 * to prepare data for CANN processing.
 *
 * @param type The tensor type to check.
 * @return true if transformation is needed, false otherwise.
 */
static bool need_transform(ggml_type type) {
    switch (type) {
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q8_0:
            return true;
        default:
            return false;
    }
}
/**
 * @brief Initialize a tensor using data from a CANN buffer.
 *
 * This function initializes a tensor using data from a CANN buffer.
 * It handles special cases such as views and quantization.
 *
 * @param buffer The CANN buffer from which to initialize the tensor.
 * @param tensor Pointer to the tensor to be initialized.
 */
static enum ggml_status ggml_backend_cann_buffer_init_tensor(
    ggml_backend_buffer_t buffer, ggml_tensor* tensor) {
    if (tensor->view_src != NULL && tensor->view_offs == 0) {
        GGML_ASSERT(tensor->view_src->buffer->buft == buffer->buft);
        return GGML_STATUS_SUCCESS;
    }

    // TODO: the CANN backend doesn't support quantized tensors yet; just
    // leave this code here.
    if (ggml_is_quantized(tensor->type)) {
        // Initialize padding to 0 to avoid possible NaN values
        size_t original_size = ggml_nbytes(tensor);
        size_t padded_size =
            ggml_backend_buft_get_alloc_size(buffer->buft, tensor);

        if (padded_size > original_size && tensor->view_src == nullptr) {
            size_t memset_size = padded_size - original_size;
            ACL_CHECK(aclrtMemset((char*)tensor->data + original_size,
                                  memset_size, 0, memset_size));
        }
    }
    return GGML_STATUS_SUCCESS;
}
static int CreateAclTensorWeight(const void* hostData, const std::vector<int64_t>& shape, void** deviceAddr,
                                 aclDataType dataType, aclTensor** tensor) {
    uint64_t size = 1;
    for (auto i : shape) {
        size *= i;
    }

    const aclIntArray* mat2Size = aclCreateIntArray(shape.data(), shape.size());
    ACL_CHECK(aclnnCalculateMatmulWeightSizeV2(mat2Size, dataType, &size));

    size *= sizeof(int16_t);  // weight elements are 2 bytes wide (fp16)

    ACL_CHECK(aclrtMalloc(deviceAddr, size, ACL_MEM_MALLOC_HUGE_FIRST));
    ACL_CHECK(aclrtMemcpy(*deviceAddr, size, hostData, size, ACL_MEMCPY_HOST_TO_DEVICE));

    // compute contiguous (row-major) strides
    std::vector<int64_t> strides(shape.size(), 1);
    for (int64_t i = shape.size() - 2; i >= 0; i--) {
        strides[i] = shape[i + 1] * strides[i + 1];
    }

    *tensor = aclCreateTensor(shape.data(), shape.size(), dataType, strides.data(), 0, aclFormat::ACL_FORMAT_ND,
                              shape.data(), shape.size(), *deviceAddr);
    return 0;
}
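
// Worked example for the stride computation above (comment only): for a
// shape of {4, 8} the loop yields strides {8, 1}, i.e. standard row-major
// element strides for a contiguous ND tensor.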
static void weight_format_to_nz(ggml_tensor* tensor, const void* data, size_t offset) {
    aclrtStream stream;
    ACL_CHECK(aclrtCreateStream(&stream));

    std::vector<int64_t> weightTransposedShape = {tensor->ne[1], tensor->ne[0]};
    void* weightTransposedDeviceAddr = nullptr;
    aclTensor* weightTransposed = nullptr;
    CreateAclTensorWeight(data, weightTransposedShape, &weightTransposedDeviceAddr,
                          ggml_cann_type_mapping(tensor->type), &weightTransposed);

    uint64_t workspaceSize = 0;
    aclOpExecutor* executor;
    void* workspaceAddr = nullptr;

    // TransMatmulWeight
    ACL_CHECK(aclnnTransMatmulWeightGetWorkspaceSize(weightTransposed, &workspaceSize, &executor));
    std::unique_ptr<void, aclError (*)(void*)> workspaceAddrPtrTrans(nullptr, aclrtFree);
    if (workspaceSize > 0) {
        ACL_CHECK(aclrtMalloc(&workspaceAddr, workspaceSize, ACL_MEM_MALLOC_HUGE_FIRST));
        workspaceAddrPtrTrans.reset(workspaceAddr);
    }
    ACL_CHECK(aclnnTransMatmulWeight(workspaceAddr, workspaceSize, executor, stream));
    // wait for the async op to finish before copying its result
    ACL_CHECK(aclrtSynchronizeStream(stream));

    size_t size = ggml_nelements(tensor) * ggml_element_size(tensor);
    // both addresses are device memory, so this is a device-to-device copy
    ACL_CHECK(aclrtMemcpy((char*)tensor->data + offset, size,
                          weightTransposedDeviceAddr, size, ACL_MEMCPY_DEVICE_TO_DEVICE));
    ACL_CHECK(aclDestroyTensor(weightTransposed));
    aclrtFree(weightTransposedDeviceAddr);
    ACL_CHECK(aclrtDestroyStream(stream));
}
// TODO: handle tensors that have padding.
/**
 * @brief Set tensor data in a CANN buffer.
 *
 * This function sets tensor data in a CANN buffer, handling transformations
 * if needed based on the tensor's type.
 *
 * @param buffer The CANN buffer where the tensor data will be set.
 * @param tensor Pointer to the tensor whose data will be set.
 * @param data Pointer to the source data to be copied into the tensor.
 * @param offset Offset into the tensor's data where copying starts.
 * @param size Size of the data to be copied, in bytes.
 */
static void ggml_backend_cann_buffer_set_tensor(
    ggml_backend_buffer_t buffer, ggml_tensor* tensor, const void* data,
    size_t offset, size_t size) {
    ggml_backend_cann_buffer_context* ctx =
        (ggml_backend_cann_buffer_context*)buffer->context;

    ggml_cann_set_device(ctx->device);
    // TODO: refer to cann (#6017); it uses the thread's default stream.
    // For acl, synchronous functions use this default stream.
    // Why aclrtSynchronizeDevice?

    bool weightToNZ = false;
#ifdef ASCEND_310P
    weightToNZ = (getenv("GGML_CANN_WEIGHT_NZ") != nullptr);
#endif
    if (!need_transform(tensor->type)) {
        ACL_CHECK(aclrtMemcpy((char*)tensor->data + offset, size, data, size,
                              ACL_MEMCPY_HOST_TO_DEVICE));
        if (weightToNZ && is_matmul_weight((const ggml_tensor*)tensor)) {
            weight_format_to_nz(tensor, data, offset);
        }
    } else {
        void* transform_buffer = malloc(size);
        ggml_backend_cann_transform(tensor, data, transform_buffer);

        ACL_CHECK(aclrtMemcpy((char*)tensor->data + offset, size,
                              transform_buffer, size,
                              ACL_MEMCPY_HOST_TO_DEVICE));
        free(transform_buffer);
    }
}
/**
 * @brief Get tensor data from a CANN buffer.
 *
 * This function retrieves tensor data from a CANN buffer, handling
 * transformations if needed based on the tensor's type.
 *
 * @param buffer The CANN buffer from which to retrieve tensor data.
 * @param tensor Pointer to the tensor whose data will be retrieved.
 * @param data Pointer to the destination buffer where the tensor data will be
 * copied.
 * @param offset Offset into the tensor's data from which copying starts.
 * @param size Size of the data to be copied, in bytes.
 */
static void ggml_backend_cann_buffer_get_tensor(
    ggml_backend_buffer_t buffer, const ggml_tensor* tensor, void* data,
    size_t offset, size_t size) {
    ggml_backend_cann_buffer_context* ctx =
        (ggml_backend_cann_buffer_context*)buffer->context;

    ggml_cann_set_device(ctx->device);

    if (!need_transform(tensor->type)) {
        ACL_CHECK(aclrtMemcpy(data, size, (char*)tensor->data + offset, size,
                              ACL_MEMCPY_DEVICE_TO_HOST));
    } else {
        void* transform_buffer = malloc(size);
        ACL_CHECK(aclrtMemcpy(transform_buffer, size,
                              (char*)tensor->data + offset, size,
                              ACL_MEMCPY_DEVICE_TO_HOST));
        ggml_backend_cann_transform_back(tensor, transform_buffer, data);
        free(transform_buffer);
    }
}
/**
 * @brief Copy tensor data between CANN buffers if possible.
 *
 * This function copies tensor data between CANN buffers if the source and
 * destination buffers are CANN buffers and they meet the necessary conditions
 * (same device or devices can access each other).
 *
 * @param buffer The destination CANN buffer where the tensor data will be
 * copied.
 * @param src Pointer to the source tensor whose data will be copied.
 * @param dst Pointer to the destination tensor where the data will be copied.
 * @return true if the copy operation succeeded, false otherwise.
 */
static bool ggml_backend_cann_buffer_cpy_tensor(
    ggml_backend_buffer_t buffer, const ggml_tensor* src, ggml_tensor* dst) {
    if (ggml_backend_buffer_is_cann(src->buffer)) {
        ggml_backend_cann_buffer_context* src_ctx =
            (ggml_backend_cann_buffer_context*)src->buffer->context;
        ggml_backend_cann_buffer_context* dst_ctx =
            (ggml_backend_cann_buffer_context*)buffer->context;

        size_t memcpy_size = ggml_nbytes(src);
        // Same device.
        if (src_ctx->device == dst_ctx->device) {
            ACL_CHECK(aclrtMemcpy((char*)dst->data, memcpy_size,
                                  (const char*)src->data, memcpy_size,
                                  ACL_MEMCPY_DEVICE_TO_DEVICE));
            return true;
        } else {
            // Different device but can access by peer.
            int32_t canAccessPeer = 0;
            ACL_CHECK(aclrtDeviceCanAccessPeer(&canAccessPeer, src_ctx->device,
                                               dst_ctx->device));
            if (canAccessPeer) {
                ggml_cann_set_device(src_ctx->device);
                ACL_CHECK(aclrtDeviceEnablePeerAccess(dst_ctx->device, 0));
                ACL_CHECK(aclrtMemcpy((char*)dst->data, memcpy_size,
                                      (const char*)src->data, memcpy_size,
                                      ACL_MEMCPY_DEVICE_TO_DEVICE));
                return true;
            }
        }
    }
    return false;
}
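
// Cross-device copy pattern used above (comment only): query
// aclrtDeviceCanAccessPeer(src, dst), enable peer access from the source
// device, then issue a plain ACL_MEMCPY_DEVICE_TO_DEVICE copy. When this
// function returns false, the generic ggml-backend path is expected to
// perform the copy through host memory instead.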
/**
 * @brief Clear a CANN buffer by setting all its memory to a specified value.
 *
 * This function clears a CANN buffer by setting all its memory to a specified
 * value.
 *
 * @param buffer The CANN buffer to be cleared.
 * @param value The value to which each byte in the buffer will be set.
 */
static void ggml_backend_cann_buffer_clear(
    ggml_backend_buffer_t buffer, uint8_t value) {
    ggml_backend_cann_buffer_context* ctx =
        (ggml_backend_cann_buffer_context*)buffer->context;

    ggml_cann_set_device(ctx->device);
    ACL_CHECK(aclrtMemset(ctx->dev_ptr, buffer->size, value, buffer->size));
}
/**
 * @brief Interface for a CANN buffer in the backend.
 *
 * This structure defines function pointers to operations that can be performed
 * on a CANN buffer within the backend.
 */
static const ggml_backend_buffer_i ggml_backend_cann_buffer_interface = {
    /* .free_buffer   = */ ggml_backend_cann_buffer_free_buffer,
    /* .get_base      = */ ggml_backend_cann_buffer_get_base,
    /* .init_tensor   = */ ggml_backend_cann_buffer_init_tensor,
    /* .memset_tensor = */ NULL,
    /* .set_tensor    = */ ggml_backend_cann_buffer_set_tensor,
    /* .get_tensor    = */ ggml_backend_cann_buffer_get_tensor,
    /* .cpy_tensor    = */ ggml_backend_cann_buffer_cpy_tensor,
    /* .clear         = */ ggml_backend_cann_buffer_clear,
    /* .reset         = */ NULL,
};
// cann buffer type
/**
 * @brief Structure representing context information for a specific backend
 * buffer type.
 */
struct ggml_backend_cann_buffer_type_context {
    int32_t device;   /**< Device identifier associated with the buffer context. */
    std::string name; /**< Name associated with the buffer context. */
};
/**
 * @brief Retrieves the name associated with a CANN buffer type.
 *
 * This function returns the descriptive name associated with the specified
 * CANN buffer type context.
 *
 * @param buft Pointer to the buffer type context.
 * @return Const pointer to the C-style string containing the name.
 */
static const char* ggml_backend_cann_buffer_type_name(
    ggml_backend_buffer_type_t buft) {
    ggml_backend_cann_buffer_type_context* buft_ctx =
        (ggml_backend_cann_buffer_type_context*)buft->context;
    return buft_ctx->name.c_str();
}
/**
 * @brief Allocates a new CANN buffer of the specified type and size.
 *
 * This function allocates a new CANN buffer on the specified device with the
 * given size.
 *
 * @param buft Pointer to the buffer type context.
 * @param size Size in bytes of the buffer to allocate.
 * @return Pointer to the allocated buffer, or nullptr if allocation fails.
 */
static ggml_backend_buffer_t
ggml_backend_cann_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft,
                                           size_t size) {
    ggml_backend_cann_buffer_type_context* buft_ctx =
        (ggml_backend_cann_buffer_type_context*)buft->context;
    ggml_cann_set_device(buft_ctx->device);
    const size_t alignment = 128;
    size = GGML_PAD(size, alignment);
    if (size == 0) {
        size = alignment;
    }
    void* dev_ptr;
    aclError err = aclrtMalloc(&dev_ptr, size, ACL_MEM_MALLOC_HUGE_FIRST);
    if (err != ACL_SUCCESS) {
        GGML_LOG_ERROR(
            "%s: allocating %.2f MiB on device %d: aclrtMalloc failed: %s\n",
            __func__, size / 1024.0 / 1024.0, buft_ctx->device,
            aclGetRecentErrMsg());
        return nullptr;
    }
    ggml_backend_cann_buffer_context* ctx =
        new ggml_backend_cann_buffer_context(buft_ctx->device, dev_ptr);
    return ggml_backend_buffer_init(buft, ggml_backend_cann_buffer_interface,
                                    ctx, size);
}
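// Illustrative note (not part of the backend): GGML_PAD rounds the requested
// size up to the next multiple of the alignment, so with the 128-byte
// alignment used above a request behaves as follows:
//
//     size_t size = 1000;
//     size = GGML_PAD(size, 128);  // -> 1024, the next multiple of 128
//     // a zero-byte request is then bumped to one full alignment unit (128)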
/**
 * @brief Retrieves the memory alignment requirement for CANN buffers of this
 * type.
 *
 * This function returns the alignment requirement in bytes for memory allocated
 * by the CANN buffer type.
 *
 * @param buft Pointer to the buffer type context (unused in this
 * implementation).
 * @return The alignment requirement in bytes (fixed at 128 bytes for CANN
 * buffers).
 */
static size_t ggml_backend_cann_buffer_type_get_alignment(
    ggml_backend_buffer_type_t buft) {
    return 128;
    GGML_UNUSED(buft);
}
/**
 * @brief Calculates the allocation size required for a tensor in a CANN buffer.
 *
 * Computes the total allocation size needed for storing the tensor's data in a
 * CANN buffer, considering any necessary padding or adjustments for quantized
 * types.
 *
 * @param buft Pointer to the buffer type context (unused in this
 * implementation).
 * @param tensor Pointer to the tensor for which the allocation size is
 * calculated.
 * @return The total allocation size in bytes required for the tensor in the
 * CANN buffer.
 */
static size_t ggml_backend_cann_buffer_type_get_alloc_size(
    ggml_backend_buffer_type_t buft, const ggml_tensor* tensor) {
    size_t size = ggml_nbytes(tensor);
    int64_t ne0 = tensor->ne[0];
    // The last line must be larger than 32 bytes, because every single op
    // processes at least 32 bytes.
    // TODO: quantized type?
    // int64_t line_size = ne0 * ggml_element_size(tensor);
    // int64_t line_size_align_32 = (line_size + 31) & ~31;
    // size += (line_size_align_32 - line_size);
    // TODO: quantized types are not supported yet.
    // TODO: consider non-contiguous tensors.
    if (ggml_is_quantized(tensor->type)) {
        if (ne0 % MATRIX_ROW_PADDING != 0) {
            size += ggml_row_size(
                tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING);
        }
    }
    return size;
    GGML_UNUSED(buft);
}
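// Worked example (illustrative only): for a quantized tensor whose row length
// ne0 is not a multiple of MATRIX_ROW_PADDING, the allocation grows by the
// byte size of the missing tail of the last row. Assuming MATRIX_ROW_PADDING
// were 512 and a GGML_TYPE_Q4_0 tensor (32 elements per 18-byte block) had
// ne0 == 1056:
//
//     int64_t pad = 512 - 1056 % 512;                    // -> 480 elements
//     size += ggml_row_size(GGML_TYPE_Q4_0, pad);        // 480 / 32 * 18 = 270 bytes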
static bool ggml_backend_cann_buffer_type_is_host(ggml_backend_buffer_type_t buft) {
    return false;
    GGML_UNUSED(buft);
}
/**
 * @brief Interface for managing CANN buffer types in the GGML backend.
 *
 * Provides function pointers for allocating, querying properties, and managing
 * memory for CANN buffer types in the GGML backend.
 */
static const ggml_backend_buffer_type_i ggml_backend_cann_buffer_type_interface = {
    /* .get_name       = */ ggml_backend_cann_buffer_type_name,
    /* .alloc_buffer   = */ ggml_backend_cann_buffer_type_alloc_buffer,
    /* .get_alignment  = */ ggml_backend_cann_buffer_type_get_alignment,
    /* .get_max_size   = */ NULL, // defaults to SIZE_MAX
    /* .get_alloc_size = */ ggml_backend_cann_buffer_type_get_alloc_size,
    /* .is_host        = */ ggml_backend_cann_buffer_type_is_host,
};
/**
 * @brief Retrieves the CANN buffer type for a specified device.
 *
 * This function initializes and returns the buffer type interface associated
 * with the given device. It ensures thread-safe access using a mutex.
 *
 * @param device The device index for which to retrieve the buffer type.
 * @return A pointer to the buffer type interface for the specified device, or
 * nullptr if the device index is out of range.
 */
ggml_backend_buffer_type_t
ggml_backend_cann_buffer_type(int32_t device) {
    static std::mutex mutex;
    std::lock_guard<std::mutex> lock(mutex);
    if (device >= ggml_backend_cann_get_device_count()) {
        return nullptr;
    }
    static ggml_backend_buffer_type
        ggml_backend_cann_buffer_types[GGML_CANN_MAX_DEVICES];
    static bool ggml_backend_cann_buffer_type_initialized = false;
    if (!ggml_backend_cann_buffer_type_initialized) {
        for (int32_t i = 0; i < ggml_cann_info().device_count; i++) {
            ggml_backend_cann_buffer_types[i] = {
                /* .iface   = */ ggml_backend_cann_buffer_type_interface,
                /* .device  = */ ggml_backend_reg_dev_get(ggml_backend_cann_reg(), i),
                /* .context = */
                new ggml_backend_cann_buffer_type_context{
                    i, "CANN" + std::to_string(i)},
            };
        }
        ggml_backend_cann_buffer_type_initialized = true;
    }
    return &ggml_backend_cann_buffer_types[device];
}
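// Usage sketch (illustrative only; assumes device 0 is present): allocating
// device memory through this buffer type with the generic ggml-backend API.
//
//     ggml_backend_buffer_type_t buft = ggml_backend_cann_buffer_type(0);
//     ggml_backend_buffer_t buf = ggml_backend_buft_alloc_buffer(buft, 1 << 20);
//     // ... place tensors in the buffer via ggml-alloc ...
//     ggml_backend_buffer_free(buf);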
/**
 * @brief Retrieves the name associated with a CANN host buffer type.
 *
 * This function returns the descriptive name associated with the specified
 * CANN host buffer type context.
 *
 * @param buft Pointer to the host buffer type context.
 * @return Const pointer to the C-style string containing the name.
 */
static const char * ggml_backend_cann_host_buffer_type_name(ggml_backend_buffer_type_t buft) {
    return "CANN_Host";
    GGML_UNUSED(buft);
}
/**
 * @brief Retrieves the name associated with a CANN host buffer.
 *
 * This function returns the descriptive name associated with the specified
 * CANN host buffer context.
 *
 * @param buffer Pointer to the host buffer.
 * @return Const pointer to the C-style string containing the name.
 */
static const char * ggml_backend_cann_host_buffer_name(ggml_backend_buffer_t buffer) {
    return "CANN_Host";
    GGML_UNUSED(buffer);
}
/**
 * @brief Free resources associated with a CANN host buffer.
 *
 * This function frees the resources associated with a CANN host buffer, including
 * its context.
 *
 * @param buffer The CANN host buffer to free.
 */
static void ggml_backend_cann_host_buffer_free(ggml_backend_buffer_t buffer) {
    ACL_CHECK(aclrtFreeHost(buffer->context));
}
/**
 * @brief Allocates pinned host memory of the specified size.
 *
 * This function allocates pinned (page-locked) host memory of the given size,
 * unless pinned allocation is disabled via the GGML_CANN_NO_PINNED
 * environment variable.
 *
 * @param size Size in bytes of the host buffer to allocate.
 * @return Pointer to the allocated host memory, or nullptr if allocation fails.
 */
static void * ggml_cann_host_malloc(size_t size) {
    if (getenv("GGML_CANN_NO_PINNED") != nullptr) {
        return nullptr;
    }
    const size_t alignment = 128;
    size = GGML_PAD(size, alignment);
    if (size == 0) {
        size = alignment;
    }
    void * hostPtr = nullptr;
    aclError err = aclrtMallocHost((void **) &hostPtr, size);
    if (err != ACL_SUCCESS) {
        GGML_LOG_WARN("%s: failed to allocate %.2f MiB of pinned memory: %s\n", __func__,
                      size / 1024.0 / 1024.0, aclGetRecentErrMsg());
        return nullptr;
    }
    return hostPtr;
}
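// Illustrative note: pinned host allocation can be disabled at run time by
// setting the GGML_CANN_NO_PINNED environment variable, in which case callers
// fall back to ordinary CPU buffers. For example (shell; `my_program` is a
// placeholder for any program that loads this backend):
//
//     GGML_CANN_NO_PINNED=1 ./my_program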
/**
 * @brief Allocates a new CANN host buffer of the specified type and size.
 *
 * @param buft Pointer to the host buffer type context.
 * @param size Size in bytes of the host buffer to allocate.
 * @return Pointer to the allocated host buffer, or a CPU buffer if pinned
 * allocation fails.
 */
static ggml_backend_buffer_t ggml_backend_cann_host_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
    void * hostPtr = ggml_cann_host_malloc(size);
    if (hostPtr == nullptr) {
        // fallback to cpu buffer
        return ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size);
    }
    ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(hostPtr, size);
    buffer->buft = buft;
    buffer->iface.free_buffer = ggml_backend_cann_host_buffer_free;
    return buffer;
}
/**
 * @brief Returns the CANN host buffer type.
 *
 * Provides a buffer type that allocates pinned host memory, falling back to
 * regular CPU buffers when pinned allocation is unavailable.
 */
ggml_backend_buffer_type_t ggml_backend_cann_host_buffer_type() {
    static struct ggml_backend_buffer_type ggml_backend_cann_buffer_type_host = {
        /* .iface    = */ {
            /* .get_name       = */ ggml_backend_cann_host_buffer_type_name,
            /* .alloc_buffer   = */ ggml_backend_cann_host_buffer_type_alloc_buffer,
            /* .get_alignment  = */ ggml_backend_cpu_buffer_type()->iface.get_alignment,
            /* .get_max_size   = */ NULL, // defaults to SIZE_MAX
            /* .get_alloc_size = */ ggml_backend_cpu_buffer_type()->iface.get_alloc_size,
            /* .is_host        = */ ggml_backend_cpu_buffer_type()->iface.is_host,
        },
        /* .device  = */ ggml_backend_reg_dev_get(ggml_backend_cann_reg(), 0),
        /* .context = */ nullptr,
    };
    return &ggml_backend_cann_buffer_type_host;
}
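// Usage sketch (illustrative only; `n_bytes` is a placeholder size): host
// buffers obtained this way are pinned, which speeds up host<->device
// transfers.
//
//     ggml_backend_buffer_type_t hbuft = ggml_backend_cann_host_buffer_type();
//     ggml_backend_buffer_t staging = ggml_backend_buft_alloc_buffer(hbuft, n_bytes);
//     // ... fill staging data, then copy tensors to the device ...
//     ggml_backend_buffer_free(staging);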
/**
 * @brief Computes the forward operation for a given tensor using CANN
 * operations.
 *
 * This function selects the appropriate CANN operation based on the type of
 * operation specified in the tensor and performs the computation.
 *
 * @param ctx The CANN context containing necessary resources and
 * configurations.
 * @param dst The destination tensor where the result of the computation will be
 * stored.
 * @return true if the computation was successful; false otherwise.
 */
static bool ggml_cann_compute_forward(ggml_backend_cann_context& ctx,
                                      struct ggml_tensor* dst) {
    switch (dst->op) {
        case GGML_OP_REPEAT:
            ggml_cann_repeat(ctx, dst);
            break;
        case GGML_OP_GET_ROWS:
            ggml_cann_get_rows(ctx, dst);
            break;
        case GGML_OP_SET_ROWS:
            ggml_cann_set_rows(ctx, dst);
            break;
        case GGML_OP_DUP:
            ggml_cann_dup(ctx, dst);
            break;
        case GGML_OP_ADD:
        case GGML_OP_ADD1:
            ggml_cann_binary_op<aclnn_add>(ctx, dst);
            break;
        case GGML_OP_SUB:
            ggml_cann_binary_op<aclnn_sub>(ctx, dst);
            break;
        case GGML_OP_ACC:
            ggml_cann_acc(ctx, dst);
            break;
        case GGML_OP_MUL:
            ggml_cann_binary_op<aclnn_mul>(ctx, dst);
            break;
        case GGML_OP_DIV:
            ggml_cann_binary_op<aclnn_div>(ctx, dst);
            break;
        case GGML_OP_UNARY:
            switch (ggml_get_unary_op(dst)) {
                case GGML_UNARY_OP_ABS:
                    GGML_CANN_CALL_OP_UNARY(Abs);
                    break;
                case GGML_UNARY_OP_NEG:
                    GGML_CANN_CALL_OP_UNARY(Neg);
                    break;
                case GGML_UNARY_OP_GELU:
                case GGML_UNARY_OP_GELU_ERF:
                    // aclnnGelu internally uses the erf-based approximation.
                    GGML_CANN_CALL_OP_UNARY(Gelu);
                    break;
                case GGML_UNARY_OP_SILU:
                    GGML_CANN_CALL_OP_UNARY(Silu);
                    break;
                case GGML_UNARY_OP_GELU_QUICK: {
                    auto lambda = [](ggml_backend_cann_context& ctx,
                                     aclTensor* acl_src,
                                     aclTensor* acl_dst) {
                        GGML_CANN_CALL_ACLNN_OP(ctx, GeluV2, acl_src, 0, acl_dst);
                    };
                    ggml_cann_op_unary(lambda, ctx, dst);
                } break;
                case GGML_UNARY_OP_TANH:
                    GGML_CANN_CALL_OP_UNARY(Tanh);
                    break;
                case GGML_UNARY_OP_RELU:
                    GGML_CANN_CALL_OP_UNARY(Relu);
                    break;
                case GGML_UNARY_OP_SIGMOID:
                    GGML_CANN_CALL_OP_UNARY(Sigmoid);
                    break;
                case GGML_UNARY_OP_HARDSIGMOID:
                    GGML_CANN_CALL_OP_UNARY(Hardsigmoid);
                    break;
                case GGML_UNARY_OP_HARDSWISH:
                    GGML_CANN_CALL_OP_UNARY(Hardswish);
                    break;
                case GGML_UNARY_OP_EXP:
                    GGML_CANN_CALL_OP_UNARY(Exp);
                    break;
                case GGML_UNARY_OP_ELU:
                    ggml_cann_elu(ctx, dst);
                    break;
                case GGML_UNARY_OP_SGN:
                    GGML_CANN_CALL_OP_UNARY(Sign);
                    break;
                case GGML_UNARY_OP_STEP:
                    ggml_cann_step(ctx, dst);
                    break;
                default:
                    return false;
            }
            break;
        case GGML_OP_GLU:
            switch (ggml_get_glu_op(dst)) {
                case GGML_GLU_OP_REGLU:
                    GGML_CANN_CALL_OP_UNARY_GATED(Relu);
                    break;
                case GGML_GLU_OP_GEGLU:
                case GGML_GLU_OP_GEGLU_ERF:
                    // aclnnGelu internally uses the erf-based approximation.
                    GGML_CANN_CALL_OP_UNARY_GATED(Gelu);
                    break;
                case GGML_GLU_OP_SWIGLU:
                    GGML_CANN_CALL_OP_UNARY_GATED(Silu);
                    break;
                case GGML_GLU_OP_GEGLU_QUICK: {
                    auto lambda = [](ggml_backend_cann_context& ctx,
                                     aclTensor* acl_src,
                                     aclTensor* acl_dst) {
                        GGML_CANN_CALL_ACLNN_OP(ctx, GeluV2, acl_src, 0, acl_dst);
                    };
                    ggml_cann_op_unary_gated(lambda, ctx, dst);
                } break;
                default:
                    return false;
            }
            break;
        case GGML_OP_NORM:
            ggml_cann_norm(ctx, dst);
            break;
        case GGML_OP_GROUP_NORM:
            ggml_cann_group_norm(ctx, dst);
            break;
        case GGML_OP_CONCAT:
            ggml_cann_concat(ctx, dst);
            break;
        case GGML_OP_UPSCALE:
            ggml_cann_upsample_nearest2d(ctx, dst);
            break;
        case GGML_OP_PAD:
            ggml_cann_pad(ctx, dst);
            break;
        case GGML_OP_ARANGE:
            ggml_cann_arange(ctx, dst);
            break;
        case GGML_OP_TIMESTEP_EMBEDDING:
            ggml_cann_timestep_embedding(ctx, dst);
            break;
        case GGML_OP_LEAKY_RELU:
            ggml_cann_leaky_relu(ctx, dst);
            break;
        case GGML_OP_RMS_NORM:
            ggml_cann_rms_norm(ctx, dst);
            break;
        case GGML_OP_MUL_MAT:
            ggml_cann_mul_mat(ctx, dst);
            break;
        case GGML_OP_MUL_MAT_ID:
            ggml_cann_mul_mat_id(ctx, dst);
            break;
        case GGML_OP_SCALE:
            ggml_cann_scale(ctx, dst);
            break;
        case GGML_OP_SQR:
            // compute x*x by aliasing src[1] to src[0] and reusing the
            // elementwise mul kernel.
            GGML_ASSERT(dst->src[1] == nullptr);
            dst->src[1] = dst->src[0];
            ggml_cann_binary_op<aclnn_mul>(ctx, dst);
            break;
        case GGML_OP_SQRT:
            GGML_CANN_CALL_OP_UNARY(Sqrt);
            break;
        case GGML_OP_CLAMP:
            ggml_cann_clamp(ctx, dst);
            break;
        case GGML_OP_CPY:
            ggml_cann_cpy(ctx, dst);
            break;
        case GGML_OP_CONT:
            ggml_cann_dup(ctx, dst);
            break;
        case GGML_OP_NONE:
        case GGML_OP_RESHAPE:
        case GGML_OP_VIEW:
        case GGML_OP_PERMUTE:
        case GGML_OP_TRANSPOSE:
            break;
        case GGML_OP_DIAG_MASK_INF:
            ggml_cann_diag_mask(ctx, dst, -INFINITY);
            break;
        case GGML_OP_SOFT_MAX:
            ggml_cann_softmax(ctx, dst);
            break;
        case GGML_OP_ROPE:
            ggml_cann_rope(ctx, dst);
            break;
        case GGML_OP_IM2COL:
            ggml_cann_im2col(ctx, dst);
            break;
        case GGML_OP_POOL_2D:
            ggml_cann_pool2d(ctx, dst);
            break;
        case GGML_OP_SUM:
            ggml_cann_sum(ctx, dst);
            break;
        case GGML_OP_SUM_ROWS:
            ggml_cann_sum_rows(ctx, dst);
            break;
        case GGML_OP_ARGSORT:
            ggml_cann_argsort(ctx, dst);
            break;
        case GGML_OP_ARGMAX:
            ggml_cann_argmax(ctx, dst);
            break;
        case GGML_OP_COS:
            ggml_cann_op_unary<aclnn_cos>(ctx, dst);
            break;
        case GGML_OP_SIN:
            ggml_cann_op_unary<aclnn_sin>(ctx, dst);
            break;
        case GGML_OP_CONV_TRANSPOSE_1D:
            ggml_cann_conv_transpose_1d(ctx, dst);
            break;
        case GGML_OP_LOG:
            GGML_CANN_CALL_OP_UNARY(Log);
            break;
        case GGML_OP_MEAN:
            ggml_cann_mean(ctx, dst);
            break;
        case GGML_OP_PAD_REFLECT_1D:
            ggml_cann_pad_reflect_1d(ctx, dst);
            break;
        case GGML_OP_COUNT_EQUAL:
            ggml_cann_count_equal(ctx, dst);
            break;
        case GGML_OP_FLASH_ATTN_EXT:
            ggml_cann_flash_attn_ext(ctx, dst);
            break;
        default:
            return false;
    }
    return true;
}
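// Illustrative note: this dispatcher is driven per node by graph_compute
// below. A minimal end-to-end sketch with the generic API (assumes a ggml
// graph `gf` whose tensors live in CANN buffers):
//
//     ggml_backend_t backend = ggml_backend_cann_init(0);
//     // ... build gf ...
//     ggml_backend_graph_compute(backend, gf); // invokes ggml_cann_compute_forward
//     ggml_backend_free(backend);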
// backend
/**
 * @brief Retrieves the name associated with the CANN backend.
 *
 * This function returns the name assigned to the CANN backend, which is stored
 * in the context of the provided backend structure.
 *
 * @param backend Pointer to the CANN backend structure.
 * @return A pointer to a constant string representing the backend name.
 */
static const char* ggml_backend_cann_name(ggml_backend_t backend) {
    ggml_backend_cann_context* cann_ctx =
        (ggml_backend_cann_context*)backend->context;
    return cann_ctx->name.c_str();
}
/**
 * @brief Frees resources associated with the CANN backend.
 *
 * This function releases resources associated with the CANN backend context
 * and resets the device associated with the backend to its initial state.
 *
 * @param backend Pointer to the CANN backend structure to be freed.
 */
static void ggml_backend_cann_free(ggml_backend_t backend) {
    ggml_backend_cann_context* cann_ctx =
        (ggml_backend_cann_context*)backend->context;
    ACL_CHECK(aclrtSynchronizeDevice());
    ACL_CHECK(aclrtResetDevice(cann_ctx->device));
    delete cann_ctx;
    delete backend;
}
/**
 * @brief Sets tensor data asynchronously in the CANN backend.
 *
 * This function asynchronously sets tensor data in the CANN backend.
 *
 * @param backend Pointer to the CANN backend structure.
 * @param tensor Pointer to the tensor structure to set data for.
 * @param data Pointer to the host data to copy to the tensor.
 * @param offset Offset in bytes within the tensor data.
 * @param size Size of the data to copy in bytes.
 */
static void ggml_backend_cann_set_tensor_async(ggml_backend_t backend,
                                               ggml_tensor *tensor,
                                               const void *data,
                                               size_t offset,
                                               size_t size) {
    ggml_backend_cann_context *cann_ctx =
        (ggml_backend_cann_context *)backend->context;
    ggml_backend_buffer_t buf =
        tensor->view_src ? tensor->view_src->buffer : tensor->buffer;
    GGML_ASSERT(buf->buft == ggml_backend_cann_buffer_type(cann_ctx->device) &&
                "unsupported buffer type");
    GGML_ASSERT(!ggml_is_quantized(tensor->type));
    ggml_cann_async_memcpy(cann_ctx, (char *)tensor->data + offset, data, size,
                           ACL_MEMCPY_HOST_TO_DEVICE);
}
/**
 * @brief Gets tensor data asynchronously in the CANN backend.
 *
 * This function asynchronously gets tensor data in the CANN backend.
 *
 * @param backend Pointer to the CANN backend structure.
 * @param tensor Pointer to the tensor structure to get data from.
 * @param data Pointer to the host memory that receives the tensor data.
 * @param offset Offset in bytes within the tensor data.
 * @param size Size of the data to copy in bytes.
 */
static void ggml_backend_cann_get_tensor_async(
    ggml_backend_t backend, const ggml_tensor *tensor, void *data,
    size_t offset, size_t size) {
    ggml_backend_cann_context *cann_ctx =
        (ggml_backend_cann_context *)backend->context;
    ggml_backend_buffer_t buf =
        tensor->view_src ? tensor->view_src->buffer : tensor->buffer;
    GGML_ASSERT(buf->buft == ggml_backend_cann_buffer_type(cann_ctx->device) &&
                "unsupported buffer type");
    GGML_ASSERT(!ggml_is_quantized(tensor->type));
    ggml_cann_async_memcpy(cann_ctx, data, (char *)tensor->data + offset, size,
                           ACL_MEMCPY_DEVICE_TO_HOST);
}
/**
 * @brief Asynchronously copies tensor data between CANN backends.
 *
 * This function copies tensor data asynchronously between two CANN backends. It
 * checks if both tensors reside in CANN buffers and whether the devices support
 * peer-to-peer access for direct copying. If not, it returns false.
 *
 * @param backend_src Pointer to the source CANN backend structure.
 * @param backend_dst Pointer to the destination CANN backend structure.
 * @param src Pointer to the source tensor to copy data from.
 * @param dst Pointer to the destination tensor to copy data to.
 * @return true if the copy operation succeeds, false otherwise.
 */
static bool ggml_backend_cann_cpy_tensor_async(
    ggml_backend_t backend_src, ggml_backend_t backend_dst,
    const ggml_tensor* src, ggml_tensor* dst) {
    GGML_ASSERT(ggml_backend_is_cann(backend_src) ||
                ggml_backend_is_cann(backend_dst));
    if (!ggml_backend_buffer_is_cann(src->buffer) ||
        !ggml_backend_buffer_is_cann(dst->buffer)) {
        return false;
    }
    ggml_backend_buffer_t buf_src =
        src->view_src ? src->view_src->buffer : src->buffer;
    ggml_backend_buffer_t buf_dst =
        dst->view_src ? dst->view_src->buffer : dst->buffer;
    ggml_backend_cann_context* cann_ctx_src =
        (ggml_backend_cann_context*)backend_src->context;
    ggml_backend_cann_context* cann_ctx_dst =
        (ggml_backend_cann_context*)backend_dst->context;
    size_t copy_size = ggml_nbytes(dst);
    if (backend_src != backend_dst) {
        ggml_backend_cann_buffer_context* buf_ctx_src =
            (ggml_backend_cann_buffer_context*)buf_src->context;
        ggml_backend_cann_buffer_context* buf_ctx_dst =
            (ggml_backend_cann_buffer_context*)buf_dst->context;
        GGML_ASSERT(cann_ctx_src->device == buf_ctx_src->device);
        GGML_ASSERT(cann_ctx_dst->device == buf_ctx_dst->device);
        int32_t canAccessPeer = 0;
        ACL_CHECK(aclrtDeviceCanAccessPeer(&canAccessPeer, cann_ctx_src->device,
                                           cann_ctx_dst->device));
        if (!canAccessPeer) {
            return false;
        }
        // Peer access must be enabled in both directions for aclrtMemcpyAsync
        // between devices.
        ggml_cann_set_device(cann_ctx_dst->device);
        ACL_CHECK(aclrtDeviceEnablePeerAccess(cann_ctx_src->device, 0));
        ggml_cann_set_device(cann_ctx_src->device);
        ACL_CHECK(aclrtDeviceEnablePeerAccess(cann_ctx_dst->device, 0));
        // Wait for the task queue to drain so that task order is preserved.
        cann_ctx_src->task_queue.wait();
        ACL_CHECK(aclrtMemcpyAsync(dst->data, copy_size, src->data, copy_size,
                                   ACL_MEMCPY_DEVICE_TO_DEVICE,
                                   cann_ctx_src->stream()));
        // TODO: workaround; events don't work here, so synchronize the
        // stream instead.
        aclrtSynchronizeStream(cann_ctx_src->stream());
    } else {
        // src and dst are on the same backend
        ACL_CHECK(aclrtMemcpyAsync(dst->data, copy_size, src->data, copy_size,
                                   ACL_MEMCPY_DEVICE_TO_DEVICE,
                                   cann_ctx_dst->stream()));
    }
    return true;
}
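// Usage sketch (illustrative only; assumes two CANN backends `backend0` and
// `backend1` on peer-capable devices, with `src`/`dst` allocated on each):
//
//     // copies device-to-device when peer access is available; otherwise
//     // ggml falls back to a blocking copy through the host.
//     ggml_backend_tensor_copy_async(backend0, backend1, src, dst);
//     ggml_backend_synchronize(backend1);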
/**
 * @brief Synchronizes a CANN backend.
 *
 * This function synchronizes the specified CANN backend by waiting for all
 * operations in its associated stream to complete.
 *
 * @param backend Pointer to the CANN backend structure to synchronize.
 */
static void ggml_backend_cann_synchronize(ggml_backend_t backend) {
    ggml_backend_cann_context* cann_ctx =
        (ggml_backend_cann_context*)backend->context;
    cann_ctx->task_queue.wait();
    ggml_cann_set_device(cann_ctx->device);
    ACL_CHECK(aclrtSynchronizeStream(cann_ctx->stream()));
}
/**
 * @brief Computes a computational graph using a CANN backend.
 *
 * This function computes the operations defined in the computational graph
 * using the specified CANN backend.
 *
 * @param backend Pointer to the CANN backend structure to use for computation.
 * @param cgraph Pointer to the computational graph structure containing nodes
 * representing operations to be computed.
 * @return enum ggml_status Returns GGML_STATUS_SUCCESS if computation
 * completes successfully, otherwise an appropriate error status.
 */
static enum ggml_status ggml_backend_cann_graph_compute(
    ggml_backend_t backend, ggml_cgraph* cgraph) {
    ggml_backend_cann_context* cann_ctx =
        (ggml_backend_cann_context*)backend->context;
    ggml_cann_set_device(cann_ctx->device);
    for (int i = 0; i < cgraph->n_nodes; i++) {
        ggml_tensor* node = cgraph->nodes[i];
        if (ggml_is_empty(node) || node->op == GGML_OP_NONE) {
            continue;
        }
        bool ok = ggml_cann_compute_forward(*cann_ctx, node);
        if (!ok) {
            GGML_LOG_ERROR("%s: error: op not supported %s (%s)\n", __func__,
                           node->name, ggml_op_name(node->op));
        }
        GGML_ASSERT(ok);
    }
    return GGML_STATUS_SUCCESS;
}
/**
 * @brief Checks if the CANN backend supports a specific operation.
 *
 * This function checks whether the specified operation is supported by the
 * CANN backend.
 *
 * @param dev Pointer to the CANN backend device to check support for the
 * operation.
 * @param op Pointer to the tensor representing the operation to check.
 * @return bool Returns true if the operation is supported by the backend,
 * otherwise false.
 */
static bool ggml_backend_cann_supports_op(ggml_backend_dev_t dev,
                                          const ggml_tensor* op) {
    switch (op->op) {
        case GGML_OP_UNARY:
            switch (ggml_get_unary_op(op)) {
                case GGML_UNARY_OP_ABS:
                case GGML_UNARY_OP_NEG:
                case GGML_UNARY_OP_GELU:
                case GGML_UNARY_OP_SILU:
                case GGML_UNARY_OP_RELU:
                case GGML_UNARY_OP_SIGMOID:
                case GGML_UNARY_OP_HARDSIGMOID:
                case GGML_UNARY_OP_HARDSWISH:
                case GGML_UNARY_OP_GELU_QUICK:
                case GGML_UNARY_OP_TANH:
                case GGML_UNARY_OP_EXP:
                case GGML_UNARY_OP_ELU:
                case GGML_UNARY_OP_SGN:
                case GGML_UNARY_OP_STEP:
                case GGML_UNARY_OP_GELU_ERF:
                    return true;
                default:
                    return false;
            }
        case GGML_OP_GLU:
            switch (ggml_get_glu_op(op)) {
                case GGML_GLU_OP_REGLU:
                case GGML_GLU_OP_GEGLU:
                case GGML_GLU_OP_SWIGLU:
                case GGML_GLU_OP_GEGLU_ERF:
                case GGML_GLU_OP_GEGLU_QUICK:
                    return true;
                default:
                    return false;
            }
            break;
        case GGML_OP_MUL_MAT: {
            switch (op->src[0]->type) {
                case GGML_TYPE_F16:
                case GGML_TYPE_F32:
                    return true;
                case GGML_TYPE_Q8_0:
                case GGML_TYPE_Q4_0:
#ifdef ASCEND_310P
                    // Q4 and Q8 per-group quantization is not supported on
                    // 310P devices.
                    return false;
#endif
                    // only contiguous tensors are supported for quantized
                    // types.
                    return ggml_is_contiguous(op->src[0]) &&
                           ggml_is_contiguous(op->src[1]);
                default:
                    return false;
            }
        }
        case GGML_OP_MUL_MAT_ID:
            switch (op->src[0]->type) {
                case GGML_TYPE_F16:
                case GGML_TYPE_F32:
                    return true;
                case GGML_TYPE_Q8_0:
                case GGML_TYPE_Q4_0:
#ifdef ASCEND_310P
                    // Q4 and Q8 per-group quantization is not supported on
                    // 310P devices.
                    return false;
#endif
                    // only contiguous tensors are supported for quantized
                    // types.
                    return ggml_is_contiguous(op->src[0]) &&
                           ggml_is_contiguous(op->src[1]);
                default:
                    return false;
            }
        // embedding
        case GGML_OP_GET_ROWS: {
            switch (op->src[0]->type) {
                case GGML_TYPE_F32:
                case GGML_TYPE_F16:
                case GGML_TYPE_Q8_0:
                    return true;
                default:
                    return false;
            }
        } break;
        case GGML_OP_SET_ROWS: {
            switch (op->type) {
                case GGML_TYPE_F32:
                case GGML_TYPE_F16:
                    return true;
                default:
                    return false;
            }
        } break;
        case GGML_OP_CPY: {
            ggml_tensor *src = op->src[0];
            if ((op->type != GGML_TYPE_F32 && op->type != GGML_TYPE_F16) ||
                (src->type != GGML_TYPE_F32 &&
                 src->type != GGML_TYPE_F16)) {
                // only F32 and F16 are supported.
                return false;
            }
            if (!ggml_are_same_shape(op, src) && !ggml_is_contiguous(op)) {
                // a non-contiguous dst is not supported.
                return false;
            }
            return true;
        } break;
        case GGML_OP_CONT: {
            // TODO: support GGML_TYPE_BF16
            switch (op->src[0]->type) {
                case GGML_TYPE_F32:
                case GGML_TYPE_F16:
                    return true;
                default:
                    return false;
            }
        }
        case GGML_OP_ROPE: {
            // TODO: with ops-test v == 1
            float ext_factor = 0.0f;
            memcpy(&ext_factor, (const float *) op->op_params + 7, sizeof(float));
            // TODO: n_dims <= ne0
            if (op->src[0]->ne[0] != op->op_params[1]) {
                return false;
            }
            // TODO: ext_factor != 0
            if (ext_factor != 0) {
                return false;
            }
            const int mode = ((const int32_t *) op->op_params)[2];
            if (mode & GGML_ROPE_TYPE_MROPE) {
                return false;
            }
            if (mode & GGML_ROPE_TYPE_VISION) {
                return false;
            }
            if (!ggml_is_contiguous(op->src[0])) {
                return false;
            }
            return true;
        }
        case GGML_OP_UPSCALE: {
            // aclnnUpsampleNearest2dGetWorkspaceSize does not support cases
            // where selfDimN[2]/outDimN[2] or selfDimC[3]/outDimC[3] are not
            // equal.
            if (op->src[0]->ne[2] * op->ne[3] != op->src[0]->ne[3] * op->ne[2]) {
                return false;
            }
            if (op->op_params[0] != GGML_SCALE_MODE_NEAREST) {
                return false;
            }
            return true;
        }
        case GGML_OP_POOL_2D: {
            const int32_t * opts = (const int32_t *) op->op_params;
#ifdef ASCEND_310P
            enum ggml_op_pool opt = static_cast<ggml_op_pool>(opts[0]);
            if (opt == GGML_OP_POOL_MAX) {
                return false;
            }
#endif
            const int k0 = opts[1];
            const int k1 = opts[2];
            const int p0 = opts[5];
            const int p1 = opts[6];
            // paddingH should be at most half of kernelH, and
            // paddingW should be at most half of kernelW.
            return (p0 <= (k0 / 2)) && (p1 <= (k1 / 2));
        }
        case GGML_OP_SUM:
        case GGML_OP_DUP:
        case GGML_OP_IM2COL:
        case GGML_OP_CONCAT:
        case GGML_OP_REPEAT:
        case GGML_OP_NONE:
        case GGML_OP_RESHAPE:
        case GGML_OP_VIEW:
        case GGML_OP_PERMUTE:
        case GGML_OP_TRANSPOSE:
        case GGML_OP_NORM:
        case GGML_OP_ADD:
        case GGML_OP_ADD1:
        case GGML_OP_SUB:
        case GGML_OP_MUL:
        case GGML_OP_DIV:
        case GGML_OP_RMS_NORM:
        case GGML_OP_SQR:
        case GGML_OP_SQRT:
        case GGML_OP_CLAMP:
        case GGML_OP_DIAG_MASK_INF:
        case GGML_OP_SUM_ROWS:
        case GGML_OP_ARGSORT:
        case GGML_OP_ACC:
        case GGML_OP_GROUP_NORM:
        case GGML_OP_PAD:
        case GGML_OP_ARANGE:
        case GGML_OP_TIMESTEP_EMBEDDING:
        case GGML_OP_LEAKY_RELU:
        case GGML_OP_ARGMAX:
        case GGML_OP_COS:
        case GGML_OP_SIN:
        case GGML_OP_CONV_TRANSPOSE_1D:
        case GGML_OP_LOG:
        case GGML_OP_MEAN:
        case GGML_OP_PAD_REFLECT_1D:
        case GGML_OP_COUNT_EQUAL:
            return true;
        case GGML_OP_SCALE:
            float bias;
            memcpy(&bias, (float*)op->op_params + 1, sizeof(float));
            return bias == 0.0f; // TODO: support bias != 0.0f
        case GGML_OP_SOFT_MAX:
            // TODO: support broadcast
            // ref: https://github.com/ggml-org/llama.cpp/pull/14435
            return !op->src[1] || (op->src[1]->ne[2] == 1 && op->src[1]->ne[3] == 1);
        case GGML_OP_FLASH_ATTN_EXT: {
            // derived from [ggml-cuda.cu]
            if (op->src[1]->type != GGML_TYPE_F16 || op->src[2]->type != GGML_TYPE_F16) {
                return false;
            }
            if (op->src[0]->type != GGML_TYPE_F16 && op->src[0]->type != GGML_TYPE_F32 && op->src[0]->type != GGML_TYPE_BF16) {
                return false;
            }
            if (op->type != GGML_TYPE_F16 && op->type != GGML_TYPE_F32 && op->type != GGML_TYPE_BF16) {
                return false;
            }
            if (op->src[1]->ne[0] != op->src[2]->ne[0]) {
                // different head sizes of K and V are not supported yet
                return false;
            }
            if (op->src[0]->ne[0] == 192) {
                return false;
            }
            if (op->src[0]->ne[0] == 576) {
                // DeepSeek MLA
                return false;
            }
            // TODO: support broadcast
            // ref: https://github.com/ggml-org/llama.cpp/pull/14435
            if (op->src[0]->ne[3] != 1) {
                return false;
            }
            float logitSoftcap = 0.0f;
            memcpy(&logitSoftcap, (float*)op->op_params + 2, sizeof(float));
            if (logitSoftcap != 0.0f) {
                return false;
            }
            return true;
        }
        default:
            return false;
    }
    GGML_UNUSED(dev);
}
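// Usage sketch (illustrative only): op support can be queried through the
// generic device API before scheduling work (assumes a CANN device `dev` and
// a graph node `node`):
//
//     if (ggml_backend_dev_supports_op(dev, node)) {
//         // the scheduler may place this node on the CANN device
//     }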
/**
 * @brief Checks if the backend buffer type is associated with the CANN backend.
 *
 * This function checks whether the provided backend buffer type is associated
 * with the CANN backend based on the comparison of its name retrieval function
 * pointer.
 *
 * @param buft Pointer to the backend buffer type to check.
 * @return bool Returns true if the buffer type is associated with the CANN
 * backend, otherwise false.
 */
static bool ggml_backend_buft_is_cann(ggml_backend_buffer_type_t buft) {
    return buft->iface.get_name == ggml_backend_cann_buffer_type_name;
}
/**
 * @brief Determines if a tensor operation should be offloaded to the CANN
 * backend.
 *
 * This function checks if a given tensor operation should be offloaded to the
 * CANN backend based on the operation type and the size of the tensor. It
 * returns true if the second dimension (ne[1]) of the tensor is greater than or
 * equal to the minimum batch size and the operation is not GGML_OP_GET_ROWS.
 *
 * @param dev Pointer to the CANN backend device.
 * @param op Pointer to the tensor operation to check.
 * @return bool Returns true if the operation should be offloaded, otherwise
 * false.
 */
static bool ggml_backend_cann_offload_op(ggml_backend_dev_t dev,
                                         const ggml_tensor* op) {
    const int min_batch_size = 32;
    GGML_UNUSED(dev);
    return op->ne[1] >= min_batch_size && op->op != GGML_OP_GET_ROWS;
}
/**
 * @brief Records an event on the CANN backend stream.
 *
 * This function records the given event on the ACL runtime stream associated
 * with the backend context.
 *
 * @param backend Pointer to the backend whose stream the event is recorded on.
 * @param event Pointer to the event structure to be recorded.
 */
static void ggml_backend_cann_event_record(ggml_backend_t backend, ggml_backend_event_t event) {
    ggml_backend_cann_context* cann_ctx =
        (ggml_backend_cann_context*)backend->context;
    ACL_CHECK(aclrtRecordEvent((aclrtEvent)event->context, cann_ctx->stream()));
}
/**
 * @brief Waits for a recorded event to complete on the CANN backend stream.
 *
 * This function makes the given backend wait for the event to complete on its
 * ACL runtime stream.
 *
 * @param backend Pointer to the backend structure.
 * @param event Pointer to the event structure that the backend needs to wait
 * for.
 */
static void ggml_backend_cann_event_wait(ggml_backend_t backend,
                                         ggml_backend_event_t event) {
    ggml_backend_cann_context* cann_ctx =
        (ggml_backend_cann_context*)backend->context;
    if (ggml_backend_is_cann(backend)) {
        ACL_CHECK(aclrtStreamWaitEvent(cann_ctx->stream(),
                                       (aclrtEvent)event->context));
    } else {
        GGML_ABORT("fatal error");
    }
}
/**
 * @brief Structure defining the interface for the CANN backend.
 *
 * This structure contains function pointers for various operations
 * supported by the CANN backend, including name retrieval, memory
 * management, tensor operations, synchronization, and event handling.
 */
static const ggml_backend_i ggml_backend_cann_interface = {
    /* .get_name           = */ ggml_backend_cann_name,
    /* .free               = */ ggml_backend_cann_free,
    /* .set_tensor_async   = */ ggml_backend_cann_set_tensor_async,
    /* .get_tensor_async   = */ ggml_backend_cann_get_tensor_async,
    /* .cpy_tensor_async   = */ ggml_backend_cann_cpy_tensor_async,
    /* .synchronize        = */ ggml_backend_cann_synchronize,
    /* .graph_plan_create  = */ NULL,
    /* .graph_plan_free    = */ NULL,
    /* .graph_plan_update  = */ NULL,
    /* .graph_plan_compute = */ NULL,
    /* .graph_compute      = */ ggml_backend_cann_graph_compute,
    /* .event_record       = */ ggml_backend_cann_event_record,
    /* .event_wait         = */ ggml_backend_cann_event_wait,
};
/**
 * @brief Return the hardcoded GUID for the CANN backend.
 *
 * This function returns a static GUID which uniquely identifies the CANN
 * backend.
 *
 * @return A pointer to the static GUID.
 */
static ggml_guid_t ggml_backend_cann_guid() {
    static ggml_guid guid = {0xa1, 0x94, 0xaf, 0xac, 0xbd, 0x4f, 0x47, 0x34,
                             0xbe, 0x1a, 0x9e, 0x71, 0x1f, 0x9e, 0xed, 0x64};
    return &guid;
}
// backend device
struct ggml_backend_cann_device_context {
    int device;
    std::string name;
    std::string description;
};

static const char * ggml_backend_cann_device_get_name(ggml_backend_dev_t dev) {
    ggml_backend_cann_device_context * ctx = (ggml_backend_cann_device_context *)dev->context;
    return ctx->name.c_str();
}

static const char* ggml_backend_cann_device_get_description(ggml_backend_dev_t dev) {
    ggml_backend_cann_device_context * ctx = (ggml_backend_cann_device_context *)dev->context;
    return ctx->description.c_str();
}

static void ggml_backend_cann_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) {
    ggml_backend_cann_device_context * ctx = (ggml_backend_cann_device_context *)dev->context;
    ggml_backend_cann_get_device_memory(ctx->device, free, total);
}

static enum ggml_backend_dev_type ggml_backend_cann_device_get_type(ggml_backend_dev_t dev) {
    GGML_UNUSED(dev);
    return GGML_BACKEND_DEVICE_TYPE_GPU;
}

static void ggml_backend_cann_device_get_props(ggml_backend_dev_t dev, ggml_backend_dev_props * props) {
    props->name        = ggml_backend_cann_device_get_name(dev);
    props->description = ggml_backend_cann_device_get_description(dev);
    props->type        = ggml_backend_cann_device_get_type(dev);
    ggml_backend_cann_device_get_memory(dev, &props->memory_free, &props->memory_total);
    bool host_buffer = getenv("GGML_CANN_NO_PINNED") == nullptr;
    props->caps = {
        /* .async                = */ false,
        /* .host_buffer          = */ host_buffer,
        /* .buffer_from_host_ptr = */ false,
        /* .events               = */ true,
    };
}

static ggml_backend_t ggml_backend_cann_device_init(ggml_backend_dev_t dev, const char * params) {
    GGML_UNUSED(params);
    ggml_backend_cann_device_context * ctx = (ggml_backend_cann_device_context *)dev->context;
    return ggml_backend_cann_init(ctx->device);
}
/**
 * @brief Checks if the CANN backend supports a specific backend buffer type.
 *
 * This function determines whether the CANN backend supports the given backend
 * buffer type by comparing the device context of the backend and buffer type.
 * It returns true if the device of the buffer type context matches the device
 * of the backend device context.
 *
 * @param dev Pointer to the CANN backend device.
 * @param buft Pointer to the backend buffer type to check.
 * @return bool Returns true if the CANN backend supports the buffer type,
 * otherwise false.
 */
static bool ggml_backend_cann_supports_buft(
    ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) {
    if (ggml_backend_buft_is_cann(buft)) {
        ggml_backend_cann_device_context * dev_ctx = (ggml_backend_cann_device_context *)dev->context;
        ggml_backend_cann_buffer_type_context * buft_ctx =
            (ggml_backend_cann_buffer_type_context *)buft->context;
        return buft_ctx->device == dev_ctx->device;
    }
    return false;
}
static ggml_backend_buffer_type_t ggml_backend_cann_device_get_buffer_type(ggml_backend_dev_t dev) {
    ggml_backend_cann_device_context * ctx = (ggml_backend_cann_device_context *)dev->context;
    return ggml_backend_cann_buffer_type(ctx->device);
}

static ggml_backend_buffer_type_t ggml_backend_cann_device_get_host_buffer_type(ggml_backend_dev_t dev) {
    GGML_UNUSED(dev);
    return ggml_backend_cann_host_buffer_type();
}
/**
 * @brief Creates a new event for the CANN backend device.
 *
 * This function initializes a new event for the CANN backend by setting the
 * device and creating an ACL runtime event. The created event is then wrapped
 * in a ggml_backend_event structure and returned.
 *
 * @param dev Pointer to the CANN backend device.
 * @return ggml_backend_event_t Returns a pointer to the new event structure.
 */
static ggml_backend_event_t ggml_backend_cann_device_event_new(
    ggml_backend_dev_t dev) {
    ggml_backend_cann_device_context * dev_ctx = (ggml_backend_cann_device_context *)dev->context;
    ggml_cann_set_device(dev_ctx->device);
    aclrtEvent event;
    ACL_CHECK(aclrtCreateEvent(&event));
    return new ggml_backend_event{
        /* .device  = */ ggml_backend_reg_dev_get(ggml_backend_cann_reg(), dev_ctx->device),
        /* .context = */ event,
    };
}
/**
 * @brief Frees a CANN backend event.
 *
 * This function destroys the ACL runtime event associated with the given CANN
 * backend event and then deletes the event structure itself.
 *
 * @param event Pointer to the event structure to be freed.
 */
static void ggml_backend_cann_device_event_free(ggml_backend_dev_t dev, ggml_backend_event_t event) {
    ACL_CHECK(aclrtDestroyEvent((aclrtEvent)event->context));
    delete event;
    GGML_UNUSED(dev);
}
/**
 * @brief Synchronizes the given event on the CANN backend.
 *
 * This function waits for the specified event to complete on the ACL runtime.
 *
 * @param event Pointer to the event structure to be synchronized.
 */
static void ggml_backend_cann_device_event_synchronize(ggml_backend_dev_t dev, ggml_backend_event_t event) {
    ACL_CHECK(aclrtSynchronizeEvent((aclrtEvent)event->context));
    GGML_UNUSED(dev);
}
static const ggml_backend_device_i ggml_backend_cann_device_interface = {
    /* .get_name             = */ ggml_backend_cann_device_get_name,
    /* .get_description      = */ ggml_backend_cann_device_get_description,
    /* .get_memory           = */ ggml_backend_cann_device_get_memory,
    /* .get_type             = */ ggml_backend_cann_device_get_type,
    /* .get_props            = */ ggml_backend_cann_device_get_props,
    /* .init_backend         = */ ggml_backend_cann_device_init, // called for every card
    /* .get_buffer_type      = */ ggml_backend_cann_device_get_buffer_type,
    /* .get_host_buffer_type = */ ggml_backend_cann_device_get_host_buffer_type,
    /* .buffer_from_host_ptr = */ NULL, // not supported for CANN
    /* .supports_op          = */ ggml_backend_cann_supports_op,
    /* .supports_buft        = */ ggml_backend_cann_supports_buft,
    /* .offload_op           = */ ggml_backend_cann_offload_op,
    /* .event_new            = */ ggml_backend_cann_device_event_new,
    /* .event_free           = */ ggml_backend_cann_device_event_free,
    /* .event_synchronize    = */ ggml_backend_cann_device_event_synchronize,
};
// backend reg
struct ggml_backend_cann_reg_context {
    std::vector<ggml_backend_dev_t> devices;
};

static const char * ggml_backend_cann_reg_get_name(ggml_backend_reg_t reg) {
    GGML_UNUSED(reg);
    return GGML_CANN_NAME;
}

static size_t ggml_backend_cann_reg_get_device_count(ggml_backend_reg_t reg) {
    ggml_backend_cann_reg_context * ctx = (ggml_backend_cann_reg_context *)reg->context;
    return ctx->devices.size();
}

static ggml_backend_dev_t ggml_backend_cann_reg_get_device(ggml_backend_reg_t reg, size_t index) {
    ggml_backend_cann_reg_context * ctx = (ggml_backend_cann_reg_context *)reg->context;
    GGML_ASSERT(index < ctx->devices.size());
    return ctx->devices[index];
}

static void * ggml_backend_cann_reg_get_proc_address(ggml_backend_reg_t reg, const char * name) {
    GGML_UNUSED(reg);
    GGML_UNUSED(name);
    // reserved for future use
    return nullptr;
}

static const ggml_backend_reg_i ggml_backend_cann_reg_interface = {
    /* .get_name         = */ ggml_backend_cann_reg_get_name,
    /* .get_device_count = */ ggml_backend_cann_reg_get_device_count,
    /* .get_device       = */ ggml_backend_cann_reg_get_device,
    /* .get_proc_address = */ ggml_backend_cann_reg_get_proc_address,
};
// backend registry, called only once for the cann backend
ggml_backend_reg_t ggml_backend_cann_reg() {
    static ggml_backend_reg reg;
    static bool initialized = false;
    {
        static std::mutex mutex;
        std::lock_guard<std::mutex> lock(mutex);
        if (!initialized) {
            aclInit(nullptr);
            ggml_backend_cann_reg_context * ctx = new ggml_backend_cann_reg_context;
            for (int i = 0; i < ggml_cann_info().device_count; i++) {
                ggml_backend_cann_device_context* dev_ctx = new ggml_backend_cann_device_context();
                dev_ctx->description = aclrtGetSocName();
                dev_ctx->device = i;
                dev_ctx->name = GGML_CANN_NAME + std::to_string(i);
                ggml_cann_set_device(i);
                ggml_backend_dev_t dev = new ggml_backend_device {
                    /* .iface   = */ ggml_backend_cann_device_interface,
                    /* .reg     = */ &reg,
                    /* .context = */ dev_ctx
                };
                ctx->devices.push_back(dev);
            }
            reg = ggml_backend_reg {
                /* .api_version = */ GGML_BACKEND_API_VERSION,
                /* .iface       = */ ggml_backend_cann_reg_interface,
                /* .context     = */ ctx
            };
        }
        initialized = true;
    }
    return &reg;
}
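// Usage sketch (illustrative only): enumerating CANN devices through the
// registry with the generic ggml-backend API.
//
//     ggml_backend_reg_t reg = ggml_backend_cann_reg();
//     for (size_t i = 0; i < ggml_backend_reg_dev_count(reg); i++) {
//         ggml_backend_dev_t dev = ggml_backend_reg_dev_get(reg, i);
//         printf("%s\n", ggml_backend_dev_name(dev));
//     }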
ggml_backend_t ggml_backend_cann_init(int32_t device) {
    aclInit(nullptr);
    if (device < 0 || device >= ggml_backend_cann_get_device_count()) {
        GGML_LOG_ERROR("%s: error: invalid device %d\n", __func__, device);
        return nullptr;
    }
    ggml_backend_cann_context* ctx = new ggml_backend_cann_context(device);
    if (ctx == nullptr) {
        GGML_LOG_ERROR("%s: error: failed to allocate context\n", __func__);
        return nullptr;
    }
    ggml_cann_set_device(ctx->device);
    ggml_backend_t cann_backend =
        new ggml_backend{/* .guid      = */ ggml_backend_cann_guid(),
                         /* .interface = */ ggml_backend_cann_interface,
                         /* .device    = */ ggml_backend_reg_dev_get(ggml_backend_cann_reg(), device),
                         /* .context   = */ ctx};
    return cann_backend;
}
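// Usage sketch (illustrative only): bringing up and tearing down a CANN
// backend on device 0.
//
//     ggml_backend_t backend = ggml_backend_cann_init(0);
//     if (backend != nullptr) {
//         // ... allocate buffers, compute graphs ...
//         ggml_backend_free(backend);
//     }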
bool ggml_backend_is_cann(ggml_backend_t backend) {
    return backend != NULL &&
           ggml_guid_matches(backend->guid, ggml_backend_cann_guid());
}

int32_t ggml_backend_cann_get_device_count() {
    return ggml_cann_info().device_count;
}

void ggml_backend_cann_get_device_description(
    int32_t device, char* description, size_t description_size) {
    ggml_cann_set_device(device);
    const char* soc_name = aclrtGetSocName();
    snprintf(description, description_size, "%s", soc_name);
}

void ggml_backend_cann_get_device_memory(int32_t device, size_t* free,
                                         size_t* total) {
    ggml_cann_set_device(device);
    ACL_CHECK(aclrtGetMemInfo(ACL_HBM_MEM, free, total));
}
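// Usage sketch (illustrative only): querying free/total HBM on device 0.
//
//     size_t free = 0, total = 0;
//     ggml_backend_cann_get_device_memory(0, &free, &total);
//     printf("free: %zu MiB, total: %zu MiB\n",
//            free / 1024 / 1024, total / 1024 / 1024);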
GGML_BACKEND_DL_IMPL(ggml_backend_cann_reg)