  1. /*
  2. * Copyright (c) 2023-2024 The ggml authors
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a copy
  5. * of this software and associated documentation files (the "Software"), to
  6. * deal in the Software without restriction, including without limitation the
  7. * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
  8. * sell copies of the Software, and to permit persons to whom the Software is
  9. * furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  17. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  18. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  19. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  20. * IN THE SOFTWARE.
  21. */
#include "ggml-cann.h"
#include <acl/acl.h>
#include <stdarg.h>
#include <aclnnop/aclnn_trans_matmul_weight.h>
#include <algorithm>
#include <chrono>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <memory>
#include <mutex>
#include <optional>
#include <queue>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "ggml-impl.h"
#include "ggml-backend-impl.h"
#include "ggml-cann/aclnn_ops.h"
#include "ggml-cann/common.h"
#include "ggml.h"
#define GGML_COMMON_DECL_C
#include "ggml-common.h"
#define GGML_CANN_NAME "CANN"
  42. /**
  43. * @brief Handles CANN errors by printing an error message and aborting.
  44. *
  45. * @param stmt The statement that caused the error.
  46. * @param func The function in which the error occurred.
  47. * @param file The file in which the error occurred.
  48. * @param line The line number where the error occurred.
  49. * @param msg The error message.
  50. */
  51. [[noreturn]] void ggml_cann_error(const char* stmt, const char* func,
  52. const char* file, int line, const char* msg) {
  53. int32_t id = -1;
  54. aclrtGetDevice(&id);
  55. GGML_LOG_ERROR("CANN error: %s\n", msg);
  56. GGML_LOG_ERROR(" current device: %d, in function %s at %s:%d\n", id, func,
  57. file, line);
  58. GGML_LOG_ERROR(" %s\n", stmt);
  59. // abort with GGML_ASSERT to get a stack trace
  60. GGML_ABORT("CANN error");
  61. }
  62. /**
  63. * @brief Sets the device to be used by CANN.
  64. *
  65. * @param device The device ID to set.
  66. */
void ggml_cann_set_device(const int32_t device) {
    // TODO: uncomment these lines once the empty-context issue has been fixed.
    // int current_device;
    // ACL_CHECK(aclrtGetDevice(&current_device));
    // if (device == current_device) {
    //     return;
    // }
    ACL_CHECK(aclrtSetDevice(device));
}
  76. /**
  77. * @brief Retrieves the current device ID.
  78. *
  79. * @return The current device ID.
  80. */
  81. int32_t ggml_cann_get_device() {
  82. int32_t id;
  83. ACL_CHECK(aclrtGetDevice(&id));
  84. return id;
  85. }
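// Illustrative sketch (not used by the backend): how a raw ACL call can be
// routed into ggml_cann_error() by hand. The real code paths in this file rely
// on the ACL_CHECK macro from ggml-cann/common.h instead; the helper below is
// only an assumption of how such a manual check could look.
#if 0
static void example_manual_acl_check(int32_t device) {
    aclError err = aclrtSetDevice(device);
    if (err != ACL_SUCCESS) {
        // forward the failing statement, location and ACL error text
        ggml_cann_error("aclrtSetDevice(device)", __func__, __FILE__, __LINE__,
                        aclGetRecentErrMsg());
    }
}
#endif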
/**
 * @brief Get the value of the environment variable @p name, lowercased.
 *
 * @return The lowercased value as a std::string if the variable is set,
 * std::nullopt otherwise.
 */
std::optional<std::string> get_env(const std::string& name) {
    const char* val = std::getenv(name.c_str());
    if (!val) return std::nullopt;
    std::string res = std::string(val);
    std::transform(res.begin(), res.end(), res.begin(), ::tolower);
    return res;
}

/**
 * @brief Check whether a string represents an enabled ("true") switch value.
 */
bool parse_bool(const std::string& value) {
    std::unordered_set<std::string> valid_values = {"on", "1", "yes", "y", "enable", "true"};
    return valid_values.find(value) != valid_values.end();
}
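// Illustrative usage sketch: how the buffer pools below combine get_env() and
// parse_bool() to read boolean switches. GGML_CANN_DISABLE_BUF_POOL_CLEAN is
// the variable actually checked later in this file; the value_or("") fallback
// keeps parse_bool() returning false when the variable is unset.
#if 0
static bool example_read_bool_switch() {
    return parse_bool(get_env("GGML_CANN_DISABLE_BUF_POOL_CLEAN").value_or(""));
}
#endif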
  104. /**
  105. * @brief Initialize the CANN device information.
  106. *
  107. * This function initializes the CANN device information by obtaining the
  108. * device count and setting the memory allocation granularity for each device.
  109. *
  110. * @return A structure containing the device information.
  111. */
  112. static ggml_cann_device_info ggml_cann_init() {
  113. ggml_cann_device_info info = {};
  114. aclError err = aclrtGetDeviceCount((uint32_t*)&info.device_count);
  115. if (err != ACL_SUCCESS) {
  116. GGML_LOG_ERROR("%s: failed to initialize CANN: %s\n",
  117. __func__, aclGetRecentErrMsg());
  118. return info;
  119. }
  120. GGML_ASSERT(info.device_count <= GGML_CANN_MAX_DEVICES);
  121. for (int id = 0; id < info.device_count; ++id) {
  122. aclrtPhysicalMemProp prop = {};
  123. prop.handleType = ACL_MEM_HANDLE_TYPE_NONE;
  124. prop.allocationType = ACL_MEM_ALLOCATION_TYPE_PINNED;
  125. prop.memAttr = ACL_HBM_MEM_HUGE;
  126. prop.location.type = ACL_MEM_LOCATION_TYPE_DEVICE;
  127. prop.location.id = id;
  128. prop.reserve = 0;
  129. err = aclrtMemGetAllocationGranularity(
  130. &prop, ACL_RT_MEM_ALLOC_GRANULARITY_RECOMMENDED,
  131. &info.devices[id].vmm_granularity);
  132. info.devices[id].vmm = err == ACL_SUCCESS;
  133. size_t free, total;
  134. ggml_backend_cann_get_device_memory(id, &free, &total);
  135. info.devices[id].total_vram = free;
  136. }
  137. // TODO: add more device info later.
  138. return info;
  139. }
  140. /**
  141. * @brief Retrieve the CANN device information.
  142. *
  143. * This function returns a reference to a structure containing the CANN device
  144. * information. The device information is initialized once and reused on
  145. * subsequent calls.
  146. *
  147. * @return A reference to the structure containing the device information.
  148. */
  149. const ggml_cann_device_info& ggml_cann_info() {
  150. static ggml_cann_device_info info = ggml_cann_init();
  151. return info;
  152. }
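// Illustrative sketch: querying the lazily-initialized device info. The field
// names match how ggml_cann_device_info is used in this file; anything beyond
// that (the exact struct layout in common.h) is an assumption.
#if 0
static void example_query_device_info(int device) {
    const ggml_cann_device_info & info = ggml_cann_info();
    if (device < info.device_count && info.devices[device].vmm) {
        GGML_LOG_INFO("device %d supports VMM, granularity = %zu bytes\n",
                      device, info.devices[device].vmm_granularity);
    }
}
#endif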
  153. //#define DEBUG_CANN_MALLOC
  154. /**
 * @brief A pool of CANN buffers (priority segment buffer).
  156. *
  157. * This class manages a pool of CANN buffers for a specific device.
  158. */
  159. struct ggml_cann_pool_buf_prio : public ggml_cann_pool {
  160. /**
  161. * @brief The maximum reuse margin for a buffer.
  162. */
  163. static const size_t max_reuse_margin = 1ull << 22; // 4MB
  164. /**
  165. * @brief The minimum free margin for a buffer.
  166. */
  167. static const size_t min_free_margin = 1ull << 20; // 1MB
  168. /**
  169. * @brief The alignment for buffer allocation.
  170. */
  171. static const size_t alignment = 128;
  172. /**
  173. * @brief The device ID associated with this buffer pool.
  174. */
  175. int device;
  176. /**
 * @brief Whether to disable cleanup of idle buffers during allocation.
  178. */
  179. bool disable_clean = false;
  180. /**
  181. * @brief Structure representing a CANN buffer.
  182. */
  183. struct ggml_cann_buffer {
  184. void* ptr = nullptr; ///< Pointer to the buffer.
  185. size_t size = 0; ///< Size of the buffer.
  186. std::chrono::steady_clock::time_point last_used; ///< Last used time.
  187. bool operator>(const ggml_cann_buffer& other) const {
  188. return size > other.size;
  189. }
  190. };
    /**
     * @brief Allocated buffers in the pool (pointer -> size), plus a min-heap
     * of currently free buffers ordered by size.
     */
    std::unordered_map<void*, size_t> buffer_pool;
    std::priority_queue<ggml_cann_buffer,
                        std::vector<ggml_cann_buffer>,
                        std::greater<>> free_buffers;
  198. /**
  199. * @brief Total size of all buffers in the pool.
  200. */
  201. size_t pool_size = 0;
  202. /**
  203. * @brief Constructor to initialize the buffer pool for a specific device.
  204. *
  205. * @param device The device ID to associate with this buffer pool.
  206. */
  207. explicit ggml_cann_pool_buf_prio(int device) : device(device) {
  208. disable_clean = parse_bool(get_env("GGML_CANN_DISABLE_BUF_POOL_CLEAN").value_or(""));
  209. }
  210. /**
  211. * @brief Destructor to free all buffers in the pool.
  212. */
  213. ~ggml_cann_pool_buf_prio() {
  214. ggml_cann_set_device(device);
  215. for (auto& [b_ptr, b_size] : buffer_pool) {
  216. aclrtFree(b_ptr);
  217. pool_size -= b_size;
  218. }
  219. buffer_pool.clear();
  220. GGML_ASSERT(pool_size == 0);
  221. }
  222. /**
  223. * @brief Allocate a buffer of the given size.
  224. *
  225. * @param size The size of the buffer to allocate.
  226. * @param actual_size A pointer to a variable to receive the actual size of
  227. * the allocated buffer.
  228. * @return A pointer to the allocated buffer.
  229. */
  230. void* alloc(size_t size, size_t* actual_size) override {
  231. size = GGML_PAD(size, alignment);
  232. if (size == 0) {
  233. size = alignment;
  234. }
  235. void* ptr = nullptr;
  236. auto now = std::chrono::steady_clock::now();
  237. std::vector<ggml_cann_buffer> free_buffers_rest;
  238. free_buffers_rest.reserve(free_buffers.size());
  239. while (!free_buffers.empty()) {
  240. auto b = free_buffers.top();
  241. free_buffers.pop();
  242. if (b.size >= size) {
  243. // reuse the buffer if the size is enough
  244. const size_t margin = b.size - size;
  245. if (margin <= max_reuse_margin) {
  246. *actual_size = b.size;
  247. ptr = b.ptr;
  248. #ifdef DEBUG_CANN_MALLOC
  249. GGML_LOG_INFO(
  250. "cann pool[%d]: reused %p, "
  251. "pool_size = %5u MB, "
  252. "size = %5u MB, "
  253. "margin = %5u MB\n",
  254. device, b.ptr,
  255. (uint32_t)(GGML_PAD(pool_size, 1048576) / 1048576),
  256. (uint32_t)(GGML_PAD(size, 1048576) / 1048576),
  257. (uint32_t)(GGML_PAD(margin, 1048576) / 1048576));
  258. #endif
  259. break;
  260. }
  261. }
  262. bool should_clean = !disable_clean &&
  263. b.size > min_free_margin &&
  264. std::chrono::duration_cast<std::chrono::milliseconds>(now - b.last_used).count() > 100;
  265. if (should_clean) {
                // release the buffer if it is large enough and has been idle long enough
  267. ACL_CHECK(aclrtFree(b.ptr));
  268. pool_size -= b.size;
  269. buffer_pool.erase(b.ptr);
  270. #ifdef DEBUG_CANN_MALLOC
  271. GGML_LOG_INFO(
  272. "cann pool[%d]: clean %p, "
  273. "pool_size = %5u MB, "
  274. "size = %5u MB\n",
  275. device, b.ptr,
  276. (uint32_t)(GGML_PAD(pool_size, 1048576) / 1048576),
  277. (uint32_t)(GGML_PAD(b.size, 1048576) / 1048576));
  278. #endif
  279. continue;
  280. }
  281. free_buffers_rest.push_back(b);
  282. }
  283. for (ggml_cann_buffer &b : free_buffers_rest) {
  284. free_buffers.push(std::move(b));
  285. }
  286. #ifdef DEBUG_CANN_MALLOC
  287. GGML_LOG_INFO("cann pool[%d] free pool_size = %5u MB\n\n", device, (uint32_t)(GGML_PAD(pool_size, 1048576) / 1048576));
  288. #endif
  289. if (ptr != nullptr) {
  290. return ptr;
  291. }
  292. // allocate a new buffer if no buffer can be reused
  293. ggml_cann_set_device(device);
  294. ACL_CHECK(aclrtMalloc(&ptr, size, ACL_MEM_MALLOC_HUGE_FIRST));
  295. *actual_size = size;
  296. pool_size += size;
  297. #ifdef DEBUG_CANN_MALLOC
  298. GGML_LOG_INFO(
  299. "cann pool[%d]: allocate %p, "
  300. "pool_size = %5u MB, "
  301. "size = %5u MB\n",
  302. device, ptr, (uint32_t)(GGML_PAD(pool_size, 1048576) / 1048576),
  303. (uint32_t)(GGML_PAD(size, 1048576) / 1048576));
  304. #endif
  305. buffer_pool.emplace(ptr, size);
  306. return ptr;
  307. }
  308. /**
  309. * @brief Free a buffer and return it to the pool.
  310. *
  311. * @param ptr Pointer to the buffer to free.
  312. * @param size Size of the buffer to free.
  313. */
  314. void free(void* ptr, size_t size) override {
  315. GGML_UNUSED(size);
  316. auto it = buffer_pool.find(ptr);
  317. if (it == buffer_pool.end()) {
  318. GGML_ABORT("cann pool[%d]: buffer %p not found in pool\n", device, ptr);
  319. }
  320. auto now = std::chrono::steady_clock::now();
  321. free_buffers.emplace(ggml_cann_buffer{ptr, it->second, now});
  322. #ifdef DEBUG_CANN_MALLOC
  323. GGML_LOG_INFO(
  324. "cann pool[%d]: return %p, "
  325. "pool_size = %5u MB\n",
  326. device, ptr,
  327. (uint32_t)(GGML_PAD(pool_size, 1048576) / 1048576));
  328. #endif
  329. }
  330. };
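// Illustrative sketch of how this pool is driven. A request is served from the
// smallest free buffer whose size is at least the request and whose surplus
// ("margin") is at most max_reuse_margin: e.g. a 10 MB request reuses a 12 MB
// buffer (margin 2 MB <= 4 MB) but not a 20 MB one (margin 10 MB). The calling
// convention below is an assumption based on the ggml_cann_pool interface used
// in this file; it is not code the backend executes.
#if 0
static void example_prio_pool_roundtrip(int device) {
    ggml_cann_pool_buf_prio pool(device);
    size_t actual_size = 0;
    void * ptr = pool.alloc(10 * 1024 * 1024, &actual_size); // at least 10 MB
    // ... use ptr as device scratch memory ...
    pool.free(ptr, actual_size); // buffer returns to the free min-heap
}
#endif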
  331. /**
 * @brief A pool of CANN buffers (segment buffer).
  333. *
  334. * This class manages a pool of CANN buffers for a specific device.
  335. */
  336. struct ggml_cann_pool_buf : public ggml_cann_pool {
  337. /**
  338. * @brief The maximum reuse margin for a buffer.
  339. */
  340. static const size_t max_reuse_margin = 1ull << 22; // 4MB
  341. /**
  342. * @brief The minimum free margin for a buffer.
  343. */
  344. static const size_t min_free_margin = 1ull << 20; // 1MB
  345. /**
  346. * @brief The alignment for buffer allocation.
  347. */
  348. static const size_t alignment = 128;
  349. /**
  350. * @brief The maximum number of buffers in the pool.
  351. */
  352. static const int MAX_BUFFERS = 256;
  353. /**
  354. * @brief The device ID associated with this buffer pool.
  355. */
  356. int device;
  357. /**
 * @brief Whether to disable cleanup of idle buffers during allocation.
  359. */
  360. bool disable_clean = false;
  361. /**
  362. * @brief Structure representing a CANN buffer.
  363. */
  364. struct ggml_cann_buffer {
  365. void* ptr = nullptr; ///< Pointer to the buffer memory.
  366. size_t size = 0; ///< Size of the buffer.
  367. bool used = false; ///< Whether the buffer is currently in use.
  368. std::chrono::steady_clock::time_point last_used; ///< Last used time.
  369. };
  370. /**
  371. * @brief Array of CANN buffers in the pool.
  372. */
  373. ggml_cann_buffer buffer_pool[MAX_BUFFERS] = {};
  374. /**
  375. * @brief Total size of all buffers in the pool.
  376. */
  377. size_t pool_size = 0;
  378. /**
  379. * @brief Constructor to initialize the buffer pool for a specific device.
  380. *
  381. * @param device The device ID to associate with this buffer pool.
  382. */
  383. explicit ggml_cann_pool_buf(int device) : device(device) {
  384. disable_clean = parse_bool(get_env("GGML_CANN_DISABLE_BUF_POOL_CLEAN").value_or(""));
  385. }
  386. /**
  387. * @brief Destructor to free all buffers in the pool.
  388. */
  389. ~ggml_cann_pool_buf() {
  390. ggml_cann_set_device(device);
  391. for (int i = 0; i < MAX_BUFFERS; ++i) {
  392. ggml_cann_buffer& b = buffer_pool[i];
  393. if (b.ptr != nullptr) {
  394. aclrtFree(b.ptr);
  395. pool_size -= b.size;
  396. }
  397. }
  398. GGML_ASSERT(pool_size == 0);
  399. }
  400. /**
  401. * @brief Allocate a buffer of the given size.
  402. *
  403. * @param size The size of the buffer to allocate.
  404. * @param actual_size A pointer to a variable to receive the actual size of
  405. * the allocated buffer.
  406. * @return A pointer to the allocated buffer.
  407. */
  408. void* alloc(size_t size, size_t* actual_size) override {
  409. size = GGML_PAD(size, alignment);
  410. if (size == 0) {
  411. size = alignment;
  412. }
  413. void* ptr = nullptr;
  414. auto now = std::chrono::steady_clock::now();
  415. int i = 0;
  416. for (; i < MAX_BUFFERS; ++i) {
  417. ggml_cann_buffer& b = buffer_pool[i];
  418. if (b.ptr == nullptr) {
  419. break;
  420. }
  421. if (b.used) {
  422. continue;
  423. }
  424. if (b.size >= size) {
  425. // reuse the buffer if the size is enough
  426. const size_t margin = b.size - size;
  427. if (margin <= max_reuse_margin) {
  428. *actual_size = b.size;
  429. b.used = true;
  430. ptr = b.ptr;
  431. #ifdef DEBUG_CANN_MALLOC
  432. GGML_LOG_INFO(
  433. "cann pool[%d]: reused %p, "
  434. "pool_size = %5u MB, "
  435. "size = %5u MB, "
  436. "margin = %5u MB\n",
  437. device, b.ptr,
  438. (uint32_t)(GGML_PAD(pool_size, 1048576) / 1048576),
  439. (uint32_t)(GGML_PAD(size, 1048576) / 1048576),
  440. (uint32_t)(GGML_PAD(margin, 1048576) / 1048576));
  441. #endif
  442. break;
  443. }
  444. }
  445. bool should_clean = !disable_clean &&
  446. b.size > min_free_margin &&
  447. std::chrono::duration_cast<std::chrono::milliseconds>(now - b.last_used).count() > 100;
  448. if (should_clean) {
                // release the buffer if it is large enough and has been idle long enough
  450. ACL_CHECK(aclrtFree(b.ptr));
  451. pool_size -= b.size;
  452. #ifdef DEBUG_CANN_MALLOC
  453. GGML_LOG_INFO(
  454. "cann pool[%d]: clean %p, "
  455. "pool_size = %5u MB, "
  456. "size = %5u MB\n",
  457. device, b.ptr,
  458. (uint32_t)(GGML_PAD(pool_size, 1048576) / 1048576),
  459. (uint32_t)(GGML_PAD(b.size, 1048576) / 1048576));
  460. #endif
  461. b.ptr = nullptr;
  462. }
  463. }
  464. if (ptr != nullptr) {
  465. return ptr;
  466. }
  467. if (i < MAX_BUFFERS) {
  468. // allocate a new buffer if no buffer can be reused
  469. ggml_cann_buffer& b = buffer_pool[i];
  470. ggml_cann_set_device(device);
  471. ACL_CHECK(aclrtMalloc(&b.ptr, size, ACL_MEM_MALLOC_HUGE_FIRST));
  472. pool_size += size;
  473. *actual_size = size;
  474. b.size = size;
  475. b.used = true;
  476. if (i >= MAX_BUFFERS - 8) {
  477. GGML_LOG_WARN("cann pool[%d]: slots almost full\n", device);
  478. }
  479. #ifdef DEBUG_CANN_MALLOC
  480. GGML_LOG_INFO(
  481. "cann pool[%d]: allocate %p, "
  482. "pool_size = %5u MB, "
  483. "size = %5u MB\n",
  484. device, b.ptr,
  485. (uint32_t)(GGML_PAD(pool_size, 1048576) / 1048576),
  486. (uint32_t)(GGML_PAD(b.size, 1048576) / 1048576));
  487. #endif
  488. return b.ptr;
  489. }
  490. GGML_ABORT("cann pool[%d]: slots full\n", device);
  491. }
  492. /**
  493. * @brief Free a buffer and return it to the pool.
  494. *
  495. * @param ptr Pointer to the buffer to free.
  496. * @param size Size of the buffer to free.
  497. */
  498. void free(void* ptr, size_t size) override {
  499. GGML_UNUSED(size);
  500. for (int i = 0; i < MAX_BUFFERS; ++i) {
  501. ggml_cann_buffer& b = buffer_pool[i];
  502. if (b.ptr != ptr) {
  503. continue;
  504. }
  505. b.used = false;
  506. b.last_used = std::chrono::steady_clock::now();
  507. #ifdef DEBUG_CANN_MALLOC
  508. GGML_LOG_INFO(
  509. "cann pool[%d]: return %p, "
  510. "pool_size = %5u MB\n",
  511. device, b.ptr,
  512. (uint32_t)(GGML_PAD(pool_size, 1048576) / 1048576));
  513. #endif
  514. return;
  515. }
        GGML_ABORT("cann pool[%d]: buffer %p not found in pool\n", device, ptr);
  517. }
  518. };
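// Illustrative restatement of the cleanup rule shared by both segment pools:
// a free buffer is released back to the driver only when cleanup is enabled,
// the buffer is larger than min_free_margin (1 MB) and it has sat unused for
// more than 100 ms. The helper below is a sketch of that predicate, not part
// of the pool API.
#if 0
static bool example_should_clean(bool disable_clean, size_t buf_size,
                                 std::chrono::steady_clock::time_point last_used) {
    auto idle_ms = std::chrono::duration_cast<std::chrono::milliseconds>(
                       std::chrono::steady_clock::now() - last_used).count();
    return !disable_clean && buf_size > (1ull << 20) && idle_ms > 100;
}
#endif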
  519. /**
  520. * @brief A pool of CANN buffers with virtual memory.
  521. *
  522. * This class manages a pool of CANN buffers with virtual memory for a specific
  523. * device.
  524. */
  525. struct ggml_cann_pool_vmm : public ggml_cann_pool {
  526. /**
 * @brief The maximum size of the virtual memory pool, initialized from the
 * device's reported memory size.
  528. */
  529. size_t max_size;
  530. /**
  531. * @brief The device ID associated with this buffer pool.
  532. */
  533. int device;
  534. /**
  535. * @brief Pointer to the start of the virtual memory pool.
  536. */
  537. void* pool_addr = 0;
  538. /**
  539. * @brief Amount of virtual memory used in the pool.
  540. */
  541. size_t pool_used = 0;
  542. /**
  543. * @brief Total size of the virtual memory pool.
  544. */
  545. size_t pool_size = 0;
  546. /**
  547. * @brief Allocation granularity for the virtual memory pool.
  548. */
  549. size_t granularity;
  550. /**
  551. * @brief Handles for the physical memory allocated.
  552. */
  553. std::vector<aclrtDrvMemHandle> handles;
  554. /**
  555. * @brief Offsets for the mapped memory regions.
  556. */
  557. std::vector<void*> map_offsets;
  558. /**
  559. * @brief Constructor to initialize the buffer pool with virtual memory for
  560. * a specific device.
  561. *
  562. * @param device The device ID to associate with this buffer pool.
  563. */
  564. explicit ggml_cann_pool_vmm(int device)
  565. : device(device) {
  566. auto dev = ggml_cann_info().devices[device];
  567. granularity = dev.vmm_granularity;
  568. max_size = dev.total_vram;
  569. }
  570. /**
  571. * @brief Destructor to free all buffers in the virtual memory pool.
  572. */
  573. ~ggml_cann_pool_vmm() {
  574. if (pool_addr != 0) {
  575. for (auto& offset : map_offsets) {
  576. ACL_CHECK(aclrtUnmapMem(offset));
  577. }
  578. for (auto& handle : handles) {
  579. ACL_CHECK(aclrtFreePhysical(handle));
  580. }
  581. ACL_CHECK(aclrtReleaseMemAddress(pool_addr));
  582. }
  583. }
  584. /**
  585. * @brief Allocate a buffer of the given size in the virtual memory pool.
  586. *
  587. * @param size The size of the buffer to allocate.
  588. * @param actual_size A pointer to a variable to receive the actual size of
  589. * the allocated buffer.
  590. * @return A pointer to the allocated buffer.
  591. */
  592. void* alloc(size_t size, size_t* actual_size) override {
  593. // round up the allocation size to the alignment to ensure that all
  594. // allocations are aligned for all data types
  595. const size_t alignment = 128;
  596. size = GGML_PAD(size, alignment);
  597. if (size == 0) {
  598. size = alignment;
  599. }
  600. size_t avail = pool_size - pool_used;
  601. if (size > avail) {
  602. // round up to the next multiple of the granularity
  603. size_t reserve_size = size - avail;
  604. reserve_size = GGML_PAD(reserve_size, granularity);
  605. GGML_ASSERT(pool_size + reserve_size <= max_size);
  606. // allocate more physical memory
  607. aclrtPhysicalMemProp prop = {};
  608. prop.handleType = ACL_MEM_HANDLE_TYPE_NONE;
  609. prop.allocationType = ACL_MEM_ALLOCATION_TYPE_PINNED;
  610. prop.memAttr = ACL_HBM_MEM_HUGE;
  611. prop.location.type = ACL_MEM_LOCATION_TYPE_DEVICE;
  612. prop.location.id = device;
  613. prop.reserve = 0;
  614. aclrtDrvMemHandle handle;
  615. ACL_CHECK(aclrtMallocPhysical(&handle, reserve_size, &prop, 0));
  616. // reserve virtual address space (if not already reserved)
  617. if (pool_addr == 0) {
  618. ACL_CHECK(aclrtReserveMemAddress(
  619. &pool_addr, max_size, 0, NULL, 1));
  620. }
  621. // map at the end of the pool
  622. ACL_CHECK(aclrtMapMem((char*)pool_addr + pool_size, reserve_size, 0,
  623. handle, 0));
  624. handles.push_back(handle);
  625. map_offsets.push_back((char*)pool_addr + pool_size);
  626. // add to the pool
  627. pool_size += reserve_size;
  628. #ifdef DEBUG_CANN_MALLOC
  629. GGML_LOG_INFO("cann pool[%d]: size increased to %llu MB (reserved %llu MB)\n",
  630. device, (unsigned long long) (pool_size/1024/1024),
  631. (unsigned long long) (reserve_size/1024/1024));
  632. #endif
  633. }
  634. GGML_ASSERT(pool_addr != 0);
  635. void* ptr = (void*)((char*)pool_addr + pool_used);
  636. *actual_size = size;
  637. pool_used += size;
  638. #ifdef DEBUG_CANN_MALLOC
  639. GGML_LOG_INFO("cann pool[%d]: allocated %llu bytes at %llx\n", device,
  640. (unsigned long long)size, (unsigned long long)ptr);
  641. #endif
  642. return ptr;
  643. }
  644. /**
  645. * @brief Free a buffer and return it to the virtual memory pool.
  646. *
  647. * @param ptr Pointer to the buffer to free.
  648. * @param size Size of the buffer to free.
  649. */
  650. void free(void* ptr, size_t size) override {
  651. #ifdef DEBUG_CANN_MALLOC
  652. GGML_LOG_INFO("cann pool[%d]: freed %llu bytes at %llx\n", device,
  653. (unsigned long long)size, (unsigned long long)ptr);
  654. #endif
  655. pool_used -= size;
  656. // all deallocations must be in reverse order of the allocations
  657. GGML_ASSERT(ptr == (void*)((char*)pool_addr + pool_used));
  658. }
  659. };
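// Illustrative arithmetic for the VMM pool growth above: the shortfall between
// the request and the unused mapped tail is rounded up to the device's
// allocation granularity before new physical memory is mapped. For example,
// with a 2 MB granularity and a 5 MB shortfall, 6 MB is mapped. This sketch
// only mirrors the GGML_PAD computation used in alloc(); it makes no ACL calls.
#if 0
static size_t example_vmm_reserve_size(size_t request, size_t pool_size,
                                       size_t pool_used, size_t granularity) {
    size_t avail = pool_size - pool_used;
    if (request <= avail) {
        return 0; // nothing new to map
    }
    return GGML_PAD(request - avail, granularity);
}
#endif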
  660. /**
  661. * @brief Create a new CANN pool for a specific device.
  662. *
  663. * Factory method to create a new CANN pool object based on the device type.
  664. *
  665. * @param device The device ID for which to create the pool.
  666. * @return A unique pointer to the created CANN pool.
  667. */
  668. std::unique_ptr<ggml_cann_pool> ggml_backend_cann_context::new_pool_for_device(
  669. int device) {
  670. std::string mem_pool_type = get_env("GGML_CANN_MEM_POOL").value_or("");
  671. if (mem_pool_type == "prio") {
  672. GGML_LOG_INFO("%s: device %d use buffer pool with priority queue\n", __func__, device);
  673. return std::unique_ptr<ggml_cann_pool>(new ggml_cann_pool_buf_prio(device));
  674. }
  675. if (ggml_cann_info().devices[device].vmm && mem_pool_type != "leg") {
  676. GGML_LOG_INFO("%s: device %d use vmm pool\n", __func__, device);
  677. return std::unique_ptr<ggml_cann_pool>(new ggml_cann_pool_vmm(device));
  678. }
  679. GGML_LOG_INFO("%s: device %d use buffer pool\n", __func__, device);
  680. return std::unique_ptr<ggml_cann_pool>(new ggml_cann_pool_buf(device));
  681. }
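// Illustrative sketch of steering the pool choice above from the environment:
// "prio" selects the priority-queue pool, "leg" forces the legacy segment
// pool, and any other value falls through to the VMM pool when the device
// supports it. The setenv() call is only an example of configuring the process
// before the backend is initialized (POSIX-only); this file does not do it.
#if 0
static void example_select_mem_pool() {
    setenv("GGML_CANN_MEM_POOL", "prio", /*overwrite=*/1);
    // subsequent calls to new_pool_for_device() will now log and use the
    // priority-queue buffer pool
}
#endif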
  682. // cann buffer
  683. /**
  684. * @brief Context for managing a CANN buffer associated with a specific device.
  685. *
  686. * This structure holds information about a CANN buffer, including the device
  687. * ID, device pointer, and a name derived from GGML_CANN_NAME and the device ID.
  688. */
  689. struct ggml_backend_cann_buffer_context {
  690. int32_t device; ///< The device ID associated with this buffer context.
  691. void* dev_ptr =
  692. nullptr; ///< Pointer to the device memory allocated for the buffer.
  693. /**
  694. * @brief Constructor to initialize the CANN buffer context.
  695. *
  696. * @param device The device ID associated with this buffer context.
  697. * @param dev_ptr Pointer to the device memory allocated for the buffer.
  698. */
  699. ggml_backend_cann_buffer_context(int32_t device, void* dev_ptr)
  700. : device(device),
  701. dev_ptr(dev_ptr) {}
  702. /**
  703. * @brief Destructor to free the device memory allocated for the buffer.
  704. */
  705. ~ggml_backend_cann_buffer_context() { ACL_CHECK(aclrtFree(dev_ptr)); }
  706. };
  707. /**
  708. * @brief Check if a buffer is a CANN buffer.
  709. *
 * This function checks if a given buffer is a CANN buffer by checking whether
 * its buffer type is a CANN buffer type.
  712. *
  713. * @param buffer The buffer to check.
  714. * @return true if the buffer is a CANN buffer, false otherwise.
  715. */
  716. static bool ggml_backend_buft_is_cann(ggml_backend_buffer_type_t buft);
  717. static bool ggml_backend_buffer_is_cann(
  718. ggml_backend_buffer_t buffer) {
  719. return ggml_backend_buft_is_cann(buffer->buft);
  720. }
  721. /**
  722. * @brief Free resources associated with a CANN buffer.
  723. *
  724. * This function frees the resources associated with a CANN buffer, including
  725. * its context.
  726. *
  727. * @param buffer The CANN buffer to free.
  728. */
  729. static void ggml_backend_cann_buffer_free_buffer(
  730. ggml_backend_buffer_t buffer) {
  731. ggml_backend_cann_buffer_context* ctx =
  732. (ggml_backend_cann_buffer_context*)buffer->context;
  733. delete ctx;
  734. }
  735. /**
  736. * @brief Retrieve the base pointer of a CANN buffer.
  737. *
  738. * This function returns the base pointer of a CANN buffer, which points to the
  739. * device memory allocated for the buffer.
  740. *
  741. * @param buffer The CANN buffer whose base pointer is to be retrieved.
  742. * @return A pointer to the base of the device memory allocated for the buffer.
  743. */
  744. static void* ggml_backend_cann_buffer_get_base(
  745. ggml_backend_buffer_t buffer) {
  746. ggml_backend_cann_buffer_context* ctx =
  747. (ggml_backend_cann_buffer_context*)buffer->context;
  748. return ctx->dev_ptr;
  749. }
  750. /**
  751. * @brief Transform quantized Q4.0 tensor data into a format suitable for CANN
  752. * processing.
  753. *
  754. * This function transforms quantized Q4.0 tensor data into a format suitable
  755. * for CANN processing. It extracts quantization values and scales from the
  756. * source data and prepares them in a format expected by CANN operations.
  757. *
  758. * @param tensor Pointer to the tensor information.
  759. * @param src Pointer to the source data in Q4.0 format.
  760. * @param dst Pointer to the destination buffer where transformed data will be
  761. * stored.
  762. */
  763. static void ggml_backend_cann_transform_q4_0(ggml_tensor* tensor,
  764. const void* src,
  765. void* dst) {
  766. int64_t n_elems = ggml_nelements(tensor);
  767. int64_t groups = n_elems / QK4_0;
  768. size_t quant_bytes = n_elems * sizeof(uint8_t) / 2;
  769. uint8_t* quant_offset = (uint8_t*)dst;
  770. uint16_t* scale_offset = (uint16_t*)((char*)dst + quant_bytes);
  771. for (int i = 0; i < groups; i++) {
  772. const block_q4_0* group =
  773. (const block_q4_0*)((const char*)src + i * sizeof(block_q4_0));
  774. *scale_offset = group->d;
  775. scale_offset++;
  776. // 0-15
  777. for (int j = 0; j < QK4_0 / 2; j += 2) {
  778. (*quant_offset) = (group->qs[j] & 0x0F);
  779. (*quant_offset) |= ((group->qs[j + 1] << 4));
  780. quant_offset++;
  781. }
  782. // 16-31
  783. for (int j = 0; j < QK4_0 / 2; j += 2) {
  784. (*quant_offset) = (group->qs[j] >> 4);
  785. (*quant_offset) |= (group->qs[j + 1] & 0xF0);
  786. quant_offset++;
  787. }
  788. }
  789. // put (uint4b_t -8) into int4b_t
  790. for (quant_offset = (uint8_t*)dst;
  791. quant_offset < (uint8_t*)dst + quant_bytes; quant_offset++) {
  792. (*quant_offset) ^= 0x88;
  793. }
  794. }
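// Worked example of the repacking above, for one destination byte: with
// qs[j] = 0x21 and qs[j+1] = 0x43, the first pass stores the low nibbles as
// 0x31 and the second pass stores the high nibbles as 0x42. The final XOR with
// 0x88 turns each offset-binary nibble v (which encodes v - 8) into its
// two's-complement int4 value, e.g. v = 5 encodes -3 and 5 ^ 8 = 0xD, which is
// -3 as a signed 4-bit value. The snippet below is a sketch for one nibble.
#if 0
static uint8_t example_uint4_to_int4(uint8_t v) {   // v in [0, 15], encodes v - 8
    return (v ^ 0x8) & 0x0F;                        // same bit trick as the 0x88 XOR
}
#endif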
  795. /**
  796. * @brief Transform CANN processed data back into quantized Q4.0 format.
  797. *
  798. * This function transforms CANN processed data back into quantized Q4.0 format.
  799. * It reverses the transformation performed by
  800. * ggml_backend_cann_transform_q4_0(), converting the data back into its
  801. * original quantized form.
  802. *
  803. * @param tensor Pointer to the tensor information.
  804. * @param src Pointer to the source buffer containing transformed data.
  805. * @param dst Pointer to the destination buffer where the Q4.0 formatted data
  806. * will be stored.
  807. */
  808. static void ggml_backend_cann_transform_back_q4_0(
  809. const ggml_tensor* tensor, void* src, void* dst) {
  810. int64_t n_elems = ggml_nelements(tensor);
  811. int64_t groups = n_elems / QK4_0;
  812. size_t quant_bytes = n_elems * sizeof(uint8_t) / 2;
  813. uint8_t* quant_offset = (uint8_t*)src;
  814. uint16_t* scale_offset = (uint16_t*)((char*)src + quant_bytes);
  815. for (; quant_offset < (uint8_t*)src + quant_bytes; quant_offset++) {
  816. (*quant_offset) ^= 0x88;
  817. }
  818. quant_offset = (uint8_t*)src;
  819. for (int i = 0; i < groups; i++) {
  820. block_q4_0* group = (block_q4_0*)((char*)dst + i * sizeof(block_q4_0));
  821. group->d = *scale_offset;
  822. scale_offset++;
  823. // 0-15
  824. for (int j = 0; j < QK4_0 / 2; j += 2) {
  825. group->qs[j] = ((*quant_offset) & 0x0F);
  826. group->qs[j + 1] = ((*quant_offset) >> 4);
  827. quant_offset++;
  828. }
  829. // 16-31
  830. for (int j = 0; j < QK4_0 / 2; j += 2) {
  831. group->qs[j] |= ((*quant_offset) << 4);
  832. group->qs[j + 1] |= ((*quant_offset) & 0xF0);
  833. quant_offset++;
  834. }
  835. }
  836. }
  837. /**
  838. * @brief Transform quantized Q8.0 tensor data into a format suitable for CANN
  839. * processing.
  840. *
  841. * This function transforms quantized Q8.0 tensor data into a format suitable
  842. * for CANN processing. It extracts quantization values and scales from the
  843. * source data and prepares them in a format expected by CANN operations.
  844. *
  845. * @param tensor Pointer to the tensor information.
  846. * @param src Pointer to the source data in Q8.0 format.
  847. * @param dst Pointer to the destination buffer where transformed data will be
  848. * stored.
  849. */
  850. static void ggml_backend_cann_transform_q8_0(ggml_tensor* tensor,
  851. const void* src,
  852. void* dst) {
  853. int64_t n_elems = ggml_nelements(tensor);
  854. int64_t groups = n_elems / QK8_0;
  855. size_t quant_bytes = n_elems * sizeof(uint8_t);
  856. uint8_t* quant_offset = (uint8_t*)dst;
  857. uint16_t* scale_offset = (uint16_t*)((char*)dst + quant_bytes);
  858. for (int i = 0; i < groups; i++) {
  859. const block_q8_0* group =
  860. (const block_q8_0*)((const char*)src + i * sizeof(block_q8_0));
  861. *scale_offset = group->d;
  862. scale_offset++;
  863. size_t group_quant_size = QK8_0 * sizeof(uint8_t);
  864. memcpy(quant_offset, group->qs, group_quant_size);
  865. quant_offset += group_quant_size;
  866. }
  867. }
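// Illustrative view of the transformed Q8_0 layout produced above: all int8
// quants come first (one byte per element), followed by one fp16 scale per
// group of QK8_0 elements. The offset helpers below are a sketch of that
// layout only; they are not used elsewhere in this file.
#if 0
static size_t example_q8_0_scale_offset(int64_t n_elems) {
    return (size_t) n_elems * sizeof(uint8_t);      // scales start after the quants
}
static size_t example_q8_0_transformed_size(int64_t n_elems) {
    return (size_t) n_elems * sizeof(uint8_t)
         + (size_t) (n_elems / QK8_0) * sizeof(uint16_t); // one fp16 scale per group
}
#endif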
  868. /**
  869. * @brief Transform CANN processed data back into quantized Q8.0 format.
  870. *
  871. * This function transforms CANN processed data back into quantized Q8.0 format.
  872. * It reverses the transformation performed by
  873. * ggml_backend_cann_transform_q8_0(), converting the data back into its
  874. * original quantized form.
  875. *
  876. * @param tensor Pointer to the tensor information.
  877. * @param src Pointer to the source buffer containing transformed data.
  878. * @param dst Pointer to the destination buffer where the Q8.0 formatted data
  879. * will be stored.
  880. */
  881. static void ggml_backend_cann_transform_back_q8_0(
  882. const ggml_tensor* tensor, const void* src, void* dst) {
  883. int64_t n_elems = ggml_nelements(tensor);
  884. int64_t groups = n_elems / QK8_0;
  885. size_t quant_bytes = n_elems * sizeof(uint8_t);
  886. const uint8_t* quant_offset = (const uint8_t*)src;
  887. const uint16_t* scale_offset =
  888. (const uint16_t*)((const char*)src + quant_bytes);
  889. for (int i = 0; i < groups; i++) {
  890. block_q8_0* group = (block_q8_0*)((char*)dst + i * sizeof(block_q8_0));
  891. group->d = *scale_offset;
  892. scale_offset++;
  893. size_t group_quant_size = QK8_0 * sizeof(uint8_t);
  894. memcpy(group->qs, quant_offset, group_quant_size);
  895. quant_offset += group_quant_size;
  896. }
  897. }
  898. /**
  899. * @brief Transform tensor data based on its type for CANN processing.
  900. *
  901. * This function transforms tensor data based on its quantization type for CANN
  902. * processing. It dispatches the transformation based on the tensor's type to
  903. * specialized functions handling Q4.0 and Q8.0 formats.
  904. *
  905. * @param tensor Pointer to the tensor information.
  906. * @param src Pointer to the source data to be transformed.
  907. * @param dst Pointer to the destination buffer where transformed data will be
  908. * stored.
  909. */
  910. static void ggml_backend_cann_transform(ggml_tensor* tensor,
  911. const void* src, void* dst) {
  912. switch (tensor->type) {
  913. case GGML_TYPE_Q4_0:
  914. ggml_backend_cann_transform_q4_0(tensor, src, dst);
  915. break;
  916. case GGML_TYPE_Q8_0:
  917. ggml_backend_cann_transform_q8_0(tensor, src, dst);
  918. break;
  919. default:
  920. break;
  921. }
  922. }
  923. /**
  924. * @brief Transform CANN processed data back into tensor data based on its type.
  925. *
  926. * This function transforms CANN processed data back into tensor data based on
  927. * its quantization type for Q4.0 and Q8.0 formats. It dispatches the
  928. * transformation based on the tensor's type to specialized functions.
  929. *
  930. * @param tensor Pointer to the tensor information.
  931. * @param src Pointer to the source data containing CANN processed data.
  932. * @param dst Pointer to the destination buffer where transformed tensor data
  933. * will be stored.
  934. */
  935. static void ggml_backend_cann_transform_back(
  936. const ggml_tensor* tensor, void* src, void* dst) {
  937. switch (tensor->type) {
  938. case GGML_TYPE_Q4_0:
  939. ggml_backend_cann_transform_back_q4_0(tensor, src, dst);
  940. break;
  941. case GGML_TYPE_Q8_0:
  942. ggml_backend_cann_transform_back_q8_0(tensor, src, dst);
  943. break;
  944. default:
  945. break;
  946. }
  947. }
  948. /**
  949. * @brief Check if transformation is needed for a given tensor type.
  950. *
  951. * This function checks if transformation is needed for a given tensor type
  952. * to prepare data for CANN processing.
  953. *
  954. * @param type The tensor type to check.
  955. * @return true if transformation is needed, false otherwise.
  956. */
  957. static bool need_transform(ggml_type type) {
  958. switch (type) {
  959. case GGML_TYPE_Q4_0:
  960. case GGML_TYPE_Q8_0:
  961. return true;
  962. default:
  963. return false;
  964. }
  965. }
  966. /**
  967. * @brief Initialize a tensor using data from a CANN buffer.
  968. *
  969. * This function initializes a tensor using data from a CANN buffer.
  970. * It handles special cases such as views and quantization.
  971. *
  972. * @param buffer The CANN buffer from which to initialize the tensor.
  973. * @param tensor Pointer to the tensor to be initialized.
  974. */
  975. static enum ggml_status ggml_backend_cann_buffer_init_tensor(
  976. ggml_backend_buffer_t buffer, ggml_tensor* tensor) {
  977. if (tensor->view_src != NULL && tensor->view_offs == 0) {
  978. GGML_ASSERT(tensor->view_src->buffer->buft == buffer->buft);
  979. return GGML_STATUS_SUCCESS;
  980. }
    // TODO: the CANN backend doesn't support quantized tensors yet; keep this
    // code here for now.
  983. if (ggml_is_quantized(tensor->type)) {
  984. // Initialize padding to 0 to avoid possible NaN values
  985. size_t original_size = ggml_nbytes(tensor);
  986. size_t padded_size =
  987. ggml_backend_buft_get_alloc_size(buffer->buft, tensor);
  988. if (padded_size > original_size && tensor->view_src == nullptr) {
  989. size_t memset_size = padded_size - original_size;
  990. ACL_CHECK(aclrtMemset((char*)tensor->data + original_size,
  991. memset_size, 0, memset_size));
  992. }
  993. }
  994. return GGML_STATUS_SUCCESS;
  995. }
  996. // ND to NZ Workspace Cache Management. Thread-safety: Not guaranteed
  997. namespace {
  998. void* g_nz_workspace = nullptr;
  999. size_t g_nz_workspace_allocated = 0;
  1000. void release_nz_workspace() {
  1001. if (g_nz_workspace) {
  1002. aclrtFree(g_nz_workspace);
  1003. g_nz_workspace = nullptr;
  1004. g_nz_workspace_allocated = 0;
  1005. }
  1006. }
  1007. void relloc_nz_workspace(size_t new_size) {
  1008. if (new_size > g_nz_workspace_allocated) {
  1009. if (g_nz_workspace) {
  1010. aclrtFree(g_nz_workspace);
  1011. g_nz_workspace = nullptr;
  1012. }
  1013. ACL_CHECK(aclrtMalloc(&g_nz_workspace, new_size, ACL_MEM_MALLOC_HUGE_FIRST));
  1014. g_nz_workspace_allocated = new_size;
  1015. }
  1016. }
  1017. }
  1018. /**
  1019. * @brief Convert tensor weights to NZ format using Ascend CANN API.
  1020. *
  1021. * This function creates a transposed tensor descriptor and performs the
  1022. * TransMatmulWeight operation. Converting tensor formats can significantly
  1023. * improve performance on certain hardware.
  1024. *
  1025. * @param tensor Pointer to the input ggml_tensor containing the weights.
  1026. * @param data Pointer to the raw data buffer for the tensor weights.
  1027. * @param offset Byte offset within the tensor data buffer where weights start.
  1028. *
  1029. * @note The workspace buffer used in this function is managed globally and reused
  1030. * across calls. This reduces overhead from repeated memory allocation and deallocation.
  1031. */
  1032. static void weight_format_to_nz(ggml_tensor *tensor, const void *data, size_t offset) {
  1033. aclTensor* weightTransposed = ggml_cann_create_tensor(tensor, tensor->ne,
  1034. tensor->nb, 2, ACL_FORMAT_ND, offset);
  1035. uint64_t workspaceSize = 0;
  1036. aclOpExecutor *executor;
  1037. // TransMatmulWeight
  1038. ACL_CHECK(aclnnTransMatmulWeightGetWorkspaceSize(weightTransposed,
  1039. &workspaceSize, &executor));
  1040. // Avoid frequent malloc/free of the workspace.
  1041. relloc_nz_workspace(workspaceSize);
  1042. ACL_CHECK(aclnnTransMatmulWeight(g_nz_workspace, workspaceSize, executor, nullptr));
  1043. ACL_CHECK(aclDestroyTensor(weightTransposed));
  1044. }
// TODO: need to handle tensors which have padding.
  1046. /**
  1047. * @brief Set tensor data in a CANN buffer.
  1048. *
  1049. * This function sets tensor data in a CANN buffer, handling transformations
  1050. * if needed based on the tensor's type.
  1051. *
  1052. * @param buffer The CANN buffer where the tensor data will be set.
  1053. * @param tensor Pointer to the tensor whose data will be set.
  1054. * @param data Pointer to the source data to be copied into the tensor.
  1055. * @param offset Offset in the source data from where to start copying.
  1056. * @param size Size of the data to be copied, in bytes.
  1057. */
  1058. static void ggml_backend_cann_buffer_set_tensor(
  1059. ggml_backend_buffer_t buffer, ggml_tensor *tensor, const void *data,
  1060. size_t offset, size_t size) {
  1061. ggml_backend_cann_buffer_context *ctx =
  1062. (ggml_backend_cann_buffer_context *)buffer->context;
  1063. ggml_cann_set_device(ctx->device);
    // TODO: refer to cann(#6017); it uses the thread's default stream.
    // For ACL, synchronous functions use this default stream.
    // Why aclrtSynchronizeDevice?
  1067. // Only check env once.
  1068. static bool weight_to_nz = parse_bool(get_env("GGML_CANN_WEIGHT_NZ").value_or(""));
  1069. if (!need_transform(tensor->type)) {
  1070. ACL_CHECK(aclrtMemcpy((char *)tensor->data + offset, size, data, size,
  1071. ACL_MEMCPY_HOST_TO_DEVICE));
  1072. if (weight_to_nz && is_matmul_weight((const ggml_tensor*)tensor)) {
  1073. GGML_ASSERT(tensor->ne[2] == 1);
  1074. GGML_ASSERT(tensor->ne[3] == 1);
  1075. weight_format_to_nz(tensor, data, offset);
  1076. }
  1077. } else {
  1078. void *transform_buffer = malloc(size);
  1079. ggml_backend_cann_transform(tensor, data, transform_buffer);
  1080. ACL_CHECK(aclrtMemcpy((char *)tensor->data + offset, size,
  1081. transform_buffer, size,
  1082. ACL_MEMCPY_HOST_TO_DEVICE));
  1083. free(transform_buffer);
  1084. }
  1085. }
  1086. /**
  1087. * @brief Get tensor data from a CANN buffer.
  1088. *
  1089. * This function retrieves tensor data from a CANN buffer, handling
  1090. * transformations if needed based on the tensor's type.
  1091. *
  1092. * @param buffer The CANN buffer from which to retrieve tensor data.
  1093. * @param tensor Pointer to the tensor whose data will be retrieved.
  1094. * @param data Pointer to the destination buffer where the tensor data will be
  1095. * copied.
  1096. * @param offset Offset in the destination buffer where to start copying.
  1097. * @param size Size of the data to be copied, in bytes.
  1098. */
  1099. static void ggml_backend_cann_buffer_get_tensor(
  1100. ggml_backend_buffer_t buffer, const ggml_tensor* tensor, void* data,
  1101. size_t offset, size_t size) {
  1102. ggml_backend_cann_buffer_context* ctx =
  1103. (ggml_backend_cann_buffer_context*)buffer->context;
  1104. ggml_cann_set_device(ctx->device);
  1105. if (!need_transform(tensor->type)) {
  1106. ACL_CHECK(aclrtMemcpy(data, size, (char*)tensor->data + offset, size,
  1107. ACL_MEMCPY_DEVICE_TO_HOST));
  1108. } else {
  1109. void* transform_buffer = malloc(size);
  1110. ACL_CHECK(aclrtMemcpy(transform_buffer, size,
  1111. (char*)tensor->data + offset, size,
  1112. ACL_MEMCPY_DEVICE_TO_HOST));
  1113. ggml_backend_cann_transform_back(tensor, transform_buffer, data);
  1114. free(transform_buffer);
  1115. }
  1116. }
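// Illustrative sketch of how the two callbacks above are typically reached:
// user code goes through the generic ggml-backend API, which dispatches to
// ggml_backend_cann_buffer_set_tensor / _get_tensor for tensors allocated in a
// CANN buffer. The tensor is assumed to be non-quantized and already allocated
// in such a buffer; the helper is not part of this file's API.
#if 0
static void example_host_device_roundtrip(ggml_tensor * tensor,
                                          void * host_src, void * host_dst) {
    const size_t nbytes = ggml_nbytes(tensor);
    ggml_backend_tensor_set(tensor, host_src, 0, nbytes); // host -> device copy
    ggml_backend_tensor_get(tensor, host_dst, 0, nbytes); // device -> host copy
}
#endif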
  1117. /**
  1118. * @brief Copy tensor data between CANN buffers if possible.
  1119. *
  1120. * This function copies tensor data between CANN buffers if the source and
  1121. * destination buffers are CANN buffers and they meet the necessary conditions
  1122. * (same device or devices can access each other).
  1123. *
  1124. * @param buffer The destination CANN buffer where the tensor data will be
  1125. * copied.
  1126. * @param src Pointer to the source tensor whose data will be copied.
  1127. * @param dst Pointer to the destination tensor where the data will be copied.
  1128. * @return true if the copy operation succeeded, false otherwise.
  1129. */
  1130. static bool ggml_backend_cann_buffer_cpy_tensor(
  1131. ggml_backend_buffer_t buffer, const ggml_tensor* src, ggml_tensor* dst) {
  1132. if (ggml_backend_buffer_is_cann(src->buffer)) {
  1133. ggml_backend_cann_buffer_context* src_ctx =
  1134. (ggml_backend_cann_buffer_context*)src->buffer->context;
  1135. ggml_backend_cann_buffer_context* dst_ctx =
  1136. (ggml_backend_cann_buffer_context*)buffer->context;
  1137. size_t memcpy_size = ggml_nbytes(src);
  1138. // Same device.
  1139. if (src_ctx->device == dst_ctx->device) {
  1140. ACL_CHECK(aclrtMemcpy((char*)dst->data, memcpy_size,
  1141. (const char*)src->data, memcpy_size,
  1142. ACL_MEMCPY_DEVICE_TO_DEVICE));
  1143. return true;
  1144. } else {
  1145. // Different device but can access by peer.
  1146. int32_t canAccessPeer = 0;
  1147. ACL_CHECK(aclrtDeviceCanAccessPeer(&canAccessPeer, src_ctx->device,
  1148. dst_ctx->device));
  1149. if (canAccessPeer) {
  1150. ggml_cann_set_device(src_ctx->device);
  1151. ACL_CHECK(aclrtDeviceEnablePeerAccess(dst_ctx->device, 0));
  1152. ACL_CHECK(aclrtMemcpy((char*)dst->data, memcpy_size,
  1153. (const char*)src->data, memcpy_size,
  1154. ACL_MEMCPY_DEVICE_TO_DEVICE));
  1155. return true;
  1156. }
  1157. }
  1158. }
  1159. return false;
  1160. }
  1161. /**
  1162. * @brief Clear a CANN buffer by setting all its memory to a specified value.
  1163. *
  1164. * This function clears a CANN buffer by setting all its memory to a specified
  1165. * value.
  1166. *
  1167. * @param buffer The CANN buffer to be cleared.
  1168. * @param value The value to which each byte in the buffer will be set.
  1169. */
  1170. static void ggml_backend_cann_buffer_clear(
  1171. ggml_backend_buffer_t buffer, uint8_t value) {
  1172. ggml_backend_cann_buffer_context* ctx =
  1173. (ggml_backend_cann_buffer_context*)buffer->context;
  1174. ggml_cann_set_device(ctx->device);
  1175. ACL_CHECK(aclrtMemset(ctx->dev_ptr, buffer->size, value, buffer->size));
  1176. }
  1177. /**
  1178. * @brief Interface for a CANN buffer in the backend.
  1179. *
  1180. * This structure defines function pointers to operations that can be performed
  1181. * on a CANN buffer within the backend.
  1182. */
  1183. static const ggml_backend_buffer_i ggml_backend_cann_buffer_interface = {
  1184. /* .free_buffer = */ ggml_backend_cann_buffer_free_buffer,
  1185. /* .get_base = */ ggml_backend_cann_buffer_get_base,
  1186. /* .init_tensor = */ ggml_backend_cann_buffer_init_tensor,
  1187. /* .memset_tensor = */ NULL,
  1188. /* .set_tensor = */ ggml_backend_cann_buffer_set_tensor,
  1189. /* .get_tensor = */ ggml_backend_cann_buffer_get_tensor,
  1190. /* .cpy_tensor = */ ggml_backend_cann_buffer_cpy_tensor,
  1191. /* .clear = */ ggml_backend_cann_buffer_clear,
  1192. /* .reset = */ NULL,
  1193. };
  1194. // cann buffer type
  1195. /**
  1196. * @brief Structure representing context information for a specific backend
  1197. * buffer type.
  1198. */
  1199. struct ggml_backend_cann_buffer_type_context {
  1200. int32_t
  1201. device; /**< Device identifier associated with the buffer context. */
  1202. std::string name; /**< Name associated with the buffer context. */
  1203. };
  1204. /**
  1205. * @brief Retrieves the name associated with a CANN buffer type.
  1206. *
  1207. * This function returns the descriptive name associated with the specified
  1208. * CANN buffer type context.
  1209. *
  1210. * @param buft Pointer to the buffer type context.
  1211. * @return Const pointer to the C-style string containing the name.
  1212. */
  1213. static const char* ggml_backend_cann_buffer_type_name(
  1214. ggml_backend_buffer_type_t buft) {
  1215. ggml_backend_cann_buffer_type_context* buft_ctx =
  1216. (ggml_backend_cann_buffer_type_context*)buft->context;
  1217. return buft_ctx->name.c_str();
  1218. }
  1219. /**
  1220. * @brief Allocates a new CANN buffer of the specified type and size.
  1221. *
  1222. * This function allocates a new CANN buffer on the specified device with the
  1223. * given size.
  1224. *
  1225. * @param buft Pointer to the buffer type context.
  1226. * @param size Size in bytes of the buffer to allocate.
  1227. * @return Pointer to the allocated buffer, or nullptr if allocation fails.
  1228. */
  1229. static ggml_backend_buffer_t
  1230. ggml_backend_cann_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft,
  1231. size_t size) {
  1232. ggml_backend_cann_buffer_type_context* buft_ctx =
  1233. (ggml_backend_cann_buffer_type_context*)buft->context;
  1234. ggml_cann_set_device(buft_ctx->device);
  1235. const size_t alignment = 128;
  1236. size = GGML_PAD(size, alignment);
  1237. if (size == 0) {
  1238. size = alignment;
  1239. }
  1240. void* dev_ptr;
  1241. aclError err = aclrtMalloc(&dev_ptr, size, ACL_MEM_MALLOC_HUGE_FIRST);
  1242. if (err != ACL_SUCCESS) {
  1243. GGML_LOG_ERROR(
  1244. "%s: allocating %.2f MiB on device %d: aclrtMalloc failed: %s\n",
  1245. __func__, size / 1024.0 / 1024.0, buft_ctx->device,
  1246. aclGetRecentErrMsg());
  1247. return nullptr;
  1248. }
  1249. ggml_backend_cann_buffer_context* ctx =
  1250. new ggml_backend_cann_buffer_context(buft_ctx->device, dev_ptr);
  1251. return ggml_backend_buffer_init(buft, ggml_backend_cann_buffer_interface,
  1252. ctx, size);
  1253. }
  1254. /**
  1255. * @brief Retrieves the memory alignment requirement for CANN buffers of this
  1256. * type.
  1257. *
  1258. * This function returns the alignment requirement in bytes for memory allocated
  1259. * by the CANN buffer type.
  1260. *
  1261. * @param buft Pointer to the buffer type context (unused in this
  1262. * implementation).
  1263. * @return The alignment requirement in bytes (fixed at 128 bytes for CANN
  1264. * buffers).
  1265. */
  1266. static size_t ggml_backend_cann_buffer_type_get_alignment(
  1267. ggml_backend_buffer_type_t buft) {
  1268. return 128;
  1269. GGML_UNUSED(buft);
  1270. }
  1271. /**
  1272. * @brief Calculates the allocation size required for a tensor in a CANN buffer.
  1273. *
  1274. * Computes the total allocation size needed for storing the tensor's data in a
  1275. * CANN buffer, considering any necessary padding or adjustments for quantized
  1276. * types.
  1277. *
  1278. * @param buft Pointer to the buffer type context (unused in this
  1279. * implementation).
  1280. * @param tensor Pointer to the tensor for which the allocation size is
  1281. * calculated.
  1282. * @return The total allocation size in bytes required for the tensor in the
  1283. * CANN buffer.
  1284. */
  1285. static size_t ggml_backend_cann_buffer_type_get_alloc_size(
  1286. ggml_backend_buffer_type_t buft, const ggml_tensor* tensor) {
  1287. size_t size = ggml_nbytes(tensor);
  1288. int64_t ne0 = tensor->ne[0];
  1289. // Only check env once.
  1290. static bool weight_to_nz = parse_bool(get_env("GGML_CANN_WEIGHT_NZ").value_or(""));
// The last line must be larger than 32 bytes, because every single op
// processes at least 32 bytes.
  1293. // TODO: quantized type?
  1294. // int64_t line_size = ne0 * ggml_element_size(tensor);
  1295. // int64_t line_size_align_32 = (line_size + 31) & ~31;
  1296. // size += (line_size_align_32 - line_size);
  1297. if (ggml_is_quantized(tensor->type)) {
  1298. if (ne0 % MATRIX_ROW_PADDING != 0) {
  1299. size += ggml_row_size(
  1300. tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING);
  1301. }
  1302. } else if (weight_to_nz && is_matmul_weight((const ggml_tensor*)tensor)) {
// NZ-format weights do not support quantized types yet.
// If an ND tensor is transformed to NZ, its size may change.
  1305. int64_t shape[] = {tensor->ne[1], tensor->ne[0]};
  1306. GGML_ASSERT(tensor->ne[2] == 1);
  1307. GGML_ASSERT(tensor->ne[3] == 1);
  1308. const aclIntArray *acl_shape = aclCreateIntArray(shape, 2);
  1309. size_t new_size;
  1310. ACL_CHECK(aclnnCalculateMatmulWeightSizeV2(acl_shape,
  1311. ggml_cann_type_mapping(tensor->type), &new_size));
  1312. ACL_CHECK(aclDestroyIntArray(acl_shape));
  1313. size = std::max(size, new_size);
  1314. }
  1315. return size;
  1316. GGML_UNUSED(buft);
  1317. }
  1318. static bool ggml_backend_cann_buffer_type_is_host(ggml_backend_buffer_type_t buft) {
  1319. return false;
  1320. GGML_UNUSED(buft);
  1321. }
  1322. /**
  1323. * @brief Interface for managing CANN buffer types in the GGML backend.
  1324. *
  1325. * Provides function pointers for allocating, querying properties, and managing
  1326. * memory for CANN buffer types in the GGML backend.
  1327. */
  1328. static const ggml_backend_buffer_type_i ggml_backend_cann_buffer_type_interface = {
  1329. /* .get_name = */ ggml_backend_cann_buffer_type_name,
  1330. /* .alloc_buffer = */ ggml_backend_cann_buffer_type_alloc_buffer,
  1331. /* .get_alignment = */ ggml_backend_cann_buffer_type_get_alignment,
  1332. /* .get_max_size = */ NULL, // defaults to SIZE_MAX
  1333. /* .get_alloc_size = */ ggml_backend_cann_buffer_type_get_alloc_size,
  1334. /* .is_host = */ ggml_backend_cann_buffer_type_is_host,
  1335. };
  1336. /**
  1337. * @brief Retrieves the CANN buffer type for a specified device.
  1338. *
  1339. * This function initializes and returns the buffer type interface associated
  1340. * with the given device. It ensures thread-safe access using a mutex.
  1341. *
  1342. * @param device The device index for which to retrieve the buffer type.
  1343. * @return A pointer to the buffer type interface for the specified device, or
  1344. * nullptr if the device index is out of range.
  1345. */
  1346. ggml_backend_buffer_type_t
  1347. ggml_backend_cann_buffer_type(int32_t device) {
  1348. static std::mutex mutex;
  1349. std::lock_guard<std::mutex> lock(mutex);
  1350. if (device >= ggml_backend_cann_get_device_count()) {
  1351. return nullptr;
  1352. }
  1353. static ggml_backend_buffer_type
  1354. ggml_backend_cann_buffer_types[GGML_CANN_MAX_DEVICES];
  1355. static bool ggml_backend_cann_buffer_type_initialized = false;
  1356. if (!ggml_backend_cann_buffer_type_initialized) {
  1357. for (int32_t i = 0; i < ggml_cann_info().device_count; i++) {
  1358. ggml_backend_cann_buffer_types[i] = {
  1359. /* .iface = */ ggml_backend_cann_buffer_type_interface,
  1360. /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cann_reg(), i),
  1361. /* .context = */
  1362. new ggml_backend_cann_buffer_type_context{
  1363. i, "CANN" + std::to_string(i)},
  1364. };
  1365. }
  1366. ggml_backend_cann_buffer_type_initialized = true;
  1367. }
  1368. return &ggml_backend_cann_buffer_types[device];
  1369. }
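// Illustrative usage sketch (assumptions: device 0 exists and has enough free
// memory; error handling omitted):
//
//   ggml_backend_buffer_type_t buft = ggml_backend_cann_buffer_type(0);
//   ggml_backend_buffer_t      buf  = ggml_backend_buft_alloc_buffer(buft, 16u << 20);
//   if (buf != nullptr) {
//       // ... place tensors in `buf` via ggml_tallocr or the scheduler ...
//       ggml_backend_buffer_free(buf);
//   }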
  1370. /**
  1371. * @brief Retrieves the name associated with a CANN host buffer type.
  1372. *
  1373. * This function returns the descriptive name associated with the specified
  1374. * CANN host buffer type context.
  1375. *
  1376. * @param buft Pointer to the host buffer type context.
  1377. * @return Const pointer to the C-style string containing the name.
  1378. */
  1379. static const char * ggml_backend_cann_host_buffer_type_name(ggml_backend_buffer_type_t buft) {
  1380. return "CANN_Host";
  1381. GGML_UNUSED(buft);
  1382. }
  1383. /**
  1384. * @brief Retrieves the name associated with a CANN host buffer.
  1385. *
  1386. * This function returns the descriptive name associated with the specified
  1387. * CANN host buffer context.
  1388. *
* @param buffer Pointer to the host buffer.
  1390. * @return Const pointer to the C-style string containing the name.
  1391. */
  1392. static const char * ggml_backend_cann_host_buffer_name(ggml_backend_buffer_t buffer) {
  1393. return "CANN_Host";
  1394. GGML_UNUSED(buffer);
  1395. }
  1396. /**
  1397. * @brief Free resources associated with a CANN host buffer.
  1398. *
  1399. * This function frees the resources associated with a CANN host buffer, including
  1400. * its context.
  1401. *
  1402. * @param buffer The CANN host buffer to free.
  1403. */
  1404. static void ggml_backend_cann_host_buffer_free(ggml_backend_buffer_t buffer) {
  1405. ACL_CHECK(aclrtFreeHost(buffer->context));
  1406. }
  1407. /**
  1408. * @brief Allocates a new CANN host buffer of the specified size.
  1409. *
  1410. * This function allocates a new CANN host buffer with the given size.
  1411. * @param size Size in bytes of the host buffer to allocate.
  1412. * @return Pointer to the allocated host buffer, or nullptr if allocation fails.
  1413. */
  1414. static void * ggml_cann_host_malloc(size_t size) {
  1415. if (getenv("GGML_CANN_NO_PINNED") != nullptr) {
  1416. return nullptr;
  1417. }
  1418. const size_t alignment = 128;
  1419. size = GGML_PAD(size, alignment);
  1420. if (size == 0) {
  1421. size = alignment;
  1422. }
  1423. void * hostPtr = nullptr;
  1424. aclError err = aclrtMallocHost((void **) &hostPtr, size);
  1425. if (err != ACL_SUCCESS) {
  1426. GGML_LOG_WARN("%s: failed to allocate %.2f MiB of pinned memory: %s\n", __func__,
  1427. size / 1024.0 / 1024.0, aclGetRecentErrMsg());
  1428. return nullptr;
  1429. }
  1430. return hostPtr;
  1431. }
  1432. /**
  1433. * @brief Allocates a new CANN host buffer of the specified type and size.
  1434. *
  1435. * @param buft Pointer to the host buffer type context.
  1436. * @param size Size in bytes of the host buffer to allocate.
* @return The allocated pinned host buffer, or a plain CPU buffer as a fallback if pinned allocation fails.
  1438. */
  1439. static ggml_backend_buffer_t ggml_backend_cann_host_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
  1440. void * hostPtr = ggml_cann_host_malloc(size);
  1441. if (hostPtr == nullptr) {
  1442. // fallback to cpu buffer
  1443. return ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size);
  1444. }
  1445. ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(hostPtr, size);
  1446. buffer->buft = buft;
  1447. buffer->iface.free_buffer = ggml_backend_cann_host_buffer_free;
  1448. return buffer;
  1449. }
  1450. /**
* @brief Retrieves the buffer type for pinned (page-locked) CANN host memory.
*
* The returned buffer type allocates host memory with aclrtMallocHost and
* falls back to the regular CPU buffer type if pinned allocation fails.
  1455. */
  1456. ggml_backend_buffer_type_t ggml_backend_cann_host_buffer_type() {
  1457. static struct ggml_backend_buffer_type ggml_backend_cann_buffer_type_host = {
  1458. /* .iface = */ {
  1459. /* .get_name = */ ggml_backend_cann_host_buffer_type_name,
  1460. /* .alloc_buffer = */ ggml_backend_cann_host_buffer_type_alloc_buffer,
  1461. /* .get_alignment = */ ggml_backend_cpu_buffer_type()->iface.get_alignment,
  1462. /* .get_max_size = */ NULL, // defaults to SIZE_MAX
  1463. /* .get_alloc_size = */ ggml_backend_cpu_buffer_type()->iface.get_alloc_size,
  1464. /* .is_host = */ ggml_backend_cpu_buffer_type()->iface.is_host,
  1465. },
  1466. /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cann_reg(), 0),
  1467. /* .context = */ nullptr,
  1468. };
  1469. return &ggml_backend_cann_buffer_type_host;
  1470. }
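// Illustrative sketch (assumption: GGML_CANN_NO_PINNED is unset, so pinned
// allocation is attempted; otherwise this silently degrades to a plain CPU
// buffer):
//
//   ggml_backend_buffer_type_t host_buft = ggml_backend_cann_host_buffer_type();
//   ggml_backend_buffer_t      staging   = ggml_backend_buft_alloc_buffer(host_buft, 1u << 20);
//   // Tensors placed in `staging` live in pinned host memory, which speeds up
//   // host<->device copies.
//   ggml_backend_buffer_free(staging);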
  1471. /**
  1472. * @brief Computes the forward operation for a given tensor using CANN
  1473. * operations.
  1474. *
  1475. * This function selects the appropriate CANN operation based on the type of
  1476. * operation specified in the tensor and performs the computation.
  1477. *
  1478. * @param ctx The CANN context containing necessary resources and
  1479. * configurations.
  1480. * @param dst The destination tensor where the result of the computation will be
  1481. * stored.
  1482. * @return true if the computation was successful; false otherwise.
  1483. */
  1484. static bool ggml_cann_compute_forward(ggml_backend_cann_context& ctx,
  1485. struct ggml_tensor* dst) {
  1486. switch (dst->op) {
  1487. case GGML_OP_REPEAT:
  1488. ggml_cann_repeat(ctx, dst);
  1489. break;
  1490. case GGML_OP_GET_ROWS:
  1491. ggml_cann_get_rows(ctx, dst);
  1492. break;
  1493. case GGML_OP_SET_ROWS:
  1494. ggml_cann_set_rows(ctx, dst);
  1495. break;
  1496. case GGML_OP_DUP:
  1497. ggml_cann_dup(ctx, dst);
  1498. break;
  1499. case GGML_OP_ADD:
  1500. case GGML_OP_ADD1:
  1501. ggml_cann_binary_op<aclnn_add>(ctx, dst);
  1502. break;
  1503. case GGML_OP_SUB:
  1504. ggml_cann_binary_op<aclnn_sub>(ctx, dst);
  1505. break;
  1506. case GGML_OP_ACC:
  1507. ggml_cann_acc(ctx, dst);
  1508. break;
  1509. case GGML_OP_MUL:
  1510. ggml_cann_binary_op<aclnn_mul>(ctx, dst);
  1511. break;
  1512. case GGML_OP_DIV:
  1513. ggml_cann_binary_op<aclnn_div>(ctx, dst);
  1514. break;
  1515. case GGML_OP_UNARY:
  1516. switch (ggml_get_unary_op(dst)) {
  1517. case GGML_UNARY_OP_ABS:
  1518. GGML_CANN_CALL_OP_UNARY(Abs);
  1519. break;
  1520. case GGML_UNARY_OP_NEG:
  1521. GGML_CANN_CALL_OP_UNARY(Neg);
  1522. break;
  1523. case GGML_UNARY_OP_GELU:
  1524. case GGML_UNARY_OP_GELU_ERF:
  1525. // aclnnGelu internally uses the erf-based approximation.
  1526. GGML_CANN_CALL_OP_UNARY(Gelu);
  1527. break;
  1528. case GGML_UNARY_OP_SILU:
  1529. GGML_CANN_CALL_OP_UNARY(Silu);
  1530. break;
  1531. case GGML_UNARY_OP_GELU_QUICK: {
  1532. auto lambda = [](ggml_backend_cann_context& ctx,
  1533. aclTensor* acl_src,
  1534. aclTensor* acl_dst) {
  1535. GGML_CANN_CALL_ACLNN_OP(ctx, GeluV2, acl_src, 0, acl_dst);
  1536. };
  1537. ggml_cann_op_unary(lambda, ctx, dst);
  1538. } break;
  1539. case GGML_UNARY_OP_TANH:
  1540. GGML_CANN_CALL_OP_UNARY(Tanh);
  1541. break;
  1542. case GGML_UNARY_OP_RELU:
  1543. GGML_CANN_CALL_OP_UNARY(Relu);
  1544. break;
  1545. case GGML_UNARY_OP_SIGMOID:
  1546. GGML_CANN_CALL_OP_UNARY(Sigmoid);
  1547. break;
  1548. case GGML_UNARY_OP_HARDSIGMOID:
  1549. GGML_CANN_CALL_OP_UNARY(Hardsigmoid);
  1550. break;
  1551. case GGML_UNARY_OP_HARDSWISH:
  1552. GGML_CANN_CALL_OP_UNARY(Hardswish);
  1553. break;
  1554. case GGML_UNARY_OP_EXP:
  1555. GGML_CANN_CALL_OP_UNARY(Exp);
  1556. break;
  1557. case GGML_UNARY_OP_ELU:
  1558. ggml_cann_elu(ctx, dst);
  1559. break;
  1560. case GGML_UNARY_OP_SGN:
  1561. GGML_CANN_CALL_OP_UNARY(Sign);
  1562. break;
  1563. case GGML_UNARY_OP_STEP:
  1564. ggml_cann_step(ctx, dst);
  1565. break;
  1566. default:
  1567. return false;
  1568. }
  1569. break;
  1570. case GGML_OP_GLU:
  1571. switch (ggml_get_glu_op(dst)) {
  1572. case GGML_GLU_OP_REGLU:
  1573. GGML_CANN_CALL_OP_UNARY_GATED(Relu);
  1574. break;
  1575. case GGML_GLU_OP_GEGLU:
  1576. case GGML_GLU_OP_GEGLU_ERF:
  1577. // aclnnGelu internally uses the erf-based approximation.
  1578. GGML_CANN_CALL_OP_UNARY_GATED(Gelu);
  1579. break;
  1580. case GGML_GLU_OP_SWIGLU:
  1581. GGML_CANN_CALL_OP_UNARY_GATED(Silu);
  1582. break;
  1583. case GGML_GLU_OP_GEGLU_QUICK: {
  1584. auto lambda = [](ggml_backend_cann_context& ctx,
  1585. aclTensor* acl_src,
  1586. aclTensor* acl_dst) {
  1587. GGML_CANN_CALL_ACLNN_OP(ctx, GeluV2, acl_src, 0, acl_dst);
  1588. };
  1589. ggml_cann_op_unary_gated(lambda, ctx, dst);
  1590. } break;
  1591. default:
  1592. return false;
  1593. }
  1594. break;
  1595. case GGML_OP_NORM:
  1596. ggml_cann_norm(ctx, dst);
  1597. break;
  1598. case GGML_OP_GROUP_NORM:
  1599. ggml_cann_group_norm(ctx, dst);
  1600. break;
  1601. case GGML_OP_CONCAT:
  1602. ggml_cann_concat(ctx, dst);
  1603. break;
  1604. case GGML_OP_UPSCALE:
  1605. ggml_cann_upsample_nearest2d(ctx, dst);
  1606. break;
  1607. case GGML_OP_PAD:
  1608. ggml_cann_pad(ctx, dst);
  1609. break;
  1610. case GGML_OP_ARANGE:
  1611. ggml_cann_arange(ctx, dst);
  1612. break;
  1613. case GGML_OP_TIMESTEP_EMBEDDING:
  1614. ggml_cann_timestep_embedding(ctx, dst);
  1615. break;
  1616. case GGML_OP_LEAKY_RELU:
  1617. ggml_cann_leaky_relu(ctx, dst);
  1618. break;
  1619. case GGML_OP_RMS_NORM:
  1620. ggml_cann_rms_norm(ctx, dst);
  1621. break;
  1622. case GGML_OP_MUL_MAT:
  1623. ggml_cann_mul_mat(ctx, dst);
  1624. break;
  1625. case GGML_OP_MUL_MAT_ID:
  1626. ggml_cann_mul_mat_id(ctx, dst);
  1627. break;
  1628. case GGML_OP_SCALE:
  1629. ggml_cann_scale(ctx, dst);
  1630. break;
  1631. case GGML_OP_SQR:
  1632. GGML_ASSERT(dst->src[1] == nullptr);
  1633. dst->src[1] = dst->src[0];
  1634. ggml_cann_binary_op<aclnn_mul>(ctx, dst);
  1635. break;
  1636. case GGML_OP_SQRT:
  1637. GGML_CANN_CALL_OP_UNARY(Sqrt);
  1638. break;
  1639. case GGML_OP_CLAMP:
  1640. ggml_cann_clamp(ctx, dst);
  1641. break;
  1642. case GGML_OP_CPY:
  1643. ggml_cann_cpy(ctx, dst);
  1644. break;
  1645. case GGML_OP_CONT:
  1646. ggml_cann_dup(ctx, dst);
  1647. break;
  1648. case GGML_OP_NONE:
  1649. case GGML_OP_RESHAPE:
  1650. case GGML_OP_VIEW:
  1651. case GGML_OP_PERMUTE:
  1652. case GGML_OP_TRANSPOSE:
  1653. break;
  1654. case GGML_OP_DIAG_MASK_INF:
  1655. ggml_cann_diag_mask(ctx, dst, -INFINITY);
  1656. break;
  1657. case GGML_OP_SOFT_MAX:
  1658. ggml_cann_softmax(ctx, dst);
  1659. break;
  1660. case GGML_OP_ROPE:
  1661. ggml_cann_rope(ctx, dst);
  1662. break;
  1663. case GGML_OP_IM2COL:
  1664. ggml_cann_im2col(ctx, dst);
  1665. break;
  1666. case GGML_OP_POOL_2D:
  1667. ggml_cann_pool2d(ctx, dst);
  1668. break;
  1669. case GGML_OP_SUM:
  1670. ggml_cann_sum(ctx, dst);
  1671. break;
  1672. case GGML_OP_SUM_ROWS:
  1673. ggml_cann_sum_rows(ctx, dst);
  1674. break;
  1675. case GGML_OP_ARGSORT:
  1676. ggml_cann_argsort(ctx, dst);
  1677. break;
  1678. case GGML_OP_ARGMAX:
  1679. ggml_cann_argmax(ctx, dst);
  1680. break;
  1681. case GGML_OP_COS:
  1682. ggml_cann_op_unary<aclnn_cos>(ctx, dst);
  1683. break;
  1684. case GGML_OP_SIN:
  1685. ggml_cann_op_unary<aclnn_sin>(ctx, dst);
  1686. break;
  1687. case GGML_OP_CONV_TRANSPOSE_1D:
  1688. ggml_cann_conv_transpose_1d(ctx, dst);
  1689. break;
  1690. case GGML_OP_LOG:
  1691. GGML_CANN_CALL_OP_UNARY(Log);
  1692. break;
  1693. case GGML_OP_MEAN:
  1694. ggml_cann_mean(ctx, dst);
  1695. break;
  1696. case GGML_OP_PAD_REFLECT_1D:
  1697. ggml_cann_pad_reflect_1d(ctx, dst);
  1698. break;
  1699. case GGML_OP_COUNT_EQUAL:
  1700. ggml_cann_count_equal(ctx, dst);
  1701. break;
  1702. case GGML_OP_FLASH_ATTN_EXT:
  1703. ggml_cann_flash_attn_ext(ctx, dst);
  1704. break;
  1705. default:
  1706. return false;
  1707. }
  1708. return true;
  1709. }
  1710. // backend
  1711. /**
  1712. * @brief Retrieves the name associated with the CANN backend.
  1713. *
  1714. * This function returns the name assigned to the CANN backend, which is stored
  1715. * in the context of the provided backend structure.
  1716. *
  1717. * @param backend Pointer to the CANN backend structure.
  1718. * @return A pointer to a constant string representing the backend name.
  1719. */
  1720. static const char* ggml_backend_cann_name(ggml_backend_t backend) {
  1721. ggml_backend_cann_context* cann_ctx =
  1722. (ggml_backend_cann_context*)backend->context;
  1723. return cann_ctx->name.c_str();
  1724. }
  1725. /**
  1726. * @brief Frees resources associated with the CANN backend.
  1727. *
  1728. * This function releases resources associated with the CANN backend context
  1729. * and resets the device associated with the backend to its initial state.
  1730. *
  1731. * @param backend Pointer to the CANN backend structure to be freed.
  1732. */
  1733. static void ggml_backend_cann_free(ggml_backend_t backend) {
  1734. ggml_backend_cann_context* cann_ctx =
  1735. (ggml_backend_cann_context*)backend->context;
  1736. ACL_CHECK(aclrtSynchronizeDevice());
  1737. ACL_CHECK(aclrtResetDevice(cann_ctx->device));
  1738. delete cann_ctx;
  1739. delete backend;
  1740. }
  1741. /**
  1742. * @brief Sets tensor data asynchronously in the CANN backend.
  1743. *
  1744. * This function asynchronously sets tensor data in the CANN backend.
  1745. *
  1746. * @param backend Pointer to the CANN backend structure.
  1747. * @param tensor Pointer to the tensor structure to set data for.
  1748. * @param data Pointer to the host data to copy to the tensor.
* @param offset Offset in bytes into the tensor's data at which to start writing.
  1750. * @param size Size of the data to copy in bytes.
  1751. */
  1752. static void ggml_backend_cann_set_tensor_async(ggml_backend_t backend,
  1753. ggml_tensor *tensor,
  1754. const void *data,
  1755. size_t offset,
  1756. size_t size) {
  1757. ggml_backend_cann_context *cann_ctx =
  1758. (ggml_backend_cann_context *)backend->context;
  1759. ggml_backend_buffer_t buf =
  1760. tensor->view_src ? tensor->view_src->buffer : tensor->buffer;
  1761. GGML_ASSERT(buf->buft == ggml_backend_cann_buffer_type(cann_ctx->device) &&
  1762. "unsupported buffer type");
  1763. GGML_ASSERT(!ggml_is_quantized(tensor->type));
  1764. ggml_cann_async_memcpy(cann_ctx, (char *)tensor->data + offset, data, size,
  1765. ACL_MEMCPY_HOST_TO_DEVICE);
  1766. }
  1767. /**
  1768. * @brief Gets tensor data asynchronously in the CANN backend.
  1769. *
  1770. * This function asynchronously gets tensor data in the CANN backend.
  1771. *
  1772. * @param backend Pointer to the CANN backend structure.
  1773. * @param tensor Pointer to the tensor structure to get data from.
* @param data Pointer to the host memory that receives the tensor data.
* @param offset Offset in bytes into the tensor's data at which to start reading.
  1776. * @param size Size of the data to copy in bytes.
  1777. */
  1778. static void ggml_backend_cann_get_tensor_async(
  1779. ggml_backend_t backend, const ggml_tensor *tensor, void *data,
  1780. size_t offset, size_t size) {
  1781. ggml_backend_cann_context *cann_ctx =
  1782. (ggml_backend_cann_context *)backend->context;
  1783. ggml_backend_buffer_t buf =
  1784. tensor->view_src ? tensor->view_src->buffer : tensor->buffer;
  1785. GGML_ASSERT(buf->buft == ggml_backend_cann_buffer_type(cann_ctx->device) &&
  1786. "unsupported buffer type");
  1787. GGML_ASSERT(!ggml_is_quantized(tensor->type));
  1788. ggml_cann_async_memcpy(cann_ctx, data, (char *)tensor->data + offset, size,
  1789. ACL_MEMCPY_DEVICE_TO_HOST);
  1790. }
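// Illustrative sketch of the async transfer path (assumptions: `backend` was
// created with ggml_backend_cann_init and `tensor` resides in that device's
// CANN buffer; `host_data` is a hypothetical host pointer of at least
// ggml_nbytes(tensor) bytes):
//
//   ggml_backend_tensor_set_async(backend, tensor, host_data, 0, ggml_nbytes(tensor));
//   // ... enqueue more work on the same backend ...
//   ggml_backend_synchronize(backend);   // drains the stream, see below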
  1791. /**
  1792. * @brief Asynchronously copies tensor data between CANN backends.
  1793. *
  1794. * This function copies tensor data asynchronously between two CANN backends. It
  1795. * checks if both tensors reside in CANN buffers and whether the devices support
  1796. * peer-to-peer access for direct copying. If not, it returns false.
  1797. *
  1798. * @param backend_src Pointer to the source CANN backend structure.
  1799. * @param backend_dst Pointer to the destination CANN backend structure.
  1800. * @param src Pointer to the source tensor to copy data from.
  1801. * @param dst Pointer to the destination tensor to copy data to.
  1802. * @return true if the copy operation succeeds, false otherwise.
  1803. */
  1804. static bool ggml_backend_cann_cpy_tensor_async(
  1805. ggml_backend_t backend_src, ggml_backend_t backend_dst,
  1806. const ggml_tensor* src, ggml_tensor* dst) {
  1807. GGML_ASSERT(ggml_backend_is_cann(backend_src) ||
  1808. ggml_backend_is_cann(backend_dst));
  1809. if (!ggml_backend_buffer_is_cann(src->buffer) ||
  1810. !ggml_backend_buffer_is_cann(dst->buffer)) {
  1811. return false;
  1812. }
  1813. ggml_backend_buffer_t buf_src =
  1814. src->view_src ? src->view_src->buffer : src->buffer;
  1815. ggml_backend_buffer_t buf_dst =
  1816. dst->view_src ? dst->view_src->buffer : dst->buffer;
  1817. ggml_backend_cann_context* cann_ctx_src =
  1818. (ggml_backend_cann_context*)backend_src->context;
  1819. ggml_backend_cann_context* cann_ctx_dst =
  1820. (ggml_backend_cann_context*)backend_dst->context;
  1821. size_t copy_size = ggml_nbytes(dst);
  1822. if (copy_size == 0) {
  1823. return true;
  1824. }
  1825. if (backend_src != backend_dst) {
  1826. ggml_backend_cann_buffer_context* buf_ctx_src =
  1827. (ggml_backend_cann_buffer_context*)buf_src->context;
  1828. ggml_backend_cann_buffer_context* buf_ctx_dst =
  1829. (ggml_backend_cann_buffer_context*)buf_dst->context;
  1830. GGML_ASSERT(cann_ctx_src->device == buf_ctx_src->device);
  1831. GGML_ASSERT(cann_ctx_dst->device == buf_ctx_dst->device);
  1832. int32_t canAccessPeer = 0;
  1833. ACL_CHECK(aclrtDeviceCanAccessPeer(&canAccessPeer, cann_ctx_src->device,
  1834. cann_ctx_dst->device));
  1835. if (!canAccessPeer) {
  1836. return false;
  1837. }
// Peer access must be enabled in both directions for aclrtMemcpyAsync between devices.
  1839. ggml_cann_set_device(cann_ctx_dst->device);
  1840. ACL_CHECK(aclrtDeviceEnablePeerAccess(cann_ctx_src->device, 0));
  1841. ggml_cann_set_device(cann_ctx_src->device);
  1842. ACL_CHECK(aclrtDeviceEnablePeerAccess(cann_ctx_dst->device, 0));
// Wait until the task queue is empty to preserve task ordering.
  1844. cann_ctx_src->task_queue.wait();
  1845. ACL_CHECK(aclrtMemcpyAsync(dst->data, copy_size, src->data, copy_size,
  1846. ACL_MEMCPY_DEVICE_TO_DEVICE,
  1847. cann_ctx_src->stream()));
// TODO: workaround; synchronizing via events does not work here yet.
  1849. aclrtSynchronizeStream(cann_ctx_src->stream());
  1850. } else {
  1851. // src and dst are on the same backend
  1852. ACL_CHECK(aclrtMemcpyAsync(dst->data, copy_size, src->data, copy_size,
  1853. ACL_MEMCPY_DEVICE_TO_DEVICE,
  1854. cann_ctx_dst->stream()));
  1855. }
  1856. return true;
  1857. }
  1858. /**
  1859. * @brief Synchronizes a CANN backend.
  1860. *
  1861. * This function synchronizes the specified CANN backend by waiting for all
  1862. * operations in its associated stream to complete.
  1863. *
  1864. * @param backend Pointer to the CANN backend structure to synchronize.
  1865. */
  1866. static void ggml_backend_cann_synchronize(ggml_backend_t backend) {
  1867. ggml_backend_cann_context* cann_ctx =
  1868. (ggml_backend_cann_context*)backend->context;
  1869. cann_ctx->task_queue.wait();
  1870. ggml_cann_set_device(cann_ctx->device);
  1871. ACL_CHECK(aclrtSynchronizeStream(cann_ctx->stream()));
  1872. }
  1873. #ifdef USE_ACL_GRAPH
  1874. /**
  1875. * @brief Populate the internal CANN graph node properties from the ggml computation graph.
  1876. *
  1877. * This function copies all node attributes (operation type, dimensions, strides, input sources,
  1878. * and operation parameters) into the cached CANN graph structure for later reuse or comparison.
  1879. *
  1880. * @param cann_ctx The CANN backend context.
  1881. * @param cgraph The ggml computational graph.
  1882. */
  1883. static void set_ggml_graph_node_properties(ggml_backend_cann_context * cann_ctx, ggml_cgraph * cgraph) {
  1884. for (int node_idx = 0; node_idx < cgraph->n_nodes; node_idx++) {
  1885. ggml_tensor * node = cgraph->nodes[node_idx];
  1886. cann_ctx->cann_graph->ggml_graph_properties[node_idx].node_address = node->data;
  1887. cann_ctx->cann_graph->ggml_graph_properties[node_idx].node_op = node->op;
  1888. for (int dim = 0; dim < GGML_MAX_DIMS; dim++) {
  1889. cann_ctx->cann_graph->ggml_graph_properties[node_idx].ne[dim] = node->ne[dim];
  1890. cann_ctx->cann_graph->ggml_graph_properties[node_idx].nb[dim] = node->nb[dim];
  1891. }
  1892. for (int src = 0; src < GGML_MAX_SRC; src++) {
  1893. cann_ctx->cann_graph->ggml_graph_properties[node_idx].src_address[src] =
  1894. node->src[src] ? node->src[src]->data : nullptr;
  1895. }
  1896. memcpy(cann_ctx->cann_graph->ggml_graph_properties[node_idx].op_params, node->op_params, GGML_MAX_OP_PARAMS);
  1897. }
  1898. }
  1899. /**
  1900. * @brief Check if a ggml tensor node matches a previously captured CANN graph node.
  1901. *
  1902. * This function compares all relevant fields (address, op type, shape, source inputs, op params)
  1903. * to determine whether the current node matches a previously recorded version.
  1904. *
  1905. * @param node The current ggml tensor node.
  1906. * @param graph_node_properties The stored properties of a CANN graph node.
  1907. * @return true if all fields match (excluding GGML_OP_VIEW); false otherwise.
  1908. */
  1909. static bool ggml_graph_node_has_matching_properties(ggml_tensor * node, ggml_graph_node_properties * graph_node_properties) {
  1910. if (node->data != graph_node_properties->node_address &&
  1911. node->op != GGML_OP_VIEW) {
  1912. return false;
  1913. }
  1914. if (node->op != graph_node_properties->node_op) {
  1915. return false;
  1916. }
  1917. for (int i = 0; i < GGML_MAX_DIMS; i++) {
  1918. if (node->ne[i] != graph_node_properties->ne[i]) {
  1919. return false;
  1920. }
  1921. if (node->nb[i] != graph_node_properties->nb[i]) {
  1922. return false;
  1923. }
  1924. }
  1925. for (int i = 0; i < GGML_MAX_SRC; i++) {
  1926. if (node->src[i] &&
  1927. node->src[i]->data != graph_node_properties->src_address[i] &&
  1928. node->op != GGML_OP_VIEW
  1929. ) {
  1930. return false;
  1931. }
  1932. }
  1933. if (node->op == GGML_OP_SCALE &&
  1934. memcmp(graph_node_properties->op_params, node->op_params, GGML_MAX_OP_PARAMS) != 0) {
  1935. return false;
  1936. }
  1937. return true;
  1938. }
  1939. /**
  1940. * @brief Determine if the CANN graph needs to be rebuilt due to graph changes.
  1941. *
  1942. * This checks whether the number or properties of ggml graph nodes have changed
  1943. * compared to the last captured CANN graph. If so, the CANN graph must be re-captured.
  1944. *
  1945. * @param cann_ctx The CANN backend context.
  1946. * @param cgraph The current ggml computation graph.
  1947. * @return true if an update is required; false otherwise.
  1948. */
  1949. static bool is_cann_graph_update_required(ggml_backend_cann_context * cann_ctx, ggml_cgraph * cgraph) {
  1950. // The number of nodes is different, so the graph needs to be reconstructed.
  1951. if (cann_ctx->cann_graph->ggml_graph_properties.size() != (size_t)cgraph->n_nodes) {
  1952. cann_ctx->cann_graph->ggml_graph_properties.resize(cgraph->n_nodes);
  1953. return true;
  1954. }
  1955. // The number of nodes is the same; iterate over each node to check whether they match.
  1956. for (int i = 0; i < cgraph->n_nodes; i++) {
  1957. bool has_matching_properties = ggml_graph_node_has_matching_properties(
  1958. cgraph->nodes[i], &cann_ctx->cann_graph->ggml_graph_properties[i]);
  1959. if(!has_matching_properties) {
  1960. return true;
  1961. }
  1962. }
  1963. return false;
  1964. }
  1965. #endif // USE_ACL_GRAPH
  1966. /**
  1967. * @brief Evaluate the computation graph and optionally capture or execute it using CANN graph API.
  1968. *
  1969. * If CANN graph execution is enabled and graph capture is required, this function begins
  1970. * graph capture, runs the graph, ends capture, and stores the captured graph.
  1971. *
  1972. * Otherwise, it falls back to op-by-op execution using the CANN compute kernel dispatcher.
  1973. *
  1974. * @param cann_ctx The CANN backend context.
  1975. * @param cgraph The ggml computation graph.
  1976. * @param use_cann_graph Whether to use CANN graph execution.
  1977. * @param cann_graph_update_required Whether graph capture is needed due to graph changes.
  1978. */
  1979. static void evaluate_and_capture_cann_graph(ggml_backend_cann_context * cann_ctx, ggml_cgraph * cgraph,
  1980. bool & use_cann_graph, bool & cann_graph_update_required) {
  1981. #ifdef USE_ACL_GRAPH
  1982. if (use_cann_graph && cann_graph_update_required) {
  1983. if (cann_ctx->cann_graph->graph != nullptr) {
  1984. ACL_CHECK(aclmdlRIDestroy(cann_ctx->cann_graph->graph));
  1985. cann_ctx->cann_graph->graph = nullptr;
  1986. }
  1987. ACL_CHECK(aclmdlRICaptureBegin(cann_ctx->stream(), ACL_MODEL_RI_CAPTURE_MODE_GLOBAL));
  1988. }
  1989. #endif // USE_ACL_GRAPH
  1990. // Only perform the graph execution if CANN graphs are not enabled, or we are capturing the graph.
  1991. // With the use of CANN graphs, the execution will be performed by the graph launch.
  1992. if (!use_cann_graph || cann_graph_update_required) {
  1993. for (int i = 0; i < cgraph->n_nodes; i++) {
  1994. ggml_tensor * node = cgraph->nodes[i];
  1995. if (ggml_is_empty(node) || node->op == GGML_OP_RESHAPE || node->op == GGML_OP_TRANSPOSE || node->op == GGML_OP_VIEW || node->op == GGML_OP_PERMUTE || node->op == GGML_OP_NONE) {
  1996. continue;
  1997. }
  1998. bool ok = ggml_cann_compute_forward(*cann_ctx, node);
  1999. if (!ok) {
  2000. GGML_LOG_ERROR("%s: op not supported %s (%s)\n", __func__, node->name, ggml_op_name(node->op));
  2001. }
  2002. GGML_ASSERT(ok);
  2003. }
  2004. }
  2005. #ifdef USE_ACL_GRAPH
  2006. if (use_cann_graph && cann_graph_update_required) { // End CANN graph capture
  2007. ACL_CHECK(aclmdlRICaptureEnd(cann_ctx->stream(), &cann_ctx->cann_graph->graph));
  2008. }
  2009. if (use_cann_graph) {
  2010. // Execute graph
  2011. ACL_CHECK(aclmdlRIExecuteAsync(cann_ctx->cann_graph->graph, cann_ctx->stream()));
  2012. }
  2013. #endif // USE_ACL_GRAPH
  2014. }
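// Condensed view of the capture/replay lifecycle implemented above (sketch,
// no additional functionality):
//
//   aclmdlRICaptureBegin(stream, ACL_MODEL_RI_CAPTURE_MODE_GLOBAL); // only when re-capture is required
//   ...ops enqueued via ggml_cann_compute_forward...                // recorded into the graph
//   aclmdlRICaptureEnd(stream, &graph);                             // finalize the captured graph
//   aclmdlRIExecuteAsync(graph, stream);                            // launch; replayed on later calls
//   aclmdlRIDestroy(graph);                                         // when the topology changes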
  2015. /**
  2016. * @brief Computes a computational graph using a CANN backend.
  2017. *
  2018. * This function computes the operations defined in the computational graph
  2019. * using the specified CANN backend.
  2020. *
  2021. * @param backend Pointer to the CANN backend structure to use for computation.
  2022. * @param cgraph Pointer to the computational graph structure containing nodes
  2023. * representing operations to be computed.
  2024. * @return enum ggml_status Returns GGML_STATUS_SUCCESS if computation
  2025. * completes successfully, otherwise an appropriate error status.
  2026. */
  2027. static enum ggml_status ggml_backend_cann_graph_compute(
  2028. ggml_backend_t backend, ggml_cgraph* cgraph) {
  2029. ggml_backend_cann_context* cann_ctx =
  2030. (ggml_backend_cann_context*)backend->context;
  2031. ggml_cann_set_device(cann_ctx->device);
  2032. release_nz_workspace();
  2033. #ifdef USE_ACL_GRAPH
  2034. bool use_cann_graph = true;
  2035. bool cann_graph_update_required = false;
// Check LLAMA_SET_ROWS support (configured via the environment); CANN graphs require it.
  2037. if (!cann_ctx->support_set_rows) {
  2038. use_cann_graph = false;
  2039. }
  2040. if (use_cann_graph) {
  2041. if (cann_ctx->cann_graph == nullptr) {
  2042. cann_ctx->cann_graph.reset(new ggml_cann_graph());
  2043. cann_graph_update_required = true;
  2044. }
  2045. cann_graph_update_required = is_cann_graph_update_required(cann_ctx, cgraph);
  2046. set_ggml_graph_node_properties(cann_ctx, cgraph);
  2047. }
  2048. #else
  2049. bool use_cann_graph = false;
  2050. bool cann_graph_update_required = false;
  2051. #endif // USE_ACL_GRAPH
  2052. evaluate_and_capture_cann_graph(
  2053. cann_ctx,
  2054. cgraph,
  2055. use_cann_graph,
  2056. cann_graph_update_required
  2057. );
  2058. return GGML_STATUS_SUCCESS;
  2059. }
  2060. /**
  2061. * @brief Checks if the CANN backend supports a specific operation.
  2062. *
  2063. * This function checks whether the specified operation is supported by the
  2064. * CANN backend.
  2065. *
* @param dev Pointer to the CANN backend device to check support for the
* operation.
  2068. * @param op Pointer to the tensor representing the operation to check.
  2069. * @return bool Returns true if the operation is supported by the backend,
  2070. * otherwise false.
  2071. */
  2072. static bool ggml_backend_cann_supports_op(ggml_backend_dev_t dev,
  2073. const ggml_tensor* op) {
  2074. switch (op->op) {
  2075. case GGML_OP_UNARY:
  2076. switch (ggml_get_unary_op(op)) {
  2077. case GGML_UNARY_OP_ABS:
  2078. case GGML_UNARY_OP_NEG:
  2079. case GGML_UNARY_OP_GELU:
  2080. case GGML_UNARY_OP_SILU:
  2081. case GGML_UNARY_OP_RELU:
  2082. case GGML_UNARY_OP_SIGMOID:
  2083. case GGML_UNARY_OP_HARDSIGMOID:
  2084. case GGML_UNARY_OP_HARDSWISH:
  2085. case GGML_UNARY_OP_GELU_QUICK:
  2086. case GGML_UNARY_OP_TANH:
  2087. case GGML_UNARY_OP_EXP:
  2088. case GGML_UNARY_OP_ELU:
  2089. case GGML_UNARY_OP_SGN:
  2090. case GGML_UNARY_OP_STEP:
  2091. case GGML_UNARY_OP_GELU_ERF:
  2092. return true;
  2093. default:
  2094. return false;
  2095. }
  2096. case GGML_OP_GLU:
  2097. switch (ggml_get_glu_op(op)) {
  2098. case GGML_GLU_OP_REGLU:
  2099. case GGML_GLU_OP_GEGLU:
  2100. case GGML_GLU_OP_SWIGLU:
  2101. case GGML_GLU_OP_GEGLU_ERF:
  2102. case GGML_GLU_OP_GEGLU_QUICK:
  2103. return true;
  2104. default:
  2105. return false;
  2106. }
  2107. break;
  2108. case GGML_OP_MUL_MAT: {
  2109. switch (op->src[0]->type) {
  2110. case GGML_TYPE_F16:
  2111. case GGML_TYPE_F32:
  2112. return true;
  2113. case GGML_TYPE_Q8_0:
  2114. case GGML_TYPE_Q4_0:
  2115. #ifdef ASCEND_310P
// Q4 and Q8 per-group quantization is not supported on 310P devices.
  2117. return false;
  2118. #endif
  2119. // only support contiguous for quantized types.
  2120. return ggml_is_contiguous(op->src[0]) &&
  2121. ggml_is_contiguous(op->src[1]);
  2122. default:
  2123. return false;
  2124. }
  2125. }
  2126. case GGML_OP_MUL_MAT_ID:
  2127. switch (op->src[0]->type) {
  2128. case GGML_TYPE_F16:
  2129. case GGML_TYPE_F32:
  2130. return true;
  2131. case GGML_TYPE_Q8_0:
  2132. case GGML_TYPE_Q4_0:
  2133. #ifdef ASCEND_310P
// Q4 and Q8 per-group quantization is not supported on 310P devices.
  2135. return false;
  2136. #endif
  2137. // only support contiguous for quantized types.
  2138. return ggml_is_contiguous(op->src[0]) &&
  2139. ggml_is_contiguous(op->src[1]);
  2140. default:
  2141. return false;
  2142. }
  2143. // embedding
  2144. case GGML_OP_GET_ROWS: {
  2145. switch (op->src[0]->type) {
  2146. case GGML_TYPE_F32:
  2147. case GGML_TYPE_F16:
  2148. case GGML_TYPE_Q8_0:
  2149. return true;
  2150. default:
  2151. return false;
  2152. }
  2153. } break;
  2154. case GGML_OP_SET_ROWS: {
  2155. switch (op->type) {
  2156. case GGML_TYPE_F32:
  2157. case GGML_TYPE_F16:
  2158. return true;
  2159. default:
  2160. return false;
  2161. }
  2162. } break;
  2163. case GGML_OP_CPY: {
  2164. ggml_tensor *src = op->src[0];
  2165. if ((op->type != GGML_TYPE_F32 && op->type != GGML_TYPE_F16) ||
  2166. (src->type != GGML_TYPE_F32 &&
  2167. src->type != GGML_TYPE_F16)) {
  2168. // only support F32 and F16.
  2169. return false;
  2170. }
  2171. return true;
  2172. } break;
  2173. case GGML_OP_CONT: {
  2174. // TODO: support GGML_TYPE_BF16
  2175. switch (op->src[0]->type) {
  2176. case GGML_TYPE_F32:
  2177. case GGML_TYPE_F16:
  2178. return true;
  2179. default:
  2180. return false;
  2181. }
  2182. }
  2183. case GGML_OP_ROPE: {
  2184. // TODO: with ops-test v == 1
  2185. float ext_factor = 0.0f;
  2186. memcpy(&ext_factor, (const float *) op->op_params + 7, sizeof(float));
  2187. // TODO: n_dims <= ne0
  2188. if (op->src[0]->ne[0] != op->op_params[1]) {
  2189. return false;
  2190. }
  2191. // TODO: ext_factor != 0
  2192. if (ext_factor != 0) {
  2193. return false;
  2194. }
  2195. const int mode = ((const int32_t *) op->op_params)[2];
  2196. if (mode & GGML_ROPE_TYPE_MROPE) {
  2197. return false;
  2198. }
  2199. if (mode & GGML_ROPE_TYPE_VISION) {
  2200. return false;
  2201. }
  2202. if(!ggml_is_contiguous(op->src[0])){
  2203. return false;
  2204. }
  2205. return true;
  2206. }
  2207. case GGML_OP_UPSCALE: {
// aclnnUpsampleNearest2dGetWorkspaceSize does not support the case where
// selfDimN[2]/outDimN[2] or selfDimC[3]/outDimC[3] are not equal.
  2210. if (op->src[0]->ne[2] * op->ne[3] != op->src[0]->ne[3] * op->ne[2]) {
  2211. return false;
  2212. }
  2213. if (op->op_params[0] != GGML_SCALE_MODE_NEAREST) {
  2214. return false;
  2215. }
  2216. return true;
  2217. }
  2218. case GGML_OP_POOL_2D: {
  2219. const int32_t * opts = (const int32_t *) op->op_params;
  2220. #ifdef ASCEND_310P
  2221. enum ggml_op_pool opt = static_cast<ggml_op_pool>(opts[0]);
  2222. if(opt == GGML_OP_POOL_MAX){
  2223. return false;
  2224. }
  2225. #endif
  2226. const int k0 = opts[1];
  2227. const int k1 = opts[2];
  2228. const int p0 = opts[5];
  2229. const int p1 = opts[6];
  2230. // value of paddingH should be at most half of kernelH
  2231. // value of paddingW should be at most half of kernelW
  2232. return (p0 <= (k0 / 2)) && (p1 <= (k1 / 2));
  2233. }
  2234. case GGML_OP_DUP:
  2235. case GGML_OP_SUM:
  2236. case GGML_OP_IM2COL:
  2237. case GGML_OP_CONCAT:
  2238. case GGML_OP_REPEAT:
  2239. case GGML_OP_NONE:
  2240. case GGML_OP_RESHAPE:
  2241. case GGML_OP_VIEW:
  2242. case GGML_OP_PERMUTE:
  2243. case GGML_OP_TRANSPOSE:
  2244. case GGML_OP_NORM:
  2245. case GGML_OP_ADD:
  2246. case GGML_OP_ADD1:
  2247. case GGML_OP_SUB:
  2248. case GGML_OP_MUL:
  2249. case GGML_OP_DIV:
  2250. case GGML_OP_RMS_NORM:
  2251. case GGML_OP_SQR:
  2252. case GGML_OP_SQRT:
  2253. case GGML_OP_CLAMP:
  2254. case GGML_OP_DIAG_MASK_INF:
  2255. case GGML_OP_SUM_ROWS:
  2256. case GGML_OP_ARGSORT:
  2257. case GGML_OP_ACC:
  2258. case GGML_OP_GROUP_NORM:
  2259. case GGML_OP_PAD:
  2260. case GGML_OP_ARANGE:
  2261. case GGML_OP_TIMESTEP_EMBEDDING:
  2262. case GGML_OP_LEAKY_RELU:
  2263. case GGML_OP_ARGMAX:
  2264. case GGML_OP_COS:
  2265. case GGML_OP_SIN:
  2266. case GGML_OP_CONV_TRANSPOSE_1D:
  2267. case GGML_OP_LOG:
  2268. case GGML_OP_MEAN:
  2269. case GGML_OP_PAD_REFLECT_1D:
  2270. case GGML_OP_COUNT_EQUAL:
  2271. return true;
  2272. case GGML_OP_SCALE:
  2273. float bias;
  2274. memcpy(&bias, (float*)op->op_params + 1, sizeof(float));
  2275. return bias == 0.0f; // TODO: support bias != 0.0f
  2276. case GGML_OP_SOFT_MAX:
  2277. // TODO: support attention sinks [TAG_ATTN_SINKS]
  2278. if (op->src[2]) {
  2279. return false;
  2280. }
  2281. return true;
  2282. case GGML_OP_FLASH_ATTN_EXT:{
  2283. // derived from [ggml-cuda.cu]
  2284. if(op->src[1]->type != GGML_TYPE_F16 || op->src[2]->type != GGML_TYPE_F16){
  2285. return false;
  2286. }
  2287. if(op->src[1]->type != GGML_TYPE_F16 && op->src[1]->type != GGML_TYPE_F32 && op->src[1]->type != GGML_TYPE_BF16){
  2288. return false;
  2289. }
  2290. if(op->type != GGML_TYPE_F16 && op->type != GGML_TYPE_F32 && op->type != GGML_TYPE_BF16){
  2291. return false;
  2292. }
  2293. // TODO: support attention sinks [TAG_ATTN_SINKS]
  2294. if (op->src[4]) {
  2295. return false;
  2296. }
  2297. if (op->src[1]->ne[0] != op->src[2]->ne[0]) {
  2298. // different head sizes of K and V are not supported yet
  2299. return false;
  2300. }
  2301. if (op->src[0]->ne[0] == 192) {
  2302. return false;
  2303. }
  2304. if (op->src[0]->ne[0] == 576) {
  2305. // DeepSeek MLA
  2306. return false;
  2307. }
  2308. float logitSoftcap = 0.0f;
  2309. memcpy(&logitSoftcap, (float*)op->op_params + 2, sizeof(float));
  2310. if(logitSoftcap != 0.0f) {
  2311. return false;
  2312. }
  2313. return true;
  2314. }
  2315. default:
  2316. return false;
  2317. }
  2318. GGML_UNUSED(dev);
  2319. }
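// Illustrative sketch (assumption: a CANN device is registered): the scheduler
// queries this hook through the generic device API, e.g.
//
//   ggml_backend_dev_t dev = ggml_backend_reg_dev_get(ggml_backend_cann_reg(), 0);
//   bool ok = ggml_backend_dev_supports_op(dev, node);   // `node` is a hypothetical ggml_tensor*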
  2320. /**
  2321. * @brief Checks if the backend buffer type is associated with the CANN backend.
  2322. *
  2323. * This function checks whether the provided backend buffer type is associated
  2324. * with the CANN backend based on the comparison of its name retrieval function
  2325. * pointer.
  2326. *
  2327. * @param buft Pointer to the backend buffer type to check.
  2328. * @return bool Returns true if the buffer type is associated with the CANN
  2329. * backend, otherwise false.
  2330. */
  2331. static bool ggml_backend_buft_is_cann(ggml_backend_buffer_type_t buft) {
  2332. return buft->iface.get_name == ggml_backend_cann_buffer_type_name;
  2333. }
  2334. /**
  2335. * @brief Determines if a tensor operation should be offloaded to the CANN
  2336. * backend.
  2337. *
  2338. * This function checks if a given tensor operation should be offloaded to the
  2339. * CANN backend based on the operation type and the size of the tensor. It
  2340. * returns true if the second dimension (ne[1]) of the tensor is greater than or
  2341. * equal to the minimum batch size and the operation is not GGML_OP_GET_ROWS.
  2342. *
* @param dev Pointer to the CANN backend device.
  2344. * @param op Pointer to the tensor operation to check.
  2345. * @return bool Returns true if the operation should be offloaded, otherwise
  2346. * false.
  2347. */
  2348. static bool ggml_backend_cann_offload_op(ggml_backend_dev_t dev,
  2349. const ggml_tensor* op) {
  2350. const int min_batch_size = 32;
  2351. GGML_UNUSED(dev);
  2352. return op->ne[1] >= min_batch_size && op->op != GGML_OP_GET_ROWS;
  2353. }
  2354. /**
  2355. * @brief Records an event on the CANN backend stream.
  2356. *
  2357. * This function records the given event on the ACL runtime stream associated
  2358. * with the backend context.
  2359. *
* @param backend Pointer to the CANN backend structure.
* @param event Pointer to the event structure to be recorded.
  2361. */
  2362. static void ggml_backend_cann_event_record(ggml_backend_t backend, ggml_backend_event_t event) {
  2363. ggml_backend_cann_context* cann_ctx =
  2364. (ggml_backend_cann_context*)backend->context;
  2365. ACL_CHECK(aclrtRecordEvent((aclrtEvent)event->context, cann_ctx->stream()));
  2366. }
  2367. /**
  2368. * @brief Waits for a recorded event to complete on the CANN backend stream.
  2369. *
  2370. * This function makes the given backend wait for the event to complete on its
  2371. * ACL runtime stream.
  2372. *
  2373. * @param backend Pointer to the backend structure.
  2374. * @param event Pointer to the event structure that the backend needs to wait
  2375. * for.
  2376. */
  2377. static void ggml_backend_cann_event_wait(ggml_backend_t backend,
  2378. ggml_backend_event_t event) {
  2379. ggml_backend_cann_context* cann_ctx =
  2380. (ggml_backend_cann_context*)backend->context;
  2381. if (ggml_backend_is_cann(backend)) {
  2382. ACL_CHECK(aclrtStreamWaitEvent(cann_ctx->stream(),
  2383. (aclrtEvent)event->context));
  2384. } else {
  2385. GGML_ABORT("fatal error");
  2386. }
  2387. }
  2388. /**
  2389. * @brief Structure defining the interface for the CANN backend.
  2390. *
  2391. * This structure contains function pointers for various operations
  2392. * supported by the CANN backend, including name retrieval, memory
  2393. * management, tensor operations, synchronization, and event handling.
  2394. */
  2395. static const ggml_backend_i ggml_backend_cann_interface = {
  2396. /* .get_name = */ ggml_backend_cann_name,
  2397. /* .free = */ ggml_backend_cann_free,
  2398. /* .set_tensor_async = */ ggml_backend_cann_set_tensor_async,
  2399. /* .get_tensor_async = */ ggml_backend_cann_get_tensor_async,
  2400. /* .cpy_tensor_async = */ ggml_backend_cann_cpy_tensor_async,
  2401. /* .synchronize = */ ggml_backend_cann_synchronize,
  2402. /* .graph_plan_create = */ NULL,
  2403. /* .graph_plan_free = */ NULL,
  2404. /* .graph_plan_update = */ NULL,
  2405. /* .graph_plan_compute = */ NULL,
  2406. /* .graph_compute = */ ggml_backend_cann_graph_compute,
  2407. /* .event_record = */ ggml_backend_cann_event_record,
  2408. /* .event_wait = */ ggml_backend_cann_event_wait,
  2409. };
  2410. /**
  2411. * @brief Return the hardcoded GUID for the CANN backend.
  2412. *
  2413. * This function returns a static GUID which uniquely identifies the CANN
  2414. * backend.
  2415. *
  2416. * @return A pointer to the static GUID.
  2417. */
  2418. static ggml_guid_t ggml_backend_cann_guid() {
  2419. static ggml_guid guid = {0xa1, 0x94, 0xaf, 0xac, 0xbd, 0x4f, 0x47, 0x34,
  2420. 0xbe, 0x1a, 0x9e, 0x71, 0x1f, 0x9e, 0xed, 0x64};
  2421. return &guid;
  2422. }
  2423. // backend device
  2424. struct ggml_backend_cann_device_context {
  2425. int device;
  2426. std::string name;
  2427. std::string description;
  2428. };
  2429. static const char * ggml_backend_cann_device_get_name(ggml_backend_dev_t dev) {
  2430. ggml_backend_cann_device_context * ctx = (ggml_backend_cann_device_context *)dev->context;
  2431. return ctx->name.c_str();
  2432. }
  2433. static const char* ggml_backend_cann_device_get_description(ggml_backend_dev_t dev) {
  2434. ggml_backend_cann_device_context * ctx = (ggml_backend_cann_device_context *)dev->context;
  2435. return ctx->description.c_str();
  2436. }
  2437. static void ggml_backend_cann_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) {
  2438. ggml_backend_cann_device_context * ctx = (ggml_backend_cann_device_context *)dev->context;
  2439. ggml_backend_cann_get_device_memory(ctx->device, free, total);
  2440. }
  2441. static enum ggml_backend_dev_type ggml_backend_cann_device_get_type(ggml_backend_dev_t dev) {
  2442. GGML_UNUSED(dev);
  2443. return GGML_BACKEND_DEVICE_TYPE_GPU;
  2444. }
  2445. static void ggml_backend_cann_device_get_props(ggml_backend_dev_t dev, ggml_backend_dev_props * props) {
  2446. props->name = ggml_backend_cann_device_get_name(dev);
  2447. props->description = ggml_backend_cann_device_get_description(dev);
  2448. props->type = ggml_backend_cann_device_get_type(dev);
  2449. ggml_backend_cann_device_get_memory(dev, &props->memory_free, &props->memory_total);
  2450. bool host_buffer = getenv("GGML_CANN_NO_PINNED") == nullptr;
  2451. props->caps = {
  2452. /* .async = */ false,
  2453. /* .host_buffer = */ host_buffer,
  2454. /* .buffer_from_host_ptr = */ false,
  2455. /* .events = */ true,
  2456. };
  2457. }
  2458. static ggml_backend_t ggml_backend_cann_device_init(ggml_backend_dev_t dev, const char * params) {
  2459. GGML_UNUSED(params);
  2460. ggml_backend_cann_device_context * ctx = (ggml_backend_cann_device_context *)dev->context;
  2461. return ggml_backend_cann_init(ctx->device);
  2462. }
  2463. /**
  2464. * @brief Checks if the CANN backend supports a specific backend buffer type.
  2465. *
  2466. * This function determines whether the CANN backend supports the given backend
  2467. * buffer type by comparing the device context of the backend and buffer type.
* It returns true if the device of the backend device context matches the
* device of the buffer type context.
*
* @param dev Pointer to the CANN backend device.
  2472. * @param buft Pointer to the backend buffer type to check.
  2473. * @return bool Returns true if the CANN backend supports the buffer type,
  2474. * otherwise false.
  2475. */
  2476. static bool ggml_backend_cann_supports_buft(
  2477. ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) {
  2478. if (ggml_backend_buft_is_cann(buft)) {
  2479. ggml_backend_cann_device_context * dev_ctx = (ggml_backend_cann_device_context *)dev->context;
  2480. ggml_backend_cann_buffer_type_context * buft_ctx =
  2481. (ggml_backend_cann_buffer_type_context *)buft->context;
  2482. return buft_ctx->device == dev_ctx->device;
  2483. }
  2484. return false;
  2485. }
  2486. static ggml_backend_buffer_type_t ggml_backend_cann_device_get_buffer_type(ggml_backend_dev_t dev) {
  2487. ggml_backend_cann_device_context * ctx = (ggml_backend_cann_device_context *)dev->context;
  2488. return ggml_backend_cann_buffer_type(ctx->device);
  2489. }
  2490. static ggml_backend_buffer_type_t ggml_backend_cann_device_get_host_buffer_type(ggml_backend_dev_t dev) {
  2491. GGML_UNUSED(dev);
  2492. return ggml_backend_cann_host_buffer_type();
  2493. }
  2494. /**
  2495. * @brief Creates a new event for the CANN backend device.
  2496. *
  2497. * This function initializes a new event for the CANN backend by setting the
  2498. * device and creating an ACL runtime event. The created event is then wrapped
  2499. * in a ggml_backend_event structure and returned.
  2500. *
* @param dev Pointer to the CANN backend device.
  2502. * @return ggml_backend_event_t Returns a pointer to the new event structure.
  2503. */
  2504. static ggml_backend_event_t ggml_backend_cann_device_event_new(
  2505. ggml_backend_dev_t dev) {
  2506. ggml_backend_cann_device_context * dev_ctx = (ggml_backend_cann_device_context *)dev->context;
  2507. ggml_cann_set_device(dev_ctx->device);
  2508. aclrtEvent event;
  2509. ACL_CHECK(aclrtCreateEvent(&event));
  2510. return new ggml_backend_event{
  2511. /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cann_reg(), dev_ctx->device),
  2512. /* .context = */ event,
  2513. };
  2514. }
  2515. /**
  2516. * @brief Frees a CANN backend event.
  2517. *
  2518. * This function destroys the ACL runtime event associated with the given CANN
  2519. * backend event and then deletes the event structure itself.
  2520. *
  2521. * @param event Pointer to the event structure to be freed.
  2522. */
  2523. static void ggml_backend_cann_device_event_free(ggml_backend_dev_t dev, ggml_backend_event_t event) {
  2524. ACL_CHECK(aclrtDestroyEvent((aclrtEvent)event->context));
  2525. delete event;
  2526. GGML_UNUSED(dev);
  2527. }
  2528. /**
  2529. * @brief Synchronizes the given event on the CANN backend.
  2530. *
  2531. * This function waits for the specified event to complete on the ACL runtime.
  2532. *
  2533. * @param event Pointer to the event structure to be synchronized.
  2534. */
  2535. static void ggml_backend_cann_device_event_synchronize(ggml_backend_dev_t dev, ggml_backend_event_t event) {
  2536. ACL_CHECK(aclrtSynchronizeEvent((aclrtEvent)event->context));
  2537. GGML_UNUSED(dev);
  2538. }
  2539. static const ggml_backend_device_i ggml_backend_cann_device_interface = {
  2540. /* .get_name = */ ggml_backend_cann_device_get_name,
  2541. /* .get_description = */ ggml_backend_cann_device_get_description,
  2542. /* .get_memory = */ ggml_backend_cann_device_get_memory,
  2543. /* .get_type = */ ggml_backend_cann_device_get_type,
  2544. /* .get_props = */ ggml_backend_cann_device_get_props,
  2545. /* .init_backend = */ ggml_backend_cann_device_init, // called for every card
  2546. /* .get_buffer_type = */ ggml_backend_cann_device_get_buffer_type,
  2547. /* .get_host_buffer_type = */ ggml_backend_cann_device_get_host_buffer_type,
  2548. /* .buffer_from_host_ptr = */ NULL, // not supported for CANN
  2549. /* .supports_op = */ ggml_backend_cann_supports_op,
  2550. /* .supports_buft = */ ggml_backend_cann_supports_buft,
  2551. /* .offload_op = */ ggml_backend_cann_offload_op,
  2552. /* .event_new = */ ggml_backend_cann_device_event_new,
  2553. /* .event_free = */ ggml_backend_cann_device_event_free,
  2554. /* .event_synchronize = */ ggml_backend_cann_device_event_synchronize,
  2555. };
  2556. // backend reg
  2557. struct ggml_backend_cann_reg_context {
  2558. std::vector<ggml_backend_dev_t> devices;
  2559. };
  2560. static const char * ggml_backend_cann_reg_get_name(ggml_backend_reg_t reg) {
  2561. GGML_UNUSED(reg);
  2562. return GGML_CANN_NAME;
  2563. }
  2564. static size_t ggml_backend_cann_reg_get_device_count(ggml_backend_reg_t reg) {
  2565. ggml_backend_cann_reg_context * ctx = (ggml_backend_cann_reg_context *)reg->context;
  2566. return ctx->devices.size();
  2567. }
  2568. static ggml_backend_dev_t ggml_backend_cann_reg_get_device(ggml_backend_reg_t reg, size_t index) {
  2569. ggml_backend_cann_reg_context * ctx = (ggml_backend_cann_reg_context *)reg->context;
  2570. GGML_ASSERT(index < ctx->devices.size());
  2571. return ctx->devices[index];
  2572. }
  2573. static void * ggml_backend_cann_reg_get_proc_address(ggml_backend_reg_t reg, const char * name) {
  2574. GGML_UNUSED(reg);
  2575. GGML_UNUSED(name);
  2576. // reserved for future use
  2577. return nullptr;
  2578. }
  2579. static const ggml_backend_reg_i ggml_backend_cann_reg_interface = {
  2580. /* .get_name = */ ggml_backend_cann_reg_get_name,
  2581. /* .get_device_count = */ ggml_backend_cann_reg_get_device_count,
  2582. /* .get_device = */ ggml_backend_cann_reg_get_device,
  2583. /* .get_proc_address = */ ggml_backend_cann_reg_get_proc_address,
  2584. };
  2585. // backend registry, called only once for cann backend
  2586. ggml_backend_reg_t ggml_backend_cann_reg() {
  2587. static ggml_backend_reg reg;
  2588. static bool initialized = false;
  2589. {
  2590. static std::mutex mutex;
  2591. std::lock_guard<std::mutex> lock(mutex);
  2592. if (!initialized) {
  2593. aclInit(nullptr);
  2594. ggml_backend_cann_reg_context * ctx = new ggml_backend_cann_reg_context;
  2595. for (int i = 0; i < ggml_cann_info().device_count; i++) {
  2596. ggml_backend_cann_device_context* dev_ctx = new ggml_backend_cann_device_context();
  2597. dev_ctx->description = aclrtGetSocName();
  2598. dev_ctx->device = i;
  2599. dev_ctx->name = GGML_CANN_NAME + std::to_string(i);
  2600. ggml_cann_set_device(i);
  2601. ggml_backend_dev_t dev = new ggml_backend_device {
  2602. /* .iface = */ ggml_backend_cann_device_interface,
  2603. /* .reg = */ &reg,
  2604. /* .context = */ dev_ctx
  2605. };
  2606. ctx->devices.push_back(dev);
  2607. }
  2608. reg = ggml_backend_reg {
  2609. /* .api_version = */ GGML_BACKEND_API_VERSION,
  2610. /* .iface = */ ggml_backend_cann_reg_interface,
  2611. /* .context = */ ctx
  2612. };
  2613. }
  2614. initialized = true;
  2615. }
  2616. return &reg;
  2617. }
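// Illustrative sketch (assumption: at least one Ascend device is visible):
// enumerating CANN devices through the generic registry API.
//
//   ggml_backend_reg_t reg = ggml_backend_cann_reg();
//   for (size_t i = 0; i < ggml_backend_reg_dev_count(reg); i++) {
//       ggml_backend_dev_t dev = ggml_backend_reg_dev_get(reg, i);
//       GGML_LOG_INFO("device %zu: %s (%s)\n", i,
//                     ggml_backend_dev_name(dev), ggml_backend_dev_description(dev));
//   }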
  2618. ggml_backend_t ggml_backend_cann_init(int32_t device) {
  2619. aclInit(nullptr);
  2620. if (device < 0 || device >= ggml_backend_cann_get_device_count()) {
  2621. GGML_LOG_ERROR("%s: error: invalid device %d\n", __func__, device);
  2622. return nullptr;
  2623. }
  2624. ggml_backend_cann_context* ctx = new ggml_backend_cann_context(device);
  2625. if (ctx == nullptr) {
  2626. GGML_LOG_ERROR("%s: error: failed to allocate context\n", __func__);
  2627. return nullptr;
  2628. }
  2629. ggml_cann_set_device(ctx->device);
  2630. ggml_backend_t cann_backend =
  2631. new ggml_backend{/* .guid = */ ggml_backend_cann_guid(),
  2632. /* .interface = */ ggml_backend_cann_interface,
  2633. /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cann_reg(), device),
  2634. /* .context = */ ctx};
  2635. return cann_backend;
  2636. }
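// Illustrative end-to-end sketch (assumptions: device 0 exists and `gf` is a
// hypothetical ggml_cgraph whose tensors were allocated on this backend):
//
//   ggml_backend_t backend = ggml_backend_cann_init(0);
//   if (backend != nullptr) {
//       ggml_backend_graph_compute(backend, gf);   // dispatches through graph_compute above
//       ggml_backend_free(backend);
//   }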
  2637. bool ggml_backend_is_cann(ggml_backend_t backend) {
  2638. return backend != NULL &&
  2639. ggml_guid_matches(backend->guid, ggml_backend_cann_guid());
  2640. }
  2641. int32_t ggml_backend_cann_get_device_count() {
  2642. return ggml_cann_info().device_count;
  2643. }
  2644. void ggml_backend_cann_get_device_description(
  2645. int32_t device, char* description, size_t description_size) {
  2646. ggml_cann_set_device(device);
  2647. const char* soc_name = aclrtGetSocName();
  2648. snprintf(description, description_size, "%s", soc_name);
  2649. }
  2650. void ggml_backend_cann_get_device_memory(int32_t device, size_t* free,
  2651. size_t* total) {
  2652. ggml_cann_set_device(device);
  2653. ACL_CHECK(aclrtGetMemInfo(ACL_HBM_MEM, free, total));
  2654. }
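// Illustrative sketch: querying HBM usage of device 0 (assumes the device is
// accessible; values come from aclrtGetMemInfo above).
//
//   size_t free_mem = 0, total_mem = 0;
//   ggml_backend_cann_get_device_memory(0, &free_mem, &total_mem);
//   GGML_LOG_INFO("CANN0: %zu MiB free / %zu MiB total\n",
//                 free_mem / 1024 / 1024, total_mem / 1024 / 1024);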
  2655. GGML_BACKEND_DL_IMPL(ggml_backend_cann_reg)