#include "ggml-zdnn.h"

#include "ggml-impl.h"
#include "ggml-backend-impl.h"

#include "ggml-zdnn/common.hpp"
#include "ggml-zdnn/mmf.hpp"
#include "ggml-zdnn/utils.hpp"
#include "ggml.h"

#include <vector>
#include <memory>

#include <csignal>  // raise(SIGTRAP)
#include <unistd.h>
static void ggml_zdnn_compute_forward_mul_mat(
    const ggml_backend_zdnn_context * ctx,
          ggml_tensor * dst) {
    const ggml_tensor * src0 = dst->src[0];  // weights
    const ggml_tensor * src1 = dst->src[1];  // inputs

    // TODO: implement support for quantized types
    // we currently only support f32, f16, and bf16
    ggml_zdnn_mul_mat_f(ctx, src0, src1, dst);
}
static bool ggml_zdnn_compute_forward(
    ggml_backend_zdnn_context * ctx,
    ggml_tensor * dst) {
    switch (dst->op) {
        case GGML_OP_MUL_MAT:
            {
                ggml_zdnn_compute_forward_mul_mat(ctx, dst);
            } break;
        default:
            return false;
    }

    return true;
}
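
// walk the graph node by node: layout-only ops (reshape, view, permute, transpose)
// require no work here, everything else is dispatched to the zDNN compute path
// and is expected to succeed (unsupported ops are filtered out by supports_op)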
static enum ggml_status ggml_zdnn_graph_compute(ggml_backend_t backend, ggml_cgraph * gf) {
    ggml_backend_zdnn_context        * ctx     = (ggml_backend_zdnn_context *)backend->context;
    ggml_backend_zdnn_device_context * ctx_dev = (ggml_backend_zdnn_device_context *)backend->device->context;

    ctx->gf = gf;

    for (int i = 0; i < gf->n_nodes; i++) {
        ggml_tensor * node = gf->nodes[i];

        if (ggml_is_empty(node)
            || node->op == GGML_OP_NONE
            || node->op == GGML_OP_RESHAPE
            || node->op == GGML_OP_VIEW
            || node->op == GGML_OP_PERMUTE
            || node->op == GGML_OP_TRANSPOSE) {
            continue;
        }

        bool ok = ggml_zdnn_compute_forward(ctx, node);
        if (!ok) {
            GGML_LOG_ERROR("%s: unsupported op %s (%s)\n",
                           __func__, node->name, ggml_op_name(node->op));
        }

        GGML_ASSERT(ok);
    }

    return GGML_STATUS_SUCCESS;

    GGML_UNUSED(ctx_dev);
}
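
// only 2D, contiguous, non-view matrix multiplications with f32/f16/bf16 weights
// are offloaded, and every dimension involved must fit within the maximum
// dimension index size reported by the NNPA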
static bool ggml_zdnn_supports_op(const ggml_backend_zdnn_device_context * ctx_dev, const ggml_tensor * op) {
    switch (op->op) {
        case GGML_OP_NONE:
        case GGML_OP_RESHAPE:
        case GGML_OP_VIEW:
        case GGML_OP_TRANSPOSE:
        case GGML_OP_PERMUTE:
            return true;

        case GGML_OP_MUL_MAT:
            {
                const ggml_tensor * weights = op->src[0];
                const ggml_tensor * inputs  = op->src[1];

                const int64_t ne10 = inputs->ne[0];
                const int64_t ne0  = op->ne[0];
                const int64_t ne1  = op->ne[1];

                const int64_t max_batch = ctx_dev->max_size;

                if (!ggml_is_matrix(weights) || !ggml_is_matrix(inputs) ||
                    !ggml_is_contiguous(weights) || !ggml_is_contiguous(inputs) ||
                    weights->view_src != nullptr || inputs->view_src != nullptr ||
                    ne0 > max_batch || ne1 > max_batch || ne10 > max_batch) {
                    return false;
                }

                switch (weights->type) {
                    case GGML_TYPE_F32:
                    case GGML_TYPE_F16:
                    case GGML_TYPE_BF16:
                        return true;
                    default:
                        return false;
                }
            } break;

        default:
            return false;
    }
}
////////////////////////////////////////////////////////////////////////////////

//
// globals
//

// initialised in ggml_backend_zdnn_reg
static ggml_backend_reg    g_ggml_backend_zdnn_reg;
static ggml_backend_device g_ggml_backend_zdnn_device;

static ggml_backend_zdnn_device_context g_ggml_ctx_dev_main = {
    /* .zdnn_device           = */ 0,
    /* .zdnn_device_ref_count = */ 0,
    /* .has_parmblkformat_0   = */ false,
    /* .has_parmblkformat_1   = */ false,
    /* .max_size              = */ 0,
    /* .name                  = */ "",
};
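
// ref-counted access to the single NNPA device context; the first acquire probes
// the installed parameter-block formats and the maximum dimension index size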
static int ggml_backend_zdnn_device_acq(ggml_backend_zdnn_device_context * ctx) {
    assert(ctx != NULL);

    if (ctx->zdnn_device == 0) {
        ctx->zdnn_device = 1;
    }

    if (ctx->zdnn_device >= 1) {
        ctx->has_parmblkformat_0 = zdnn_is_nnpa_parmblk_fmt_installed(1, NNPA_PARMBLKFORMAT_0);
        ctx->has_parmblkformat_1 = zdnn_is_nnpa_parmblk_fmt_installed(1, NNPA_PARMBLKFORMAT_1);
        ctx->max_size            = zdnn_get_nnpa_max_dim_idx_size();
        strncpy(ctx->name, GGML_ZDNN_NAME, sizeof(ctx->name) - 1);
    }

    ctx->zdnn_device_ref_count++;
    return ctx->zdnn_device;
}
static void ggml_backend_zdnn_device_rel(ggml_backend_zdnn_device_context * ctx) {
    assert(ctx != NULL);
    assert(ctx->zdnn_device_ref_count > 0);

    ctx->zdnn_device_ref_count--;

    if (ctx->zdnn_device_ref_count == 0) {
        if (ctx->zdnn_device >= 0) {
            ctx->zdnn_device = 0;
        }
    }
}
static ggml_backend_zdnn_context * ggml_zdnn_init(ggml_backend_dev_t dev) {
    GGML_LOG_INFO("%s: allocating\n", __func__);
    GGML_LOG_INFO("%s: found 1 device\n", __func__);

#ifdef STATIC_LIB
    zdnn_init();
#endif

    ggml_backend_zdnn_context        * ctx     = new ggml_backend_zdnn_context();
    ggml_backend_zdnn_device_context * ctx_dev = (ggml_backend_zdnn_device_context *)dev->context;

    int device = 1;
    GGML_LOG_INFO("%s: picking default device: %s\n", __func__, ctx_dev->name);

    ctx->device = device;

    GGML_LOG_INFO("%s: NNPA name: %s\n", __func__, ctx_dev->name);
    GGML_LOG_INFO("%s: NNPA_PARMBLKFORMAT_0 = %s\n", __func__, ctx_dev->has_parmblkformat_0 ? "true" : "false");
    GGML_LOG_INFO("%s: NNPA_PARMBLKFORMAT_1 = %s\n", __func__, ctx_dev->has_parmblkformat_1 ? "true" : "false");

    ctx->gf = nullptr;

    return ctx;
}
static void ggml_zdnn_free(ggml_backend_zdnn_context * ctx) {
    GGML_LOG_INFO("%s: deallocating\n", __func__);
    delete ctx;
}
//
// backend interface
//

static void ggml_backend_zdnn_buffer_free_buffer(ggml_backend_buffer_t buffer) {
    ggml_backend_zdnn_buffer_context * ctx = (ggml_backend_zdnn_buffer_context *)buffer->context;

    for (const auto & buf_ptr : ctx->buffers) {
        ggml_backend_zdnn_buffer * buf = buf_ptr.get();

        // Free any extra buffer allocated for the tensor. E.g., bias for GGML_OP_MUL_MAT
        if (buf->extra != nullptr) free(buf->extra->data);
        if (buf->ztensor.buffer_size > 0) ZDNN_CHECK(zdnn_free_ztensor_buffer(&buf->ztensor));
    }

    delete ctx;
}
static void * ggml_backend_zdnn_buffer_get_base(ggml_backend_buffer_t buffer) {
    ggml_backend_zdnn_buffer_context * ctx = (ggml_backend_zdnn_buffer_context *)buffer->context;
    return ctx->all_data;
}
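
// each tensor gets a ggml_backend_zdnn_buffer holding its zDNN descriptors;
// GGML_OP_MUL_MAT results additionally get a zero-filled bias zTensor (stored in
// `extra`) that is consumed by the matmul kernel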
static enum ggml_status ggml_backend_zdnn_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) {
    if (tensor->view_src != NULL) {
        assert(tensor->view_src->buffer->buft == buffer->buft);
        return GGML_STATUS_SUCCESS;
    }

    ggml_backend_zdnn_buffer_context * ctx = (ggml_backend_zdnn_buffer_context *)buffer->context;

    const int64_t tsize = ggml_nbytes(tensor);
    int buffer_idx = ctx->n_buffers;

    std::unique_ptr<ggml_backend_zdnn_buffer> zdnn_buffer = std::make_unique<ggml_backend_zdnn_buffer>();
    zdnn_buffer->data  = tensor->data;
    zdnn_buffer->size  = tsize;
    zdnn_buffer->extra = nullptr;
    snprintf(zdnn_buffer->name, GGML_MAX_NAME, "%s", tensor->name);

    ggml_zdnn_init_tensor(zdnn_buffer.get(), tensor);
    tensor->extra = zdnn_buffer.get();

    switch (tensor->op) {
        case GGML_OP_MUL_MAT:
            {
                std::unique_ptr<ggml_backend_zdnn_buffer> zdnn_bias_buffer = std::make_unique<ggml_backend_zdnn_buffer>();
                zdnn_bias_buffer->data = (void *)calloc(tensor->ne[0], ggml_element_size(tensor));
                zdnn_bias_buffer->size = ggml_element_size(tensor) * tensor->ne[0];
                snprintf(zdnn_bias_buffer->name, GGML_MAX_NAME, "%.*s (bias)",
                         GGML_MAX_NAME - (int)sizeof(" (bias)"), tensor->name);

                const int64_t bias_dim[GGML_MAX_DIMS] = { 1, 1, 1, tensor->ne[0] };
                ggml_zdnn_create_tensor(zdnn_bias_buffer->pre_tfm_desc,
                                        zdnn_bias_buffer->tfm_desc,
                                        zdnn_bias_buffer->ztensor,
                                        tensor, bias_dim, ZDNN_1D);

                ggml_zdnn_load_tensor(zdnn_bias_buffer->ztensor, zdnn_bias_buffer->data);
                zdnn_buffer->extra = zdnn_bias_buffer.get();

                ctx->buffers.push_back(std::move(zdnn_bias_buffer));
                ctx->n_buffers++;
            } break;
        default:
            break;
    }

    ctx->buffers.push_back(std::move(zdnn_buffer));
    ctx->n_buffers++;

    // GGML_LOG_INFO("%s: initialised tensor '%s' in buffer %d, size = %8.2f MiB\n",
    //               __func__, tensor->name, buffer_idx, tsize);

    return GGML_STATUS_SUCCESS;

    GGML_UNUSED(buffer_idx);
}
static void ggml_backend_zdnn_buffer_memset_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
    memset((char *)tensor->data + offset, value, size);

    GGML_UNUSED(buffer);
}
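
// copy host data into the tensor and transform it into the zDNN tensor layout;
// compute buffers may be rewritten, so an already-transformed zTensor is reset
// first (see the linked issue below)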
static void ggml_backend_zdnn_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
    memcpy((char *)tensor->data + offset, data, size);

    ggml_backend_zdnn_buffer * extra = (ggml_backend_zdnn_buffer *)tensor->extra;

    // Fixes the LLAMA_SET_ROWS bug
    // see: https://github.com/ggml-org/llama.cpp/issues/15414
    if (tensor->buffer->usage == GGML_BACKEND_BUFFER_USAGE_COMPUTE && extra->ztensor.is_transformed) zdnn_reset_ztensor(&extra->ztensor);
    if (extra->ztensor.is_transformed == false) ggml_zdnn_load_tensor(extra->ztensor, tensor->data);

    GGML_UNUSED(buffer);
}
static void ggml_backend_zdnn_buffer_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) {
    memcpy(data, (const char *)tensor->data + offset, size);

    GGML_UNUSED(buffer);
}
static void ggml_backend_zdnn_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
    ggml_backend_zdnn_buffer_context * ctx = (ggml_backend_zdnn_buffer_context *)buffer->context;
    memset(ctx->all_data, value, ctx->all_size);
}
static ggml_backend_buffer_i ggml_backend_zdnn_buffer_i = {
    /* .free_buffer   = */ ggml_backend_zdnn_buffer_free_buffer,
    /* .get_base      = */ ggml_backend_zdnn_buffer_get_base,
    /* .init_tensor   = */ ggml_backend_zdnn_buffer_init_tensor,
    /* .memset_tensor = */ ggml_backend_zdnn_buffer_memset_tensor,
    /* .set_tensor    = */ ggml_backend_zdnn_buffer_set_tensor,
    /* .get_tensor    = */ ggml_backend_zdnn_buffer_get_tensor,
    /* .cpy_tensor    = */ NULL,
    /* .clear         = */ ggml_backend_zdnn_buffer_clear,
    /* .reset         = */ NULL,
};
//
// default buffer type
//

static const char * ggml_backend_zdnn_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
    return GGML_ZDNN_NAME;

    GGML_UNUSED(buft);
}
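
// buffers are plain host allocations: the requested size is rounded up to a
// multiple of the system page size and allocated with ggml_aligned_malloc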
static ggml_backend_buffer_t ggml_backend_zdnn_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
    ggml_backend_zdnn_buffer_context * ctx = new ggml_backend_zdnn_buffer_context();

    const size_t size_page = sysconf(_SC_PAGESIZE);

    size_t size_aligned = size;
    if ((size_aligned % size_page) != 0) {
        size_aligned += size_page - (size_aligned % size_page);
    }

    ggml_backend_zdnn_device_context * ctx_dev = (ggml_backend_zdnn_device_context *)buft->device->context;

    GGML_ASSERT(ctx_dev->zdnn_device >= 0);
    int device = ctx_dev->zdnn_device; GGML_UNUSED(device);

    ctx->all_data  = ggml_aligned_malloc(size_aligned);
    ctx->all_size  = size_aligned;
    ctx->owned     = true;
    ctx->n_buffers = 1;

    if (ctx->all_data != NULL) {
        std::unique_ptr<ggml_backend_zdnn_buffer> zdnn_buffer = std::make_unique<ggml_backend_zdnn_buffer>();
        zdnn_buffer->data = ctx->all_data;
        zdnn_buffer->size = size_aligned;
        ctx->buffers.push_back(std::move(zdnn_buffer));
    }

    if (size_aligned > 0 && (ctx->all_data == NULL)) {
        GGML_LOG_ERROR("%s: error: failed to allocate buffer, size = %8.2f MiB\n",
                       __func__, size_aligned / 1024.0 / 1024.0);
        delete ctx;
        return NULL;
    }

    return ggml_backend_buffer_init(buft, ggml_backend_zdnn_buffer_i, ctx, size);
}
static size_t ggml_backend_zdnn_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
    return 256;

    GGML_UNUSED(buft);
}

static bool ggml_backend_zdnn_buffer_type_is_host(ggml_backend_buffer_type_t buft) {
    return true;

    GGML_UNUSED(buft);
}
ggml_backend_buffer_type_t ggml_backend_zdnn_buffer_type(void) {
    static ggml_backend_buffer_type ggml_backend_buffer_type_zdnn = {
        /* .iface = */ {
            /* .get_name       = */ ggml_backend_zdnn_buffer_type_get_name,
            /* .alloc_buffer   = */ ggml_backend_zdnn_buffer_type_alloc_buffer,
            /* .get_alignment  = */ ggml_backend_zdnn_buffer_type_get_alignment,
            /* .get_max_size   = */ NULL,
            /* .get_alloc_size = */ NULL, // defaults to ggml_nbytes
            /* .is_host        = */ ggml_backend_zdnn_buffer_type_is_host,
        },
        /* .device  = */ &g_ggml_backend_zdnn_device,
        /* .context = */ NULL,
    };

    return &ggml_backend_buffer_type_zdnn;
}
//
// backend
//

static const char * ggml_backend_zdnn_name(ggml_backend_t backend) {
    return GGML_ZDNN_NAME;

    GGML_UNUSED(backend);
}

static void ggml_backend_zdnn_free(ggml_backend_t backend) {
    ggml_backend_zdnn_context * ctx = (ggml_backend_zdnn_context *)backend->context;

    ggml_zdnn_free(ctx);
    free(backend);
}
static enum ggml_status ggml_backend_zdnn_graph_compute(ggml_backend_t backend, ggml_cgraph * cgraph) {
    return ggml_zdnn_graph_compute(backend, cgraph);
}
static ggml_backend_i ggml_backend_zdnn_i = {
    /* .get_name           = */ ggml_backend_zdnn_name,
    /* .free               = */ ggml_backend_zdnn_free,
    /* .set_tensor_async   = */ NULL,
    /* .get_tensor_async   = */ NULL,
    /* .cpy_tensor_async   = */ NULL,
    /* .synchronize        = */ NULL,
    /* .graph_plan_create  = */ NULL,
    /* .graph_plan_free    = */ NULL,
    /* .graph_plan_update  = */ NULL,
    /* .graph_plan_compute = */ NULL,
    /* .graph_compute      = */ ggml_backend_zdnn_graph_compute,
    /* .event_record       = */ NULL,
    /* .event_wait         = */ NULL,
    /* .graph_optimize     = */ NULL,
};
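
// the GUID is the raw bytes of a fixed 16-character string literal, reused as a
// stable 128-bit backend identifier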
static ggml_guid_t ggml_backend_zdnn_guid(void) {
    static const char * guid_str = "IBM-ZDNN-ACCELER";
    return reinterpret_cast<ggml_guid_t>((void *)guid_str);
}

bool ggml_backend_is_zdnn(ggml_backend_t backend) {
    return backend != NULL &&
           ggml_guid_matches(backend->guid, ggml_backend_zdnn_guid());

    GGML_UNUSED(backend);
}
//
// backend device
//

static const char * ggml_backend_zdnn_device_get_name(ggml_backend_dev_t dev) {
    return GGML_ZDNN_NAME;

    GGML_UNUSED(dev);
}

static const char * ggml_backend_zdnn_device_get_description(ggml_backend_dev_t dev) {
    return "IBM Z Neural Network Processing Assist (NNPA)";

    GGML_UNUSED(dev);
}

static void ggml_backend_zdnn_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) {
    *free  = 0;
    *total = 0;

    GGML_UNUSED(dev);
}
static enum ggml_backend_dev_type ggml_backend_zdnn_device_get_type(ggml_backend_dev_t dev) {
    return GGML_BACKEND_DEVICE_TYPE_ACCEL;

    GGML_UNUSED(dev);
}

static void ggml_backend_zdnn_device_get_props(ggml_backend_dev_t dev, ggml_backend_dev_props * props) {
    props->name        = ggml_backend_zdnn_device_get_name(dev);
    props->description = ggml_backend_zdnn_device_get_description(dev);
    props->type        = ggml_backend_zdnn_device_get_type(dev);
    ggml_backend_zdnn_device_get_memory(dev, &props->memory_free, &props->memory_total);
    props->caps = (ggml_backend_dev_caps) {
        /* .async                = */ false,
        /* .host_buffer          = */ false,
        /* .buffer_from_host_ptr = */ false,
        /* .events               = */ false
    };
}
static ggml_backend_t ggml_backend_zdnn_device_init(ggml_backend_dev_t dev, const char * params) {
    ggml_backend_zdnn_context * ctx = ggml_zdnn_init(dev);
    if (ctx == NULL) {
        GGML_LOG_ERROR("%s: error: failed to allocate context\n", __func__);
        return NULL;
    }

    ggml_backend_t backend = (ggml_backend *)malloc(sizeof(ggml_backend));
    *backend = (ggml_backend) {
        /* .guid    = */ ggml_backend_zdnn_guid(),
        /* .iface   = */ ggml_backend_zdnn_i,
        /* .device  = */ dev,
        /* .context = */ ctx
    };

    return backend;

    GGML_UNUSED(params);
}
static ggml_backend_buffer_type_t ggml_backend_zdnn_device_get_buffer_type(ggml_backend_dev_t dev) {
    return ggml_backend_zdnn_buffer_type();

    GGML_UNUSED(dev);
}

static bool ggml_backend_zdnn_device_supports_op(ggml_backend_dev_t dev, const ggml_tensor * op) {
    ggml_backend_zdnn_device_context * ctx_dev = (ggml_backend_zdnn_device_context *)dev->context;

    return ggml_zdnn_supports_op(ctx_dev, op);
}

static bool ggml_backend_zdnn_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) {
    return buft->iface.get_name == ggml_backend_zdnn_buffer_type_get_name;

    GGML_UNUSED(dev);
}
static ggml_backend_device_i ggml_backend_zdnn_device_i = {
    /* .get_name             = */ ggml_backend_zdnn_device_get_name,
    /* .get_description      = */ ggml_backend_zdnn_device_get_description,
    /* .get_memory           = */ ggml_backend_zdnn_device_get_memory,
    /* .get_type             = */ ggml_backend_zdnn_device_get_type,
    /* .get_props            = */ ggml_backend_zdnn_device_get_props,
    /* .init_backend         = */ ggml_backend_zdnn_device_init,
    /* .get_buffer_type      = */ ggml_backend_zdnn_device_get_buffer_type,
    /* .get_host_buffer_type = */ NULL,
    /* .buffer_from_host_ptr = */ NULL,
    /* .supports_op          = */ ggml_backend_zdnn_device_supports_op,
    /* .supports_buft        = */ ggml_backend_zdnn_device_supports_buft,
    /* .offload_op           = */ NULL,
    /* .event_new            = */ NULL,
    /* .event_free           = */ NULL,
    /* .event_synchronize    = */ NULL,
};
//
// backend registry
//

static const char * ggml_backend_zdnn_reg_get_name(ggml_backend_reg_t reg) {
    return GGML_ZDNN_NAME;

    GGML_UNUSED(reg);
}

static size_t ggml_backend_zdnn_reg_device_count(ggml_backend_reg_t reg) {
    if (!zdnn_is_nnpa_installed()) {
        return 0;
    }
    return 1;

    GGML_UNUSED(reg);
}

static ggml_backend_dev_t ggml_backend_zdnn_reg_device_get(ggml_backend_reg_t reg, size_t index) {
    GGML_ASSERT(index == 0);

    return &g_ggml_backend_zdnn_device;

    GGML_UNUSED(reg);
    GGML_UNUSED(index);
}
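
// feature list reported via the "ggml_backend_get_features" proc address;
// populated once at program startup when this translation unit is initialised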
static ggml_backend_feature g_ggml_backend_zdnn_features[] = {
    { "NNPA",                 zdnn_is_nnpa_installed()                                     ? "1" : "0" },
    { "NNPA_PARMBLKFORMAT_0", zdnn_is_nnpa_parmblk_fmt_installed(1, NNPA_PARMBLKFORMAT_0)  ? "1" : "0" },
    { "NNPA_PARMBLKFORMAT_1", zdnn_is_nnpa_parmblk_fmt_installed(1, NNPA_PARMBLKFORMAT_1)  ? "1" : "0" },
    { NULL, NULL },
};

static ggml_backend_feature * ggml_backend_zdnn_get_features(ggml_backend_reg_t reg) {
    return g_ggml_backend_zdnn_features;

    GGML_UNUSED(reg);
}
static void * ggml_backend_zdnn_get_proc_address(ggml_backend_reg_t reg, const char * name) {
    if (strcmp(name, "ggml_backend_get_features") == 0) {
        return (void *) ggml_backend_zdnn_get_features;
    }

    return NULL;

    GGML_UNUSED(reg);
}

static ggml_backend_reg_i ggml_backend_zdnn_reg_i = {
    /* .get_name         = */ ggml_backend_zdnn_reg_get_name,
    /* .get_device_count = */ ggml_backend_zdnn_reg_device_count,
    /* .get_device       = */ ggml_backend_zdnn_reg_device_get,
    /* .get_proc_address = */ ggml_backend_zdnn_get_proc_address
};
static void ggml_zdnn_cleanup(void) {
    ggml_backend_zdnn_device_rel(&g_ggml_ctx_dev_main);
}

// TODO: make thread-safe
ggml_backend_reg_t ggml_backend_zdnn_reg(void) {
    ggml_backend_zdnn_device_acq(&g_ggml_ctx_dev_main);

    // register cleanup callback
    atexit(ggml_zdnn_cleanup);

    {
        g_ggml_backend_zdnn_reg = (ggml_backend_reg) {
            /* .api_version = */ GGML_ZDNN_VERSION,
            /* .iface       = */ ggml_backend_zdnn_reg_i,
            /* .context     = */ NULL
        };

        g_ggml_backend_zdnn_device = (ggml_backend_device) {
            /* .iface   = */ ggml_backend_zdnn_device_i,
            /* .reg     = */ &g_ggml_backend_zdnn_reg,
            /* .context = */ &g_ggml_ctx_dev_main
        };

        return &g_ggml_backend_zdnn_reg;
    }
}

GGML_BACKEND_DL_IMPL(ggml_backend_zdnn_reg)
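
// Usage sketch (illustrative only, not part of this backend): with the generic
// ggml-backend registry API from ggml-backend.h, the backend registered above
// can be instantiated roughly as follows:
//
//   ggml_backend_reg_t reg = ggml_backend_zdnn_reg();
//   if (ggml_backend_reg_dev_count(reg) > 0) {
//       ggml_backend_dev_t dev     = ggml_backend_reg_dev_get(reg, 0);
//       ggml_backend_t     backend = ggml_backend_dev_init(dev, /* params = */ NULL);
//       // ... allocate buffers with ggml_backend_zdnn_buffer_type(), build a
//       // ggml_cgraph, and run it with ggml_backend_graph_compute(backend, graph)
//       ggml_backend_free(backend);
//   }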