// ggml-backend.h
#pragma once

#include "ggml.h"
#include "ggml-alloc.h"

#ifdef GGML_BACKEND_SHARED
#    if defined(_WIN32) && !defined(__MINGW32__)
#        ifdef GGML_BACKEND_BUILD
#            define GGML_BACKEND_API __declspec(dllexport) extern
#        else
#            define GGML_BACKEND_API __declspec(dllimport) extern
#        endif
#    else
#        define GGML_BACKEND_API __attribute__ ((visibility ("default"))) extern
#    endif
#else
#    define GGML_BACKEND_API extern
#endif
#ifdef __cplusplus
extern "C" {
#endif

    typedef struct ggml_backend_buffer_type * ggml_backend_buffer_type_t;
    typedef struct ggml_backend_buffer      * ggml_backend_buffer_t;
    typedef struct ggml_backend_event       * ggml_backend_event_t;
    typedef struct ggml_backend             * ggml_backend_t;
    typedef void * ggml_backend_graph_plan_t;
    typedef struct ggml_backend_reg         * ggml_backend_reg_t;
    typedef struct ggml_backend_device      * ggml_backend_dev_t;
    //
    // Backend buffer type
    //

    GGML_API const char *          ggml_backend_buft_name          (ggml_backend_buffer_type_t buft);
    GGML_API ggml_backend_buffer_t ggml_backend_buft_alloc_buffer  (ggml_backend_buffer_type_t buft, size_t size);
    GGML_API size_t                ggml_backend_buft_get_alignment (ggml_backend_buffer_type_t buft);
    GGML_API size_t                ggml_backend_buft_get_max_size  (ggml_backend_buffer_type_t buft);
    GGML_API size_t                ggml_backend_buft_get_alloc_size(ggml_backend_buffer_type_t buft, struct ggml_tensor * tensor);
    GGML_API bool                  ggml_backend_buft_is_host       (ggml_backend_buffer_type_t buft);
    GGML_API ggml_backend_dev_t    ggml_backend_buft_get_device    (ggml_backend_buffer_type_t buft);
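
    // A minimal usage sketch (not part of the API): allocating and freeing a buffer
    // from a buffer type. The size below is arbitrary and for illustration only.
    /*
        ggml_backend_buffer_type_t buft = ggml_backend_cpu_buffer_type(); // always available
        ggml_backend_buffer_t      buf  = ggml_backend_buft_alloc_buffer(buft, 16*1024*1024);
        if (buf != NULL) {
            // ... place tensors in the buffer ...
            ggml_backend_buffer_free(buf);
        }
    */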
    //
    // Backend buffer
    //

    enum ggml_backend_buffer_usage {
        GGML_BACKEND_BUFFER_USAGE_ANY     = 0,
        GGML_BACKEND_BUFFER_USAGE_WEIGHTS = 1,
        GGML_BACKEND_BUFFER_USAGE_COMPUTE = 2,
    };

    GGML_API const char *                   ggml_backend_buffer_name          (ggml_backend_buffer_t buffer);
    GGML_API void                           ggml_backend_buffer_free          (ggml_backend_buffer_t buffer);
    GGML_API void *                         ggml_backend_buffer_get_base      (ggml_backend_buffer_t buffer);
    GGML_API size_t                         ggml_backend_buffer_get_size      (ggml_backend_buffer_t buffer);
    GGML_API enum ggml_status               ggml_backend_buffer_init_tensor   (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
    GGML_API size_t                         ggml_backend_buffer_get_alignment (ggml_backend_buffer_t buffer);
    GGML_API size_t                         ggml_backend_buffer_get_max_size  (ggml_backend_buffer_t buffer);
    GGML_API size_t                         ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
    GGML_API void                           ggml_backend_buffer_clear         (ggml_backend_buffer_t buffer, uint8_t value);
    GGML_API bool                           ggml_backend_buffer_is_host       (ggml_backend_buffer_t buffer);
    GGML_API void                           ggml_backend_buffer_set_usage     (ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage);
    GGML_API enum ggml_backend_buffer_usage ggml_backend_buffer_get_usage     (ggml_backend_buffer_t buffer);
    GGML_API ggml_backend_buffer_type_t     ggml_backend_buffer_get_type      (ggml_backend_buffer_t buffer);
    GGML_API void                           ggml_backend_buffer_reset         (ggml_backend_buffer_t buffer);

    // tensor copy between different backends
    GGML_API void ggml_backend_tensor_copy(struct ggml_tensor * src, struct ggml_tensor * dst);
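
    // A minimal sketch: inspecting a buffer and tagging it as holding weights, so
    // that the scheduler prefers the buffer's backend for ops that use those tensors.
    // `buf` is assumed to be a previously allocated buffer.
    /*
        printf("%s: %zu bytes (host: %d)\n",
               ggml_backend_buffer_name(buf),
               ggml_backend_buffer_get_size(buf),
               ggml_backend_buffer_is_host(buf));
        ggml_backend_buffer_set_usage(buf, GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
    */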
    //
    // Backend (stream)
    //

    GGML_API ggml_guid_t  ggml_backend_guid(ggml_backend_t backend);
    GGML_API const char * ggml_backend_name(ggml_backend_t backend);
    GGML_API void         ggml_backend_free(ggml_backend_t backend);

    GGML_API ggml_backend_buffer_type_t ggml_backend_get_default_buffer_type(ggml_backend_t backend);
    GGML_API ggml_backend_buffer_t      ggml_backend_alloc_buffer(ggml_backend_t backend, size_t size);
    GGML_API size_t                     ggml_backend_get_alignment(ggml_backend_t backend);
    GGML_API size_t                     ggml_backend_get_max_size(ggml_backend_t backend);

    GGML_API void ggml_backend_tensor_set_async(ggml_backend_t backend,       struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
    GGML_API void ggml_backend_tensor_get_async(ggml_backend_t backend, const struct ggml_tensor * tensor,       void * data, size_t offset, size_t size);

    // "offset" refers to the offset in tensor->data for setting/getting data
    GGML_API void ggml_backend_tensor_set(      struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
    GGML_API void ggml_backend_tensor_get(const struct ggml_tensor * tensor,       void * data, size_t offset, size_t size);
    GGML_API void ggml_backend_tensor_memset(   struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size);

    GGML_API void ggml_backend_synchronize(ggml_backend_t backend);

    GGML_API ggml_backend_graph_plan_t ggml_backend_graph_plan_create(ggml_backend_t backend, struct ggml_cgraph * cgraph);
    GGML_API void                      ggml_backend_graph_plan_free  (ggml_backend_t backend, ggml_backend_graph_plan_t plan);

    GGML_API enum ggml_status ggml_backend_graph_plan_compute (ggml_backend_t backend, ggml_backend_graph_plan_t plan);
    GGML_API enum ggml_status ggml_backend_graph_compute      (ggml_backend_t backend, struct ggml_cgraph * cgraph);
    GGML_API enum ggml_status ggml_backend_graph_compute_async(ggml_backend_t backend, struct ggml_cgraph * cgraph);

    // NOTE: will be removed, use device version instead
    GGML_API bool ggml_backend_supports_op  (ggml_backend_t backend, const struct ggml_tensor * op);
    GGML_API bool ggml_backend_supports_buft(ggml_backend_t backend, ggml_backend_buffer_type_t buft);
    GGML_API bool ggml_backend_offload_op   (ggml_backend_t backend, const struct ggml_tensor * op);

    // asynchronous copy
    // the copy is performed after all the currently queued operations in backend_src
    // backend_dst will wait for the copy to complete before performing other operations
    // automatic fallback to sync copy if async is not supported
    GGML_API void ggml_backend_tensor_copy_async(ggml_backend_t backend_src, ggml_backend_t backend_dst, struct ggml_tensor * src, struct ggml_tensor * dst);

    GGML_API ggml_backend_dev_t ggml_backend_get_device(ggml_backend_t backend);
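
    // A minimal sketch: uploading input data, computing a graph synchronously, and
    // reading the result back. `backend`, `t_in`, `t_out` and `gf` are assumed to
    // have been created and allocated elsewhere.
    /*
        float in[16] = {0};
        ggml_backend_tensor_set(t_in, in, 0, sizeof(in));
        if (ggml_backend_graph_compute(backend, gf) == GGML_STATUS_SUCCESS) {
            float out[16];
            ggml_backend_tensor_get(t_out, out, 0, sizeof(out));
        }
    */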
    //
    // Events
    //

    GGML_API ggml_backend_event_t ggml_backend_event_new        (ggml_backend_dev_t device);
    GGML_API void                 ggml_backend_event_free       (ggml_backend_event_t event);
    GGML_API void                 ggml_backend_event_record     (ggml_backend_event_t event, ggml_backend_t backend);
    GGML_API void                 ggml_backend_event_synchronize(ggml_backend_event_t event);
    GGML_API void                 ggml_backend_event_wait       (ggml_backend_t backend, ggml_backend_event_t event);
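
    // A minimal sketch: ordering work across two backend streams with an event.
    // `backend_a`/`backend_b`, their graphs, and `dev_a` (backend_a's device) are
    // assumed to exist.
    /*
        ggml_backend_event_t ev = ggml_backend_event_new(dev_a);
        ggml_backend_graph_compute_async(backend_a, gf_a);
        ggml_backend_event_record(ev, backend_a); // marks the end of gf_a on backend_a
        ggml_backend_event_wait(backend_b, ev);   // backend_b will not start gf_b before gf_a is done
        ggml_backend_graph_compute_async(backend_b, gf_b);
        ggml_backend_synchronize(backend_b);
        ggml_backend_event_free(ev);
    */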
    //
    // Backend device
    //

    enum ggml_backend_dev_type {
        // CPU device using system memory
        GGML_BACKEND_DEVICE_TYPE_CPU,
        // GPU device using dedicated memory
        GGML_BACKEND_DEVICE_TYPE_GPU,
        // accelerator devices intended to be used together with the CPU backend (e.g. BLAS or AMX)
        GGML_BACKEND_DEVICE_TYPE_ACCEL
    };

    // functionality supported by the device
    struct ggml_backend_dev_caps {
        // asynchronous operations
        bool async;
        // pinned host buffer
        bool host_buffer;
        // creating buffers from host ptr
        bool buffer_from_host_ptr;
        // event synchronization
        bool events;
    };

    // all the device properties
    struct ggml_backend_dev_props {
        const char * name;
        const char * description;
        size_t memory_free;
        size_t memory_total;
        enum ggml_backend_dev_type type;
        struct ggml_backend_dev_caps caps;
    };

    GGML_API const char *               ggml_backend_dev_name                (ggml_backend_dev_t device);
    GGML_API const char *               ggml_backend_dev_description         (ggml_backend_dev_t device);
    GGML_API void                       ggml_backend_dev_memory              (ggml_backend_dev_t device, size_t * free, size_t * total);
    GGML_API enum ggml_backend_dev_type ggml_backend_dev_type                (ggml_backend_dev_t device);
    GGML_API void                       ggml_backend_dev_get_props           (ggml_backend_dev_t device, struct ggml_backend_dev_props * props);
    GGML_API ggml_backend_reg_t         ggml_backend_dev_backend_reg         (ggml_backend_dev_t device);
    GGML_API ggml_backend_t             ggml_backend_dev_init                (ggml_backend_dev_t device, const char * params);
    GGML_API ggml_backend_buffer_type_t ggml_backend_dev_buffer_type         (ggml_backend_dev_t device);
    GGML_API ggml_backend_buffer_type_t ggml_backend_dev_host_buffer_type    (ggml_backend_dev_t device);
    GGML_API ggml_backend_buffer_t      ggml_backend_dev_buffer_from_host_ptr(ggml_backend_dev_t device, void * ptr, size_t size, size_t max_tensor_size);

    GGML_API bool ggml_backend_dev_supports_op  (ggml_backend_dev_t device, const struct ggml_tensor * op);
    GGML_API bool ggml_backend_dev_supports_buft(ggml_backend_dev_t device, ggml_backend_buffer_type_t buft);
    GGML_API bool ggml_backend_dev_offload_op   (ggml_backend_dev_t device, const struct ggml_tensor * op);
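
    // A minimal sketch: printing the properties of a device. `dev` is assumed to
    // come from ggml_backend_dev_get() or ggml_backend_dev_by_type().
    /*
        struct ggml_backend_dev_props props;
        ggml_backend_dev_get_props(dev, &props);
        printf("%s (%s): %zu/%zu bytes free, async: %d, events: %d\n",
               props.name, props.description,
               props.memory_free, props.memory_total,
               props.caps.async, props.caps.events);
    */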
    //
    // Backend (reg)
    //

    GGML_API const char *       ggml_backend_reg_name            (ggml_backend_reg_t reg);
    GGML_API size_t             ggml_backend_reg_dev_count       (ggml_backend_reg_t reg);
    GGML_API ggml_backend_dev_t ggml_backend_reg_dev_get         (ggml_backend_reg_t reg, size_t index);
    GGML_API void *             ggml_backend_reg_get_proc_address(ggml_backend_reg_t reg, const char * name);

    // Common functions that may be obtained using ggml_backend_reg_get_proc_address

    // Split buffer type for tensor parallelism
    typedef ggml_backend_buffer_type_t   (*ggml_backend_split_buffer_type_t)(int main_device, const float * tensor_split);
    // Set the number of threads for the backend
    typedef void                         (*ggml_backend_set_n_threads_t)(ggml_backend_t backend, int n_threads);
    // Get additional buffer types provided by the device (returns a NULL-terminated array)
    typedef ggml_backend_buffer_type_t * (*ggml_backend_dev_get_extra_bufts_t)(ggml_backend_dev_t device);
    // Set the abort callback for the backend
    typedef void                         (*ggml_backend_set_abort_callback_t)(ggml_backend_t backend, ggml_abort_callback abort_callback, void * abort_callback_data);
    // Get a list of feature flags supported by the backend (returns a NULL-terminated array)
    struct ggml_backend_feature {
        const char * name;
        const char * value;
    };
    typedef struct ggml_backend_feature * (*ggml_backend_get_features_t)(ggml_backend_reg_t reg);
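
    // A minimal sketch: looking up an optional function by name. The proc name below
    // follows the naming convention of the function typedefs above; availability is
    // backend-specific, so the NULL check is required.
    /*
        ggml_backend_set_n_threads_t set_n_threads = (ggml_backend_set_n_threads_t)
            ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_n_threads");
        if (set_n_threads != NULL) {
            set_n_threads(backend_cpu, 8);
        }
    */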
    //
    // Backend registry
    //

    GGML_API void ggml_backend_device_register(ggml_backend_dev_t device);

    // Backend (reg) enumeration
    GGML_API size_t             ggml_backend_reg_count(void);
    GGML_API ggml_backend_reg_t ggml_backend_reg_get(size_t index);
    GGML_API ggml_backend_reg_t ggml_backend_reg_by_name(const char * name);

    // Device enumeration
    GGML_API size_t             ggml_backend_dev_count(void);
    GGML_API ggml_backend_dev_t ggml_backend_dev_get(size_t index);
    GGML_API ggml_backend_dev_t ggml_backend_dev_by_name(const char * name);
    GGML_API ggml_backend_dev_t ggml_backend_dev_by_type(enum ggml_backend_dev_type type);

    // Direct backend (stream) initialization
    // = ggml_backend_dev_init(ggml_backend_dev_by_name(name), params)
    GGML_API ggml_backend_t ggml_backend_init_by_name(const char * name, const char * params);
    // = ggml_backend_dev_init(ggml_backend_dev_by_type(type), params)
    GGML_API ggml_backend_t ggml_backend_init_by_type(enum ggml_backend_dev_type type, const char * params);
    // = ggml_backend_dev_init(ggml_backend_dev_by_type(GPU) OR ggml_backend_dev_by_type(CPU), NULL)
    GGML_API ggml_backend_t ggml_backend_init_best(void);

    // Load a backend from a dynamic library and register it
    GGML_API ggml_backend_reg_t ggml_backend_load(const char * path);
    // Unload a backend if loaded dynamically and unregister it
    GGML_API void ggml_backend_unload(ggml_backend_reg_t reg);
    // Load all known backends from dynamic libraries
    GGML_API void ggml_backend_load_all(void);
    GGML_API void ggml_backend_load_all_from_path(const char * dir_path);
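
    // A minimal sketch: loading all dynamic backends and initializing a stream on
    // the best available device (a GPU if one is present, otherwise the CPU).
    /*
        ggml_backend_load_all();
        ggml_backend_t backend = ggml_backend_init_best();
        if (backend == NULL) {
            // no usable device was found
        }
    */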
    //
    // Backend scheduler
    //

    // The backend scheduler allows multiple backend devices to be used together
    // Handles compute buffer allocation, assignment of tensors to backends, and copying of tensors between backends
    // The backends are selected based on:
    // - the backend that supports the operation
    // - the location of the pre-allocated tensors (e.g. the weights)
    /*
      Example usage:

        // operations that use tensors allocated in a buffer with USAGE_WEIGHTS will be assigned
        // preferably to run on the same backend as the buffer
        ggml_backend_buffer_set_usage(buf_weights, GGML_BACKEND_BUFFER_USAGE_WEIGHTS);

        sched = ggml_backend_sched_new({backend_gpu, backend_gpu2, backend_cpu}, NULL, num_backends, GGML_DEFAULT_GRAPH_SIZE, false);

        // initialize buffers from a max size graph (optional)
        reserve_graph = build_graph(sched, max_batch_size);

        // manually assign nodes to a backend (optional, should not be needed in most cases)
        struct ggml_tensor * node = ggml_mul_mat(ctx, ...);
        ggml_backend_sched_set_tensor_backend(sched, node, backend_gpu);

        ggml_backend_sched_reserve(sched, reserve_graph);

        // compute
        graph = build_graph(sched); // the graph and its tensors are single-use in terms of allocation, multi-use in terms of computation
        for (int i = 0; i < 10; ++i) {
            ggml_backend_sched_graph_compute(sched, graph); // on the first iteration the graph is allocated automatically
        }

        // if there are graph inputs:
        graph = build_graph(sched); // get a new graph that is not allocated (the metadata for the old graph is freed once ggml_free is called)
        ggml_backend_sched_reset(sched); // clear the allocation of the previous graph
        ggml_backend_sched_alloc_graph(sched, graph); // explicitly allocate the new graph but do not execute it
        ggml_backend_tensor_set(input_tensor, ...); // copy data to the newly allocated graph tensors
        ggml_backend_sched_graph_compute(sched, graph); // execute the graph

        // as an alternative to the above it is also possible to assign the inputs to a dedicated context and
        // allocate them statically via ggml_backend_alloc_ctx_tensors
    */
    typedef struct ggml_backend_sched * ggml_backend_sched_t;

    // Evaluation callback for each node in the graph (set with ggml_backend_sched_set_eval_callback)
    // when ask == true, the scheduler wants to know if the user wants to observe this node
    // this allows the scheduler to batch nodes together in order to evaluate them in a single call
    //
    // when ask == false, the scheduler is passing the node tensor to the user for observation
    // if the user returns false, the scheduler will cancel the graph compute
    //
    typedef bool (*ggml_backend_sched_eval_callback)(struct ggml_tensor * t, bool ask, void * user_data);
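
    // A minimal sketch of an eval callback that observes only mul_mat nodes; the
    // callback name is illustrative.
    /*
        static bool observe_mul_mat(struct ggml_tensor * t, bool ask, void * user_data) {
            if (ask) {
                return t->op == GGML_OP_MUL_MAT; // only interested in mul_mat nodes
            }
            // here t holds the computed data of a mul_mat node
            return true; // returning false would cancel the graph compute
        }

        ggml_backend_sched_set_eval_callback(sched, observe_mul_mat, NULL);
    */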
    // Initialize a backend scheduler, backends with low index are given priority over backends with high index
    GGML_API ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size, bool parallel);
    GGML_API void                 ggml_backend_sched_free(ggml_backend_sched_t sched);

    // Initialize backend buffers from a measure graph
    GGML_API bool ggml_backend_sched_reserve(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph); // returns success

    GGML_API int            ggml_backend_sched_get_n_backends(ggml_backend_sched_t sched);
    GGML_API ggml_backend_t ggml_backend_sched_get_backend(ggml_backend_sched_t sched, int i);

    // Get the number of splits of the last graph
    GGML_API int ggml_backend_sched_get_n_splits(ggml_backend_sched_t sched);
    GGML_API int ggml_backend_sched_get_n_copies(ggml_backend_sched_t sched);

    GGML_API size_t ggml_backend_sched_get_buffer_size(ggml_backend_sched_t sched, ggml_backend_t backend);

    GGML_API void           ggml_backend_sched_set_tensor_backend(ggml_backend_sched_t sched, struct ggml_tensor * node, ggml_backend_t backend);
    GGML_API ggml_backend_t ggml_backend_sched_get_tensor_backend(ggml_backend_sched_t sched, struct ggml_tensor * node);

    // Allocate and compute graph on the backend scheduler
    GGML_API bool             ggml_backend_sched_alloc_graph        (ggml_backend_sched_t sched, struct ggml_cgraph * graph); // returns success
    GGML_API enum ggml_status ggml_backend_sched_graph_compute      (ggml_backend_sched_t sched, struct ggml_cgraph * graph);
    GGML_API enum ggml_status ggml_backend_sched_graph_compute_async(ggml_backend_sched_t sched, struct ggml_cgraph * graph);
    GGML_API void             ggml_backend_sched_synchronize        (ggml_backend_sched_t sched);

    // Reset all assignments and allocators - must be called before changing the node backends or allocating a new graph.
    // This in effect deallocates all tensors that were previously allocated and leaves them with dangling pointers.
    // The correct way to use this API is to discard the deallocated tensors and create new ones.
    GGML_API void ggml_backend_sched_reset(ggml_backend_sched_t sched);

    // Set a callback to be called for each resulting node during graph compute
    GGML_API void ggml_backend_sched_set_eval_callback(ggml_backend_sched_t sched, ggml_backend_sched_eval_callback callback, void * user_data);
    //
    // Utils
    //

    struct ggml_backend_graph_copy {
        ggml_backend_buffer_t buffer;
        struct ggml_context * ctx_allocated;
        struct ggml_context * ctx_unallocated;
        struct ggml_cgraph * graph;
    };

    // Copy a graph to a different backend
    GGML_API struct ggml_backend_graph_copy ggml_backend_graph_copy     (ggml_backend_t backend, struct ggml_cgraph * graph);
    GGML_API void                           ggml_backend_graph_copy_free(struct ggml_backend_graph_copy copy);

    typedef bool (*ggml_backend_eval_callback)(int node_index, struct ggml_tensor * t1, struct ggml_tensor * t2, void * user_data);

    // Compare the output of two backends
    GGML_API bool ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback callback, void * user_data);
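
    // A minimal sketch: comparing two backends node by node. t1 is the node as
    // computed by backend1 and t2 the same node as computed by backend2; the
    // callback name is illustrative.
    /*
        static bool check_node(int node_index, struct ggml_tensor * t1, struct ggml_tensor * t2, void * user_data) {
            // compare the data of t1 and t2 here; return false to stop the comparison
            return true;
        }

        ggml_backend_compare_graph_backend(backend1, backend2, graph, check_node, NULL);
    */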
    // Tensor initialization
    GGML_API enum ggml_status ggml_backend_tensor_alloc(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, void * addr);
    GGML_API enum ggml_status ggml_backend_view_init(struct ggml_tensor * tensor);

    // CPU buffer types are always available
    GGML_API ggml_backend_buffer_t      ggml_backend_cpu_buffer_from_ptr(void * ptr, size_t size);
    GGML_API ggml_backend_buffer_type_t ggml_backend_cpu_buffer_type(void);
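
    // A minimal sketch: wrapping an existing host allocation in a backend buffer.
    // The buffer is assumed not to take ownership of the pointer, so the caller
    // still frees it after freeing the buffer.
    /*
        void * data = malloc(4096);
        ggml_backend_buffer_t buf = ggml_backend_cpu_buffer_from_ptr(data, 4096);
        // ... place tensors in the buffer ...
        ggml_backend_buffer_free(buf);
        free(data);
    */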
#ifdef __cplusplus
}
#endif