// ggml-metal.h
  1. // An interface allowing to compute ggml_cgraph with Metal
  2. //
  3. // This is a fully functional interface that extends ggml with GPU support for Apple devices.
  4. // A similar interface can be created for other GPU backends (e.g. Vulkan, CUDA, OpenCL, etc.)
  5. //
  6. // How it works?
  7. //
  8. // As long as your program can create and evaluate a ggml_cgraph on the CPU, you can use this
  9. // interface to evaluate the same graph on the GPU. Instead of using ggml_graph_compute(), you
  10. // use ggml_metal_graph_compute() (or ggml_vulkan_graph_compute(), etc.)
  11. //
  12. // You only need to make sure that all memory buffers that you used during the graph creation
  13. // are mapped to the device memory with the ggml_metal_add_buffer() function. This mapping is
  14. // used during the graph evaluation to determine the arguments of the compute kernels.
  15. //
  16. // Synchronization between device and host memory (for example for input and output tensors)
  17. // is done with the ggml_metal_set_tensor() and ggml_metal_get_tensor() functions.
  18. //
  19. #pragma once
  20. #include "ggml.h"
  21. #include <stddef.h>
  22. #include <stdbool.h>
  23. // max memory buffers that can be mapped to the device
  24. #define GGML_METAL_MAX_BUFFERS 16
  25. #define GGML_METAL_MAX_COMMAND_BUFFERS 32
  26. struct ggml_tensor;
  27. struct ggml_cgraph;
  28. #ifdef __cplusplus
  29. extern "C" {
  30. #endif
  31. void ggml_metal_log_set_callback(ggml_log_callback log_callback, void * user_data);
  32. struct ggml_metal_context;
  33. // number of command buffers to use
  34. struct ggml_metal_context * ggml_metal_init(int n_cb);
  35. void ggml_metal_free(struct ggml_metal_context * ctx);
  36. void * ggml_metal_host_malloc(size_t n);
  37. void ggml_metal_host_free (void * data);
  38. // set the number of command buffers to use
  39. void ggml_metal_set_n_cb(struct ggml_metal_context * ctx, int n_cb);
  40. // creates a mapping between a host memory buffer and a device memory buffer
  41. // - make sure to map all buffers used in the graph before calling ggml_metal_graph_compute
  42. // - the mapping is used during computation to determine the arguments of the compute kernels
  43. // - you don't need to keep the host memory buffer allocated as it is never accessed by Metal
  44. // - max_size specifies the maximum size of a tensor and is used to create shared views such
  45. // that it is guaranteed that the tensor will fit in at least one of the views
  46. //
  47. bool ggml_metal_add_buffer(
  48. struct ggml_metal_context * ctx,
  49. const char * name,
  50. void * data,
  51. size_t size,
  52. size_t max_size);
  53. // set data from host memory into the device
  54. void ggml_metal_set_tensor(struct ggml_metal_context * ctx, struct ggml_tensor * t);
  55. // get data from the device into host memory
  56. void ggml_metal_get_tensor(struct ggml_metal_context * ctx, struct ggml_tensor * t);
  57. // try to find operations that can be run concurrently in the graph
  58. // you should run it again if the topology of your graph changes
  59. void ggml_metal_graph_find_concurrency(struct ggml_metal_context * ctx, struct ggml_cgraph * gf, bool check_mem);
  60. // if the graph has been optimized for concurrently dispatch, return length of the concur_list if optimized
  61. int ggml_metal_if_optimized(struct ggml_metal_context * ctx);
  62. // output the concur_list for ggml_alloc
  63. int * ggml_metal_get_concur_list(struct ggml_metal_context * ctx);
  64. // same as ggml_graph_compute but uses Metal
  65. // creates gf->n_threads command buffers in parallel
  66. void ggml_metal_graph_compute(struct ggml_metal_context * ctx, struct ggml_cgraph * gf);
  67. #ifdef __cplusplus
  68. }
  69. #endif