// metal.cpp
// Evaluate a statically exported ggml computation graph with Metal
//
// - First, export a LLaMA graph:
//
// $ ./bin/main -m ../models/7B/ggml-model-q4_0.bin --export
//
// - Run this tool to evaluate the exported graph:
//
// $ ./bin/metal llama.ggml
//
// The purpose of this tool is mostly for debugging and demonstration purposes.
// The main limitation of exporting computation graphs is that their sizes are static, which often
// can be a problem for real-world applications.
//
  15. #include "ggml.h"
  16. #include "ggml-metal.h"
  17. #include <cstdio>
  18. #include <cstring>
  19. #include <cstdlib>
  20. int main(int argc, char ** argv) {
  21. ggml_time_init();
  22. if (argc != 2) {
  23. fprintf(stderr, "Usage: %s llama.ggml\n", argv[0]);
  24. return -1;
  25. }
  26. const char * fname_cgraph = argv[1];
  27. // load the compute graph
  28. struct ggml_context * ctx_data = NULL;
  29. struct ggml_context * ctx_eval = NULL;
  30. struct ggml_cgraph gf = ggml_graph_import(fname_cgraph, &ctx_data, &ctx_eval);
  31. gf.n_threads = 1;
  32. // this allocates all Metal resources and memory buffers
  33. auto * ctx_metal = ggml_metal_init();
  34. const size_t max_size_data = ggml_get_max_tensor_size(ctx_data);
  35. const size_t max_size_eval = ggml_get_max_tensor_size(ctx_eval);
  36. ggml_metal_add_buffer(ctx_metal, "data", ggml_get_mem_buffer(ctx_data), ggml_get_mem_size(ctx_data), max_size_data);
  37. ggml_metal_add_buffer(ctx_metal, "eval", ggml_get_mem_buffer(ctx_eval), ggml_get_mem_size(ctx_eval), max_size_eval);
  38. // main
  39. {
  40. struct ggml_tensor * input = ggml_graph_get_tensor(&gf, "embd");
  41. *(int32_t *) input->data = 1; // BOS
  42. ggml_metal_set_tensor(ctx_metal, input);
  43. // warmup
  44. ggml_metal_graph_compute(ctx_metal, &gf);
  45. const int n_iter = 16;
  46. const int64_t t0 = ggml_time_us();
  47. // the actual inference happens here
  48. for (int i = 0; i < n_iter; ++i) {
  49. ggml_metal_graph_compute(ctx_metal, &gf);
  50. }
  51. const int64_t t1 = ggml_time_us();
  52. printf("time: %.2f ms, %.2f ms/tok\n", (t1 - t0) / 1000.0, (t1 - t0) / 1000.0 / n_iter);
  53. }
  54. // debug output
  55. {
  56. struct ggml_tensor * logits = gf.nodes[gf.n_nodes - 1];
  57. ggml_metal_get_tensor(ctx_metal, logits);
  58. float * ptr = (float *) ggml_get_data(logits);
  59. printf("logits: ");
  60. for (int i = 0; i < 10; i++) {
  61. printf("%8.4f ", ptr[i]);
  62. }
  63. printf("\n");
  64. int imax = 0;
  65. double sum = 0.0;
  66. double vmax = -1e9;
  67. for (int i = 0; i < 32000; i++) {
  68. sum += (double) ptr[i];
  69. if (ptr[i] > vmax) {
  70. vmax = ptr[i];
  71. imax = i;
  72. }
  73. }
  74. printf("sum: %f, imax = %d, vmax = %f\n", sum, imax, vmax);
  75. }
  76. ggml_metal_free(ctx_metal);
  77. ggml_free(ctx_data);
  78. ggml_free(ctx_eval);
  79. return 0;
  80. }