rpc-server.cpp

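//
// ggml RPC server example: creates a single local ggml backend (CUDA, Metal or
// Vulkan when compiled in, otherwise CPU) and exposes it over TCP with
// ggml_backend_rpc_start_server(), so that remote ggml/llama.cpp clients can
// offload computation to this machine.
//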
  1. #include "ggml-cpu.h"
  2. #ifdef GGML_USE_CUDA
  3. #include "ggml-cuda.h"
  4. #endif
  5. #ifdef GGML_USE_METAL
  6. #include "ggml-metal.h"
  7. #endif
  8. #ifdef GGML_USE_VULKAN
  9. #include "ggml-vulkan.h"
  10. #endif
  11. #include "ggml-rpc.h"
  12. #ifdef _WIN32
  13. # include <windows.h>
  14. #else
  15. # include <unistd.h>
  16. #endif
  17. #include <string>
  18. #include <stdio.h>
struct rpc_server_params {
    std::string host        = "127.0.0.1";
    int         port        = 50052;
    size_t      backend_mem = 0;
};

static void print_usage(int /*argc*/, char ** argv, rpc_server_params params) {
    fprintf(stderr, "Usage: %s [options]\n\n", argv[0]);
    fprintf(stderr, "options:\n");
    fprintf(stderr, "  -h, --help                show this help message and exit\n");
    fprintf(stderr, "  -H HOST, --host HOST      host to bind to (default: %s)\n", params.host.c_str());
    fprintf(stderr, "  -p PORT, --port PORT      port to bind to (default: %d)\n", params.port);
    fprintf(stderr, "  -m MEM, --mem MEM         backend memory size (in MB)\n");
    fprintf(stderr, "\n");
}
static bool rpc_server_params_parse(int argc, char ** argv, rpc_server_params & params) {
    std::string arg;
    for (int i = 1; i < argc; i++) {
        arg = argv[i];
        if (arg == "-H" || arg == "--host") {
            if (++i >= argc) {
                return false;
            }
            params.host = argv[i];
        } else if (arg == "-p" || arg == "--port") {
            if (++i >= argc) {
                return false;
            }
            params.port = std::stoi(argv[i]);
            if (params.port <= 0 || params.port > 65535) {
                return false;
            }
        } else if (arg == "-m" || arg == "--mem") {
            if (++i >= argc) {
                return false;
            }
            params.backend_mem = std::stoul(argv[i]) * 1024 * 1024; // MB -> bytes
        } else if (arg == "-h" || arg == "--help") {
            print_usage(argc, argv, params);
            exit(0);
        } else {
            fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
            print_usage(argc, argv, params);
            exit(1);
        }
    }
    return true;
}
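
// Example invocation (the binary name `rpc-server` is assumed here; adjust to
// however this file is built in your tree). This binds to all interfaces and
// advertises 2048 MB of backend memory:
//
//   ./rpc-server --host 0.0.0.0 --port 50052 --mem 2048
//
// With no arguments, the server binds to 127.0.0.1:50052 and reports the
// memory detected by get_backend_memory() below.
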
static ggml_backend_t create_backend() {
    ggml_backend_t backend = NULL;
#ifdef GGML_USE_CUDA
    fprintf(stderr, "%s: using CUDA backend\n", __func__);
    backend = ggml_backend_cuda_init(0); // init device 0
    if (!backend) {
        fprintf(stderr, "%s: ggml_backend_cuda_init() failed\n", __func__);
    }
#elif defined(GGML_USE_METAL)
    fprintf(stderr, "%s: using Metal backend\n", __func__);
    backend = ggml_backend_metal_init();
    if (!backend) {
        fprintf(stderr, "%s: ggml_backend_metal_init() failed\n", __func__);
    }
#elif defined(GGML_USE_VULKAN)
    fprintf(stderr, "%s: using Vulkan backend\n", __func__);
    backend = ggml_backend_vk_init(0); // init device 0
    if (!backend) {
        fprintf(stderr, "%s: ggml_backend_vk_init() failed\n", __func__);
    }
#endif

    // if there is no GPU backend, fall back to the CPU backend
    if (!backend) {
        fprintf(stderr, "%s: using CPU backend\n", __func__);
        backend = ggml_backend_cpu_init();
    }
    return backend;
}

static void get_backend_memory(size_t * free_mem, size_t * total_mem) {
#ifdef GGML_USE_CUDA
    ggml_backend_cuda_get_device_memory(0, free_mem, total_mem);
#elif defined(GGML_USE_VULKAN)
    ggml_backend_vk_get_device_memory(0, free_mem, total_mem);
#else
#ifdef _WIN32
    MEMORYSTATUSEX status;
    status.dwLength = sizeof(status);
    GlobalMemoryStatusEx(&status);
    *total_mem = status.ullTotalPhys;
    *free_mem  = status.ullAvailPhys;
#else
    long pages     = sysconf(_SC_PHYS_PAGES);
    long page_size = sysconf(_SC_PAGE_SIZE);
    *total_mem = pages * page_size;
    *free_mem  = *total_mem; // no portable free-memory query here, so report total as free
#endif
#endif
}
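
// main() ties the pieces together: parse arguments, warn when binding to a
// non-loopback host, create the backend, pick the memory figures to advertise
// (either the user-supplied --mem value or the probed device/system memory),
// and then block inside ggml_backend_rpc_start_server().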
int main(int argc, char * argv[]) {
    rpc_server_params params;
    if (!rpc_server_params_parse(argc, argv, params)) {
        fprintf(stderr, "Invalid parameters\n");
        return 1;
    }

    if (params.host != "127.0.0.1") {
        fprintf(stderr, "\n");
        fprintf(stderr, "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n");
        fprintf(stderr, "WARNING: Host ('%s') is not '127.0.0.1'\n", params.host.c_str());
        fprintf(stderr, "         Never expose the RPC server to an open network!\n");
        fprintf(stderr, "         This is an experimental feature and is not secure!\n");
        fprintf(stderr, "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n");
        fprintf(stderr, "\n");
    }

    ggml_backend_t backend = create_backend();
    if (!backend) {
        fprintf(stderr, "Failed to create backend\n");
        return 1;
    }

    std::string endpoint = params.host + ":" + std::to_string(params.port);

    size_t free_mem, total_mem;
    if (params.backend_mem > 0) {
        // use the user-provided size for both free and total memory
        free_mem  = params.backend_mem;
        total_mem = params.backend_mem;
    } else {
        get_backend_memory(&free_mem, &total_mem);
    }

    printf("Starting RPC server on %s, backend memory: %zu MB\n", endpoint.c_str(), free_mem / (1024 * 1024));
    ggml_backend_rpc_start_server(backend, endpoint.c_str(), free_mem, total_mem);
    ggml_backend_free(backend);
    return 0;
}
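
// Build and client notes:
// - This example needs the ggml RPC backend compiled in; in llama.cpp the
//   commonly documented CMake option is -DGGML_RPC=ON, but check your
//   checkout's docs, as the exact flag may differ between versions.
// - llama.cpp clients can offload to one or more running servers via their
//   --rpc option, e.g. (binary and model names below are placeholders):
//     ./llama-cli -m model.gguf --rpc 192.168.1.10:50052,192.168.1.11:50052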