rpc-server.cpp

#ifdef GGML_USE_CUDA
#include "ggml-cuda.h"
#endif

#ifdef GGML_USE_METAL
#include "ggml-metal.h"
#endif

#ifdef GGML_USE_VULKAN
#include "ggml-vulkan.h"
#endif

#include "ggml-rpc.h"
#ifdef _WIN32
#  include <windows.h>
#else
#  include <unistd.h>
#endif
#include <string>
#include <stdio.h>

struct rpc_server_params {
    std::string host        = "127.0.0.1";  // address to bind to
    int         port        = 50052;        // TCP port to listen on
    size_t      backend_mem = 0;            // backend memory in bytes (0 = query the backend/system)
};

static void print_usage(int /*argc*/, char ** argv, rpc_server_params params) {
    fprintf(stderr, "Usage: %s [options]\n\n", argv[0]);
    fprintf(stderr, "options:\n");
    fprintf(stderr, "  -h, --help            show this help message and exit\n");
    fprintf(stderr, "  -H HOST, --host HOST  host to bind to (default: %s)\n", params.host.c_str());
    fprintf(stderr, "  -p PORT, --port PORT  port to bind to (default: %d)\n", params.port);
    fprintf(stderr, "  -m MEM, --mem MEM     backend memory size (in MB)\n");
    fprintf(stderr, "\n");
}

static bool rpc_server_params_parse(int argc, char ** argv, rpc_server_params & params) {
    std::string arg;
    for (int i = 1; i < argc; i++) {
        arg = argv[i];
        if (arg == "-H" || arg == "--host") {
            if (++i >= argc) {
                return false;
            }
            params.host = argv[i];
        } else if (arg == "-p" || arg == "--port") {
            if (++i >= argc) {
                return false;
            }
            params.port = std::stoi(argv[i]);
            if (params.port <= 0 || params.port > 65535) {
                return false;
            }
        } else if (arg == "-m" || arg == "--mem") {
            if (++i >= argc) {
                return false;
            }
            params.backend_mem = std::stoul(argv[i]) * 1024 * 1024;
        } else if (arg == "-h" || arg == "--help") {
            print_usage(argc, argv, params);
            exit(0);
        } else {
            fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
            print_usage(argc, argv, params);
            exit(0);
        }
    }
    return true;
}

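// Example invocations (illustrative only; the exact binary name and path depend on how
// the project was built):
//   ./rpc-server                         # bind to 127.0.0.1:50052, auto-detect backend memory
//   ./rpc-server -H 0.0.0.0 -p 50052     # accept connections from other hosts (see warning in main)
//   ./rpc-server -m 2048                 # advertise 2048 MB of backend memory instead of querying
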
static ggml_backend_t create_backend() {
    ggml_backend_t backend = NULL;
#ifdef GGML_USE_CUDA
    fprintf(stderr, "%s: using CUDA backend\n", __func__);
    backend = ggml_backend_cuda_init(0); // init device 0
    if (!backend) {
        fprintf(stderr, "%s: ggml_backend_cuda_init() failed\n", __func__);
    }
#elif GGML_USE_METAL
    fprintf(stderr, "%s: using Metal backend\n", __func__);
    backend = ggml_backend_metal_init();
    if (!backend) {
        fprintf(stderr, "%s: ggml_backend_metal_init() failed\n", __func__);
    }
#elif GGML_USE_VULKAN
    fprintf(stderr, "%s: using Vulkan backend\n", __func__);
    backend = ggml_backend_vk_init(0); // init device 0
    if (!backend) {
        fprintf(stderr, "%s: ggml_backend_vk_init() failed\n", __func__);
    }
#endif

    // if there is no GPU backend, fall back to the CPU backend
    if (!backend) {
        fprintf(stderr, "%s: using CPU backend\n", __func__);
        backend = ggml_backend_cpu_init();
    }
    return backend;
}

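// Note: the backend is selected at compile time through the GGML_USE_* defines above.
// In a typical llama.cpp/ggml CMake build these are enabled with options such as
// -DGGML_RPC=ON together with -DGGML_CUDA=ON, -DGGML_METAL=ON or -DGGML_VULKAN=ON
// (option names may differ between versions); with none of them enabled, the server
// serves the CPU backend.
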
static void get_backend_memory(size_t * free_mem, size_t * total_mem) {
#ifdef GGML_USE_CUDA
    ggml_backend_cuda_get_device_memory(0, free_mem, total_mem);
#elif GGML_USE_VULKAN
    ggml_backend_vk_get_device_memory(0, free_mem, total_mem);
#else
    #ifdef _WIN32
        MEMORYSTATUSEX status;
        status.dwLength = sizeof(status);
        GlobalMemoryStatusEx(&status);
        *total_mem = status.ullTotalPhys;
        *free_mem = status.ullAvailPhys;
    #else
        long pages = sysconf(_SC_PHYS_PAGES);
        long page_size = sysconf(_SC_PAGE_SIZE);
        *total_mem = pages * page_size;
        *free_mem = *total_mem; // no portable free-memory query here; report total as free
    #endif
#endif
}

int main(int argc, char * argv[]) {
    rpc_server_params params;
    if (!rpc_server_params_parse(argc, argv, params)) {
        fprintf(stderr, "Invalid parameters\n");
        return 1;
    }

    if (params.host != "127.0.0.1") {
        fprintf(stderr, "\n");
        fprintf(stderr, "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n");
        fprintf(stderr, "WARNING: Host ('%s') is != '127.0.0.1'\n", params.host.c_str());
        fprintf(stderr, "         Never expose the RPC server to an open network!\n");
        fprintf(stderr, "         This is an experimental feature and is not secure!\n");
        fprintf(stderr, "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n");
        fprintf(stderr, "\n");
    }

    ggml_backend_t backend = create_backend();
    if (!backend) {
        fprintf(stderr, "Failed to create backend\n");
        return 1;
    }
    std::string endpoint = params.host + ":" + std::to_string(params.port);
    size_t free_mem, total_mem;
    if (params.backend_mem > 0) {
        free_mem  = params.backend_mem;
        total_mem = params.backend_mem;
    } else {
        get_backend_memory(&free_mem, &total_mem);
    }
    printf("Starting RPC server on %s, backend memory: %zu MB\n", endpoint.c_str(), free_mem / (1024 * 1024));
    ggml_backend_rpc_start_server(backend, endpoint.c_str(), free_mem, total_mem);
    ggml_backend_free(backend);
    return 0;
}
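
// Connecting a client (illustrative sketch; exact tool names and flags may differ between
// llama.cpp versions):
//
//   # offload layers to one or more RPC servers from the llama.cpp CLI
//   llama-cli -m model.gguf --rpc 192.168.0.10:50052,192.168.0.11:50052
//
// Programmatically, a client can create a remote backend for a running server with
// ggml_backend_rpc_init("192.168.0.10:50052"), declared in ggml-rpc.h, and release it
// later with ggml_backend_free().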