// rpc-server.cpp

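// Overview: this program exposes a local ggml backend (CUDA, Metal, or CPU)
// over TCP via the ggml RPC protocol (ggml-rpc.h), so that another machine
// can offload ggml computations to this one. It parses a host/port/memory
// configuration, creates the best available backend, and serves it.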
#ifdef GGML_USE_CUDA
#include "ggml-cuda.h"
#endif

#ifdef GGML_USE_METAL
#include "ggml-metal.h"
#endif

#include "ggml-rpc.h"

#include <stdio.h>
#include <stdlib.h> // for exit()
#include <string>
struct rpc_server_params {
    std::string host        = "0.0.0.0";
    int         port        = 50052;
    size_t      backend_mem = 0; // bytes; 0 means query the backend
};
static void print_usage(int /*argc*/, char ** argv, rpc_server_params params) {
    fprintf(stderr, "Usage: %s [options]\n\n", argv[0]);
    fprintf(stderr, "options:\n");
    fprintf(stderr, "  -h, --help            show this help message and exit\n");
    fprintf(stderr, "  -H HOST, --host HOST  host to bind to (default: %s)\n", params.host.c_str());
    fprintf(stderr, "  -p PORT, --port PORT  port to bind to (default: %d)\n", params.port);
    fprintf(stderr, "  -m MEM,  --mem  MEM   backend memory size (in MB)\n");
    fprintf(stderr, "\n");
}
static bool rpc_server_params_parse(int argc, char ** argv, rpc_server_params & params) {
    std::string arg;
    for (int i = 1; i < argc; i++) {
        arg = argv[i];
        if (arg == "-H" || arg == "--host") {
            if (++i >= argc) {
                return false;
            }
            params.host = argv[i];
        } else if (arg == "-p" || arg == "--port") {
            if (++i >= argc) {
                return false;
            }
            params.port = std::stoi(argv[i]);
            if (params.port <= 0 || params.port > 65535) {
                return false;
            }
        } else if (arg == "-m" || arg == "--mem") {
            if (++i >= argc) {
                return false;
            }
            // --mem is given in MB; convert to bytes
            params.backend_mem = std::stoul(argv[i]) * 1024 * 1024;
        } else if (arg == "-h" || arg == "--help") {
            print_usage(argc, argv, params);
            exit(0);
        } else {
            fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
            return false;
        }
    }
    return true;
}
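// For illustration, a command line such as
//   rpc-server -H 127.0.0.1 -p 50052 -m 2048
// parses to params = { host = "127.0.0.1", port = 50052,
// backend_mem = 2048 * 1024 * 1024 }.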
static ggml_backend_t create_backend() {
    ggml_backend_t backend = NULL;
#ifdef GGML_USE_CUDA
    fprintf(stderr, "%s: using CUDA backend\n", __func__);
    backend = ggml_backend_cuda_init(0); // init device 0
    if (!backend) {
        fprintf(stderr, "%s: ggml_backend_cuda_init() failed\n", __func__);
    }
#elif defined(GGML_USE_METAL)
    fprintf(stderr, "%s: using Metal backend\n", __func__);
    backend = ggml_backend_metal_init();
    if (!backend) {
        fprintf(stderr, "%s: ggml_backend_metal_init() failed\n", __func__);
    }
#endif
    // if no GPU backend is available (or its init failed), fall back to the CPU backend
    if (!backend) {
        fprintf(stderr, "%s: using CPU backend\n", __func__);
        backend = ggml_backend_cpu_init();
    }
    return backend;
}
static void get_backend_memory(size_t * free_mem, size_t * total_mem) {
#ifdef GGML_USE_CUDA
    ggml_backend_cuda_get_device_memory(0, free_mem, total_mem);
#else
    // TODO: implement for other backends
    *free_mem  = 1;
    *total_mem = 1;
#endif
}
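// Note: on backends without a memory query (the TODO above), the server
// advertises 1 byte of free/total memory unless --mem is given. If clients
// refuse to offload anything, pass the usable size explicitly, e.g.
// (illustrative):
//   rpc-server --mem 2048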
int main(int argc, char * argv[]) {
    rpc_server_params params;
    if (!rpc_server_params_parse(argc, argv, params)) {
        fprintf(stderr, "Invalid parameters\n");
        return 1;
    }

    ggml_backend_t backend = create_backend();
    if (!backend) {
        fprintf(stderr, "Failed to create backend\n");
        return 1;
    }

    std::string endpoint = params.host + ":" + std::to_string(params.port);

    // advertise either the user-supplied size or whatever the backend reports
    size_t free_mem, total_mem;
    if (params.backend_mem > 0) {
        free_mem  = params.backend_mem;
        total_mem = params.backend_mem;
    } else {
        get_backend_memory(&free_mem, &total_mem);
    }

    printf("Starting RPC server on %s, backend memory: %zu MB\n", endpoint.c_str(), free_mem / (1024 * 1024));
    start_rpc_server(backend, endpoint.c_str(), free_mem, total_mem);
    ggml_backend_free(backend);
    return 0;
}
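// Usage sketch (assumptions: a build of this example named `rpc-server` and a
// llama.cpp client built with RPC support; binary names and flag spellings may
// differ between versions):
//
//   # on the machine whose backend you want to share
//   ./rpc-server -H 0.0.0.0 -p 50052
//
//   # on the client, point at the server's endpoint to offload layers there
//   ./llama-cli -m model.gguf --rpc 192.168.1.10:50052 -ngl 99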