// ggml-rpc.cpp

#include "ggml-rpc.h"
#include "ggml-impl.h"
#include "ggml-backend-impl.h"

#include <cinttypes>
#include <string>
#include <vector>
#include <memory>
#include <mutex>
#include <unordered_map>
#include <unordered_set>
#ifdef _WIN32
#  define WIN32_LEAN_AND_MEAN
#  ifndef NOMINMAX
#    define NOMINMAX
#  endif
#  include <windows.h>
#  include <winsock2.h>
#else
#  include <arpa/inet.h>
#  include <sys/socket.h>
#  include <sys/types.h>
#  include <netinet/in.h>
#  include <netinet/tcp.h>
#  include <netdb.h>
#  include <unistd.h>
#endif
#include <string.h>

#define UNUSED GGML_UNUSED

#define GGML_DEBUG 0
#if (GGML_DEBUG >= 1)
#define GGML_PRINT_DEBUG(...) printf(__VA_ARGS__)
#else
#define GGML_PRINT_DEBUG(...)
#endif

#ifdef _WIN32
typedef SOCKET sockfd_t;
using ssize_t = __int64;
#else
typedef int sockfd_t;
#endif

// cross-platform socket
struct socket_t {
    sockfd_t fd;
    socket_t(sockfd_t fd) : fd(fd) {}
    ~socket_t() {
        GGML_PRINT_DEBUG("[%s] closing socket %d\n", __func__, this->fd);
#ifdef _WIN32
        closesocket(this->fd);
#else
        close(this->fd);
#endif
    }
};

// ggml_tensor is serialized into rpc_tensor
#pragma pack(push, 1)
struct rpc_tensor {
    uint64_t id;
    uint32_t type;
    uint64_t buffer;
    uint32_t ne[GGML_MAX_DIMS];
    uint32_t nb[GGML_MAX_DIMS];
    uint32_t op;
    int32_t  op_params[GGML_MAX_OP_PARAMS / sizeof(int32_t)];
    int32_t  flags;
    uint64_t src[GGML_MAX_SRC];
    uint64_t view_src;
    uint64_t view_offs;
    uint64_t data;
    char     name[GGML_MAX_NAME];

    char padding[4];
};
#pragma pack(pop)

static_assert(sizeof(rpc_tensor) % 8 == 0, "rpc_tensor size must be multiple of 8");
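
// Illustrative note (not part of the wire format itself): every pointer-typed
// field above travels as an opaque 64-bit integer. `id`, `src[]` and
// `view_src` are client-side ggml_tensor addresses used purely as lookup
// keys, while `buffer` and `data` are server-side addresses handed out
// earlier by RPC_CMD_ALLOC_BUFFER / RPC_CMD_BUFFER_GET_BASE. A minimal
// sketch, assuming a client-side tensor `t`:
//
//     rpc_tensor rt = serialize_tensor(t);   // defined later in this file
//     // rt.id     == (uint64_t) t           -- key into the server's tensor map
//     // rt.buffer == ctx->remote_ptr        -- server-side buffer handle
//     // rt.data   == (uint64_t) t->data     -- address inside that remote buffer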

// RPC commands
enum rpc_cmd {
    RPC_CMD_ALLOC_BUFFER = 0,
    RPC_CMD_GET_ALIGNMENT,
    RPC_CMD_GET_MAX_SIZE,
    RPC_CMD_BUFFER_GET_BASE,
    RPC_CMD_FREE_BUFFER,
    RPC_CMD_BUFFER_CLEAR,
    RPC_CMD_SET_TENSOR,
    RPC_CMD_GET_TENSOR,
    RPC_CMD_COPY_TENSOR,
    RPC_CMD_GRAPH_COMPUTE,
    RPC_CMD_GET_DEVICE_MEMORY,
    RPC_CMD_COUNT,
};

// RPC data structures
static ggml_guid_t ggml_backend_rpc_guid() {
    static ggml_guid guid = {0x99, 0x68, 0x5b, 0x6c, 0xd2, 0x83, 0x3d, 0x24, 0x25, 0x36, 0x72, 0xe1, 0x5b, 0x0e, 0x14, 0x03};
    return &guid;
}

struct ggml_backend_rpc_buffer_type_context {
    std::string endpoint;
    std::string name;
    size_t alignment;
    size_t max_size;
};

struct ggml_backend_rpc_context {
    std::string endpoint;
    std::string name;
};

struct ggml_backend_rpc_buffer_context {
    std::shared_ptr<socket_t> sock;
    std::unordered_map<ggml_backend_buffer_t, void *> base_cache;
    uint64_t remote_ptr;
    std::string name;
};

// RPC helper functions
static std::shared_ptr<socket_t> make_socket(sockfd_t fd) {
#ifdef _WIN32
    if (fd == INVALID_SOCKET) {
        return nullptr;
    }
#else
    if (fd < 0) {
        return nullptr;
    }
#endif
    return std::make_shared<socket_t>(fd);
}

static bool set_no_delay(sockfd_t sockfd) {
    int flag = 1;
    // set TCP_NODELAY to disable Nagle's algorithm
    int ret = setsockopt(sockfd, IPPROTO_TCP, TCP_NODELAY, (char *)&flag, sizeof(int));
    return ret == 0;
}

static bool set_reuse_addr(sockfd_t sockfd) {
    int flag = 1;
    int ret = setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, (char *)&flag, sizeof(int));
    return ret == 0;
}

static std::shared_ptr<socket_t> socket_connect(const char * host, int port) {
    struct sockaddr_in addr;
    auto sockfd = socket(AF_INET, SOCK_STREAM, 0);
    auto sock_ptr = make_socket(sockfd);
    if (sock_ptr == nullptr) {
        return nullptr;
    }
    if (!set_no_delay(sockfd)) {
        fprintf(stderr, "Failed to set TCP_NODELAY\n");
        return nullptr;
    }
    addr.sin_family = AF_INET;
    addr.sin_port = htons(port);
    struct hostent * server = gethostbyname(host);
    if (server == NULL) {
        fprintf(stderr, "Cannot resolve host '%s'\n", host);
        return nullptr;
    }
    memcpy(&addr.sin_addr.s_addr, server->h_addr, server->h_length);
    if (connect(sock_ptr->fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
        return nullptr;
    }
    return sock_ptr;
}

static std::shared_ptr<socket_t> socket_accept(sockfd_t srv_sockfd) {
    auto client_socket_fd = accept(srv_sockfd, NULL, NULL);
    auto client_socket = make_socket(client_socket_fd);
    if (client_socket == nullptr) {
        return nullptr;
    }
    if (!set_no_delay(client_socket_fd)) {
        fprintf(stderr, "Failed to set TCP_NODELAY\n");
        return nullptr;
    }
    return client_socket;
}

static std::shared_ptr<socket_t> create_server_socket(const char * host, int port) {
    auto sockfd = socket(AF_INET, SOCK_STREAM, 0);
    auto sock = make_socket(sockfd);
    if (sock == nullptr) {
        return nullptr;
    }
    if (!set_reuse_addr(sockfd)) {
        fprintf(stderr, "Failed to set SO_REUSEADDR\n");
        return nullptr;
    }
    if (inet_addr(host) == INADDR_NONE) {
        fprintf(stderr, "Invalid host address: %s\n", host);
        return nullptr;
    }
    struct sockaddr_in serv_addr;
    serv_addr.sin_family = AF_INET;
    serv_addr.sin_addr.s_addr = inet_addr(host);
    serv_addr.sin_port = htons(port);
    if (bind(sockfd, (struct sockaddr *) &serv_addr, sizeof(serv_addr)) < 0) {
        return nullptr;
    }
    if (listen(sockfd, 1) < 0) {
        return nullptr;
    }
    return sock;
}

static bool send_data(sockfd_t sockfd, const void * data, size_t size) {
    size_t bytes_sent = 0;
    while (bytes_sent < size) {
        ssize_t n = send(sockfd, (const char *)data + bytes_sent, size - bytes_sent, 0);
        if (n < 0) {
            return false;
        }
        bytes_sent += n;
    }
    return true;
}

static bool recv_data(sockfd_t sockfd, void * data, size_t size) {
    size_t bytes_recv = 0;
    while (bytes_recv < size) {
        ssize_t n = recv(sockfd, (char *)data + bytes_recv, size - bytes_recv, 0);
        if (n <= 0) {
            return false;
        }
        bytes_recv += n;
    }
    return true;
}

static bool parse_endpoint(const std::string & endpoint, std::string & host, int & port) {
    size_t pos = endpoint.find(':');
    if (pos == std::string::npos) {
        return false;
    }
    host = endpoint.substr(0, pos);
    port = std::stoi(endpoint.substr(pos + 1));
    return true;
}
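
// Usage sketch (illustrative): parse_endpoint splits on the first ':' and
// leaves port validation to std::stoi, which throws on non-numeric input:
//
//     std::string host;
//     int port = 0;
//     if (parse_endpoint("127.0.0.1:50052", host, port)) {
//         // host == "127.0.0.1", port == 50052
//     }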

// RPC request : | rpc_cmd (1 byte) | request_size (8 bytes) | request_data (request_size bytes) |
// RPC response: | response_size (8 bytes) | response_data (response_size bytes) |
static bool send_rpc_cmd(const std::shared_ptr<socket_t> & sock, enum rpc_cmd cmd, const std::vector<uint8_t> & input, std::vector<uint8_t> & output) {
    uint8_t cmd_byte = cmd;
    if (!send_data(sock->fd, &cmd_byte, sizeof(cmd_byte))) {
        return false;
    }
    uint64_t input_size = input.size();
    if (!send_data(sock->fd, &input_size, sizeof(input_size))) {
        return false;
    }
    if (!send_data(sock->fd, input.data(), input.size())) {
        return false;
    }
    uint64_t output_size;
    if (!recv_data(sock->fd, &output_size, sizeof(output_size))) {
        return false;
    }
    if (output_size == 0) {
        output.clear();
        return true;
    }
    output.resize(output_size);
    if (!recv_data(sock->fd, output.data(), output_size)) {
        return false;
    }
    return true;
}
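
// Wire-level sketch (illustrative): a GET_ALIGNMENT exchange, which carries
// an empty request body, looks like this on the socket:
//
//     client -> server: | 0x01 (cmd) | 0 (request_size, 8 bytes) |
//     server -> client: | 8 (response_size, 8 bytes) | alignment (8 bytes) |
//
// Integers are sent in host byte order (plain memcpy, no htonl/ntohl), so
// client and server are assumed to share endianness.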

// RPC client-side implementation
static std::shared_ptr<socket_t> get_socket(const std::string & endpoint) {
    static std::mutex mutex;
    std::lock_guard<std::mutex> lock(mutex);
    static std::unordered_map<std::string, std::weak_ptr<socket_t>> sockets;
    static bool initialized = false;

    auto it = sockets.find(endpoint);
    if (it != sockets.end()) {
        if (auto sock = it->second.lock()) {
            return sock;
        }
    }
    std::string host;
    int port;
    if (!parse_endpoint(endpoint, host, port)) {
        return nullptr;
    }
#ifdef _WIN32
    if (!initialized) {
        WSADATA wsaData;
        int res = WSAStartup(MAKEWORD(2, 2), &wsaData);
        if (res != 0) {
            return nullptr;
        }
        initialized = true;
    }
#else
    UNUSED(initialized);
#endif
    auto sock = socket_connect(host.c_str(), port);
    if (sock == nullptr) {
        return nullptr;
    }
    GGML_PRINT_DEBUG("[%s] connected to %s, sockfd=%d\n", __func__, endpoint.c_str(), sock->fd);
    sockets[endpoint] = sock;
    return sock;
}

GGML_CALL static const char * ggml_backend_rpc_buffer_get_name(ggml_backend_buffer_t buffer) {
    ggml_backend_rpc_buffer_context * ctx = (ggml_backend_rpc_buffer_context *)buffer->context;
    return ctx->name.c_str();
}

GGML_CALL static void ggml_backend_rpc_buffer_free_buffer(ggml_backend_buffer_t buffer) {
    ggml_backend_rpc_buffer_context * ctx = (ggml_backend_rpc_buffer_context *)buffer->context;
    // input serialization format: | remote_ptr (8 bytes) |
    std::vector<uint8_t> input(sizeof(uint64_t), 0);
    uint64_t remote_ptr = ctx->remote_ptr;
    memcpy(input.data(), &remote_ptr, sizeof(remote_ptr));
    std::vector<uint8_t> output;
    bool status = send_rpc_cmd(ctx->sock, RPC_CMD_FREE_BUFFER, input, output);
    GGML_ASSERT(status);
    GGML_ASSERT(output.empty());
    delete ctx;
}

GGML_CALL static void * ggml_backend_rpc_buffer_get_base(ggml_backend_buffer_t buffer) {
    ggml_backend_rpc_buffer_context * ctx = (ggml_backend_rpc_buffer_context *)buffer->context;
    if (ctx->base_cache.find(buffer) != ctx->base_cache.end()) {
        return ctx->base_cache[buffer];
    }
    // input serialization format: | remote_ptr (8 bytes) |
    std::vector<uint8_t> input(sizeof(uint64_t), 0);
    uint64_t remote_ptr = ctx->remote_ptr;
    memcpy(input.data(), &remote_ptr, sizeof(remote_ptr));
    std::vector<uint8_t> output;
    bool status = send_rpc_cmd(ctx->sock, RPC_CMD_BUFFER_GET_BASE, input, output);
    GGML_ASSERT(status);
    GGML_ASSERT(output.size() == sizeof(uint64_t));
    // output serialization format: | base_ptr (8 bytes) |
    uint64_t base_ptr;
    memcpy(&base_ptr, output.data(), sizeof(base_ptr));
    void * base = reinterpret_cast<void *>(base_ptr);
    ctx->base_cache[buffer] = base;
    return base;
}

static rpc_tensor serialize_tensor(const ggml_tensor * tensor) {
    rpc_tensor result;
    result.id = reinterpret_cast<uint64_t>(tensor);
    result.type = tensor->type;
    if (tensor->buffer) {
        ggml_backend_buffer_t buffer = tensor->buffer;
        ggml_backend_rpc_buffer_context * ctx = (ggml_backend_rpc_buffer_context *)buffer->context;
        result.buffer = ctx->remote_ptr;
    } else {
        result.buffer = 0;
    }
    for (uint32_t i = 0; i < GGML_MAX_DIMS; i++) {
        result.ne[i] = tensor->ne[i];
        result.nb[i] = tensor->nb[i];
    }
    result.op = tensor->op;
    for (uint32_t i = 0; i < GGML_MAX_OP_PARAMS / sizeof(int32_t); i++) {
        result.op_params[i] = tensor->op_params[i];
    }
    result.flags = tensor->flags;
    for (uint32_t i = 0; i < GGML_MAX_SRC; i++) {
        result.src[i] = reinterpret_cast<uint64_t>(tensor->src[i]);
    }
    result.view_src = reinterpret_cast<uint64_t>(tensor->view_src);
    result.view_offs = tensor->view_offs;
    result.data = reinterpret_cast<uint64_t>(tensor->data);
    snprintf(result.name, GGML_MAX_NAME, "%s", tensor->name);
    return result;
}

GGML_CALL static void ggml_backend_rpc_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) {
    UNUSED(buffer);
    if (ggml_is_quantized(tensor->type)) {
        // TODO: this check is due to MATRIX_ROW_PADDING in CUDA and should be generalized
        GGML_ASSERT(tensor->ne[0] % 512 == 0 && "unsupported quantized tensor");
    }
}

GGML_CALL static void ggml_backend_rpc_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
    ggml_backend_rpc_buffer_context * ctx = (ggml_backend_rpc_buffer_context *)buffer->context;
    // input serialization format: | rpc_tensor | offset (8 bytes) | data (size bytes) |
    size_t input_size = sizeof(rpc_tensor) + sizeof(uint64_t) + size;
    std::vector<uint8_t> input(input_size, 0);
    rpc_tensor rpc_tensor = serialize_tensor(tensor);
    memcpy(input.data(), &rpc_tensor, sizeof(rpc_tensor));
    memcpy(input.data() + sizeof(rpc_tensor), &offset, sizeof(offset));
    memcpy(input.data() + sizeof(rpc_tensor) + sizeof(offset), data, size);
    std::vector<uint8_t> output;
    bool status = send_rpc_cmd(ctx->sock, RPC_CMD_SET_TENSOR, input, output);
    GGML_ASSERT(status);
}

GGML_CALL static void ggml_backend_rpc_buffer_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) {
    ggml_backend_rpc_buffer_context * ctx = (ggml_backend_rpc_buffer_context *)buffer->context;
    // input serialization format: | rpc_tensor | offset (8 bytes) | size (8 bytes) |
    int input_size = sizeof(rpc_tensor) + 2*sizeof(uint64_t);
    std::vector<uint8_t> input(input_size, 0);
    rpc_tensor rpc_tensor = serialize_tensor(tensor);
    memcpy(input.data(), &rpc_tensor, sizeof(rpc_tensor));
    memcpy(input.data() + sizeof(rpc_tensor), &offset, sizeof(offset));
    memcpy(input.data() + sizeof(rpc_tensor) + sizeof(offset), &size, sizeof(size));
    std::vector<uint8_t> output;
    bool status = send_rpc_cmd(ctx->sock, RPC_CMD_GET_TENSOR, input, output);
    GGML_ASSERT(status);
    GGML_ASSERT(output.size() == size);
    // output serialization format: | data (size bytes) |
    memcpy(data, output.data(), size);
}

GGML_CALL static bool ggml_backend_rpc_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * src, ggml_tensor * dst) {
    // check if src and dst are on the same server
    ggml_backend_buffer_t src_buffer = src->buffer;
    ggml_backend_rpc_buffer_context * src_ctx = (ggml_backend_rpc_buffer_context *)src_buffer->context;
    ggml_backend_buffer_t dst_buffer = dst->buffer;
    ggml_backend_rpc_buffer_context * dst_ctx = (ggml_backend_rpc_buffer_context *)dst_buffer->context;
    if (src_ctx->sock != dst_ctx->sock) {
        return false;
    }
    ggml_backend_rpc_buffer_context * ctx = (ggml_backend_rpc_buffer_context *)buffer->context;
    // input serialization format: | rpc_tensor src | rpc_tensor dst |
    int input_size = 2*sizeof(rpc_tensor);
    std::vector<uint8_t> input(input_size, 0);
    rpc_tensor rpc_src = serialize_tensor(src);
    rpc_tensor rpc_dst = serialize_tensor(dst);
    memcpy(input.data(), &rpc_src, sizeof(rpc_src));
    memcpy(input.data() + sizeof(rpc_src), &rpc_dst, sizeof(rpc_dst));
    std::vector<uint8_t> output;
    bool status = send_rpc_cmd(ctx->sock, RPC_CMD_COPY_TENSOR, input, output);
    GGML_ASSERT(status);
    // output serialization format: | result (1 byte) |
    GGML_ASSERT(output.size() == 1);
    return output[0];
}

GGML_CALL static void ggml_backend_rpc_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
    ggml_backend_rpc_buffer_context * ctx = (ggml_backend_rpc_buffer_context *)buffer->context;
    // serialization format: | bufptr (8 bytes) | value (1 byte) |
    int input_size = sizeof(uint64_t) + sizeof(uint8_t);
    std::vector<uint8_t> input(input_size, 0);
    memcpy(input.data(), &ctx->remote_ptr, sizeof(ctx->remote_ptr));
    memcpy(input.data() + sizeof(ctx->remote_ptr), &value, sizeof(value));
    std::vector<uint8_t> output;
    bool status = send_rpc_cmd(ctx->sock, RPC_CMD_BUFFER_CLEAR, input, output);
    GGML_ASSERT(status);
}

static ggml_backend_buffer_i ggml_backend_rpc_buffer_interface = {
    /* .get_name    = */ ggml_backend_rpc_buffer_get_name,
    /* .free_buffer = */ ggml_backend_rpc_buffer_free_buffer,
    /* .get_base    = */ ggml_backend_rpc_buffer_get_base,
    /* .init_tensor = */ ggml_backend_rpc_buffer_init_tensor,
    /* .set_tensor  = */ ggml_backend_rpc_buffer_set_tensor,
    /* .get_tensor  = */ ggml_backend_rpc_buffer_get_tensor,
    /* .cpy_tensor  = */ ggml_backend_rpc_buffer_cpy_tensor,
    /* .clear       = */ ggml_backend_rpc_buffer_clear,
    /* .reset       = */ NULL,
};

GGML_CALL static const char * ggml_backend_rpc_buffer_type_name(ggml_backend_buffer_type_t buft) {
    ggml_backend_rpc_buffer_type_context * buft_ctx = (ggml_backend_rpc_buffer_type_context *)buft->context;
    return buft_ctx->name.c_str();
}

GGML_CALL static ggml_backend_buffer_t ggml_backend_rpc_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
    ggml_backend_rpc_buffer_type_context * buft_ctx = (ggml_backend_rpc_buffer_type_context *)buft->context;
    // input serialization format: | size (8 bytes) |
    int input_size = sizeof(uint64_t);
    std::vector<uint8_t> input(input_size, 0);
    memcpy(input.data(), &size, sizeof(size));
    std::vector<uint8_t> output;
    auto sock = get_socket(buft_ctx->endpoint);
    bool status = send_rpc_cmd(sock, RPC_CMD_ALLOC_BUFFER, input, output);
    GGML_ASSERT(status);
    GGML_ASSERT(output.size() == 2*sizeof(uint64_t));
    // output serialization format: | remote_ptr (8 bytes) | remote_size (8 bytes) |
    uint64_t remote_ptr;
    memcpy(&remote_ptr, output.data(), sizeof(remote_ptr));
    size_t remote_size;
    memcpy(&remote_size, output.data() + sizeof(uint64_t), sizeof(remote_size));
    if (remote_ptr != 0) {
        ggml_backend_buffer_t buffer = ggml_backend_buffer_init(buft,
            ggml_backend_rpc_buffer_interface,
            new ggml_backend_rpc_buffer_context{sock, {}, remote_ptr, "RPC[" + std::string(buft_ctx->endpoint) + "]"},
            remote_size);
        return buffer;
    } else {
        return nullptr;
    }
}

static size_t get_alignment(const std::shared_ptr<socket_t> & sock) {
    // input serialization format: | 0 bytes |
    std::vector<uint8_t> input;
    std::vector<uint8_t> output;
    bool status = send_rpc_cmd(sock, RPC_CMD_GET_ALIGNMENT, input, output);
    GGML_ASSERT(status);
    GGML_ASSERT(output.size() == sizeof(uint64_t));
    // output serialization format: | alignment (8 bytes) |
    uint64_t alignment;
    memcpy(&alignment, output.data(), sizeof(alignment));
    return alignment;
}

GGML_CALL static size_t ggml_backend_rpc_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
    ggml_backend_rpc_buffer_type_context * buft_ctx = (ggml_backend_rpc_buffer_type_context *)buft->context;
    return buft_ctx->alignment;
}

static size_t get_max_size(const std::shared_ptr<socket_t> & sock) {
    // input serialization format: | 0 bytes |
    std::vector<uint8_t> input;
    std::vector<uint8_t> output;
    bool status = send_rpc_cmd(sock, RPC_CMD_GET_MAX_SIZE, input, output);
    GGML_ASSERT(status);
    GGML_ASSERT(output.size() == sizeof(uint64_t));
    // output serialization format: | max_size (8 bytes) |
    uint64_t max_size;
    memcpy(&max_size, output.data(), sizeof(max_size));
    return max_size;
}

GGML_CALL static size_t ggml_backend_rpc_get_max_size(ggml_backend_buffer_type_t buft) {
    ggml_backend_rpc_buffer_type_context * buft_ctx = (ggml_backend_rpc_buffer_type_context *)buft->context;
    return buft_ctx->max_size;
}

GGML_CALL static size_t ggml_backend_rpc_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) {
    UNUSED(buft);
    return ggml_nbytes(tensor);
}

static ggml_backend_buffer_type_i ggml_backend_rpc_buffer_type_interface = {
    /* .get_name       = */ ggml_backend_rpc_buffer_type_name,
    /* .alloc_buffer   = */ ggml_backend_rpc_buffer_type_alloc_buffer,
    /* .get_alignment  = */ ggml_backend_rpc_buffer_type_get_alignment,
    /* .get_max_size   = */ ggml_backend_rpc_get_max_size,
    /* .get_alloc_size = */ ggml_backend_rpc_buffer_type_get_alloc_size,
    /* .is_host        = */ NULL,
};

GGML_CALL static const char * ggml_backend_rpc_name(ggml_backend_t backend) {
    ggml_backend_rpc_context * rpc_ctx = (ggml_backend_rpc_context *)backend->context;
    return rpc_ctx->name.c_str();
}

GGML_CALL static void ggml_backend_rpc_free(ggml_backend_t backend) {
    ggml_backend_rpc_context * rpc_ctx = (ggml_backend_rpc_context *)backend->context;
    delete rpc_ctx;
    delete backend;
}

GGML_CALL static ggml_backend_buffer_type_t ggml_backend_rpc_get_default_buffer_type(ggml_backend_t backend) {
    ggml_backend_rpc_context * ctx = (ggml_backend_rpc_context *)backend->context;
    return ggml_backend_rpc_buffer_type(ctx->endpoint.c_str());
}

GGML_CALL static void ggml_backend_rpc_synchronize(ggml_backend_t backend) {
    UNUSED(backend);
    // this is a no-op because we don't have any async operations
}

static void add_tensor(ggml_tensor * tensor, std::vector<rpc_tensor> & tensors, std::unordered_set<ggml_tensor*> & visited) {
    if (tensor == nullptr) {
        return;
    }
    if (visited.find(tensor) != visited.end()) {
        return;
    }
    visited.insert(tensor);
    for (int i = 0; i < GGML_MAX_SRC; i++) {
        add_tensor(tensor->src[i], tensors, visited);
    }
    add_tensor(tensor->view_src, tensors, visited);
    tensors.push_back(serialize_tensor(tensor));
}

static void serialize_graph(const ggml_cgraph * cgraph, std::vector<uint8_t> & output) {
    uint32_t n_nodes = cgraph->n_nodes;
    std::vector<rpc_tensor> tensors;
    std::unordered_set<ggml_tensor*> visited;
    for (uint32_t i = 0; i < n_nodes; i++) {
        add_tensor(cgraph->nodes[i], tensors, visited);
    }
    // serialization format:
    // | n_nodes (4 bytes) | nodes (n_nodes * sizeof(uint64_t)) | n_tensors (4 bytes) | tensors (n_tensors * sizeof(rpc_tensor)) |
    uint32_t n_tensors = tensors.size();
    int output_size = sizeof(uint32_t) + n_nodes * sizeof(uint64_t) + sizeof(uint32_t) + n_tensors * sizeof(rpc_tensor);
    output.resize(output_size, 0);
    memcpy(output.data(), &n_nodes, sizeof(n_nodes));
    for (uint32_t i = 0; i < n_nodes; i++) {
        memcpy(output.data() + sizeof(n_nodes) + i * sizeof(uint64_t), &cgraph->nodes[i], sizeof(uint64_t));
    }
    uint32_t * out_ntensors = (uint32_t *)(output.data() + sizeof(n_nodes) + n_nodes * sizeof(uint64_t));
    *out_ntensors = n_tensors;
    rpc_tensor * out_tensors = (rpc_tensor *)(output.data() + sizeof(n_nodes) + n_nodes * sizeof(uint64_t) + sizeof(uint32_t));
    memcpy(out_tensors, tensors.data(), n_tensors * sizeof(rpc_tensor));
}
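
// Worked example (illustrative): a graph with 2 nodes whose closure spans
// 5 distinct tensors (the nodes plus the transitive src/view_src dependencies
// collected by add_tensor) serializes to
//
//     4 + 2*8 + 4 + 5*sizeof(rpc_tensor) bytes
//
// where each node entry is the client-side ggml_tensor address, matching the
// `id` field of the corresponding rpc_tensor in the tensor table.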

GGML_CALL static enum ggml_status ggml_backend_rpc_graph_compute(ggml_backend_t backend, ggml_cgraph * cgraph) {
    ggml_backend_rpc_context * rpc_ctx = (ggml_backend_rpc_context *)backend->context;
    std::vector<uint8_t> input;
    serialize_graph(cgraph, input);
    std::vector<uint8_t> output;
    auto sock = get_socket(rpc_ctx->endpoint);
    bool status = send_rpc_cmd(sock, RPC_CMD_GRAPH_COMPUTE, input, output);
    GGML_ASSERT(status);
    GGML_ASSERT(output.size() == 1);
    return (enum ggml_status)output[0];
}

GGML_CALL static bool ggml_backend_rpc_supports_op(ggml_backend_t backend, const ggml_tensor * op) {
    UNUSED(backend);
    UNUSED(op);
    //TODO: call the remote backend and cache the results
    return true;
}

GGML_CALL static bool ggml_backend_rpc_supports_buft(ggml_backend_t backend, ggml_backend_buffer_type_t buft) {
    if (!buft || buft->iface.get_name != ggml_backend_rpc_buffer_type_name) {
        return false;
    }
    ggml_backend_rpc_buffer_type_context * buft_ctx = (ggml_backend_rpc_buffer_type_context *)buft->context;
    ggml_backend_rpc_context * rpc_ctx = (ggml_backend_rpc_context *)backend->context;
    return buft_ctx->endpoint == rpc_ctx->endpoint;
}

static ggml_backend_i ggml_backend_rpc_interface = {
    /* .get_name                = */ ggml_backend_rpc_name,
    /* .free                    = */ ggml_backend_rpc_free,
    /* .get_default_buffer_type = */ ggml_backend_rpc_get_default_buffer_type,
    /* .set_tensor_async        = */ NULL,
    /* .get_tensor_async        = */ NULL,
    /* .cpy_tensor_async        = */ NULL,
    /* .synchronize             = */ ggml_backend_rpc_synchronize,
    /* .graph_plan_create       = */ NULL,
    /* .graph_plan_free         = */ NULL,
    /* .graph_plan_update       = */ NULL,
    /* .graph_plan_compute      = */ NULL,
    /* .graph_compute           = */ ggml_backend_rpc_graph_compute,
    /* .supports_op             = */ ggml_backend_rpc_supports_op,
    /* .supports_buft           = */ ggml_backend_rpc_supports_buft,
    /* .offload_op              = */ NULL,
    /* .event_new               = */ NULL,
    /* .event_free              = */ NULL,
    /* .event_record            = */ NULL,
    /* .event_wait              = */ NULL,
    /* .event_synchronize       = */ NULL,
};

GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_rpc_buffer_type(const char * endpoint) {
    static std::mutex mutex;
    std::lock_guard<std::mutex> lock(mutex);
    // NOTE: buffer types are allocated and never freed; this is by design
    static std::unordered_map<std::string, ggml_backend_buffer_type_t> buft_map;
    auto it = buft_map.find(endpoint);
    if (it != buft_map.end()) {
        return it->second;
    }
    auto sock = get_socket(endpoint);
    if (sock == nullptr) {
        fprintf(stderr, "Failed to connect to %s\n", endpoint);
        return nullptr;
    }
    size_t alignment = get_alignment(sock);
    size_t max_size = get_max_size(sock);
    ggml_backend_rpc_buffer_type_context * buft_ctx = new ggml_backend_rpc_buffer_type_context {
        /* .endpoint  = */ endpoint,
        /* .name      = */ "RPC[" + std::string(endpoint) + "]",
        /* .alignment = */ alignment,
        /* .max_size  = */ max_size
    };
    ggml_backend_buffer_type_t buft = new ggml_backend_buffer_type {
        /* .iface   = */ ggml_backend_rpc_buffer_type_interface,
        /* .context = */ buft_ctx
    };
    buft_map[endpoint] = buft;
    return buft;
}

GGML_CALL ggml_backend_t ggml_backend_rpc_init(const char * endpoint) {
    ggml_backend_rpc_context * ctx = new ggml_backend_rpc_context {
        /* .endpoint = */ endpoint,
        /* .name     = */ "RPC[" + std::string(endpoint) + "]",
    };
    ggml_backend_t backend = new ggml_backend {
        /* .guid      = */ ggml_backend_rpc_guid(),
        /* .interface = */ ggml_backend_rpc_interface,
        /* .context   = */ ctx
    };
    return backend;
}
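
// Usage sketch (illustrative; the endpoint is a placeholder): create an RPC
// backend, query the server's memory, then release it through the generic
// ggml-backend API:
//
//     ggml_backend_t backend = ggml_backend_rpc_init("192.168.1.2:50052");
//     size_t free_mem, total_mem;
//     ggml_backend_rpc_get_device_memory("192.168.1.2:50052", &free_mem, &total_mem);
//     // ... allocate buffers / compute graphs as with any other backend ...
//     ggml_backend_free(backend);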

GGML_API GGML_CALL bool ggml_backend_is_rpc(ggml_backend_t backend) {
    return backend != NULL && ggml_guid_matches(backend->guid, ggml_backend_rpc_guid());
}

static void get_device_memory(const std::shared_ptr<socket_t> & sock, size_t * free, size_t * total) {
    // input serialization format: | 0 bytes |
    std::vector<uint8_t> input;
    std::vector<uint8_t> output;
    bool status = send_rpc_cmd(sock, RPC_CMD_GET_DEVICE_MEMORY, input, output);
    GGML_ASSERT(status);
    GGML_ASSERT(output.size() == 2*sizeof(uint64_t));
    // output serialization format: | free (8 bytes) | total (8 bytes) |
    uint64_t free_mem;
    memcpy(&free_mem, output.data(), sizeof(free_mem));
    uint64_t total_mem;
    memcpy(&total_mem, output.data() + sizeof(uint64_t), sizeof(total_mem));
    *free = free_mem;
    *total = total_mem;
}

GGML_API GGML_CALL void ggml_backend_rpc_get_device_memory(const char * endpoint, size_t * free, size_t * total) {
    auto sock = get_socket(endpoint);
    if (sock == nullptr) {
        *free = 0;
        *total = 0;
        return;
    }
    get_device_memory(sock, free, total);
}

// RPC server-side implementation
class rpc_server {
public:
    rpc_server(ggml_backend_t backend) : backend(backend) {}
    ~rpc_server();

    bool alloc_buffer(const std::vector<uint8_t> & input, std::vector<uint8_t> & output);
    void get_alignment(std::vector<uint8_t> & output);
    void get_max_size(std::vector<uint8_t> & output);
    bool buffer_get_base(const std::vector<uint8_t> & input, std::vector<uint8_t> & output);
    bool free_buffer(const std::vector<uint8_t> & input);
    bool buffer_clear(const std::vector<uint8_t> & input);
    bool set_tensor(const std::vector<uint8_t> & input);
    bool get_tensor(const std::vector<uint8_t> & input, std::vector<uint8_t> & output);
    bool copy_tensor(const std::vector<uint8_t> & input, std::vector<uint8_t> & output);
    bool graph_compute(const std::vector<uint8_t> & input, std::vector<uint8_t> & output);

private:
    ggml_tensor * deserialize_tensor(struct ggml_context * ctx, const rpc_tensor * tensor);
    ggml_tensor * create_node(uint64_t id,
                              struct ggml_context * ctx,
                              const std::unordered_map<uint64_t, const rpc_tensor*> & tensor_ptrs,
                              std::unordered_map<uint64_t, struct ggml_tensor*> & tensor_map);

    ggml_backend_t backend;
    std::unordered_set<ggml_backend_buffer_t> buffers;
};

bool rpc_server::alloc_buffer(const std::vector<uint8_t> & input, std::vector<uint8_t> & output) {
    // input serialization format: | size (8 bytes) |
    if (input.size() != sizeof(uint64_t)) {
        return false;
    }
    uint64_t size;
    memcpy(&size, input.data(), sizeof(size));
    ggml_backend_buffer_type_t buft = ggml_backend_get_default_buffer_type(backend);
    ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(buft, size);
    uint64_t remote_ptr = 0;
    uint64_t remote_size = 0;
    if (buffer != nullptr) {
        remote_ptr = reinterpret_cast<uint64_t>(buffer);
        remote_size = buffer->size;
        GGML_PRINT_DEBUG("[%s] size: %" PRIu64 " -> remote_ptr: %" PRIx64 ", remote_size: %" PRIu64 "\n", __func__, size, remote_ptr, remote_size);
        buffers.insert(buffer);
    } else {
        GGML_PRINT_DEBUG("[%s] size: %" PRIu64 " -> failed\n", __func__, size);
    }
    // output serialization format: | remote_ptr (8 bytes) | remote_size (8 bytes) |
    output.resize(2*sizeof(uint64_t), 0);
    memcpy(output.data(), &remote_ptr, sizeof(remote_ptr));
    memcpy(output.data() + sizeof(uint64_t), &remote_size, sizeof(remote_size));
    return true;
}

void rpc_server::get_alignment(std::vector<uint8_t> & output) {
    ggml_backend_buffer_type_t buft = ggml_backend_get_default_buffer_type(backend);
    size_t alignment = ggml_backend_buft_get_alignment(buft);
    GGML_PRINT_DEBUG("[%s] alignment: %lu\n", __func__, alignment);
    // output serialization format: | alignment (8 bytes) |
    output.resize(sizeof(uint64_t), 0);
    memcpy(output.data(), &alignment, sizeof(alignment));
}

void rpc_server::get_max_size(std::vector<uint8_t> & output) {
    ggml_backend_buffer_type_t buft = ggml_backend_get_default_buffer_type(backend);
    size_t max_size = ggml_backend_buft_get_max_size(buft);
    GGML_PRINT_DEBUG("[%s] max_size: %lu\n", __func__, max_size);
    // output serialization format: | max_size (8 bytes) |
    output.resize(sizeof(uint64_t), 0);
    memcpy(output.data(), &max_size, sizeof(max_size));
}

bool rpc_server::buffer_get_base(const std::vector<uint8_t> & input, std::vector<uint8_t> & output) {
    // input serialization format: | remote_ptr (8 bytes) |
    if (input.size() != sizeof(uint64_t)) {
        return false;
    }
    uint64_t remote_ptr;
    memcpy(&remote_ptr, input.data(), sizeof(remote_ptr));
    GGML_PRINT_DEBUG("[%s] remote_ptr: %" PRIx64 "\n", __func__, remote_ptr);
    ggml_backend_buffer_t buffer = reinterpret_cast<ggml_backend_buffer_t>(remote_ptr);
    if (buffers.find(buffer) == buffers.end()) {
        GGML_PRINT_DEBUG("[%s] buffer not found\n", __func__);
        return false;
    }
    void * base = ggml_backend_buffer_get_base(buffer);
    // output serialization format: | base_ptr (8 bytes) |
    uint64_t base_ptr = reinterpret_cast<uint64_t>(base);
    output.resize(sizeof(uint64_t), 0);
    memcpy(output.data(), &base_ptr, sizeof(base_ptr));
    return true;
}

bool rpc_server::free_buffer(const std::vector<uint8_t> & input) {
    // input serialization format: | remote_ptr (8 bytes) |
    if (input.size() != sizeof(uint64_t)) {
        return false;
    }
    uint64_t remote_ptr;
    memcpy(&remote_ptr, input.data(), sizeof(remote_ptr));
    GGML_PRINT_DEBUG("[%s] remote_ptr: %" PRIx64 "\n", __func__, remote_ptr);
    ggml_backend_buffer_t buffer = reinterpret_cast<ggml_backend_buffer_t>(remote_ptr);
    if (buffers.find(buffer) == buffers.end()) {
        GGML_PRINT_DEBUG("[%s] buffer not found\n", __func__);
        return false;
    }
    ggml_backend_buffer_free(buffer);
    buffers.erase(buffer);
    return true;
}

bool rpc_server::buffer_clear(const std::vector<uint8_t> & input) {
    // input serialization format: | remote_ptr (8 bytes) | value (1 byte) |
    if (input.size() != sizeof(uint64_t) + sizeof(uint8_t)) {
        return false;
    }
    uint64_t remote_ptr;
    memcpy(&remote_ptr, input.data(), sizeof(remote_ptr));
    uint8_t value;
    memcpy(&value, input.data() + sizeof(uint64_t), sizeof(value));
    GGML_PRINT_DEBUG("[%s] remote_ptr: %" PRIx64 ", value: %u\n", __func__, remote_ptr, value);
    ggml_backend_buffer_t buffer = reinterpret_cast<ggml_backend_buffer_t>(remote_ptr);
    if (buffers.find(buffer) == buffers.end()) {
        GGML_PRINT_DEBUG("[%s] buffer not found\n", __func__);
        return false;
    }
    ggml_backend_buffer_clear(buffer, value);
    return true;
}

ggml_tensor * rpc_server::deserialize_tensor(struct ggml_context * ctx, const rpc_tensor * tensor) {
    ggml_tensor * result = ggml_new_tensor_4d(ctx, (ggml_type) tensor->type,
        tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3]);
    for (uint32_t i = 0; i < GGML_MAX_DIMS; i++) {
        result->nb[i] = tensor->nb[i];
    }
    result->buffer = reinterpret_cast<ggml_backend_buffer_t>(tensor->buffer);
    if (result->buffer && buffers.find(result->buffer) == buffers.end()) {
        result->buffer = nullptr;
    }
    if (result->buffer) {
        // require that the tensor data does not go beyond the buffer end
        uint64_t tensor_size = (uint64_t) ggml_nbytes(result);
        uint64_t buffer_start = (uint64_t) ggml_backend_buffer_get_base(result->buffer);
        uint64_t buffer_size = (uint64_t) ggml_backend_buffer_get_size(result->buffer);
        GGML_ASSERT(tensor->data + tensor_size >= tensor->data); // check for overflow
        GGML_ASSERT(tensor->data >= buffer_start && tensor->data + tensor_size <= buffer_start + buffer_size);
    }
    result->op = (ggml_op) tensor->op;
    for (uint32_t i = 0; i < GGML_MAX_OP_PARAMS / sizeof(int32_t); i++) {
        result->op_params[i] = tensor->op_params[i];
    }
    result->flags = tensor->flags;
    result->data = reinterpret_cast<void *>(tensor->data);
    ggml_set_name(result, tensor->name);
    return result;
}
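
// Note: deserialize_tensor rebuilds the tensor inside a scratch no_alloc
// context; if the claimed buffer is not one this server registered in
// `buffers`, the buffer field is dropped, and for registered buffers the
// asserts above reject any data range that does not fall entirely inside
// the buffer.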

bool rpc_server::set_tensor(const std::vector<uint8_t> & input) {
    // serialization format: | rpc_tensor | offset (8 bytes) | data (size bytes) |
    if (input.size() < sizeof(rpc_tensor) + sizeof(uint64_t)) {
        return false;
    }
    const rpc_tensor * in_tensor = (const rpc_tensor *)input.data();
    uint64_t offset;
    memcpy(&offset, input.data() + sizeof(rpc_tensor), sizeof(offset));
    const size_t size = input.size() - sizeof(rpc_tensor) - sizeof(offset);

    struct ggml_init_params params {
        /*.mem_size   =*/ ggml_tensor_overhead(),
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ true,
    };
    struct ggml_context * ctx = ggml_init(params);
    ggml_tensor * tensor = deserialize_tensor(ctx, in_tensor);
    if (tensor == nullptr) {
        GGML_PRINT_DEBUG("[%s] error deserializing tensor\n", __func__);
        ggml_free(ctx);
        return false;
    }
    GGML_PRINT_DEBUG("[%s] buffer: %p, data: %p, offset: %" PRIu64 ", size: %zu\n", __func__, (void*)tensor->buffer, tensor->data, offset, size);

    // sanitize tensor->data
    {
        const size_t p0 = (size_t) ggml_backend_buffer_get_base(tensor->buffer);
        const size_t p1 = p0 + ggml_backend_buffer_get_size(tensor->buffer);

        if (in_tensor->data + offset < p0 || in_tensor->data + offset >= p1 || size > (p1 - in_tensor->data - offset)) {
            GGML_ABORT("[%s] tensor->data out of bounds\n", __func__);
        }
    }

    const void * data = input.data() + sizeof(rpc_tensor) + sizeof(offset);
    ggml_backend_tensor_set(tensor, data, offset, size);
    ggml_free(ctx);
    return true;
}

bool rpc_server::get_tensor(const std::vector<uint8_t> & input, std::vector<uint8_t> & output) {
    // serialization format: | rpc_tensor | offset (8 bytes) | size (8 bytes) |
    if (input.size() != sizeof(rpc_tensor) + 2*sizeof(uint64_t)) {
        return false;
    }
    const rpc_tensor * in_tensor = (const rpc_tensor *)input.data();
    uint64_t offset;
    memcpy(&offset, input.data() + sizeof(rpc_tensor), sizeof(offset));
    uint64_t size;
    memcpy(&size, input.data() + sizeof(rpc_tensor) + sizeof(offset), sizeof(size));

    struct ggml_init_params params {
        /*.mem_size   =*/ ggml_tensor_overhead(),
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ true,
    };
    struct ggml_context * ctx = ggml_init(params);
    ggml_tensor * tensor = deserialize_tensor(ctx, in_tensor);
    if (tensor == nullptr) {
        GGML_PRINT_DEBUG("[%s] error deserializing tensor\n", __func__);
        ggml_free(ctx);
        return false;
    }
    GGML_PRINT_DEBUG("[%s] buffer: %p, data: %p, offset: %" PRIu64 ", size: %" PRIu64 "\n", __func__, (void*)tensor->buffer, tensor->data, offset, size);

    // sanitize tensor->data
    {
        const size_t p0 = (size_t) ggml_backend_buffer_get_base(tensor->buffer);
        const size_t p1 = p0 + ggml_backend_buffer_get_size(tensor->buffer);

        if (in_tensor->data + offset < p0 || in_tensor->data + offset >= p1 || size > (p1 - in_tensor->data - offset)) {
            GGML_ABORT("[%s] tensor->data out of bounds\n", __func__);
        }
    }

    // output serialization format: | data (size bytes) |
    output.resize(size, 0);
    ggml_backend_tensor_get(tensor, output.data(), offset, size);
    ggml_free(ctx);
    return true;
}

bool rpc_server::copy_tensor(const std::vector<uint8_t> & input, std::vector<uint8_t> & output) {
    // serialization format: | rpc_tensor src | rpc_tensor dst |
    if (input.size() != 2*sizeof(rpc_tensor)) {
        return false;
    }
    const rpc_tensor * rpc_src = (const rpc_tensor *)input.data();
    // the dst struct starts sizeof(rpc_tensor) bytes in; sizeof(rpc_src) would
    // be the size of a pointer and would misalign the read
    const rpc_tensor * rpc_dst = (const rpc_tensor *)(input.data() + sizeof(rpc_tensor));

    struct ggml_init_params params {
        /*.mem_size   =*/ 2*ggml_tensor_overhead(),
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ true,
    };
    struct ggml_context * ctx = ggml_init(params);
    ggml_tensor * src = deserialize_tensor(ctx, rpc_src);
    ggml_tensor * dst = deserialize_tensor(ctx, rpc_dst);
    if (src == nullptr || dst == nullptr) {
        GGML_PRINT_DEBUG("[%s] error deserializing tensors\n", __func__);
        ggml_free(ctx);
        return false;
    }
    GGML_PRINT_DEBUG("[%s] src->buffer: %p, dst->buffer: %p\n", __func__, (void*)src->buffer, (void*)dst->buffer);
    bool result = ggml_backend_buffer_copy_tensor(src, dst);
    // output serialization format: | result (1 byte) |
    output.resize(1, 0);
    output[0] = result;
    ggml_free(ctx);
    return true;
}

ggml_tensor * rpc_server::create_node(uint64_t id,
                                      struct ggml_context * ctx,
                                      const std::unordered_map<uint64_t, const rpc_tensor*> & tensor_ptrs,
                                      std::unordered_map<uint64_t, struct ggml_tensor*> & tensor_map) {
    if (id == 0) {
        return nullptr;
    }
    if (tensor_map.find(id) != tensor_map.end()) {
        return tensor_map[id];
    }
    const rpc_tensor * tensor = tensor_ptrs.at(id);
    struct ggml_tensor * result = deserialize_tensor(ctx, tensor);
    if (result == nullptr) {
        return nullptr;
    }
    tensor_map[id] = result;
    for (int i = 0; i < GGML_MAX_SRC; i++) {
        result->src[i] = create_node(tensor->src[i], ctx, tensor_ptrs, tensor_map);
    }
    result->view_src = create_node(tensor->view_src, ctx, tensor_ptrs, tensor_map);
    result->view_offs = tensor->view_offs;
    return result;
}

bool rpc_server::graph_compute(const std::vector<uint8_t> & input, std::vector<uint8_t> & output) {
    // serialization format:
    // | n_nodes (4 bytes) | nodes (n_nodes * sizeof(uint64_t)) | n_tensors (4 bytes) | tensors (n_tensors * sizeof(rpc_tensor)) |
    if (input.size() < sizeof(uint32_t)) {
        return false;
    }
    uint32_t n_nodes;
    memcpy(&n_nodes, input.data(), sizeof(n_nodes));
    if (input.size() < sizeof(uint32_t) + n_nodes*sizeof(uint64_t) + sizeof(uint32_t)) {
        return false;
    }
    const uint64_t * nodes = (const uint64_t *)(input.data() + sizeof(n_nodes));
    uint32_t n_tensors;
    memcpy(&n_tensors, input.data() + sizeof(n_nodes) + n_nodes*sizeof(uint64_t), sizeof(n_tensors));
    if (input.size() < sizeof(uint32_t) + n_nodes*sizeof(uint64_t) + sizeof(uint32_t) + n_tensors*sizeof(rpc_tensor)) {
        return false;
    }
    const rpc_tensor * tensors = (const rpc_tensor *)(input.data() + sizeof(n_nodes) + n_nodes*sizeof(uint64_t) + sizeof(n_tensors));
    GGML_PRINT_DEBUG("[%s] n_nodes: %u, n_tensors: %u\n", __func__, n_nodes, n_tensors);

    size_t buf_size = ggml_tensor_overhead()*(n_nodes + n_tensors) + ggml_graph_overhead_custom(n_nodes, false);
    struct ggml_init_params params = {
        /*.mem_size   =*/ buf_size,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ true,
    };
    struct ggml_context * ctx = ggml_init(params);
    struct ggml_cgraph * graph = ggml_new_graph_custom(ctx, n_nodes, false);
    graph->n_nodes = n_nodes;
    std::unordered_map<uint64_t, const rpc_tensor*> tensor_ptrs;
    for (uint32_t i = 0; i < n_tensors; i++) {
        tensor_ptrs[tensors[i].id] = &tensors[i];
    }
    std::unordered_map<uint64_t, ggml_tensor*> tensor_map;
    for (uint32_t i = 0; i < n_nodes; i++) {
        int64_t id;
        memcpy(&id, &nodes[i], sizeof(id));
        graph->nodes[i] = create_node(id, ctx, tensor_ptrs, tensor_map);
    }
    ggml_status status = ggml_backend_graph_compute(backend, graph);
    // output serialization format: | status (1 byte) |
    output.resize(1, 0);
    output[0] = status;
    ggml_free(ctx);
    return true;
}

rpc_server::~rpc_server() {
    for (auto buffer : buffers) {
        ggml_backend_buffer_free(buffer);
    }
}

static void rpc_serve_client(ggml_backend_t backend, sockfd_t sockfd, size_t free_mem, size_t total_mem) {
    rpc_server server(backend);
    while (true) {
        uint8_t cmd;
        if (!recv_data(sockfd, &cmd, 1)) {
            break;
        }
        if (cmd >= RPC_CMD_COUNT) {
            // fail fast if the command is invalid
            fprintf(stderr, "Unknown command: %d\n", cmd);
            break;
        }
        std::vector<uint8_t> input;
        std::vector<uint8_t> output;
        uint64_t input_size;
        if (!recv_data(sockfd, &input_size, sizeof(input_size))) {
            break;
        }
        try {
            input.resize(input_size);
        } catch (const std::bad_alloc & e) {
            fprintf(stderr, "Failed to allocate input buffer of size %" PRIu64 "\n", input_size);
            break;
        }
        if (!recv_data(sockfd, input.data(), input_size)) {
            break;
        }
        bool ok = true;
        switch (cmd) {
            case RPC_CMD_ALLOC_BUFFER: {
                ok = server.alloc_buffer(input, output);
                break;
            }
            case RPC_CMD_GET_ALIGNMENT: {
                server.get_alignment(output);
                break;
            }
            case RPC_CMD_GET_MAX_SIZE: {
                server.get_max_size(output);
                break;
            }
            case RPC_CMD_BUFFER_GET_BASE: {
                ok = server.buffer_get_base(input, output);
                break;
            }
            case RPC_CMD_FREE_BUFFER: {
                ok = server.free_buffer(input);
                break;
            }
            case RPC_CMD_BUFFER_CLEAR: {
                ok = server.buffer_clear(input);
                break;
            }
            case RPC_CMD_SET_TENSOR: {
                ok = server.set_tensor(input);
                break;
            }
            case RPC_CMD_GET_TENSOR: {
                ok = server.get_tensor(input, output);
                break;
            }
            case RPC_CMD_COPY_TENSOR: {
                ok = server.copy_tensor(input, output);
                break;
            }
            case RPC_CMD_GRAPH_COMPUTE: {
                ok = server.graph_compute(input, output);
                break;
            }
            case RPC_CMD_GET_DEVICE_MEMORY: {
                // output serialization format: | free (8 bytes) | total (8 bytes) |
                output.resize(2*sizeof(uint64_t), 0);
                memcpy(output.data(), &free_mem, sizeof(free_mem));
                memcpy(output.data() + sizeof(uint64_t), &total_mem, sizeof(total_mem));
                break;
            }
            default: {
                fprintf(stderr, "Unknown command: %d\n", cmd);
                ok = false;
            }
        }
        if (!ok) {
            break;
        }
        uint64_t output_size = output.size();
        if (!send_data(sockfd, &output_size, sizeof(output_size))) {
            break;
        }
        if (!send_data(sockfd, output.data(), output_size)) {
            break;
        }
    }
}

void start_rpc_server(ggml_backend_t backend, const char * endpoint, size_t free_mem, size_t total_mem) {
    std::string host;
    int port;
    if (!parse_endpoint(endpoint, host, port)) {
        return;
    }
#ifdef _WIN32
    {
        WSADATA wsaData;
        int res = WSAStartup(MAKEWORD(2, 2), &wsaData);
        if (res != 0) {
            fprintf(stderr, "WSAStartup failed: %d\n", res);
            return;
        }
    }
#endif
    auto server_socket = create_server_socket(host.c_str(), port);
    if (server_socket == nullptr) {
        fprintf(stderr, "Failed to create server socket\n");
        return;
    }
    while (true) {
        auto client_socket = socket_accept(server_socket->fd);
        if (client_socket == nullptr) {
            fprintf(stderr, "Failed to accept client connection\n");
            return;
        }
        printf("Accepted client connection, free_mem=%zu, total_mem=%zu\n", free_mem, total_mem);
        fflush(stdout);
        rpc_serve_client(backend, client_socket->fd, free_mem, total_mem);
        printf("Client connection closed\n");
        fflush(stdout);
    }
#ifdef _WIN32
    WSACleanup();
#endif
}
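
// Usage sketch (illustrative; the CPU backend and memory figures are
// placeholder assumptions): serve one client at a time on all interfaces:
//
//     ggml_backend_t backend = ggml_backend_cpu_init();
//     start_rpc_server(backend, "0.0.0.0:50052",
//                      /* free_mem  = */ 8ull  << 30,
//                      /* total_mem = */ 16ull << 30);
//
// The accept loop is deliberately single-threaded: one rpc_server per
// connection, whose destructor frees any buffers the client left behind.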