// test-thread-safety.cpp
//
// thread safety test
// - Loads a copy of the same model on each GPU, plus a copy on the CPU
// - Creates n_parallel (--parallel) contexts per model
// - Runs inference in parallel on each context
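//
// Example invocation (illustrative only - the binary name, model file, and
// parameter values below are assumptions, not taken from this file):
//
//   ./test-thread-safety -m model.gguf -p "The quick brown fox" -n 16 --parallel 2 -ngl 99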

#include <array>
#include <thread>
#include <vector>
#include <atomic>
#include "llama.h"
#include "arg.h"
#include "common.h"
#include "log.h"
#include "sampling.h"

int main(int argc, char ** argv) {
    common_params params;

    if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON)) {
        return 1;
    }

    common_init();

    llama_backend_init();
    llama_numa_init(params.numa);

    LOG_INF("%s\n", common_params_get_system_info(params).c_str());

    //llama_log_set([](ggml_log_level level, const char * text, void * /*user_data*/) {
    //    if (level == GGML_LOG_LEVEL_ERROR) {
    //        common_log_add(common_log_main(), level, "%s", text);
    //    }
    //}, NULL);

    auto cparams = common_context_params_to_llama(params);

    // each context has a single sequence
    cparams.n_seq_max = 1;
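
    // enumerate the backend devices and collect the GPUs; each entry is a
    // null-terminated, single-device list that can be assigned to mparams.devices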
    int dev_count = ggml_backend_dev_count();
    std::vector<std::array<ggml_backend_dev_t, 2>> gpus;
    for (int i = 0; i < dev_count; ++i) {
        auto * dev = ggml_backend_dev_get(i);
        if (dev && ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_GPU) {
            gpus.push_back({dev, nullptr});
        }
    }

    const int gpu_dev_count = (int)gpus.size();
    const int num_models = gpu_dev_count + 1 + 1; // GPUs + 1 CPU model + 1 layer split
    //const int num_models = std::max(1, gpu_dev_count);
    const int num_contexts = std::max(1, params.n_parallel);

    std::vector<llama_model_ptr> models;
    std::vector<std::thread> threads;
    std::atomic<bool> failed = false;
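
    // load one model pinned to each GPU, one CPU-only model, and one model
    // split layer-wise across the available devices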
    for (int m = 0; m < num_models; ++m) {
        auto mparams = common_model_params_to_llama(params);

        if (m < gpu_dev_count) {
            mparams.split_mode = LLAMA_SPLIT_MODE_NONE;
            mparams.devices = gpus[m].data();
        } else if (m == gpu_dev_count) {
            mparams.split_mode = LLAMA_SPLIT_MODE_NONE;
            mparams.main_gpu = -1; // CPU model
        } else {
            mparams.split_mode = LLAMA_SPLIT_MODE_LAYER;
        }

        llama_model * model = llama_model_load_from_file(params.model.path.c_str(), mparams);
        if (model == NULL) {
            LOG_ERR("%s: failed to load model '%s'\n", __func__, params.model.path.c_str());
            return 1;
        }

        models.emplace_back(model);
    }
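
    // one worker thread per (model, context) pair; each thread creates its own
    // context and sampler and runs an independent generation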
    for (int m = 0; m < num_models; ++m) {
        auto * model = models[m].get();
        for (int c = 0; c < num_contexts; ++c) {
            threads.emplace_back([&, m, c, model]() {
                LOG_INF("Creating context %d/%d for model %d/%d\n", c + 1, num_contexts, m + 1, num_models);

                llama_context_ptr ctx { llama_init_from_model(model, cparams) };
                if (ctx == NULL) {
                    LOG_ERR("failed to create context\n");
                    failed.store(true);
                    return;
                }

                std::unique_ptr<common_sampler, decltype(&common_sampler_free)> sampler { common_sampler_init(model, params.sampling), common_sampler_free };
                if (sampler == NULL) {
                    LOG_ERR("failed to create sampler\n");
                    failed.store(true);
                    return;
                }

                llama_batch batch = {};
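
                // tokenize the prompt and decode it as a single batch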
                {
                    auto prompt = common_tokenize(ctx.get(), params.prompt, true);
                    if (prompt.empty()) {
                        LOG_ERR("failed to tokenize prompt\n");
                        failed.store(true);
                        return;
                    }
                    batch = llama_batch_get_one(prompt.data(), prompt.size());
                    if (llama_decode(ctx.get(), batch)) {
                        LOG_ERR("failed to decode prompt\n");
                        failed.store(true);
                        return;
                    }
                }

                const auto * vocab = llama_model_get_vocab(model);
                std::string result = params.prompt;
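
                // sample one token at a time from the last decoded position,
                // append its text, and stop on an end-of-generation token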
                for (int i = 0; i < params.n_predict; i++) {
                    llama_token token;
                    if (batch.n_tokens > 0) {
                        token = common_sampler_sample(sampler.get(), ctx.get(), batch.n_tokens - 1);
                    } else {
                        token = llama_vocab_bos(vocab);
                    }

                    result += common_token_to_piece(ctx.get(), token);

                    if (llama_vocab_is_eog(vocab, token)) {
                        break;
                    }

                    batch = llama_batch_get_one(&token, 1);
                    if (llama_decode(ctx.get(), batch)) {
                        LOG_ERR("Model %d/%d, Context %d/%d: failed to decode\n", m + 1, num_models, c + 1, num_contexts);
                        failed.store(true);
                        return;
                    }
                }

                LOG_INF("Model %d/%d, Context %d/%d: %s\n\n", m + 1, num_models, c + 1, num_contexts, result.c_str());
            });
        }
    }
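
    // wait for all workers, then report the aggregate result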
    for (auto & thread : threads) {
        thread.join();
    }

    if (failed) {
        LOG_ERR("One or more threads failed.\n");
        return 1;
    }

    LOG_INF("All threads finished without errors.\n");

    return 0;
}