batched.cpp

#include "arg.h"
#include "common.h"
#include "log.h"
#include "llama.h"

#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>

static void print_usage(int, char ** argv) {
    LOG("\nexample usage:\n");
    LOG("\n %s -m model.gguf -p \"Hello my name is\" -n 32 -np 4\n", argv[0]);
    LOG("\n");
}

int main(int argc, char ** argv) {
    common_params params;

    params.prompt = "Hello my name is";
    params.n_predict = 32;

    if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON, print_usage)) {
        return 1;
    }

    common_init();

    // number of parallel batches
    int n_parallel = params.n_parallel;

    // total length of the sequences including the prompt
    int n_predict = params.n_predict;

    // init LLM

    llama_backend_init();
    llama_numa_init(params.numa);

    // initialize the model

    llama_model_params model_params = common_model_params_to_llama(params);

    llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params);

    if (model == NULL) {
        LOG_ERR("%s: error: unable to load model\n" , __func__);
        return 1;
    }

    // tokenize the prompt

    std::vector<llama_token> tokens_list;
    tokens_list = common_tokenize(model, params.prompt, true);

    const int n_kv_req = tokens_list.size() + (n_predict - tokens_list.size())*n_parallel;
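    // note on the formula above: the prompt tokens are added to every sequence below, so their
    // KV cells are stored only once and shared; on top of that, each of the n_parallel sequences
    // generates up to (n_predict - prompt length) tokens of its own, each needing one KV cell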

    // initialize the context

    llama_context_params ctx_params = common_context_params_to_llama(params);

    ctx_params.n_ctx   = n_kv_req;
    ctx_params.n_batch = std::max(n_predict, n_parallel);
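    // n_batch is the largest number of tokens submitted in a single llama_decode call:
    // the full prompt at the start (at most n_predict tokens, since n_predict is the total
    // sequence length including the prompt) and one token per sequence on every later step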

    llama_context * ctx = llama_new_context_with_model(model, ctx_params);

    auto sparams = llama_sampler_chain_default_params();
    sparams.no_perf = false;

    llama_sampler * smpl = llama_sampler_chain_init(sparams);

    llama_sampler_chain_add(smpl, llama_sampler_init_top_k(params.sampling.top_k));
    llama_sampler_chain_add(smpl, llama_sampler_init_top_p(params.sampling.top_p, params.sampling.min_keep));
    llama_sampler_chain_add(smpl, llama_sampler_init_temp (params.sampling.temp));
    llama_sampler_chain_add(smpl, llama_sampler_init_dist (params.sampling.seed));
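    // the samplers run in the order they were added: top-k and top-p trim the candidate list,
    // the temperature sampler rescales the remaining logits, and the final dist sampler draws
    // the token from the resulting distribution using the given seed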

    if (ctx == NULL) {
        LOG_ERR("%s: error: failed to create the llama_context\n" , __func__);
        return 1;
    }

    const int n_ctx = llama_n_ctx(ctx);

    LOG_INF("\n%s: n_predict = %d, n_ctx = %d, n_batch = %u, n_parallel = %d, n_kv_req = %d\n", __func__, n_predict, n_ctx, ctx_params.n_batch, n_parallel, n_kv_req);

    // make sure the KV cache is big enough to hold all the prompt and generated tokens
    if (n_kv_req > n_ctx) {
        LOG_ERR("%s: error: n_kv_req (%d) > n_ctx, the required KV cache size is not big enough\n", __func__, n_kv_req);
        LOG_ERR("%s: either reduce n_parallel or increase n_ctx\n", __func__);
        return 1;
    }

    // print the prompt token-by-token

    LOG("\n");

    for (auto id : tokens_list) {
        LOG("%s", common_token_to_piece(ctx, id).c_str());
    }

    // create a llama_batch
    // we use this object to submit token data for decoding
    llama_batch batch = llama_batch_init(std::max(tokens_list.size(), (size_t) n_parallel), 0, n_parallel);
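    // llama_batch_init(n_tokens, embd, n_seq_max): room for max(prompt size, n_parallel) tokens,
    // embd = 0 means the batch carries token ids rather than embeddings, and each token can be
    // assigned to up to n_parallel sequence ids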

    std::vector<llama_seq_id> seq_ids(n_parallel, 0);
    for (int32_t i = 0; i < n_parallel; ++i) {
        seq_ids[i] = i;
    }

    // evaluate the initial prompt
    for (size_t i = 0; i < tokens_list.size(); ++i) {
        common_batch_add(batch, tokens_list[i], i, seq_ids, false);
    }
    GGML_ASSERT(batch.n_tokens == (int) tokens_list.size());
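    // every prompt token was added with the full seq_ids list, so all n_parallel sequences share
    // the same prompt cells in the KV cache; no logits are requested yet - the flag for the last
    // prompt token is set to true further down, right before llama_decode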

    if (llama_model_has_encoder(model)) {
        if (llama_encode(ctx, batch)) {
            LOG_ERR("%s : failed to eval\n", __func__);
            return 1;
        }

        llama_token decoder_start_token_id = llama_model_decoder_start_token(model);
        if (decoder_start_token_id == LLAMA_TOKEN_NULL) {
            decoder_start_token_id = llama_token_bos(model);
        }

        common_batch_clear(batch);
        common_batch_add(batch, decoder_start_token_id, 0, seq_ids, false);
    }
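    // for encoder-decoder models, the block above runs the prompt through the encoder and then
    // replaces the batch with the decoder start token, which all sequences decode from;
    // decoder-only models skip this block entirely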

    // llama_decode will output logits only for the last token of the prompt
    batch.logits[batch.n_tokens - 1] = true;

    if (llama_decode(ctx, batch) != 0) {
        LOG_ERR("%s: llama_decode() failed\n", __func__);
        return 1;
    }

    //// assign the system KV cache to all parallel sequences
    //// this way, the parallel sequences will "reuse" the prompt tokens without having to copy them
    //for (int32_t i = 1; i < n_parallel; ++i) {
    //    llama_kv_cache_seq_cp(ctx, 0, i, -1, -1);
    //}
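    // the commented-out loop above shows the alternative approach: evaluate the prompt for
    // sequence 0 only and then copy its KV cells to the other sequences; it is not needed here
    // because the prompt tokens were already added to all sequences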

    if (n_parallel > 1) {
        LOG("\n\n%s: generating %d sequences ...\n", __func__, n_parallel);
    }

    // main loop

    // we will store the parallel decoded sequences in this vector
    std::vector<std::string> streams(n_parallel);

    // remember the batch index of the last token for each parallel sequence
    // we need this to determine which logits to sample from
    std::vector<int32_t> i_batch(n_parallel, batch.n_tokens - 1);

    int n_cur    = batch.n_tokens;
    int n_decode = 0;

    const auto t_main_start = ggml_time_us();

    while (n_cur <= n_predict) {
        // prepare the next batch
        common_batch_clear(batch);

        // sample the next token for each parallel sequence / stream
        for (int32_t i = 0; i < n_parallel; ++i) {
            if (i_batch[i] < 0) {
                // the stream has already finished
                continue;
            }

            const llama_token new_token_id = llama_sampler_sample(smpl, ctx, i_batch[i]);

            // is it an end of generation? -> mark the stream as finished
            if (llama_token_is_eog(model, new_token_id) || n_cur == n_predict) {
                i_batch[i] = -1;
                LOG("\n");
                if (n_parallel > 1) {
                    LOG_INF("%s: stream %d finished at n_cur = %d", __func__, i, n_cur);
                }

                continue;
            }

            // if there is only one stream, we print immediately to stdout
            if (n_parallel == 1) {
                LOG("%s", common_token_to_piece(ctx, new_token_id).c_str());
            }

            streams[i] += common_token_to_piece(ctx, new_token_id);

            i_batch[i] = batch.n_tokens;

            // push this new token for next evaluation
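            // the token is assigned only to sequence i and its logits are requested (last
            // argument = true) so the next iteration can sample this stream's continuation;
            // i_batch[i] above records where those logits will sit in the new batch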
            common_batch_add(batch, new_token_id, n_cur, { i }, true);

            n_decode += 1;
        }

        // all streams are finished
        if (batch.n_tokens == 0) {
            break;
        }

        n_cur += 1;

        // evaluate the current batch with the transformer model
        if (llama_decode(ctx, batch)) {
            LOG_ERR("%s : failed to eval, return code %d\n", __func__, 1);
            return 1;
        }
    }

    if (n_parallel > 1) {
        LOG("\n");

        for (int32_t i = 0; i < n_parallel; ++i) {
            LOG("sequence %d:\n\n%s%s\n\n", i, params.prompt.c_str(), streams[i].c_str());
        }
    }

    const auto t_main_end = ggml_time_us();

    LOG_INF("%s: decoded %d tokens in %.2f s, speed: %.2f t/s\n",
            __func__, n_decode, (t_main_end - t_main_start) / 1000000.0f, n_decode / ((t_main_end - t_main_start) / 1000000.0f));

    LOG("\n");
    llama_perf_sampler_print(smpl);
    llama_perf_context_print(ctx);

    fprintf(stderr, "\n");

    llama_batch_free(batch);
    llama_sampler_free(smpl);
    llama_free(ctx);
    llama_free_model(model);

    llama_backend_free();

    return 0;
}