batched.cpp

#include "common.h"
#include "llama.h"

#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>

static void print_usage(int, char ** argv) {
    LOG_TEE("\nexample usage:\n");
    LOG_TEE("\n %s -m model.gguf -p \"Hello my name is\" -n 32 -np 4\n", argv[0]);
    LOG_TEE("\n");
}
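
// this example decodes one prompt and then generates n_parallel independent continuations of it,
// advancing all sequences together with a single batched llama_decode call per step
// (e.g. pass -np 4, as in the usage line above, to generate 4 sequences in parallel)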
int main(int argc, char ** argv) {
    gpt_params params;

    params.prompt    = "Hello my name is";
    params.n_predict = 32;

    auto options = gpt_params_parser_init(params, LLAMA_EXAMPLE_COMMON, print_usage);
    if (!gpt_params_parse(argc, argv, params, options)) {
        return 1;
    }

    // number of parallel batches
    int n_parallel = params.n_parallel;

    // total length of the sequences including the prompt
    int n_predict = params.n_predict;

    // init LLM
    llama_backend_init();
    llama_numa_init(params.numa);

    // initialize the model
    llama_model_params model_params = llama_model_params_from_gpt_params(params);

    llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params);

    if (model == NULL) {
        fprintf(stderr, "%s: error: unable to load model\n", __func__);
        return 1;
    }

    // tokenize the prompt
    std::vector<llama_token> tokens_list;
    tokens_list = ::llama_tokenize(model, params.prompt, true);
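
    // required KV cache size: the prompt's cells are shared by every sequence, and each of the
    // n_parallel sequences then needs room for its own (n_predict - prompt length) generated tokens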
    const int n_kv_req = tokens_list.size() + (n_predict - tokens_list.size())*n_parallel;

    // initialize the context
    llama_context_params ctx_params = llama_context_params_from_gpt_params(params);

    ctx_params.n_ctx   = n_kv_req;
    ctx_params.n_batch = std::max(n_predict, n_parallel);

    llama_context * ctx = llama_new_context_with_model(model, ctx_params);

    auto sparams = llama_sampler_chain_default_params();

    llama_sampler * smpl = llama_sampler_chain_init(sparams);

    llama_sampler_chain_add(smpl, llama_sampler_init_top_k(params.sparams.top_k));
    llama_sampler_chain_add(smpl, llama_sampler_init_top_p(params.sparams.top_p, params.sparams.min_keep));
    llama_sampler_chain_add(smpl, llama_sampler_init_temp (params.sparams.temp));
    llama_sampler_chain_add(smpl, llama_sampler_init_dist (params.sparams.seed));
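    // the chain applies these samplers in the order they were added: top-k and top-p prune the
    // candidate tokens, temperature rescales the remaining logits, and dist draws the final token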

    if (ctx == NULL) {
        fprintf(stderr, "%s: error: failed to create the llama_context\n", __func__);
        return 1;
    }

    const int n_ctx = llama_n_ctx(ctx);

    LOG_TEE("\n%s: n_predict = %d, n_ctx = %d, n_batch = %u, n_parallel = %d, n_kv_req = %d\n", __func__, n_predict, n_ctx, ctx_params.n_batch, n_parallel, n_kv_req);

    // make sure the KV cache is big enough to hold all the prompt and generated tokens
    if (n_kv_req > n_ctx) {
        LOG_TEE("%s: error: n_kv_req (%d) > n_ctx, the required KV cache size is not big enough\n", __func__, n_kv_req);
        LOG_TEE("%s: either reduce n_parallel or increase n_ctx\n", __func__);
        return 1;
    }

    // print the prompt token-by-token
    fprintf(stderr, "\n");

    for (auto id : tokens_list) {
        fprintf(stderr, "%s", llama_token_to_piece(ctx, id).c_str());
    }

    fflush(stderr);

    // create a llama_batch
    // we use this object to submit token data for decoding
    llama_batch batch = llama_batch_init(std::max(tokens_list.size(), (size_t) n_parallel), 0, n_parallel);

    std::vector<llama_seq_id> seq_ids(n_parallel, 0);
    for (int32_t i = 0; i < n_parallel; ++i) {
        seq_ids[i] = i;
    }

    // evaluate the initial prompt
    for (size_t i = 0; i < tokens_list.size(); ++i) {
        llama_batch_add(batch, tokens_list[i], i, seq_ids, false);
    }
    GGML_ASSERT(batch.n_tokens == (int) tokens_list.size());
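
    // note: every prompt token was added with all n_parallel sequence ids, so the sequences share
    // the prompt's KV cache cells instead of each keeping a private copy of the prompt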

    if (llama_model_has_encoder(model)) {
        if (llama_encode(ctx, batch)) {
            LOG_TEE("%s : failed to eval\n", __func__);
            return 1;
        }

        llama_token decoder_start_token_id = llama_model_decoder_start_token(model);
        if (decoder_start_token_id == -1) {
            decoder_start_token_id = llama_token_bos(model);
        }

        llama_batch_clear(batch);
        llama_batch_add(batch, decoder_start_token_id, 0, seq_ids, false);
    }

    // llama_decode will output logits only for the last token of the prompt
    batch.logits[batch.n_tokens - 1] = true;

    if (llama_decode(ctx, batch) != 0) {
        LOG_TEE("%s: llama_decode() failed\n", __func__);
        return 1;
    }

    //// assign the system KV cache to all parallel sequences
    //// this way, the parallel sequences will "reuse" the prompt tokens without having to copy them
    //for (int32_t i = 1; i < n_parallel; ++i) {
    //    llama_kv_cache_seq_cp(ctx, 0, i, -1, -1);
    //}

    if (n_parallel > 1) {
        LOG_TEE("\n\n%s: generating %d sequences ...\n", __func__, n_parallel);
    }

    // main loop

    // we will store the parallel decoded sequences in this vector
    std::vector<std::string> streams(n_parallel);

    // remember the batch index of the last token for each parallel sequence
    // we need this to determine which logits to sample from
    std::vector<int32_t> i_batch(n_parallel, batch.n_tokens - 1);

    int n_cur    = batch.n_tokens;
    int n_decode = 0;

    const auto t_main_start = ggml_time_us();

    while (n_cur <= n_predict) {
        // prepare the next batch
        llama_batch_clear(batch);

        // sample the next token for each parallel sequence / stream
        for (int32_t i = 0; i < n_parallel; ++i) {
            if (i_batch[i] < 0) {
                // the stream has already finished
                continue;
            }

            const llama_token new_token_id = llama_sampler_sample(smpl, ctx, i_batch[i]);

            // is it an end of generation? -> mark the stream as finished
            if (llama_token_is_eog(model, new_token_id) || n_cur == n_predict) {
                i_batch[i] = -1;
                LOG_TEE("\n");
                if (n_parallel > 1) {
                    LOG_TEE("%s: stream %d finished at n_cur = %d", __func__, i, n_cur);
                }

                continue;
            }

            // if there is only one stream, we print immediately to stdout
            if (n_parallel == 1) {
                LOG_TEE("%s", llama_token_to_piece(ctx, new_token_id).c_str());
                fflush(stdout);
            }

            streams[i] += llama_token_to_piece(ctx, new_token_id);

            i_batch[i] = batch.n_tokens;

            // push this new token for next evaluation
            llama_batch_add(batch, new_token_id, n_cur, { i }, true);

            n_decode += 1;
        }

        // all streams are finished
        if (batch.n_tokens == 0) {
            break;
        }

        n_cur += 1;

        // evaluate the current batch with the transformer model
        if (llama_decode(ctx, batch)) {
            fprintf(stderr, "%s : failed to eval, return code %d\n", __func__, 1);
            return 1;
        }
    }

    LOG_TEE("\n");

    if (n_parallel > 1) {
        LOG_TEE("\n");

        for (int32_t i = 0; i < n_parallel; ++i) {
            LOG_TEE("sequence %d:\n\n%s%s\n\n", i, params.prompt.c_str(), streams[i].c_str());
        }
    }

    const auto t_main_end = ggml_time_us();

    LOG_TEE("%s: decoded %d tokens in %.2f s, speed: %.2f t/s\n",
            __func__, n_decode, (t_main_end - t_main_start) / 1000000.0f, n_decode / ((t_main_end - t_main_start) / 1000000.0f));

    LOG_TEE("\n");
    llama_perf_print(smpl, LLAMA_PERF_TYPE_SAMPLER_CHAIN);
    llama_perf_print(ctx, LLAMA_PERF_TYPE_CONTEXT);

    fprintf(stderr, "\n");

    llama_batch_free(batch);
    llama_sampler_free(smpl);

    llama_free(ctx);
    llama_free_model(model);

    llama_backend_free();

    return 0;
}