batched.cpp

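// Batched decoding example: tokenize a single prompt once, then decode
// n_parallel independent continuations of it within a single llama_batch.
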
#include "common.h"
#include "llama.h"

#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>

static void print_usage(int argc, char ** argv, const gpt_params & params) {
    gpt_params_print_usage(argc, argv, params);

    LOG_TEE("\nexample usage:\n");
    LOG_TEE("\n    %s -m model.gguf -p \"Hello my name is\" -n 32 -np 4\n", argv[0]);
    LOG_TEE("\n");
}
int main(int argc, char ** argv) {
    gpt_params params;

    params.prompt = "Hello my name is";
    params.n_predict = 32;

    if (!gpt_params_parse(argc, argv, params)) {
        print_usage(argc, argv, params);
        return 1;
    }

    // number of parallel batches
    int n_parallel = params.n_parallel;

    // total length of the sequences including the prompt
    int n_predict = params.n_predict;

    // init LLM
    llama_backend_init();
    llama_numa_init(params.numa);

    // initialize the model
    llama_model_params model_params = llama_model_params_from_gpt_params(params);

    llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params);

    if (model == NULL) {
        fprintf(stderr, "%s: error: unable to load model\n", __func__);
        return 1;
    }

    // tokenize the prompt
    std::vector<llama_token> tokens_list;
    tokens_list = ::llama_tokenize(model, params.prompt, true);
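
    // required KV cache size: the prompt is stored once and shared by all sequences,
    // each sequence then adds its own generated tokens (n_predict minus the prompt length)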
    const int n_kv_req = tokens_list.size() + (n_predict - tokens_list.size())*n_parallel;

    // initialize the context
    llama_context_params ctx_params = llama_context_params_from_gpt_params(params);

    ctx_params.n_ctx   = n_kv_req;
    ctx_params.n_batch = std::max(n_predict, n_parallel);

    llama_context * ctx = llama_new_context_with_model(model, ctx_params);
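
    // build the sampler chain: top-k, then top-p, then temperature scaling,
    // and finally sampling from the resulting distribution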
    auto sparams = llama_sampler_chain_default_params();

    llama_sampler * smpl = llama_sampler_chain_init(sparams);

    llama_sampler_chain_add(smpl, llama_sampler_init_top_k(params.sparams.top_k));
    llama_sampler_chain_add(smpl, llama_sampler_init_top_p(params.sparams.top_p, params.sparams.min_keep));
    llama_sampler_chain_add(smpl, llama_sampler_init_temp (params.sparams.temp));
    llama_sampler_chain_add(smpl, llama_sampler_init_dist (params.sparams.seed));

    if (ctx == NULL) {
        fprintf(stderr, "%s: error: failed to create the llama_context\n", __func__);
        return 1;
    }

    const int n_ctx = llama_n_ctx(ctx);

    LOG_TEE("\n%s: n_predict = %d, n_ctx = %d, n_batch = %u, n_parallel = %d, n_kv_req = %d\n", __func__, n_predict, n_ctx, ctx_params.n_batch, n_parallel, n_kv_req);

    // make sure the KV cache is big enough to hold all the prompt and generated tokens
    if (n_kv_req > n_ctx) {
        LOG_TEE("%s: error: n_kv_req (%d) > n_ctx, the required KV cache size is not big enough\n", __func__, n_kv_req);
        LOG_TEE("%s:        either reduce n_parallel or increase n_ctx\n", __func__);
        return 1;
    }

    // print the prompt token-by-token
    fprintf(stderr, "\n");

    for (auto id : tokens_list) {
        fprintf(stderr, "%s", llama_token_to_piece(ctx, id).c_str());
    }

    fflush(stderr);

    // create a llama_batch
    // we use this object to submit token data for decoding
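    // the batch needs room for the whole prompt or for one token per sequence during generation,
    // whichever is larger; embd = 0 (token ids, not embeddings), n_seq_max = n_parallel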
    llama_batch batch = llama_batch_init(std::max(tokens_list.size(), (size_t) n_parallel), 0, n_parallel);

    std::vector<llama_seq_id> seq_ids(n_parallel, 0);
    for (int32_t i = 0; i < n_parallel; ++i) {
        seq_ids[i] = i;
    }

    // evaluate the initial prompt
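    // each prompt token is added to all n_parallel sequences at once, so the prompt is
    // decoded a single time and its KV cache entries are shared between the sequences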
    for (size_t i = 0; i < tokens_list.size(); ++i) {
        llama_batch_add(batch, tokens_list[i], i, seq_ids, false);
    }
    GGML_ASSERT(batch.n_tokens == (int) tokens_list.size());
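
    // encoder-decoder models: run the encoder on the prompt first,
    // then restart the batch from the decoder start token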
    if (llama_model_has_encoder(model)) {
        if (llama_encode(ctx, batch)) {
            LOG_TEE("%s : failed to eval\n", __func__);
            return 1;
        }

        llama_token decoder_start_token_id = llama_model_decoder_start_token(model);
        if (decoder_start_token_id == -1) {
            decoder_start_token_id = llama_token_bos(model);
        }

        llama_batch_clear(batch);
        llama_batch_add(batch, decoder_start_token_id, 0, seq_ids, false);
    }

    // llama_decode will output logits only for the last token of the prompt
    batch.logits[batch.n_tokens - 1] = true;

    if (llama_decode(ctx, batch) != 0) {
        LOG_TEE("%s: llama_decode() failed\n", __func__);
        return 1;
    }

    //// assign the system KV cache to all parallel sequences
    //// this way, the parallel sequences will "reuse" the prompt tokens without having to copy them
    //for (int32_t i = 1; i < n_parallel; ++i) {
    //    llama_kv_cache_seq_cp(ctx, 0, i, -1, -1);
    //}
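    // (the copy is not needed here: the prompt tokens were added to every sequence above,
    //  so the prompt KV cache is already shared)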

    if (n_parallel > 1) {
        LOG_TEE("\n\n%s: generating %d sequences ...\n", __func__, n_parallel);
    }

    // main loop

    // we will store the parallel decoded sequences in this vector
    std::vector<std::string> streams(n_parallel);

    // remember the batch index of the last token for each parallel sequence
    // we need this to determine which logits to sample from
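    // (initially this is the last prompt token, the only one with logits enabled)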
    std::vector<int32_t> i_batch(n_parallel, batch.n_tokens - 1);

    int n_cur    = batch.n_tokens;
    int n_decode = 0;

    const auto t_main_start = ggml_time_us();

    while (n_cur <= n_predict) {
        // prepare the next batch
        llama_batch_clear(batch);

        // sample the next token for each parallel sequence / stream
        for (int32_t i = 0; i < n_parallel; ++i) {
            if (i_batch[i] < 0) {
                // the stream has already finished
                continue;
            }

            const llama_token new_token_id = llama_sampler_sample(smpl, ctx, i_batch[i]);

            llama_sampler_accept(smpl, new_token_id);

            // is it an end of generation? -> mark the stream as finished
            if (llama_token_is_eog(model, new_token_id) || n_cur == n_predict) {
                i_batch[i] = -1;
                LOG_TEE("\n");
                if (n_parallel > 1) {
                    LOG_TEE("%s: stream %d finished at n_cur = %d", __func__, i, n_cur);
                }

                continue;
            }

            // if there is only one stream, we print immediately to stdout
            if (n_parallel == 1) {
                LOG_TEE("%s", llama_token_to_piece(ctx, new_token_id).c_str());
                fflush(stdout);
            }

            streams[i] += llama_token_to_piece(ctx, new_token_id);
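
            // the token pushed below will sit at index batch.n_tokens of the next batch;
            // remember it so we sample this stream's next token from the right logits row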
            i_batch[i] = batch.n_tokens;

            // push this new token for next evaluation
            llama_batch_add(batch, new_token_id, n_cur, { i }, true);

            n_decode += 1;
        }

        // all streams are finished
        if (batch.n_tokens == 0) {
            break;
        }

        n_cur += 1;

        // evaluate the current batch with the transformer model
        if (llama_decode(ctx, batch)) {
            fprintf(stderr, "%s : failed to eval, return code %d\n", __func__, 1);
            return 1;
        }
    }

    LOG_TEE("\n");

    if (n_parallel > 1) {
        LOG_TEE("\n");

        for (int32_t i = 0; i < n_parallel; ++i) {
            LOG_TEE("sequence %d:\n\n%s%s\n\n", i, params.prompt.c_str(), streams[i].c_str());
        }
    }

    const auto t_main_end = ggml_time_us();

    LOG_TEE("%s: decoded %d tokens in %.2f s, speed: %.2f t/s\n",
            __func__, n_decode, (t_main_end - t_main_start) / 1000000.0f, n_decode / ((t_main_end - t_main_start) / 1000000.0f));

    LOG_TEE("\n");
    llama_perf_print(smpl, LLAMA_PERF_TYPE_SAMPLER_CHAIN);
    llama_perf_print(ctx,  LLAMA_PERF_TYPE_CONTEXT);

    fprintf(stderr, "\n");

    llama_batch_free(batch);
    llama_sampler_free(smpl);
    llama_free(ctx);
    llama_free_model(model);

    llama_backend_free();

    return 0;
}