// simple.cpp
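// minimal llama.cpp example: load a GGUF model, tokenize a prompt, and greedily
// generate n_predict tokens while streaming them to stdout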

#include "common.h"
#include "llama.h"

#include <cmath>
#include <cstdio>
#include <string>
#include <vector>

static void print_usage(int argc, char ** argv, const gpt_params & params) {
    gpt_params_print_usage(argc, argv, params);

    LOG_TEE("\nexample usage:\n");
    LOG_TEE("\n    %s -m model.gguf -p \"Hello my name is\" -n 32\n", argv[0]);
    LOG_TEE("\n");
}

int main(int argc, char ** argv) {
    gpt_params params;

    params.prompt = "Hello my name is";
    params.n_predict = 32;

    if (!gpt_params_parse(argc, argv, params)) {
        print_usage(argc, argv, params);
        return 1;
    }

    // total length of the sequence including the prompt
    const int n_predict = params.n_predict;

    // init LLM
    llama_backend_init();
    llama_numa_init(params.numa);

    // initialize the model
    llama_model_params model_params = llama_model_params_from_gpt_params(params);

    llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params);

    if (model == NULL) {
        fprintf(stderr, "%s: error: unable to load model\n", __func__);
        return 1;
    }

    // initialize the context
    llama_context_params ctx_params = llama_context_params_from_gpt_params(params);

    llama_context * ctx = llama_new_context_with_model(model, ctx_params);

    if (ctx == NULL) {
        fprintf(stderr, "%s: error: failed to create the llama_context\n", __func__);
        return 1;
    }
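
    // set up the sampler chain: samplers are applied to the logits in the order
    // they are added; a single greedy sampler always picks the most likely token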
    auto sparams = llama_sampler_chain_default_params();
    sparams.no_perf = false;

    llama_sampler * smpl = llama_sampler_chain_init(sparams);

    llama_sampler_chain_add(smpl, llama_sampler_init_greedy());
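
    // alternative sketch (not part of the original example): for stochastic
    // sampling one could instead chain a temperature sampler followed by a
    // seeded distribution sampler, e.g.:
    //   llama_sampler_chain_add(smpl, llama_sampler_init_temp(0.8f));
    //   llama_sampler_chain_add(smpl, llama_sampler_init_dist(LLAMA_DEFAULT_SEED));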

    // tokenize the prompt
    std::vector<llama_token> tokens_list;
    tokens_list = ::llama_tokenize(ctx, params.prompt, true);
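    // (the third argument asks the tokenizer to add the model's special BOS token, if any)

    // every token in the sequence (prompt + generated) occupies one KV cache
    // cell, so the required size below simplifies to n_predict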
    const int n_ctx    = llama_n_ctx(ctx);
    const int n_kv_req = tokens_list.size() + (n_predict - tokens_list.size());

    LOG_TEE("\n%s: n_predict = %d, n_ctx = %d, n_kv_req = %d\n", __func__, n_predict, n_ctx, n_kv_req);

    // make sure the KV cache is big enough to hold all the prompt and generated tokens
    if (n_kv_req > n_ctx) {
        LOG_TEE("%s: error: n_kv_req > n_ctx, the required KV cache size is not big enough\n", __func__);
        LOG_TEE("%s:        either reduce n_predict or increase n_ctx\n", __func__);
        return 1;
    }

    // print the prompt token-by-token
    fprintf(stderr, "\n");
    for (auto id : tokens_list) {
        fprintf(stderr, "%s", llama_token_to_piece(ctx, id).c_str());
    }
    fflush(stderr);

    // create a llama_batch with size 512
    // we use this object to submit token data for decoding
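    // the arguments are: max tokens per decode call, the embedding size (0 here,
    // since token ids rather than embeddings are submitted), and the maximum
    // number of parallel sequences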
    llama_batch batch = llama_batch_init(512, 0, 1);

    // evaluate the initial prompt
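    // llama_batch_add(batch, token, pos, seq_ids, logits) appends one token at
    // position pos to the given sequences; logits = false means no output is
    // needed for that position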
    for (size_t i = 0; i < tokens_list.size(); i++) {
        llama_batch_add(batch, tokens_list[i], i, { 0 }, false);
    }

    // llama_decode will output logits only for the last token of the prompt
    batch.logits[batch.n_tokens - 1] = true;

    if (llama_decode(ctx, batch) != 0) {
        LOG_TEE("%s: llama_decode() failed\n", __func__);
        return 1;
    }

    // main loop
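    // (one token is generated per iteration: sample from the logits of the last
    // decoded position, print the piece, then decode the sampled token at the
    // next position to obtain fresh logits)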

    int n_cur    = batch.n_tokens;
    int n_decode = 0;

    const auto t_main_start = ggml_time_us();

    while (n_cur <= n_predict) {
        // sample the next token
        {
            const llama_token new_token_id = llama_sampler_sample(smpl, ctx, batch.n_tokens - 1);
            // note: llama_sampler_sample() already accepts the sampled token into
            // the sampler's internal state, so no separate llama_sampler_accept()
            // call is needed here

            // is it an end of generation?
            if (llama_token_is_eog(model, new_token_id) || n_cur == n_predict) {
                LOG_TEE("\n");
                break;
            }

            LOG_TEE("%s", llama_token_to_piece(ctx, new_token_id).c_str());
            fflush(stdout);

            // prepare the next batch
            llama_batch_clear(batch);

            // push this new token for next evaluation
            llama_batch_add(batch, new_token_id, n_cur, { 0 }, true);
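            // logits = true this time: the next llama_decode call must produce
            // output logits for this token so it can be sampled from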

            n_decode += 1;
        }

        n_cur += 1;

        // evaluate the current batch with the transformer model
        if (llama_decode(ctx, batch)) {
            fprintf(stderr, "%s : failed to eval, return code %d\n", __func__, 1);
            return 1;
        }
    }

    LOG_TEE("\n");

    const auto t_main_end = ggml_time_us();

    LOG_TEE("%s: decoded %d tokens in %.2f s, speed: %.2f t/s\n",
            __func__, n_decode, (t_main_end - t_main_start) / 1000000.0f, n_decode / ((t_main_end - t_main_start) / 1000000.0f));

    LOG_TEE("\n");
    llama_perf_print(smpl, LLAMA_PERF_TYPE_SAMPLER_CHAIN);
    llama_perf_print(ctx,  LLAMA_PERF_TYPE_CONTEXT);

    fprintf(stderr, "\n");

    llama_batch_free(batch);
    llama_sampler_free(smpl);
    llama_free(ctx);
    llama_free_model(model);

    llama_backend_free();

    return 0;
}
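
// example invocation (binary name and model path depend on your build):
//   ./simple -m model.gguf -p "Hello my name is" -n 32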