simple.cpp 4.9 KB

#include "common.h"
#include "llama.h"

#include <cmath>
#include <cstdio>
#include <string>
#include <vector>

int main(int argc, char ** argv) {
    gpt_params params;

    if (argc == 1 || argv[1][0] == '-') {
        printf("usage: %s MODEL_PATH [PROMPT]\n", argv[0]);
        return 1;
    }

    if (argc >= 2) {
        params.model = argv[1];
    }

    if (argc >= 3) {
        params.prompt = argv[2];
    }

    if (params.prompt.empty()) {
        params.prompt = "Hello my name is";
    }

    // total length of the sequence including the prompt
    const int n_len = 32;

    // init LLM
    llama_backend_init(params.numa);

    llama_context_params ctx_params = llama_context_default_params();

    ctx_params.seed  = 1234;
    ctx_params.n_ctx = 2048;

    llama_model * model = llama_load_model_from_file(params.model.c_str(), ctx_params);

    if (model == NULL) {
        fprintf(stderr, "%s: error: unable to load model\n", __func__);
        return 1;
    }

    llama_context * ctx = llama_new_context_with_model(model, ctx_params);

    if (ctx == NULL) {
        fprintf(stderr, "%s: error: failed to create the llama_context\n", __func__);
        return 1;
    }

    // tokenize the prompt
    std::vector<llama_token> tokens_list;
    tokens_list = ::llama_tokenize(ctx, params.prompt, true);

    const int n_ctx    = llama_n_ctx(ctx);
    const int n_kv_req = tokens_list.size() + (n_len - tokens_list.size());
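    // (with a single sequence, n_kv_req works out to exactly n_len)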

    LOG_TEE("\n%s: n_len = %d, n_ctx = %d, n_kv_req = %d\n", __func__, n_len, n_ctx, n_kv_req);

    // make sure the KV cache is big enough to hold all the prompt and generated tokens
    if (n_kv_req > n_ctx) {
        LOG_TEE("%s: error: n_kv_req > n_ctx, the required KV cache size is not big enough\n", __func__);
        LOG_TEE("%s:        either reduce n_parallel or increase n_ctx\n", __func__);
        return 1;
    }

    // print the prompt token-by-token
    fprintf(stderr, "\n");

    for (auto id : tokens_list) {
        fprintf(stderr, "%s", llama_token_to_piece(ctx, id).c_str());
    }

    fflush(stderr);

    // create a llama_batch with size 512
    // we use this object to submit token data for decoding
    llama_batch batch = llama_batch_init(512, 0);

    // evaluate the initial prompt
    batch.n_tokens = tokens_list.size();
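
    // each batch entry carries a token id, its position in the sequence, a sequence id,
    // and a flag telling llama_decode whether to compute logits for that position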
    for (int32_t i = 0; i < batch.n_tokens; i++) {
        batch.token[i]  = tokens_list[i];
        batch.pos[i]    = i;
        batch.seq_id[i] = 0;
        batch.logits[i] = false;
    }

    // llama_decode will output logits only for the last token of the prompt
    batch.logits[batch.n_tokens - 1] = true;

    if (llama_decode(ctx, batch, params.n_threads) != 0) {
        LOG_TEE("%s: llama_decode() failed\n", __func__);
        return 1;
    }

    // main loop
    int n_cur    = batch.n_tokens;
    int n_decode = 0;

    const auto t_main_start = ggml_time_us();

    while (n_cur <= n_len) {
        // sample the next token
        {
            auto   n_vocab = llama_n_vocab(ctx);
            auto * logits  = llama_get_logits_ith(ctx, batch.n_tokens - 1);
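            // logits points to the n_vocab unnormalized scores produced for the last
            // token of the previously decoded batch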

            std::vector<llama_token_data> candidates;
            candidates.reserve(n_vocab);

            for (llama_token token_id = 0; token_id < n_vocab; token_id++) {
                candidates.emplace_back(llama_token_data{ token_id, logits[token_id], 0.0f });
            }

            llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false };
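            // candidates_p is the array view the sampling API expects; 'false' marks
            // the entries as not yet sorted by logit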

            // sample the most likely token
            const llama_token new_token_id = llama_sample_token_greedy(ctx, &candidates_p);

            // is it an end of stream?
            if (new_token_id == llama_token_eos(ctx) || n_cur == n_len) {
                LOG_TEE("\n");
                break;
            }

            LOG_TEE("%s", llama_token_to_piece(ctx, new_token_id).c_str());
            fflush(stdout);

            // prepare the next batch
            batch.n_tokens = 0;

            // push this new token for next evaluation
            batch.token [batch.n_tokens] = new_token_id;
            batch.pos   [batch.n_tokens] = n_cur;
            batch.seq_id[batch.n_tokens] = 0;
            batch.logits[batch.n_tokens] = true;

            batch.n_tokens += 1;

            n_decode += 1;
        }

        n_cur += 1;

        // evaluate the current batch with the transformer model
        if (llama_decode(ctx, batch, params.n_threads)) {
            fprintf(stderr, "%s : failed to eval, return code %d\n", __func__, 1);
            return 1;
        }
    }

    LOG_TEE("\n");

    const auto t_main_end = ggml_time_us();

    LOG_TEE("%s: decoded %d tokens in %.2f s, speed: %.2f t/s\n",
            __func__, n_decode, (t_main_end - t_main_start) / 1000000.0f, n_decode / ((t_main_end - t_main_start) / 1000000.0f));

    llama_print_timings(ctx);

    fprintf(stderr, "\n");

    llama_batch_free(batch);

    llama_free(ctx);
    llama_free_model(model);

    llama_backend_free();

    return 0;
}