simple.cpp
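// Minimal llama.cpp example: load a model, tokenize a prompt, and greedily
// generate a short continuation.
//
// usage: ./simple MODEL_PATH [PROMPT]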

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif

#include "build-info.h"
#include "common.h"
#include "llama.h"

#include <cmath>
#include <cstdio>
#include <string>
#include <vector>
int main(int argc, char ** argv) {
    gpt_params params;

    if (argc == 1 || argv[1][0] == '-') {
        printf("usage: %s MODEL_PATH [PROMPT]\n", argv[0]);
        return 1;
    }

    if (argc >= 2) {
        params.model = argv[1];
    }

    if (argc >= 3) {
        params.prompt = argv[2];
    }

    if (params.prompt.empty()) {
        params.prompt = "Hello my name is";
    }
    // init LLM
    llama_backend_init(params.numa);

    llama_context_params ctx_params = llama_context_default_params();

    llama_model * model = llama_load_model_from_file(params.model.c_str(), ctx_params);

    if (model == NULL) {
        fprintf(stderr, "%s: error: unable to load model\n", __func__);
        return 1;
    }

    llama_context * ctx = llama_new_context_with_model(model, ctx_params);

    // tokenize the prompt
    std::vector<llama_token> tokens_list;
    tokens_list = ::llama_tokenize(ctx, params.prompt, true);

    const int max_context_size     = llama_n_ctx(ctx);
    const int max_tokens_list_size = max_context_size - 4;

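    // require the prompt to leave some headroom below the full context size
    // so that at least a few tokens can still be generated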
    if ((int) tokens_list.size() > max_tokens_list_size) {
        fprintf(stderr, "%s: error: prompt too long (%d tokens, max %d)\n", __func__, (int) tokens_list.size(), max_tokens_list_size);
        return 1;
    }

    fprintf(stderr, "\n\n");

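    // echo the tokenized prompt on stderr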
    for (auto id : tokens_list) {
        fprintf(stderr, "%s", llama_token_to_str(ctx, id).c_str());
    }

    fflush(stderr);

    // main loop

    // The LLM keeps a contextual cache (the KV cache) of previously evaluated tokens.
    // Usually, once this cache is full, a compressed context has to be recomputed from the previous
    // tokens (see "infinite text generation via context swapping" in the main example). In this
    // minimalist example, we simply stop the loop once the cache is full or an end-of-stream token
    // is sampled.
    const int n_gen = std::min(32, max_context_size);

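    // llama_get_kv_cache_token_count() reports how many tokens have been evaluated so far,
    // so the loop runs until the prompt plus the generated tokens reach n_gen in total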
    while (llama_get_kv_cache_token_count(ctx) < n_gen) {
        // evaluate the transformer
        if (llama_eval(ctx, tokens_list.data(), int(tokens_list.size()), llama_get_kv_cache_token_count(ctx), params.n_threads)) {
            fprintf(stderr, "%s : failed to eval\n", __func__);
            return 1;
        }

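        // the tokens just evaluated now live in the KV cache, so the list can be cleared;
        // from the next iteration on, only the newly sampled token has to be fed in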
        tokens_list.clear();

        // sample the next token
        llama_token new_token_id = 0;

        auto logits  = llama_get_logits(ctx);
        auto n_vocab = llama_n_vocab(ctx);

        std::vector<llama_token_data> candidates;
        candidates.reserve(n_vocab);

        for (llama_token token_id = 0; token_id < n_vocab; token_id++) {
            candidates.emplace_back(llama_token_data{ token_id, logits[token_id], 0.0f });
        }

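        // wrap the candidates in a llama_token_data_array (not yet sorted); greedy sampling
        // simply picks the candidate with the highest logit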
        llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false };

        new_token_id = llama_sample_token_greedy(ctx, &candidates_p);

        // is it an end of stream?
        if (new_token_id == llama_token_eos(ctx)) {
            fprintf(stderr, " [end of text]\n");
            break;
        }

        // print the new token:
        printf("%s", llama_token_to_str(ctx, new_token_id).c_str());
        fflush(stdout);

        // push this new token for the next evaluation
        tokens_list.push_back(new_token_id);
    }

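    // clean up: free the context and model, then shut down the backend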
    llama_free(ctx);
    llama_free_model(model);

    llama_backend_free();

    fprintf(stderr, "\n\n");

    return 0;
}