save-load-state.cpp

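// This example evaluates a prompt, saves the full context state (rng, logits,
// embedding and kv_cache) to a file, generates some tokens, then restores the
// state into a fresh context and verifies that the second generation matches
// the first.
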
#include "common.h"
#include "llama.h"

#include <vector>
#include <cstdio>

int main(int argc, char ** argv) {
    gpt_params params;

    params.prompt = "The quick brown fox";

    if (!gpt_params_parse(argc, argv, params)) {
        return 1;
    }

    print_build_info();

    if (params.n_predict < 0) {
        params.n_predict = 16;
    }

    auto n_past = 0;

    std::string result0;
    std::string result1;

    // init
    llama_model * model;
    llama_context * ctx;

    std::tie(model, ctx) = llama_init_from_gpt_params(params);
    if (model == nullptr || ctx == nullptr) {
        fprintf(stderr, "%s : failed to init\n", __func__);
        return 1;
    }

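    // the prompt is evaluated once up front so that its kv_cache entries are
    // captured in the state snapshot below - the second run resumes from the
    // snapshot without re-evaluating it
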
    // tokenize prompt
    auto tokens = llama_tokenize(ctx, params.prompt, true);

    // evaluate prompt
    if (llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size(), n_past, 0))) {
        fprintf(stderr, "\n%s : failed to evaluate prompt\n", __func__);
        llama_free(ctx);
        llama_free_model(model);
        return 1;
    }
    n_past += tokens.size();

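    // llama_get_state_size() returns the maximum size in bytes of the state
    // (rng, logits, embedding and kv_cache); llama_copy_state_data() writes it
    // into a buffer and llama_set_state_data() restores it into a context
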
    // save state (rng, logits, embedding and kv_cache) to file
    {
        std::vector<uint8_t> state_mem(llama_get_state_size(ctx));

        {
            FILE * fp_write = fopen("dump_state.bin", "wb");
            if (fp_write == nullptr) {
                fprintf(stderr, "\n%s : failed to open dump_state.bin for writing\n", __func__);
                llama_free(ctx);
                llama_free_model(model);
                return 1;
            }
            llama_copy_state_data(ctx, state_mem.data()); // could also copy directly to a memory-mapped file
            fwrite(state_mem.data(), 1, state_mem.size(), fp_write);
            fclose(fp_write);
        }
    }

    // save state (last tokens)
    // n_past is tracked by the application and is not part of the state blob,
    // so it has to be remembered separately
    const auto n_past_saved = n_past;

    // first run
    printf("\nfirst run: %s", params.prompt.c_str());

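    // generation loop: turn the raw logits of the last evaluated token into a
    // candidate list, sample the next token (which advances the context's
    // internal rng), print it and feed it back through llama_decode()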
    for (auto i = 0; i < params.n_predict; i++) {
        auto * logits  = llama_get_logits(ctx);
        auto   n_vocab = llama_n_vocab(model);

        std::vector<llama_token_data> candidates;
        candidates.reserve(n_vocab);
        for (llama_token token_id = 0; token_id < n_vocab; token_id++) {
            candidates.emplace_back(llama_token_data{token_id, logits[token_id], 0.0f});
        }
        llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false };
        auto next_token     = llama_sample_token(ctx, &candidates_p);
        auto next_token_str = llama_token_to_piece(ctx, next_token);

        printf("%s", next_token_str.c_str());
        result0 += next_token_str;

        if (llama_decode(ctx, llama_batch_get_one(&next_token, 1, n_past, 0))) {
            fprintf(stderr, "\n%s : failed to evaluate\n", __func__);
            llama_free(ctx);
            llama_free_model(model);
            return 1;
        }
        n_past += 1;
    }

    printf("\n\n");

    // free old context
    llama_free(ctx);

    // make new context
    auto * ctx2 = llama_new_context_with_model(model, llama_context_params_from_gpt_params(params));
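    // the state blob is only expected to load correctly into a context created
    // from the same model with the same parameters
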
    printf("\nsecond run: %s", params.prompt.c_str());

    // load state (rng, logits, embedding and kv_cache) from file
    {
        std::vector<uint8_t> state_mem(llama_get_state_size(ctx2));

        FILE * fp_read = fopen("dump_state.bin", "rb");
        if (fp_read == nullptr) {
            fprintf(stderr, "\n%s : failed to open dump_state.bin for reading\n", __func__);
            llama_free(ctx2);
            llama_free_model(model);
            return 1;
        }

        const size_t ret = fread(state_mem.data(), 1, state_mem.size(), fp_read);
        if (ret != state_mem.size()) {
            fprintf(stderr, "\n%s : failed to read state\n", __func__);
            llama_free(ctx2);
            llama_free_model(model);
            return 1;
        }

        llama_set_state_data(ctx2, state_mem.data());

        fclose(fp_read);
    }

    // restore state (last tokens)
    n_past = n_past_saved;

    // second run
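    // with the rng restored together with the logits and kv_cache, this loop
    // should sample exactly the same tokens as the first run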
    for (auto i = 0; i < params.n_predict; i++) {
        auto * logits  = llama_get_logits(ctx2);
        auto   n_vocab = llama_n_vocab(model);

        std::vector<llama_token_data> candidates;
        candidates.reserve(n_vocab);
        for (llama_token token_id = 0; token_id < n_vocab; token_id++) {
            candidates.emplace_back(llama_token_data{token_id, logits[token_id], 0.0f});
        }
        llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false };
        auto next_token     = llama_sample_token(ctx2, &candidates_p);
        auto next_token_str = llama_token_to_piece(ctx2, next_token);

        printf("%s", next_token_str.c_str());
        result1 += next_token_str;

        if (llama_decode(ctx2, llama_batch_get_one(&next_token, 1, n_past, 0))) {
            fprintf(stderr, "\n%s : failed to evaluate\n", __func__);
            llama_free(ctx2);
            llama_free_model(model);
            return 1;
        }
        n_past += 1;
    }

    printf("\n");

    llama_free(ctx2);
    llama_free_model(model);

    if (result0 != result1) {
        fprintf(stderr, "\n%s : error : the 2 generations are different\n", __func__);
        return 1;
    }

    fprintf(stderr, "\n%s : success\n", __func__);

    return 0;
}