perplexity.cpp

#include "common.h"
#include "llama.h"
#include "build-info.h"
#include <algorithm>
#include <chrono>
#include <cmath>
#include <cstdio>
#include <ctime>
#include <vector>
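
// Standard softmax with the max-subtraction trick: since
//     exp(x_i - m) / sum_j exp(x_j - m) == exp(x_i) / sum_j exp(x_j)   for any constant m,
// shifting by m = max(x) keeps every exponential in (0, 1] and avoids overflow when the
// logits are large.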
std::vector<float> softmax(const std::vector<float>& logits) {
    std::vector<float> probs(logits.size());
    float max_logit = logits[0];
    for (float v : logits) max_logit = std::max(max_logit, v);
    double sum_exp = 0.0;
    for (size_t i = 0; i < logits.size(); i++) {
        // Subtract the maximum logit value from the current logit value for numerical stability
        const float logit = logits[i] - max_logit;
        const float exp_logit = expf(logit);
        sum_exp += exp_logit;
        probs[i] = exp_logit;
    }
    for (size_t i = 0; i < probs.size(); i++) probs[i] /= sum_exp;
    return probs;
}
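
// Quick illustrative check (not part of the tool):
//
//     const std::vector<float> p = softmax({1.0f, 2.0f, 3.0f});
//     // p is roughly {0.090, 0.245, 0.665}: the values sum to 1 and keep the logits' ordering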

void perplexity(llama_context * ctx, const gpt_params & params) {
    // Download: https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip?ref=salesforce-research
    // Run `./perplexity -m models/7B/ggml-model-q4_0.bin -f wiki.test.raw`
    // Output: `perplexity: 13.5106 [114/114]`
    auto tokens = ::llama_tokenize(ctx, params.prompt, true);

    int count = 0;
    int seq_count = tokens.size() / params.n_ctx;
    int n_vocab = llama_n_vocab(ctx);

    double nll = 0.0;
    fprintf(stderr, "%s : calculating perplexity over %d chunks, batch_size=%d\n", __func__, seq_count, params.n_batch);

    for (int i = 0; i < seq_count; ++i) {
        int start = i * params.n_ctx;
        int end   = start + params.n_ctx;

        std::vector<float> logits;
        int num_batches = (params.n_ctx + params.n_batch - 1) / params.n_batch;

        auto start_t = std::chrono::high_resolution_clock::now();
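
        // Each n_ctx-sized chunk is evaluated in pieces of at most n_batch tokens. Passing
        // j * params.n_batch as n_past keeps the previously evaluated pieces of this chunk
        // in the context, so later tokens still attend to them.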
        for (int j = 0; j < num_batches; ++j) {
            int batch_start = start + j * params.n_batch;
            int batch_size  = std::min(end - batch_start, params.n_batch);
            if (llama_eval(ctx, tokens.data() + batch_start, batch_size, j * params.n_batch, params.n_threads)) {
                fprintf(stderr, "%s : failed to eval\n", __func__);
                return;
            }
            auto batch_logits = llama_get_logits(ctx);
            logits.insert(logits.end(), batch_logits, batch_logits + batch_size * n_vocab);
        }

        auto end_t = std::chrono::high_resolution_clock::now();
        if (i == 0) {
            const float seconds = std::chrono::duration<float>(end_t - start_t).count();
            printf("%.2f seconds per pass - ETA ", seconds);
            int total_seconds = (int)(seconds * seq_count);
            if (total_seconds >= 60*60) {
                printf("%d hours ", total_seconds / (60*60));
                total_seconds = total_seconds % (60*60);
            }
            printf("%d minutes\n", total_seconds / 60);
        }

        // We get the logits for all the tokens in the context window (params.n_ctx)
        // from llama_eval above. Now, based on https://huggingface.co/docs/transformers/perplexity,
        // we calculate the perplexity over the last half of the window (so the model always has
        // some context to predict the token).
        //
        // We rely on the fact that attention in the forward pass only looks at previous
        // tokens here, so the logits returned for each token are an accurate representation
        // of what the model would have predicted at that point.
        //
        // For example, with a context window of 512, we compute perplexity for each of the
        // last 256 tokens. Then, we split the input up into context-window-sized chunks to
        // process the entire prompt.
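        //
        // The logits stored at position j are the model's prediction for the token that
        // follows position j, which is why the target below is tokens[start + j + 1].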
        for (int j = std::min(512, params.n_ctx / 2); j < params.n_ctx - 1; ++j) {
            // Calculate probability of next token, given the previous ones.
            std::vector<float> tok_logits(
                logits.begin() + j * n_vocab,
                logits.begin() + (j + 1) * n_vocab);
            float prob = softmax(tok_logits)[tokens[start + j + 1]];
            nll += -std::log(prob);
            ++count;
        }

        // perplexity is e^(average negative log-likelihood)
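        //
        //     PPL = exp(nll / count)
        //         = exp( (1/count) * sum_j -log p(token_j | preceding tokens) )
        //
        // nll and count accumulate across chunks, so each value printed below is the
        // running perplexity over all tokens scored so far, not just the current chunk.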
        printf("[%d]%.4lf,", i + 1, std::exp(nll / count));
        fflush(stdout);
    }
    printf("\n");
}

int main(int argc, char ** argv) {
    gpt_params params;
    params.model = "models/llama-7B/ggml-model.bin";

    params.n_batch = 512;
    if (gpt_params_parse(argc, argv, params) == false) {
        return 1;
    }

    params.perplexity = true;
    params.n_batch = std::min(params.n_batch, params.n_ctx);

    if (params.n_ctx > 2048) {
        fprintf(stderr, "%s: warning: model does not support context sizes greater than 2048 tokens (%d specified); "
                "expect poor results\n", __func__, params.n_ctx);
    }

    fprintf(stderr, "%s: build = %d (%s)\n", __func__, BUILD_NUMBER, BUILD_COMMIT);

    if (params.seed <= 0) {
        params.seed = time(NULL);
    }

    fprintf(stderr, "%s: seed = %d\n", __func__, params.seed);

    std::mt19937 rng(params.seed);
    if (params.random_prompt) {
        params.prompt = gpt_random_prompt(rng);
    }

    llama_context * ctx;

    // load the model
    {
        auto lparams = llama_context_default_params();

        lparams.n_ctx      = params.n_ctx;
        lparams.n_parts    = params.n_parts;
        lparams.seed       = params.seed;
        lparams.f16_kv     = params.memory_f16;
        // logits_all makes llama_eval keep logits for every token in the batch,
        // not just the last one, which the perplexity calculation above relies on
        lparams.logits_all = params.perplexity;
        lparams.use_mmap   = params.use_mmap;
        lparams.use_mlock  = params.use_mlock;
        lparams.embedding  = params.embedding;

        ctx = llama_init_from_file(params.model.c_str(), lparams);

        if (ctx == NULL) {
            fprintf(stderr, "%s: error: failed to load model '%s'\n", __func__, params.model.c_str());
            return 1;
        }
    }

    if (!params.lora_adapter.empty()) {
        int err = llama_apply_lora_from_file(ctx,
                                             params.lora_adapter.c_str(),
                                             params.lora_base.empty() ? NULL : params.lora_base.c_str(),
                                             params.n_threads);
        if (err != 0) {
            fprintf(stderr, "%s: error: failed to apply lora adapter\n", __func__);
            return 1;
        }
    }

    // print system information
    {
        fprintf(stderr, "\n");
        fprintf(stderr, "system_info: n_threads = %d / %d | %s\n",
                params.n_threads, std::thread::hardware_concurrency(), llama_print_system_info());
    }

    perplexity(ctx, params);

    llama_print_timings(ctx);
    llama_free(ctx);

    return 0;
}