beam-search.cpp

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif

#include "common.h"
#include "llama.h"
#include "build-info.h"

#include <cassert>
#include <cinttypes>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>

#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
#include <signal.h>
#include <unistd.h>
#elif defined (_WIN32)
#define WIN32_LEAN_AND_MEAN
#ifndef NOMINMAX
#   define NOMINMAX
#endif
#include <windows.h>
#include <signal.h>
#endif

// Used for debugging to print out beam tokens.
struct ostream_beam_view {
    llama_context * ctx;
    llama_beam_view beam_view;
};

std::ostream & operator<<(std::ostream & os, const ostream_beam_view & obv) {
    os << "p(" << obv.beam_view.p << ") eob(" << std::boolalpha << obv.beam_view.eob << ") tokens(";
    for (size_t i = 0 ; i < obv.beam_view.n_tokens ; ++i) {
        os << llama_token_to_piece(obv.ctx, obv.beam_view.tokens[i]);
    }
    return os << ')';
}
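
// Usage sketch: wrap a context and a beam view, then stream the wrapper, e.g.
//   std::cout << ostream_beam_view{ctx, beams_state.beam_views[i]} << '\n';
// This mirrors the debug loop in beam_search_callback() below.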

// Put here anything you want passed back into beam_search_callback().
struct beam_search_callback_data {
    llama_context * ctx;
    std::vector<llama_token> response;
};

// In this case, end-of-beam (eob) is equivalent to end-of-sentence (eos), but the two need not
// always coincide: for example, eob can also be flagged on reaching a maximum token length,
// hitting a stop word, etc.
bool is_at_eob(const beam_search_callback_data & callback_data, const llama_token * tokens, const size_t n_tokens) {
    return n_tokens && tokens[n_tokens-1] == llama_token_eos(callback_data.ctx);
}
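
// A minimal sketch of the alternative mentioned above: treating a caller-chosen
// maximum length as end-of-beam. The helper and its max_tokens parameter are
// illustrative only, not part of the llama.cpp API:
//
//   static bool is_at_max_length(const size_t n_tokens, const size_t max_tokens) {
//       return n_tokens >= max_tokens;
//   }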

// Function matching type llama_beam_search_callback_fn_t.
// This custom callback example is called each time the beams' lengths increase:
//  * Show progress by printing ',' followed by the number of convergent beam tokens, if any.
//  * When all beams converge to a common prefix, it is made available in beams_state.beam_views[0].
// This is also called when the stop condition is met.
// Collects tokens into the std::vector<llama_token> response pointed to by callback_data.
void beam_search_callback(void * callback_data_ptr, llama_beams_state beams_state) {
    auto & callback_data = *static_cast<beam_search_callback_data *>(callback_data_ptr);
    // Mark beams as EOB if they have reached end-of-sentence.
    for (size_t i = 0 ; i < beams_state.n_beams ; ++i) {
        llama_beam_view & beam_view = beams_state.beam_views[i];
        if (!beam_view.eob && is_at_eob(callback_data, beam_view.tokens, beam_view.n_tokens)) {
            beam_view.eob = true;
        }
    }
    printf(","); // Show progress
    if (const size_t n = beams_state.common_prefix_length) {
        // All beams now share their first n tokens; append them to the response.
        callback_data.response.resize(callback_data.response.size() + n);
        assert(0u < beams_state.n_beams);
        const llama_token * tokens = beams_state.beam_views[0].tokens;
        std::copy(tokens, tokens + n, callback_data.response.end() - n);
        printf("%zu", n);
    }
    fflush(stdout);
#if 1 // DEBUG: print current beams for this iteration
    std::cout << "\n\nCurrent beams (last_call=" << beams_state.last_call << "):\n";
    for (size_t i = 0 ; i < beams_state.n_beams ; ++i) {
        std::cout << "beams[" << i << "]: " << ostream_beam_view{callback_data.ctx, beams_state.beam_views[i]} << std::endl;
    }
#endif
}
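
// Note: the beam-search driver is assumed to invoke this callback one final time with
// beams_state.last_call == true, once all beams have converged; under that assumption
// callback_data.response then holds the complete generated sequence.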

int main(int argc, char ** argv)
{
    gpt_params params;
    //params.n_gpu_layers = 200;

    //---------------------------------
    // Print help :
    //---------------------------------

    if ( argc < 2 || argv[1][0] == '-' )
    {
        printf( "Usage: %s MODEL_PATH [BEAM_WIDTH=2] [PROMPT]\n" , argv[0] );
        return 1 ;
    }

    //---------------------------------
    // Load parameters :
    //---------------------------------

    params.model = argv[1];

    params.n_beams = 2 < argc ? std::stoi(argv[2]) : 2;

    if ( argc > 3 )
    {
        params.prompt = argv[3];
    }

    if ( params.prompt.empty() )
    {
        params.prompt = "### Request:\nHow many countries are there?\n\n### Response:\n";
    }
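
    // Example invocation (the model path is illustrative):
    //   ./beam-search ./models/7B/model.gguf 4 "### Request:\nHello\n\n### Response:\n"
    // BEAM_WIDTH defaults to 2 and PROMPT to the request above when omitted.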

    //---------------------------------
    // Init LLM :
    //---------------------------------

    llama_backend_init(params.numa);

    llama_model * model;
    llama_context * ctx;

    std::tie(model, ctx) = llama_init_from_gpt_params( params );

    if ( model == NULL )
    {
        fprintf( stderr , "%s: error: unable to load model\n" , __func__ );
        return 1;
    }

    //---------------------------------
    // Tokenize the prompt :
    //---------------------------------

    // The trailing 'true' asks the tokenizer to prepend the BOS token.
    std::vector<llama_token> tokens_list = llama_tokenize(ctx, params.prompt, true);

    const size_t max_context_size     = llama_n_ctx( ctx );
    const size_t max_tokens_list_size = max_context_size - 4 ;

    if (tokens_list.size() > max_tokens_list_size)
    {
        fprintf( stderr , "%s: error: prompt too long (%zu tokens, max %zu)\n" ,
                 __func__ , tokens_list.size() , max_tokens_list_size );
        return 1;
    }

    fprintf( stderr, "\n\n" );

    // Print the tokens from the prompt :

    for ( auto id : tokens_list )
    {
        std::cout << llama_token_to_piece(ctx, id);
    }
    std::cout << std::flush;

    // Evaluate the prompt so the beams start from its context.
    int n_past = llama_get_kv_cache_token_count(ctx);
    if (llama_eval(ctx, tokens_list.data(), tokens_list.size(), n_past, params.n_threads))
    {
        fprintf(stderr, "%s : failed to eval prompt.\n" , __func__ );
        return 1;
    }
    n_past += tokens_list.size();

    beam_search_callback_data callback_data{ctx, {}};
    size_t const beam_width = static_cast<size_t>(params.n_beams);
    int const n_predict = 256;
    llama_beam_search(ctx, beam_search_callback, &callback_data, beam_width, n_past, n_predict, params.n_threads);
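
    // Parameter sketch: beam_width beams continue from the n_past tokens already
    // evaluated and generate at most n_predict new tokens, invoking
    // beam_search_callback() above as the beams grow.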

    std::cout << "\n\n";
    for (llama_token const token_id : callback_data.response) {
        std::cout << llama_token_to_piece(ctx, token_id);
    }
    std::cout << std::endl;

    llama_free( ctx );
    llama_free_model( model );
    llama_backend_free();

    return 0;
}