speculative.cpp

#include "speculative.h"

#include "log.h"
#include "common.h"
#include "sampling.h"

#include <cstring>

#define SPEC_VOCAB_MAX_SIZE_DIFFERENCE  128
#define SPEC_VOCAB_CHECK_START_TOKEN_ID 5

struct common_speculative {
    struct llama_context  * ctx;  // draft-model context
    struct common_sampler * smpl; // sampler used on the draft model

    llama_batch  batch;  // reusable batch for decoding on the draft model
    llama_tokens prompt; // tokens currently stored in the draft KV cache
};
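
// create the speculative-decoding state for the given draft-model context
// (allocates the draft batch and a hard-coded top-k sampler, see the TODO below)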
struct common_speculative * common_speculative_init(
        struct llama_context * ctx_dft) {
    auto * result = new common_speculative {
        /* .ctx    = */ ctx_dft,
        /* .smpl   = */ nullptr,
        /* .batch  = */ llama_batch_init(llama_n_batch(ctx_dft), 0, 1),
        /* .prompt = */ {},
    };

    // TODO: optimize or pass from outside?
#if 0
    {
        common_params_sampling params;
        params.no_perf = false;

        params.top_k = 40;
        params.top_p = 0.9;

        params.samplers = {
            COMMON_SAMPLER_TYPE_TOP_K,
            COMMON_SAMPLER_TYPE_TOP_P,
            COMMON_SAMPLER_TYPE_INFILL,
        };

        result->smpl = common_sampler_init(llama_get_model(ctx_dft), params);
    }
#else
    {
        common_params_sampling params;
        params.no_perf = false;

        params.top_k = 10;

        params.samplers = {
            COMMON_SAMPLER_TYPE_TOP_K,
        };

        result->smpl = common_sampler_init(llama_get_model(ctx_dft), params);
    }
#endif

    return result;
}

void common_speculative_free(struct common_speculative * spec) {
    common_sampler_free(spec->smpl);

    llama_batch_free(spec->batch);

    delete spec;
}
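
// check whether the draft model can be used to speculate for the target model:
// vocab type, BOS/EOS handling, vocab size (within SPEC_VOCAB_MAX_SIZE_DIFFERENCE)
// and the token texts (from SPEC_VOCAB_CHECK_START_TOKEN_ID onwards) must all match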
bool common_speculative_are_compatible(
        const struct llama_context * ctx_tgt,
        const struct llama_context * ctx_dft) {
    const struct llama_model * model_tgt = llama_get_model(ctx_tgt);
    const struct llama_model * model_dft = llama_get_model(ctx_dft);

    const enum llama_vocab_type vocab_type_tgt = llama_vocab_type(model_tgt);
    LOG_DBG("%s: vocab_type tgt: %d\n", __func__, vocab_type_tgt);

    const enum llama_vocab_type vocab_type_dft = llama_vocab_type(model_dft);
    LOG_DBG("%s: vocab_type dft: %d\n", __func__, vocab_type_dft);

    if (vocab_type_tgt != vocab_type_dft) {
        LOG_ERR("%s: draft model vocab type must match target model to use speculation but "
                "vocab_type_dft = %d while vocab_type_tgt = %d\n", __func__, vocab_type_dft, vocab_type_tgt);
        return false;
    }

    if (llama_add_bos_token(model_tgt) != llama_add_bos_token(model_dft) ||
        llama_add_eos_token(model_tgt) != llama_add_eos_token(model_dft) ||
        llama_token_bos(model_tgt) != llama_token_bos(model_dft) ||
        llama_token_eos(model_tgt) != llama_token_eos(model_dft)) {
        LOG_ERR("%s: draft model special tokens must match target model to use speculation\n", __func__);
        return false;
    }

    {
        const int n_vocab_tgt = llama_n_vocab(model_tgt);
        const int n_vocab_dft = llama_n_vocab(model_dft);

        const int vocab_diff = std::abs(n_vocab_tgt - n_vocab_dft);

        if (vocab_diff > SPEC_VOCAB_MAX_SIZE_DIFFERENCE) {
            LOG_ERR("%s: draft model vocab must closely match target model to use speculation but "
                    "target vocab size %d does not match draft vocab size %d - difference %d, max allowed %d\n",
                    __func__, n_vocab_tgt, n_vocab_dft, vocab_diff, SPEC_VOCAB_MAX_SIZE_DIFFERENCE);
            return false;
        }

        for (int i = SPEC_VOCAB_CHECK_START_TOKEN_ID; i < std::min(n_vocab_tgt, n_vocab_dft); ++i) {
            const char * token_text_tgt = llama_token_get_text(model_tgt, i);
            const char * token_text_dft = llama_token_get_text(model_dft, i);

            if (std::strcmp(token_text_tgt, token_text_dft) != 0) {
                LOG_ERR("%s: draft model vocab must match target model to use speculation but "
                        "token %d content differs - target '%s', draft '%s'\n", __func__, i,
                        common_token_to_piece(ctx_tgt, i).c_str(),
                        common_token_to_piece(ctx_dft, i).c_str());
                return false;
            }
        }
    }

    return true;
}
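
// draft up to params.n_draft tokens with the draft model, continuing from prompt_tgt plus id_last;
// as much of the previous draft context as possible is reused (see params.n_reuse), and drafting
// stops early as soon as the top candidate's probability drops below params.p_min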
llama_tokens common_speculative_gen_draft(
        struct common_speculative * spec,
        struct common_speculative_params params,
        const llama_tokens & prompt_tgt,
        llama_token id_last) {
    auto & batch  = spec->batch;
    auto & ctx    = spec->ctx;
    auto & smpl   = spec->smpl;
    auto & prompt = spec->prompt;

    int reuse_i = 0;
    int reuse_n = 0;

    const int n_ctx = llama_n_ctx(ctx) - params.n_draft;

    const int i_start = std::max<int>(0, (int) prompt_tgt.size() - n_ctx);

    // reuse as much as possible from the old draft context
    // ideally, the draft context should be as big as the target context and we will always reuse the entire prompt
    for (int i = 0; i < (int) prompt.size(); ++i) {
        int cur = 0;
        while (i_start + cur < (int) prompt_tgt.size() &&
               i       + cur < (int) prompt.size() &&
               prompt_tgt[i_start + cur] == prompt[i + cur]) {
            cur++;
        }

        if ((cur >= params.n_reuse || n_ctx >= (int) prompt_tgt.size()) && cur > reuse_n) {
            reuse_i = i;
            reuse_n = cur;
        }
    }

    LOG_DBG("%s: reuse_i = %d, reuse_n = %d, prompt = %d\n", __func__, reuse_i, reuse_n, (int) prompt.size());

    llama_tokens result;
    result.reserve(params.n_draft);

    if (reuse_n == 0) {
        llama_kv_cache_clear(ctx);

        prompt.clear();
    } else {
        // this happens when a previous draft has been discarded (for example, due to being too small), but the
        // target model agreed with it. in this case, we simply pass back the previous results to save compute
        if (reuse_i + reuse_n < (int) prompt.size() && prompt[reuse_i + reuse_n] == id_last) {
            for (int i = reuse_i + reuse_n + 1; i < (int) prompt.size(); ++i) {
                result.push_back(prompt[i]);

                if (params.n_draft <= (int) result.size()) {
                    break;
                }
            }

            return result;
        }

        if (reuse_i > 0) {
            // drop the cached tokens before the reused region and shift the rest back to position 0
            llama_kv_cache_seq_rm (ctx, 0, 0, reuse_i);
            llama_kv_cache_seq_add(ctx, 0, reuse_i, -1, -reuse_i);

            prompt.erase(prompt.begin(), prompt.begin() + reuse_i);
        }

        if (reuse_n < (int) prompt.size()) {
            // drop any cached tokens after the reused region
            llama_kv_cache_seq_rm (ctx, 0, reuse_n, -1);

            prompt.erase(prompt.begin() + reuse_n, prompt.end());
        }
    }

    // prepare a batch to evaluate any new tokens in the prompt
    common_batch_clear(batch);

    for (size_t i = i_start + reuse_n; i < prompt_tgt.size(); ++i) {
        //LOG_DBG("i = %d, i_start = %d, reuse_n = %d, i - i_start = %d, id = %6d\n", i, i_start, reuse_n, i - i_start, prompt_tgt[i]);
        common_batch_add(batch, prompt_tgt[i], i - i_start, { 0 }, false);

        prompt.push_back(prompt_tgt[i]);
    }

    // we should rarely end-up here during normal decoding
    if (batch.n_tokens > 0) {
        //LOG_DBG("%s: draft prompt batch: %s\n", __func__, string_from(ctx, batch).c_str());

        llama_decode(ctx, batch);
    }

    const llama_pos n_past = prompt.size();

    LOG_DBG("%s: n_past = %d\n", __func__, n_past);

    common_batch_clear(batch);
    common_batch_add (batch, id_last, n_past, { 0 }, true);

    prompt.push_back(id_last);

    //LOG_DBG("%s: draft prompt: %s\n", __func__, string_from(ctx, prompt).c_str());

    llama_decode(ctx, batch);

    common_sampler_reset(smpl);

    // sample n_draft tokens from the draft model
    for (int i = 0; i < params.n_draft; ++i) {
        common_batch_clear(batch);

        common_sampler_sample(smpl, ctx, 0, true);

        const auto * cur_p = common_sampler_get_candidates(smpl);

        for (int k = 0; k < std::min(3, (int) cur_p->size); ++k) {
            LOG_DBG(" - draft candidate %3d, pos %3d: %6d (%8.3f) '%s'\n",
                    k, i, cur_p->data[k].id, cur_p->data[k].p, common_token_to_piece(ctx, cur_p->data[k].id).c_str());
        }

        // add drafted token for each sequence
        const llama_token id = cur_p->data[0].id;

        // only collect very high-confidence draft tokens
        if (cur_p->data[0].p < params.p_min) {
            break;
        }

        common_sampler_accept(smpl, id, true);

        result.push_back(id);

        if (params.n_draft <= (int) result.size()) {
            break;
        }

        common_batch_add(batch, id, n_past + i + 1, { 0 }, true);

        // evaluate the drafted tokens on the draft model
        llama_decode(ctx, batch);

        prompt.push_back(id);
    }

    return result;
}
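
// Example usage (a minimal sketch, not part of the original file): it assumes the caller has
// already created a target context `ctx_tgt`, a draft context `ctx_dft`, tokenized the prompt
// into `prompt_tgt` and sampled the last target token `id_last`. The helper name and the
// parameter values are illustrative only; verification of the returned draft against the
// target model happens in the caller and is not shown here.
#if 0
static void example_gen_draft(
        struct llama_context * ctx_tgt,
        struct llama_context * ctx_dft,
        const llama_tokens & prompt_tgt,
        llama_token id_last) {
    if (!common_speculative_are_compatible(ctx_tgt, ctx_dft)) {
        return; // this draft model cannot be used to speculate for this target model
    }

    struct common_speculative * spec = common_speculative_init(ctx_dft);

    struct common_speculative_params params;
    params.n_draft = 16;   // maximum number of tokens to draft per call
    params.n_reuse = 256;  // minimum matching length required to reuse the old draft context
    params.p_min   = 0.9f; // only keep draft tokens with at least this probability

    // an empty result simply means "no confident draft this step"
    const llama_tokens draft = common_speculative_gen_draft(spec, params, prompt_tgt, id_last);

    // ... have the target model verify `draft` ...

    common_speculative_free(spec);
}
#endif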