speculative.cpp

#include "common.h"
#include "llama.h"

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <random>
#include <set>
#include <string>
#include <vector>

#define SPEC_VOCAB_MAX_SIZE_DIFFERENCE  100
#define SPEC_VOCAB_CHECK_START_TOKEN_ID 5
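
// These thresholds bound the vocab compatibility check performed below: the target and draft
// vocab sizes may differ by at most SPEC_VOCAB_MAX_SIZE_DIFFERENCE entries, and token texts
// are compared starting from SPEC_VOCAB_CHECK_START_TOKEN_ID (the first few ids, which
// typically hold special/control tokens, are skipped).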

struct seq_draft {
    bool active   = false;  // sequence is a live candidate in the current verification pass
    bool drafting = false;  // sequence is still being extended by the draft model
    bool skip     = false;  // skip this sequence for the current drafting step (freshly split branch)

    int              i_batch_dft = 0;  // index of this sequence's last token in the draft batch
    std::vector<int> i_batch_tgt;      // per drafted token, its index in the target batch

    std::vector<llama_token>                   tokens;  // drafted tokens
    std::vector<std::vector<llama_token_data>> dists;   // draft-model distribution for each drafted token

    struct gpt_sampler * smpl = nullptr;  // per-sequence sampler state for the draft model
};

int main(int argc, char ** argv) {
    gpt_params params;

    if (!gpt_params_parse(argc, argv, params)) {
        gpt_params_print_usage(argc, argv, params);
        return 1;
    }

    if (params.model_draft.empty()) {
        fprintf(stderr, "%s: error: --model-draft is required\n", __func__);
        return 1;
    }

    // max number of parallel drafting sequences (i.e. tree branches)
    const int n_seq_dft = params.n_parallel;

    // probability threshold for splitting a draft branch (only for n_seq_dft > 1)
    const float p_split = params.p_split;

    std::default_random_engine rng(params.sparams.seed);
    std::uniform_real_distribution<> u_dist;

#ifndef LOG_DISABLE_LOGS
    log_set_target(log_filename_generator("speculative", "log"));
    LOG_TEE("Log start\n");
    log_dump_cmdline(argc, argv);
#endif // LOG_DISABLE_LOGS

    // init llama.cpp
    llama_backend_init();
    llama_numa_init(params.numa);

    llama_model * model_tgt = NULL;
    llama_model * model_dft = NULL;

    llama_context * ctx_tgt = NULL;
    llama_context * ctx_dft = NULL;

    // load the target model
    llama_init_result llama_init_tgt = llama_init_from_gpt_params(params);
    model_tgt = llama_init_tgt.model;
    ctx_tgt   = llama_init_tgt.context;

    // load the draft model
    params.model        = params.model_draft;
    params.n_gpu_layers = params.n_gpu_layers_draft;
    if (params.draft_cpuparams.n_threads > 0) {
        params.cpuparams.n_threads = params.draft_cpuparams.n_threads;
    }

    params.cpuparams_batch.n_threads = params.draft_cpuparams_batch.n_threads;
    llama_init_result llama_init_dft = llama_init_from_gpt_params(params);
    model_dft = llama_init_dft.model;
    ctx_dft   = llama_init_dft.context;

    const enum llama_vocab_type vocab_type_tgt = llama_vocab_type(model_tgt);
    LOG("vocab_type tgt: %d\n", vocab_type_tgt);

    const enum llama_vocab_type vocab_type_dft = llama_vocab_type(model_dft);
    LOG("vocab_type dft: %d\n", vocab_type_dft);

    if (vocab_type_tgt != vocab_type_dft) {
        fprintf(stderr, "%s: error: draft model vocab type must match target model to use speculation but ", __func__);
        fprintf(stderr, "vocab_type_dft = %d while vocab_type_tgt = %d\n", vocab_type_dft, vocab_type_tgt);
        return 1;
    }

    if (
        llama_add_bos_token(model_tgt) != llama_add_bos_token(model_dft) ||
        llama_add_eos_token(model_tgt) != llama_add_eos_token(model_dft) ||
        llama_token_bos(model_tgt) != llama_token_bos(model_dft) ||
        llama_token_eos(model_tgt) != llama_token_eos(model_dft)
    ) {
        fprintf(stderr, "%s: error: draft model special tokens must match target model to use speculation\n", __func__);
        return 1;
    }

    {
        const int n_vocab_tgt = llama_n_vocab(model_tgt);
        const int n_vocab_dft = llama_n_vocab(model_dft);
        const int vocab_diff  = n_vocab_tgt > n_vocab_dft
            ? n_vocab_tgt - n_vocab_dft
            : n_vocab_dft - n_vocab_tgt;

        if (vocab_diff > SPEC_VOCAB_MAX_SIZE_DIFFERENCE) {
            fprintf(stderr, "%s: error: draft model vocab must closely match target model to use speculation but ", __func__);
            fprintf(stderr, "target vocab size %d does not match draft vocab size %d - difference %d, max allowed %d\n",
                    n_vocab_tgt, llama_n_vocab(model_dft), vocab_diff, SPEC_VOCAB_MAX_SIZE_DIFFERENCE);
            return 1;
        }

        for (int i = SPEC_VOCAB_CHECK_START_TOKEN_ID; i < std::min(n_vocab_tgt, n_vocab_dft); ++i) {
            const char * token_text_tgt = llama_token_get_text(model_tgt, i);
            const char * token_text_dft = llama_token_get_text(model_dft, i);
            if (std::strcmp(token_text_tgt, token_text_dft) != 0) {
                fprintf(stderr, "%s: error: draft model vocab must match target model to use speculation but ", __func__);
                fprintf(stderr, "token %d content differs - target '%s', draft '%s'\n", i,
                        llama_token_to_piece(ctx_tgt, i).c_str(),
                        llama_token_to_piece(ctx_dft, i).c_str());
                return 1;
            }
        }
    }

    // Tokenize the prompt
    std::vector<llama_token> inp;
    inp = ::llama_tokenize(ctx_tgt, params.prompt, true, true);

    const int max_context_size     = llama_n_ctx(ctx_tgt);
    const int max_tokens_list_size = max_context_size - 4;

    if ((int) inp.size() > max_tokens_list_size) {
        fprintf(stderr, "%s: error: prompt too long (%d tokens, max %d)\n", __func__, (int) inp.size(), max_tokens_list_size);
        return 1;
    }

    fprintf(stderr, "\n\n");

    for (auto id : inp) {
        fprintf(stderr, "%s", llama_token_to_piece(ctx_tgt, id).c_str());
    }

    fflush(stderr);

    const int n_input = inp.size();

    const auto t_enc_start = ggml_time_us();

    // eval the prompt with both models
    // note: the last prompt token is decoded separately for the target model so that its logits
    //       end up at index 0 of the most recent batch, which is where drafts[0].i_batch_tgt[0]
    //       points for the first verification step
    llama_decode(ctx_tgt, llama_batch_get_one( inp.data(), n_input - 1, 0,           0));
    llama_decode(ctx_tgt, llama_batch_get_one(&inp.back(),           1, n_input - 1, 0));
    llama_decode(ctx_dft, llama_batch_get_one( inp.data(), n_input,     0,           0));

    const auto t_enc_end = ggml_time_us();

    // the 2 models should have the same vocab
    //GGML_ASSERT(n_vocab == llama_n_vocab(model_dft));

    // how many tokens to draft each time
    int n_draft = params.n_draft;

    int n_predict = 0;
    int n_drafted = 0;
    int n_accept  = 0;

    int n_past_tgt = inp.size();
    int n_past_dft = inp.size();

    // used to determine end of generation
    bool has_eos = false;

    // target model sampling context (reuse the llama_context's sampling instance)
    struct gpt_sampler * smpl = gpt_sampler_init(model_tgt, params.sparams);

    struct llama_sampler * softmax = llama_sampler_init_softmax();

    // draft sequence data
    std::vector<seq_draft> drafts(n_seq_dft);

    for (int s = 0; s < n_seq_dft; ++s) {
        // allocate gpt_sampler for each draft sequence
        drafts[s].smpl = gpt_sampler_init(model_dft, params.sparams);
    }

    llama_batch batch_dft = llama_batch_init(params.n_ctx, 0, 1);
    llama_batch batch_tgt = llama_batch_init(params.n_ctx, 0, n_seq_dft);

    const auto t_dec_start = ggml_time_us();

    // sample from the last token of the prompt
    drafts[0].i_batch_tgt.resize(1);
    drafts[0].i_batch_tgt[0] = 0;
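
    // main speculative decoding loop: each iteration first verifies the tokens drafted in the
    // previous iteration against the target model (accepting a prefix of them), then drafts a
    // new batch of up to n_draft tokens with the draft model using tree-based branching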
    while (true) {
        std::set<int> active_seqs = {};

        // print current draft sequences
        for (int s = 0; s < n_seq_dft; ++s) {
            if (!drafts[s].active) {
                continue;
            }

            active_seqs.insert(s);
            const auto & tokens = drafts[s].tokens;

            LOG("draft %d: %s\n", s, LOG_TOKENS_TOSTR_PRETTY(ctx_dft, tokens).c_str());
        }

        int i_dft  = 0;
        int s_keep = 0;

        llama_token token_id;
        std::string token_str;

        // loop until we fail to accept a drafted token or we run out of drafted tokens
        while (true) {
            // check if the target token matches any of the drafts
            // for stochastic sampling, attempt to match the token with the drafted tokens
            {
                bool accept = false;
                if (params.sparams.temp > 0) {
                    // stochastic verification
                    gpt_sampler_sample(smpl, ctx_tgt, drafts[s_keep].i_batch_tgt[i_dft], true);

                    auto & dist_tgt = *gpt_sampler_get_candidates(smpl);

                    float p_tgt = 0.0f;
                    float p_dft = 0.0f;
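
                    // speculative sampling acceptance test: a drafted token t is accepted with
                    // probability min(1, p_tgt(t) / p_dft(t)); on rejection, the target distribution
                    // is replaced by the normalized residual max(0, p_tgt - p_dft) before trying the
                    // next active sequence (or sampling directly from the residual if none remains)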
                    while (active_seqs.size() > 0) {
                        // randomly select a sequence to verify from active sequences
                        std::uniform_int_distribution<unsigned int> u_int_dist(0, active_seqs.size() - 1);
                        int s = *std::next(active_seqs.begin(), u_int_dist(rng));
                        if (i_dft >= (int) drafts[s].tokens.size()) {
                            drafts[s].active = false;
                            active_seqs.erase(s);
                            continue;
                        }
                        if (accept) {
                            // if we already accepted a token, we can skip the rest
                            if (drafts[s].tokens[i_dft] != drafts[s_keep].tokens[i_dft]) {
                                drafts[s].active = false;
                                active_seqs.erase(s);
                            }
                            continue;
                        }

                        LOG("verifying sequence #%d at pos #%d from %d active sequence(s)\n", s, i_dft, (int) active_seqs.size());
                        float r = u_dist(rng);
                        llama_token_data_array dist_dft = { drafts[s].dists[i_dft].data(), drafts[s].dists[i_dft].size(), LLAMA_TOKEN_NULL, true };

                        //GGML_ASSERT(dist_tgt.size <= dist_dft.size);

                        // acquire the token probabilities assigned by the draft and target models
                        for (size_t i = 0; i < dist_tgt.size; i++) {
                            if (dist_tgt.data[i].id == drafts[s].tokens[i_dft]) {
                                p_tgt = dist_tgt.data[i].p;
                            }
                            if (dist_dft.data[i].id == drafts[s].tokens[i_dft]) {
                                p_dft = dist_dft.data[i].p;
                            }
                            if (p_tgt && p_dft) {
                                break;
                            }
                        }
                        LOG("r = %f, p_dft = %f, p_tgt = %f\n", r, p_dft, p_tgt);
                        if (r <= p_tgt / p_dft) {
                            s_keep = s;
                            accept = true;
                            token_id  = drafts[s].tokens[i_dft];
                            token_str = llama_token_to_piece(ctx_tgt, token_id);
                            gpt_sampler_accept(smpl, token_id, true);

                            LOG("draft token %d of sequence %d (%d, '%s') accepted\n", i_dft, s, token_id, token_str.c_str());
                            break;
                        } else {
                            LOG("draft token %d of sequence %d (%d, '%s') rejected\n", i_dft, s, drafts[s].tokens[i_dft], llama_token_to_piece(ctx_tgt, drafts[s].tokens[i_dft]).c_str());
                            drafts[s].active = false;

                            // calculate residual probability
                            GGML_ASSERT(dist_tgt.sorted);
                            GGML_ASSERT(dist_dft.sorted);

                            // sort dist by id
                            std::sort(dist_tgt.data, dist_tgt.data + dist_tgt.size, [](const llama_token_data &a, const llama_token_data &b) {
                                return a.id < b.id;
                            });
                            std::sort(dist_dft.data, dist_dft.data + dist_dft.size, [](const llama_token_data &a, const llama_token_data &b) {
                                return a.id < b.id;
                            });

                            float sum_probs = 0.0f;

                            for (size_t i = 0; i < dist_tgt.size; i++) {
                                if (i < dist_dft.size) {
                                    dist_tgt.data[i].p = std::max(0.0f, dist_tgt.data[i].p - dist_dft.data[i].p);
                                } else {
                                    dist_tgt.data[i].p = std::max(0.0f, dist_tgt.data[i].p);
                                }

                                sum_probs += dist_tgt.data[i].p;
                            }

                            for (size_t i = 0; i < dist_tgt.size; i++) {
                                dist_tgt.data[i].p /= sum_probs;
                            }

                            // sort dist_tgt by p desc
                            std::sort(dist_tgt.data, dist_tgt.data + dist_tgt.size, [](const llama_token_data &a, const llama_token_data &b) {
                                return a.p > b.p;
                            });
                        }

                        active_seqs.erase(s);
                        for (int i = 0; i < n_seq_dft; i++) {
                            if (i == s) {
                                continue;
                            }
                            if (drafts[i].tokens[i_dft] == drafts[s].tokens[i_dft]) {
                                // synchronize active status for sequences with the same drafted token
                                drafts[i].active = drafts[i].active && accept;
                                if (!drafts[i].active) {
                                    // remove sequence i (not s, which was already erased above) from the active set
                                    active_seqs.erase(i);
                                }
                            }
                        }
                    }

                    if (!accept) {
                        // all drafted tokens were rejected
                        // sample from the target model
                        LOG("all drafted tokens were rejected, sampling from residual distribution\n");
                        std::vector<float> probs(dist_tgt.size);
                        for (size_t i = 0; i < dist_tgt.size; ++i) {
                            probs[i] = dist_tgt.data[i].p;
                        }

                        std::discrete_distribution<> dist(probs.begin(), probs.end());

                        const int idx = dist(rng);

                        token_id = dist_tgt.data[idx].id;
                        gpt_sampler_accept(smpl, token_id, true);
                        token_str = llama_token_to_piece(ctx_tgt, token_id);
                    }
                } else {
                    // greedy verification

                    // sample from the target model
                    LOG("sampling target: s_keep = %3d, i_dft = %3d, i_batch_tgt = %3d\n", s_keep, i_dft, drafts[s_keep].i_batch_tgt[i_dft]);
                    token_id = gpt_sampler_sample(smpl, ctx_tgt, drafts[s_keep].i_batch_tgt[i_dft]);

                    gpt_sampler_accept(smpl, token_id, true);

                    //LOG("last: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx_tgt, smpl->prev).c_str());

                    token_str = llama_token_to_piece(ctx_tgt, token_id);

                    for (int s = 0; s < n_seq_dft; ++s) {
                        if (!drafts[s].active) {
                            continue;
                        }

                        if (i_dft < (int) drafts[s].tokens.size() && token_id == drafts[s].tokens[i_dft]) {
                            LOG("the sampled target token matches the %dth drafted token of sequence %d (%d, '%s') - accepted\n", i_dft, s, token_id, token_str.c_str());

                            s_keep = s;
                            accept = true;
                        } else {
                            drafts[s].active = false;
                        }
                    }
                }

                if (llama_token_is_eog(model_tgt, token_id)) {
                    has_eos = true;
                }

                ++n_predict;

                if (accept) {
                    ++n_accept;
                    ++n_past_tgt;
                    ++n_past_dft;
                    ++i_dft;
                    if (params.use_color) {
                        // Color token according to its origin sequence
                        printf("\u001b[%dm%s\u001b[37m", (36 - s_keep % 6), token_str.c_str());
                    } else {
                        printf("%s", token_str.c_str());
                    }
                    fflush(stdout);
                    continue;
                } else {
                    printf("%s", token_str.c_str());
                    fflush(stdout);
                    break;
                }
            }
        }
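
        // verification ended: keep only the accepted sequence s_keep in both KV caches, collapse it
        // onto sequence 0, and drop the target-cache entries past n_past_tgt (the rejected drafts)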
        {
            LOG("the sampled target token (%d, '%s') did not match, or we ran out of drafted tokens\n", token_id, token_str.c_str());

            // TODO: simplify
            {
                LOG("keeping sequence %d, n_past_tgt = %d, n_past_dft = %d\n", s_keep, n_past_tgt, n_past_dft);

                llama_kv_cache_seq_keep(ctx_dft, s_keep);
                llama_kv_cache_seq_cp  (ctx_dft, s_keep, 0, -1, -1);
                llama_kv_cache_seq_keep(ctx_dft, 0);

                llama_kv_cache_seq_rm  (ctx_tgt, s_keep, n_past_tgt, -1);
                llama_kv_cache_seq_keep(ctx_tgt, s_keep);
                llama_kv_cache_seq_cp  (ctx_tgt, s_keep, 0, -1, -1);
                llama_kv_cache_seq_keep(ctx_tgt, 0);
            }

            for (int s = 0; s < n_seq_dft; ++s) {
                drafts[s].active = false;
                drafts[s].tokens.clear();
                drafts[s].i_batch_tgt.clear();
                drafts[s].dists.clear();
            }
            // note: will be erased after the speculation phase
            drafts[0].tokens.push_back(token_id);
            drafts[0].dists.push_back(std::vector<llama_token_data>());
            drafts[0].i_batch_tgt.push_back(0);

            llama_batch_clear(batch_dft);
            llama_batch_add  (batch_dft, token_id, n_past_dft, { 0 }, true);

            llama_kv_cache_seq_rm(ctx_dft, 0, n_past_dft, -1);
            // LOG("dft batch: %s\n", LOG_BATCH_TOSTR_PRETTY(ctx_dft, batch_dft).c_str());
            llama_decode(ctx_dft, batch_dft);

            ++n_past_dft;
        }

        if (n_predict > params.n_predict || has_eos) {
            break;
        }

        if (drafts[0].smpl) {
            gpt_sampler_free(drafts[0].smpl);
        }
        drafts[0].smpl = gpt_sampler_clone(smpl);

        int n_seq_cur  = 1;
        int n_past_cur = n_past_dft;

        for (int s = 0; s < n_seq_dft; ++s) {
            drafts[s].active   = false;
            drafts[s].drafting = false;
        }
        drafts[0].active      = true;
        drafts[0].drafting    = true;
        drafts[0].i_batch_dft = 0;

        llama_batch_clear(batch_tgt);
        llama_batch_add  (batch_tgt, drafts[0].tokens[0], n_past_tgt, { 0 }, true);

        // sample n_draft tokens from the draft model using tree-based sampling
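        // each drafting step extends every drafting sequence by one token; if another candidate
        // token has probability above p_split and a free sequence slot remains, the branch is split
        // (its KV cache is copied) so that multiple continuations are drafted in parallel - all
        // drafted tokens are collected into batch_tgt for a single verification pass by the target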
        for (int i = 0; i < n_draft; ++i) {
            batch_dft.n_tokens = 0;

            for (int s = 0; s < n_seq_dft; ++s) {
                drafts[s].skip = false;
            }

            for (int s = 0; s < n_seq_dft; ++s) {
                if (!drafts[s].drafting || drafts[s].skip) {
                    continue;
                }

                gpt_sampler_sample(drafts[s].smpl, ctx_dft, drafts[s].i_batch_dft, true);

                const auto * cur_p = gpt_sampler_get_candidates(drafts[s].smpl);

                for (int k = 0; k < std::min(n_seq_dft + 3, (int) cur_p->size); ++k) {
                    LOG(" - draft candidate %3d for seq %3d, pos %3d: %6d (%8.3f) '%s'\n",
                            k, s, i, cur_p->data[k].id, cur_p->data[k].p, llama_token_to_piece(ctx_dft, cur_p->data[k].id).c_str());
                }

                std::vector<int> sa(1, s);

                // attempt to split the branch if the probability is high enough
                for (int f = 1; f < 8; ++f) {
                    if (n_seq_cur < n_seq_dft && cur_p->data[f].p > p_split) {
                        LOG("splitting seq %3d into %3d\n", s, n_seq_cur);

                        llama_kv_cache_seq_rm(ctx_dft,    n_seq_cur, -1, -1);
                        llama_kv_cache_seq_cp(ctx_dft, s, n_seq_cur, -1, -1);

                        // all previous tokens from this branch are now also part of the new branch
                        for (int t = 0; t < batch_tgt.n_tokens; ++t) {
                            for (int p = 0; p < batch_tgt.n_seq_id[t]; ++p) {
                                if (batch_tgt.seq_id[t][p] == s) {
                                    batch_tgt.seq_id[t][batch_tgt.n_seq_id[t]] = n_seq_cur;
                                    batch_tgt.n_seq_id[t]++;
                                    break;
                                }
                            }
                        }

                        // copy the draft state
                        drafts[n_seq_cur].active   = true;
                        drafts[n_seq_cur].drafting = true;
                        drafts[n_seq_cur].skip     = true;

                        drafts[n_seq_cur].tokens      = drafts[s].tokens;
                        drafts[n_seq_cur].dists       = drafts[s].dists;
                        drafts[n_seq_cur].i_batch_dft = drafts[s].i_batch_dft;
                        drafts[n_seq_cur].i_batch_tgt = drafts[s].i_batch_tgt;

                        if (drafts[n_seq_cur].smpl) {
                            gpt_sampler_free(drafts[n_seq_cur].smpl);
                        }
                        drafts[n_seq_cur].smpl = gpt_sampler_clone(drafts[s].smpl);

                        sa.push_back(n_seq_cur);

                        n_seq_cur++;
                    } else {
                        break;
                    }
                }

                // add drafted token for each sequence
                for (int is = 0; is < (int) sa.size(); ++is) {
                    const llama_token id = cur_p->data[is].id;

                    const int s = sa[is];

                    gpt_sampler_accept(drafts[s].smpl, id, true);

                    drafts[s].tokens.push_back(id);
                    // save cur_p.data into drafts[s].dists
                    drafts[s].dists.push_back({cur_p->data, cur_p->data + cur_p->size});

                    // add unique drafted tokens to the target batch
                    drafts[s].i_batch_tgt.push_back(batch_tgt.n_tokens);

                    llama_batch_add(batch_tgt, id, n_past_tgt + i + 1, { s }, true);

                    // add the token to the batch for batched decoding with the draft model
                    drafts[s].i_batch_dft = batch_dft.n_tokens;

                    llama_batch_add(batch_dft, id, n_past_cur, { s }, true);

                    if (batch_tgt.n_tokens > n_draft) {
                        drafts[s].drafting = false;
                    }
                }
            }

            // no sequence is drafting anymore
            if (batch_dft.n_tokens == 0) {
                break;
            }

            // evaluate the drafted tokens on the draft model
            llama_decode(ctx_dft, batch_dft);
            ++n_past_cur;
            ++n_drafted;

            if (batch_tgt.n_tokens > n_draft) {
                break;
            }
        }

        // evaluate the target model on the drafted tokens
        {
            llama_kv_cache_seq_keep(ctx_tgt, 0);
            for (int s = 1; s < n_seq_dft; ++s) {
                llama_kv_cache_seq_cp(ctx_tgt, 0, s, -1, -1);
            }

            // LOG("target batch: %s\n", LOG_BATCH_TOSTR_PRETTY(ctx_tgt, batch_tgt).c_str());
            llama_decode(ctx_tgt, batch_tgt);
            ++n_past_tgt;
        }

        // the first token is always proposed by the target model before the speculation loop so we erase it here
        for (int s = 0; s < n_seq_dft; ++s) {
            if (!drafts[s].active) {
                continue;
            }

            drafts[s].tokens.erase(drafts[s].tokens.begin());
            drafts[s].dists.erase(drafts[s].dists.begin());
        }
    }

    auto t_dec_end = ggml_time_us();

    LOG_TEE("\n\n");

    LOG_TEE("encoded %4d tokens in %8.3f seconds, speed: %8.3f t/s\n", n_input,   (t_enc_end - t_enc_start) / 1e6f, inp.size() / ((t_enc_end - t_enc_start) / 1e6f));
    LOG_TEE("decoded %4d tokens in %8.3f seconds, speed: %8.3f t/s\n", n_predict, (t_dec_end - t_dec_start) / 1e6f, n_predict  / ((t_dec_end - t_dec_start) / 1e6f));

    LOG_TEE("\n");
    LOG_TEE("n_draft   = %d\n", n_draft);
    LOG_TEE("n_predict = %d\n", n_predict);
    LOG_TEE("n_drafted = %d\n", n_drafted);
    LOG_TEE("n_accept  = %d\n", n_accept);
    LOG_TEE("accept    = %.3f%%\n", 100.0f * n_accept / n_drafted);

    LOG_TEE("\ndraft:\n\n");
    // TODO: print sampling/grammar timings for all drafts
    llama_perf_print(ctx_dft, LLAMA_PERF_TYPE_CONTEXT);

    LOG_TEE("\ntarget:\n\n");
    gpt_perf_print(ctx_tgt, smpl);

    gpt_sampler_free(smpl);
    for (int s = 0; s < n_seq_dft; ++s) {
        gpt_sampler_free(drafts[s].smpl);
    }

    llama_sampler_free(softmax);
    llama_batch_free(batch_dft);
    llama_batch_free(batch_tgt);

    llama_free(ctx_tgt);
    llama_free_model(model_tgt);

    llama_free(ctx_dft);
    llama_free_model(model_dft);

    llama_backend_free();

    fprintf(stderr, "\n\n");

    return 0;
}