// speculative.cpp

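// speculative decoding example: a small "draft" model proposes batches of
// tokens which the larger "target" model verifies in a single decode call,
// accepting a prefix of the proposal (see Leviathan et al., "Fast Inference
// from Transformers via Speculative Decoding", 2023)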
  1. #include "arg.h"
  2. #include "common.h"
  3. #include "sampling.h"
  4. #include "log.h"
  5. #include "llama.h"
  6. #include <algorithm>
  7. #include <cstdio>
  8. #include <cstring>
  9. #include <random>
  10. #include <set>
  11. #include <string>
  12. #include <vector>
  13. #define SPEC_VOCAB_MAX_SIZE_DIFFERENCE 100
  14. #define SPEC_VOCAB_CHECK_START_TOKEN_ID 5
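// state of a single draft sequence; drafting can fork into up to n_seq_dft
// parallel sequences (a tree of drafts), each carrying its own sampler state,
// drafted tokens and per-token draft distributions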
struct seq_draft {
    bool active   = false; // sequence is still consistent with the target output
    bool drafting = false; // sequence is still producing new draft tokens
    bool skip     = false; // skip sampling this step (set for freshly split branches)

    int i_batch_dft = 0;          // index of this sequence's last logits in the draft batch
    std::vector<int> i_batch_tgt; // indices of this sequence's logits in the target batch

    std::vector<llama_token> tokens;                  // drafted tokens
    std::vector<std::vector<llama_token_data>> dists; // draft model distribution for each drafted token

    struct common_sampler * smpl = nullptr; // per-sequence sampler state
};
int main(int argc, char ** argv) {
    common_params params;

    // needed to get candidate probs even for temp <= 0.0
    params.sparams.n_probs = 128;

    if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_SPECULATIVE)) {
        return 1;
    }

    if (params.n_predict < -1) {
        LOG_ERR("%s: --n-predict must be >= -1\n", __func__);
        return 1;
    }

    common_init();

    if (params.model_draft.empty()) {
        LOG_ERR("%s: --model-draft is required\n", __func__);
        return 1;
    }

    // max number of parallel drafting sequences (i.e. tree branches)
    const int n_seq_dft = params.n_parallel;

    // probability threshold for splitting a draft branch (only for n_seq_dft > 1)
    const float p_split = params.p_split;

    std::default_random_engine rng(params.sparams.seed == LLAMA_DEFAULT_SEED ? std::random_device()() : params.sparams.seed);
    std::uniform_real_distribution<> u_dist;

    // init llama.cpp
    llama_backend_init();
    llama_numa_init(params.numa);

    llama_model * model_tgt = NULL;
    llama_model * model_dft = NULL;

    llama_context * ctx_tgt = NULL;
    llama_context * ctx_dft = NULL;

    // load the target model
    common_init_result llama_init_tgt = common_init_from_params(params);
    model_tgt = llama_init_tgt.model;
    ctx_tgt   = llama_init_tgt.context;

    // load the draft model (reuse params, overriding the draft-specific fields)
    params.model        = params.model_draft;
    params.n_gpu_layers = params.n_gpu_layers_draft;
    if (params.draft_cpuparams.n_threads > 0) {
        params.cpuparams.n_threads = params.draft_cpuparams.n_threads;
    }
    params.cpuparams_batch.n_threads = params.draft_cpuparams_batch.n_threads;
    common_init_result llama_init_dft = common_init_from_params(params);
    model_dft = llama_init_dft.model;
    ctx_dft   = llama_init_dft.context;
    const enum llama_vocab_type vocab_type_tgt = llama_vocab_type(model_tgt);
    LOG_DBG("vocab_type tgt: %d\n", vocab_type_tgt);

    const enum llama_vocab_type vocab_type_dft = llama_vocab_type(model_dft);
    LOG_DBG("vocab_type dft: %d\n", vocab_type_dft);

    if (vocab_type_tgt != vocab_type_dft) {
        LOG_ERR("%s: draft model vocab type must match target model to use speculation but ", __func__);
        LOG_ERR("vocab_type_dft = %d while vocab_type_tgt = %d\n", vocab_type_dft, vocab_type_tgt);
        return 1;
    }
    if (
        llama_add_bos_token(model_tgt) != llama_add_bos_token(model_dft) ||
        llama_add_eos_token(model_tgt) != llama_add_eos_token(model_dft) ||
        llama_token_bos(model_tgt) != llama_token_bos(model_dft) ||
        llama_token_eos(model_tgt) != llama_token_eos(model_dft)
    ) {
        LOG_ERR("%s: draft model special tokens must match target model to use speculation\n", __func__);
        return 1;
    }
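    // the two vocabs do not have to be identical, but drafted token ids must
    // map to the same text in both models - otherwise an accepted draft token
    // would decode to different text than the target intended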
    {
        const int n_vocab_tgt = llama_n_vocab(model_tgt);
        const int n_vocab_dft = llama_n_vocab(model_dft);

        const int vocab_diff = n_vocab_tgt > n_vocab_dft
            ? n_vocab_tgt - n_vocab_dft
            : n_vocab_dft - n_vocab_tgt;

        if (vocab_diff > SPEC_VOCAB_MAX_SIZE_DIFFERENCE) {
            LOG_ERR("%s: draft model vocab must closely match target model to use speculation but ", __func__);
            LOG_ERR("target vocab size %d does not match draft vocab size %d - difference %d, max allowed %d\n",
                    n_vocab_tgt, n_vocab_dft, vocab_diff, SPEC_VOCAB_MAX_SIZE_DIFFERENCE);
            return 1;
        }

        // skip the first few ids - the leading control/special tokens are allowed to differ
        for (int i = SPEC_VOCAB_CHECK_START_TOKEN_ID; i < std::min(n_vocab_tgt, n_vocab_dft); ++i) {
            const char * token_text_tgt = llama_token_get_text(model_tgt, i);
            const char * token_text_dft = llama_token_get_text(model_dft, i);
            if (std::strcmp(token_text_tgt, token_text_dft) != 0) {
                LOG_ERR("%s: draft model vocab must match target model to use speculation but ", __func__);
                LOG_ERR("token %d content differs - target '%s', draft '%s'\n", i,
                        common_token_to_piece(ctx_tgt, i).c_str(),
                        common_token_to_piece(ctx_dft, i).c_str());
                return 1;
            }
        }
    }
    // tokenize the prompt
    std::vector<llama_token> inp;
    inp = common_tokenize(ctx_tgt, params.prompt, true, true);

    const int max_context_size     = llama_n_ctx(ctx_tgt);
    const int max_tokens_list_size = max_context_size - 4;

    if ((int) inp.size() > max_tokens_list_size) {
        LOG_ERR("%s: prompt too long (%d tokens, max %d)\n", __func__, (int) inp.size(), max_tokens_list_size);
        return 1;
    }

    LOG("\n\n");

    for (auto id : inp) {
        LOG("%s", common_token_to_piece(ctx_tgt, id).c_str());
    }
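    // the target prompt is evaluated in two chunks - everything up to the last
    // token, then the last token on its own - so that the logits of the last
    // prompt token sit at index 0 of the most recent decode, which is what
    // drafts[0].i_batch_tgt[0] = 0 below refers to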
    const int n_input = inp.size();

    const auto t_enc_start = ggml_time_us();

    // eval the prompt with both models
    llama_decode(ctx_tgt, llama_batch_get_one( inp.data(), n_input - 1));
    llama_decode(ctx_tgt, llama_batch_get_one(&inp.back(),           1));
    llama_decode(ctx_dft, llama_batch_get_one( inp.data(), n_input));

    const auto t_enc_end = ggml_time_us();

    // the 2 models should have the same vocab
    //GGML_ASSERT(n_vocab == llama_n_vocab(model_dft));
    // how many tokens to draft each time
    int n_draft = params.n_draft;

    int n_predict = 0;
    int n_drafted = 0;
    int n_accept  = 0;

    int n_past_tgt = inp.size();
    int n_past_dft = inp.size();

    // used to determine end of generation
    bool has_eos = false;

    // target model sampling context (reuse the llama_context's sampling instance)
    struct common_sampler * smpl = common_sampler_init(model_tgt, params.sparams);

    // draft sequence data
    std::vector<seq_draft> drafts(n_seq_dft);

    for (int s = 0; s < n_seq_dft; ++s) {
        // allocate llama_sampler for each draft sequence
        drafts[s].smpl = common_sampler_init(model_dft, params.sparams);
    }

    llama_batch batch_dft = llama_batch_init(llama_n_batch(ctx_dft), 0, 1);
    llama_batch batch_tgt = llama_batch_init(llama_n_batch(ctx_tgt), 0, n_seq_dft);

    const auto t_dec_start = ggml_time_us();

    // sample from the last token of the prompt
    drafts[0].i_batch_tgt.resize(1);
    drafts[0].i_batch_tgt[0] = 0;
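    // main loop: each iteration first verifies the tokens drafted in the
    // previous iteration against the target model (accepting a prefix of
    // them), then rearranges the KV caches around the accepted sequence and
    // drafts a new batch of tokens with the draft model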
    while (true) {
        std::set<int> active_seqs = {};

        // print current draft sequences
        for (int s = 0; s < n_seq_dft; ++s) {
            if (!drafts[s].active) {
                continue;
            }

            active_seqs.insert(s);

            const auto & tokens = drafts[s].tokens;

            LOG_DBG("draft %d: %s\n", s, string_from(ctx_dft, tokens).c_str());
        }

        int i_dft  = 0;
        int s_keep = 0;

        llama_token token_id;
        std::string token_str;

        // loop until we fail to accept a drafted token or we run out of drafted tokens
        while (true) {
            // check if the target token matches any of the drafts
            // for stochastic sampling, attempt to match the token with the drafted tokens
            {
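                // when temp > 0, drafted tokens are verified via speculative
                // sampling: a drafted token x is accepted with probability
                // min(1, p_tgt(x) / p_dft(x)); on rejection the target
                // distribution is replaced by the normalized residual
                // max(0, p_tgt - p_dft), which keeps the output distribution
                // identical to sampling the target model directly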
                bool accept = false;
                if (params.sparams.temp > 0) {
                    // stochastic verification
                    common_sampler_sample(smpl, ctx_tgt, drafts[s_keep].i_batch_tgt[i_dft], true);

                    auto & dist_tgt = *common_sampler_get_candidates(smpl);

                    float p_tgt = 0.0f;
                    float p_dft = 0.0f;

                    while (active_seqs.size() > 0) {
                        // randomly select a sequence to verify from active sequences
                        std::uniform_int_distribution<unsigned int> u_int_dist(0, active_seqs.size() - 1);
                        int s = *std::next(active_seqs.begin(), u_int_dist(rng));
                        if (i_dft >= (int) drafts[s].tokens.size()) {
                            drafts[s].active = false;
                            active_seqs.erase(s);
                            continue;
                        }
                        if (accept) {
                            // if we already accepted a token, we can skip the rest
                            if (drafts[s].tokens[i_dft] != drafts[s_keep].tokens[i_dft]) {
                                drafts[s].active = false;
                                active_seqs.erase(s);
                            }
                            continue;
                        }

                        LOG_DBG("verifying sequence #%d at pos #%d from %d active sequence(s)\n", s, i_dft, (int) active_seqs.size());

                        float r = u_dist(rng);
                        llama_token_data_array dist_dft = { drafts[s].dists[i_dft].data(), drafts[s].dists[i_dft].size(), LLAMA_TOKEN_NULL, true };

                        //GGML_ASSERT(dist_tgt.size <= dist_dft.size);

                        // acquire the token probabilities assigned by the draft and target models
                        for (size_t i = 0; i < dist_tgt.size; i++) {
                            if (dist_tgt.data[i].id == drafts[s].tokens[i_dft]) {
                                p_tgt = dist_tgt.data[i].p;
                            }
                            if (dist_dft.data[i].id == drafts[s].tokens[i_dft]) {
                                p_dft = dist_dft.data[i].p;
                            }
                            if (p_tgt && p_dft) {
                                break;
                            }
                        }
  215. LOG_DBG("r = %f, p_dft = %f, p_tgt = %f\n", r, p_dft, p_tgt);
  216. if (r <= p_tgt / p_dft) {
  217. s_keep = s;
  218. accept = true;
  219. token_id = drafts[s].tokens[i_dft];
  220. token_str = common_token_to_piece(ctx_tgt, token_id);
  221. common_sampler_accept(smpl, token_id, true);
  222. LOG_DBG("draft token %d of sequence %d (%d, '%s') accepted\n", i_dft, s, token_id, token_str.c_str());
  223. break;
  224. } else {
  225. LOG_DBG("draft token %d of sequence %d (%d, '%s') rejected\n", i_dft, s, drafts[s].tokens[i_dft], common_token_to_piece(ctx_tgt, drafts[s].tokens[i_dft]).c_str());
  226. drafts[s].active = false;
  227. // calculate residual probability
  228. GGML_ASSERT(dist_tgt.sorted);
  229. GGML_ASSERT(dist_dft.sorted);
  230. // sort dist by id
  231. std::sort(dist_tgt.data, dist_tgt.data + dist_tgt.size, [](const llama_token_data &a, const llama_token_data &b) {
  232. return a.id < b.id;
  233. });
  234. std::sort(dist_dft.data, dist_dft.data + dist_dft.size, [](const llama_token_data &a, const llama_token_data &b) {
  235. return a.id < b.id;
  236. });
  237. float sum_probs = 0.0f;
  238. for (size_t i = 0; i < dist_tgt.size; i++) {
  239. if (i < dist_dft.size) {
  240. dist_tgt.data[i].p = std::max(0.0f, dist_tgt.data[i].p - dist_dft.data[i].p);
  241. } else {
  242. dist_tgt.data[i].p = std::max(0.0f, dist_tgt.data[i].p);
  243. }
  244. sum_probs += dist_tgt.data[i].p;
  245. }
  246. for (size_t i = 0; i < dist_tgt.size; i++) {
  247. dist_tgt.data[i].p /= sum_probs;
  248. }
  249. // sort dist_tgt by p desc
  250. std::sort(dist_tgt.data, dist_tgt.data + dist_tgt.size, [](const llama_token_data &a, const llama_token_data &b) {
  251. return a.p > b.p;
  252. });
  253. }
                        active_seqs.erase(s);
                        for (int i = 0; i < n_seq_dft; i++) {
                            if (i == s) {
                                continue;
                            }
                            if (drafts[i].tokens[i_dft] == drafts[s].tokens[i_dft]) {
                                // synchronize active status for sequences with the same drafted token
                                drafts[i].active = drafts[i].active && accept;
                                if (!drafts[i].active) {
                                    active_seqs.erase(i);
                                }
                            }
                        }
                    }
                    if (!accept) {
                        // all drafted tokens were rejected
                        // sample from the residual of the target distribution
                        LOG_DBG("all drafted tokens were rejected, sampling from residual distribution\n");
                        std::vector<float> probs(dist_tgt.size);
                        for (size_t i = 0; i < dist_tgt.size; ++i) {
                            probs[i] = dist_tgt.data[i].p;
                        }

                        std::discrete_distribution<> dist(probs.begin(), probs.end());
                        const int idx = dist(rng);

                        token_id = dist_tgt.data[idx].id;
                        common_sampler_accept(smpl, token_id, true);
                        token_str = common_token_to_piece(ctx_tgt, token_id);
                    }
                } else {
                    // greedy verification: a drafted token is accepted only if it is
                    // exactly the token the target model samples

                    // sample from the target model
                    LOG_DBG("sampling target: s_keep = %3d, i_dft = %3d, i_batch_tgt = %3d\n", s_keep, i_dft, drafts[s_keep].i_batch_tgt[i_dft]);
                    token_id = common_sampler_sample(smpl, ctx_tgt, drafts[s_keep].i_batch_tgt[i_dft]);

                    common_sampler_accept(smpl, token_id, true);

                    token_str = common_token_to_piece(ctx_tgt, token_id);

                    for (int s = 0; s < n_seq_dft; ++s) {
                        if (!drafts[s].active) {
                            continue;
                        }

                        if (i_dft < (int) drafts[s].tokens.size() && token_id == drafts[s].tokens[i_dft]) {
                            LOG_DBG("the sampled target token matches the %dth drafted token of sequence %d (%d, '%s') - accepted\n", i_dft, s, token_id, token_str.c_str());
                            s_keep = s;
                            accept = true;
                        } else {
                            drafts[s].active = false;
                        }
                    }
                }
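                // at this point token_id holds the verified token: either an
                // accepted draft token or a fresh sample from the target model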
                if (llama_token_is_eog(model_tgt, token_id)) {
                    has_eos = true;
                }
                ++n_predict;

                if (accept) {
                    ++n_accept;
                    ++n_past_tgt;
                    ++n_past_dft;
                    ++i_dft;
                    if (params.use_color) {
                        // color the token according to its origin sequence
                        LOG("\u001b[%dm%s\u001b[37m", (36 - s_keep % 6), token_str.c_str());
                    } else {
                        LOG("%s", token_str.c_str());
                    }
                    continue;
                } else {
                    LOG("%s", token_str.c_str());
                    break;
                }
            }
        }
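        // the verification loop has ended: keep only the accepted sequence in
        // both KV caches and discard all other draft branches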
        {
            LOG_DBG("the sampled target token (%d, '%s') did not match, or we ran out of drafted tokens\n", token_id, token_str.c_str());

            // TODO: simplify
            {
                LOG_DBG("keeping sequence %d, n_past_tgt = %d, n_past_dft = %d\n", s_keep, n_past_tgt, n_past_dft);

                // draft cache: keep only s_keep and move it to sequence 0
                llama_kv_cache_seq_keep(ctx_dft, s_keep);
                llama_kv_cache_seq_cp  (ctx_dft, s_keep, 0, -1, -1);
                llama_kv_cache_seq_keep(ctx_dft, 0);

                // target cache: drop the rejected tail of s_keep, then do the same
                llama_kv_cache_seq_rm  (ctx_tgt, s_keep, n_past_tgt, -1);
                llama_kv_cache_seq_keep(ctx_tgt, s_keep);
                llama_kv_cache_seq_cp  (ctx_tgt, s_keep, 0, -1, -1);
                llama_kv_cache_seq_keep(ctx_tgt, 0);
            }

            for (int s = 0; s < n_seq_dft; ++s) {
                drafts[s].active = false;
                drafts[s].tokens.clear();
                drafts[s].i_batch_tgt.clear();
                drafts[s].dists.clear();
            }

            // note: will be erased after the speculation phase
            drafts[0].tokens.push_back(token_id);
            drafts[0].dists.push_back(std::vector<llama_token_data>());
            drafts[0].i_batch_tgt.push_back(0);

            // feed the last verified token to the draft model so drafting can continue from it
            common_batch_clear(batch_dft);
            common_batch_add  (batch_dft, token_id, n_past_dft, { 0 }, true);

            llama_kv_cache_seq_rm(ctx_dft, 0, n_past_dft, -1);
            // LOG_DBG("dft batch: %s\n", LOG_BATCH_TOSTR_PRETTY(ctx_dft, batch_dft).c_str());
            llama_decode(ctx_dft, batch_dft);

            ++n_past_dft;
        }
        if ((params.n_predict >= 0 && n_predict > params.n_predict) || has_eos) {
            break;
        }

        // start drafting branch 0 from a copy of the current target sampler state
        if (drafts[0].smpl) {
            common_sampler_free(drafts[0].smpl);
        }
        drafts[0].smpl = common_sampler_clone(smpl);

        int n_seq_cur  = 1;
        int n_past_cur = n_past_dft;

        for (int s = 0; s < n_seq_dft; ++s) {
            drafts[s].active   = false;
            drafts[s].drafting = false;
        }
        drafts[0].active      = true;
        drafts[0].drafting    = true;
        drafts[0].i_batch_dft = 0;

        // the target batch starts with the last verified token
        common_batch_clear(batch_tgt);
        common_batch_add  (batch_tgt, drafts[0].tokens[0], n_past_tgt, { 0 }, true);

        // sample n_draft tokens from the draft model using tree-based sampling
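        // (each iteration extends every drafting branch by one token; a branch
        // splits whenever a runner-up candidate exceeds p_split, up to
        // n_seq_dft branches in total; all drafted tokens are also queued into
        // batch_tgt so the target model can score the whole tree in one decode)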
        for (int i = 0; i < n_draft; ++i) {
            batch_dft.n_tokens = 0;

            for (int s = 0; s < n_seq_dft; ++s) {
                drafts[s].skip = false;
            }

            for (int s = 0; s < n_seq_dft; ++s) {
                if (!drafts[s].drafting || drafts[s].skip) {
                    continue;
                }

                common_sampler_sample(drafts[s].smpl, ctx_dft, drafts[s].i_batch_dft, true);

                const auto * cur_p = common_sampler_get_candidates(drafts[s].smpl);

                for (int k = 0; k < std::min(n_seq_dft + 3, (int) cur_p->size); ++k) {
                    LOG_DBG(" - draft candidate %3d for seq %3d, pos %3d: %6d (%8.3f) '%s'\n",
                            k, s, i, cur_p->data[k].id, cur_p->data[k].p, common_token_to_piece(ctx_dft, cur_p->data[k].id).c_str());
                }
                std::vector<int> sa(1, s);

                // attempt to split the branch if the probability is high enough
                for (int f = 1; f < 8; ++f) {
                    if (n_seq_cur < n_seq_dft && cur_p->data[f].p > p_split) {
                        LOG_DBG("splitting seq %3d into %3d\n", s, n_seq_cur);

                        llama_kv_cache_seq_rm(ctx_dft, n_seq_cur, -1, -1);
                        llama_kv_cache_seq_cp(ctx_dft, s, n_seq_cur, -1, -1);

                        // all previous tokens from this branch are now also part of the new branch
                        for (int t = 0; t < batch_tgt.n_tokens; ++t) {
                            for (int p = 0; p < batch_tgt.n_seq_id[t]; ++p) {
                                if (batch_tgt.seq_id[t][p] == s) {
                                    batch_tgt.seq_id[t][batch_tgt.n_seq_id[t]] = n_seq_cur;
                                    batch_tgt.n_seq_id[t]++;
                                    break;
                                }
                            }
                        }

                        // copy the draft state
                        drafts[n_seq_cur].active   = true;
                        drafts[n_seq_cur].drafting = true;
                        drafts[n_seq_cur].skip     = true;

                        drafts[n_seq_cur].tokens      = drafts[s].tokens;
                        drafts[n_seq_cur].dists       = drafts[s].dists;
                        drafts[n_seq_cur].i_batch_dft = drafts[s].i_batch_dft;
                        drafts[n_seq_cur].i_batch_tgt = drafts[s].i_batch_tgt;

                        if (drafts[n_seq_cur].smpl) {
                            common_sampler_free(drafts[n_seq_cur].smpl);
                        }
                        drafts[n_seq_cur].smpl = common_sampler_clone(drafts[s].smpl);

                        sa.push_back(n_seq_cur);

                        n_seq_cur++;
                    } else {
                        break;
                    }
                }
                // add drafted token for each sequence
                for (int is = 0; is < (int) sa.size(); ++is) {
                    const llama_token id = cur_p->data[is].id;

                    const int s = sa[is];

                    common_sampler_accept(drafts[s].smpl, id, true);

                    drafts[s].tokens.push_back(id);
                    // save cur_p.data into drafts[s].dists
                    drafts[s].dists.push_back({cur_p->data, cur_p->data + cur_p->size});

                    // add unique drafted tokens to the target batch
                    drafts[s].i_batch_tgt.push_back(batch_tgt.n_tokens);

                    common_batch_add(batch_tgt, id, n_past_tgt + i + 1, { s }, true);

                    // add the token to the batch for batched decoding with the draft model
                    drafts[s].i_batch_dft = batch_dft.n_tokens;

                    common_batch_add(batch_dft, id, n_past_cur, { s }, true);

                    if (batch_tgt.n_tokens > n_draft) {
                        drafts[s].drafting = false;
                    }
                }
            }
            // no sequence is drafting anymore
            if (batch_dft.n_tokens == 0) {
                break;
            }

            // evaluate the drafted tokens on the draft model
            llama_decode(ctx_dft, batch_dft);
            ++n_past_cur;
            ++n_drafted;

            if (batch_tgt.n_tokens > n_draft) {
                break;
            }
        }
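        // score the entire draft tree with a single target decode: sequence 0
        // holds the verified prefix, so it is first replicated into every
        // other sequence id used by the draft branches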
        // evaluate the target model on the drafted tokens
        {
            llama_kv_cache_seq_keep(ctx_tgt, 0);
            for (int s = 1; s < n_seq_dft; ++s) {
                llama_kv_cache_seq_cp(ctx_tgt, 0, s, -1, -1);
            }

            // LOG_DBG("target batch: %s\n", LOG_BATCH_TOSTR_PRETTY(ctx_tgt, batch_tgt).c_str());
            llama_decode(ctx_tgt, batch_tgt);
            ++n_past_tgt;
        }

        // the first token is always proposed by the target model before the speculation loop so we erase it here
        for (int s = 0; s < n_seq_dft; ++s) {
            if (!drafts[s].active) {
                continue;
            }

            drafts[s].tokens.erase(drafts[s].tokens.begin());
            drafts[s].dists.erase(drafts[s].dists.begin());
        }
    }
    auto t_dec_end = ggml_time_us();

    LOG("\n\n");

    LOG_INF("encoded %4d tokens in %8.3f seconds, speed: %8.3f t/s\n", n_input,   (t_enc_end - t_enc_start) / 1e6f, inp.size() / ((t_enc_end - t_enc_start) / 1e6f));
    LOG_INF("decoded %4d tokens in %8.3f seconds, speed: %8.3f t/s\n", n_predict, (t_dec_end - t_dec_start) / 1e6f, n_predict  / ((t_dec_end - t_dec_start) / 1e6f));

    LOG_INF("\n");
    LOG_INF("n_draft   = %d\n", n_draft);
    LOG_INF("n_predict = %d\n", n_predict);
    LOG_INF("n_drafted = %d\n", n_drafted);
    LOG_INF("n_accept  = %d\n", n_accept);
    LOG_INF("accept    = %.3f%%\n", 100.0f * n_accept / n_drafted);

    LOG_INF("\n");
    LOG_INF("draft:\n\n");

    // TODO: print sampling/grammar timings for all drafts
    llama_perf_context_print(ctx_dft);

    LOG_INF("\n");
    LOG_INF("target:\n\n");
    common_perf_print(ctx_tgt, smpl);

    common_sampler_free(smpl);
    for (int s = 0; s < n_seq_dft; ++s) {
        common_sampler_free(drafts[s].smpl);
    }

    llama_batch_free(batch_dft);
    llama_batch_free(batch_tgt);

    llama_free(ctx_tgt);
    llama_free_model(model_tgt);

    llama_free(ctx_dft);
    llama_free_model(model_dft);

    llama_backend_free();

    LOG("\n\n");

    return 0;
}