// sampling.cpp

#include "sampling.h"
#include "common.h"

#include <algorithm>
#include <cmath>
#include <unordered_map>

// standard headers used directly below (snprintf, std::runtime_error, std::string, std::vector)
#include <cstdio>
#include <stdexcept>
#include <string>
#include <vector>
// the ring buffer works similarly to std::deque, but with a fixed capacity
// (see the usage sketch below the struct definition)
// TODO: deduplicate with llama-impl.h
template<typename T>
struct ring_buffer {
    ring_buffer(size_t cap) : capacity(cap), data(cap) {}

    T & front() {
        if (sz == 0) {
            throw std::runtime_error("ring buffer is empty");
        }
        return data[first];
    }

    const T & front() const {
        if (sz == 0) {
            throw std::runtime_error("ring buffer is empty");
        }
        return data[first];
    }
    // note: the original code returned data[pos] here, but pos always points at the
    // next write slot, not the last element written, so back() is fixed to return
    // the most recently pushed element
    T & back() {
        if (sz == 0) {
            throw std::runtime_error("ring buffer is empty");
        }
        return data[(first + sz - 1) % capacity];
    }

    const T & back() const {
        if (sz == 0) {
            throw std::runtime_error("ring buffer is empty");
        }
        return data[(first + sz - 1) % capacity];
    }
    void push_back(const T & value) {
        if (sz == capacity) {
            // advance the start when buffer is full
            first = (first + 1) % capacity;
        } else {
            sz++;
        }
        data[pos] = value;
        pos = (pos + 1) % capacity;
    }

    T pop_front() {
        if (sz == 0) {
            throw std::runtime_error("ring buffer is empty");
        }
        T value = data[first];
        first = (first + 1) % capacity;
        sz--;
        return value;
    }

    // reverse-at: rat(0) is the most recently pushed element, rat(size() - 1) the oldest
    const T & rat(size_t i) const {
        if (i >= sz) {
            throw std::runtime_error("ring buffer: index out of bounds");
        }
        return data[(first + sz - i - 1) % capacity];
    }

    std::vector<T> to_vector() const {
        std::vector<T> result;
        result.reserve(sz);
        for (size_t i = 0; i < sz; i++) {
            result.push_back(data[(first + i) % capacity]);
        }
        return result;
    }
    void clear() {
        // only reset the bookkeeping; the underlying storage is left as-is
        sz    = 0;
        first = 0;
        pos   = 0;
    }

    bool empty() const {
        return sz == 0;
    }

    size_t size() const {
        return sz;
    }

    size_t capacity = 0;
    size_t sz       = 0; // number of valid elements
    size_t first    = 0; // index of the oldest element
    size_t pos      = 0; // index of the next write slot

    std::vector<T> data;
};
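
// Illustrative usage sketch (not part of the original file): with capacity 3,
// pushing 1, 2, 3, 4 keeps only the last three values, and rat(i) walks backwards
// from the most recent element:
//
//   ring_buffer<int> rb(3);
//   rb.push_back(1); rb.push_back(2); rb.push_back(3); rb.push_back(4);
//   // rb.to_vector() == {2, 3, 4}
//   // rb.rat(0) == 4, rb.rat(2) == 2
//   // rb.front() == 2, rb.pop_front() == 2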

struct common_sampler {
    common_params_sampling params;

    struct llama_sampler * grmr;  // grammar-constrained sampler, applied separately from the chain
    struct llama_sampler * chain; // chain of sampling constraints (logit bias, top-k, top-p, ..., dist)

    ring_buffer<llama_token> prev; // history of the most recently accepted tokens

    std::vector<llama_token_data> cur; // candidate buffer backing cur_p

    llama_token_data_array cur_p; // current candidates, points into cur

    // load the logits of output index `idx` into the candidate array
    void set_logits(struct llama_context * ctx, int idx) {
        const auto * logits = llama_get_logits_ith(ctx, idx);

        const llama_model * model = llama_get_model(ctx);
        const llama_vocab * vocab = llama_model_get_vocab(model);

        const int n_vocab = llama_vocab_n_tokens(vocab);

        cur.resize(n_vocab);

        for (llama_token token_id = 0; token_id < n_vocab; token_id++) {
            cur[token_id] = llama_token_data{token_id, logits[token_id], 0.0f};
        }

        cur_p = { cur.data(), cur.size(), -1, false };
    }
};

std::string common_params_sampling::print() const {
    char result[1024];

    snprintf(result, sizeof(result),
            "\trepeat_last_n = %d, repeat_penalty = %.3f, frequency_penalty = %.3f, presence_penalty = %.3f\n"
            "\tdry_multiplier = %.3f, dry_base = %.3f, dry_allowed_length = %d, dry_penalty_last_n = %d\n"
            "\ttop_k = %d, top_p = %.3f, min_p = %.3f, xtc_probability = %.3f, xtc_threshold = %.3f, typical_p = %.3f, top_n_sigma = %.3f, temp = %.3f\n"
            "\tmirostat = %d, mirostat_lr = %.3f, mirostat_ent = %.3f",
            penalty_last_n, penalty_repeat, penalty_freq, penalty_present,
            dry_multiplier, dry_base, dry_allowed_length, dry_penalty_last_n,
            top_k, top_p, min_p, xtc_probability, xtc_threshold, typ_p, top_n_sigma, temp,
            mirostat, mirostat_eta, mirostat_tau);

    return std::string(result);
}

struct common_sampler * common_sampler_init(const struct llama_model * model, const struct common_params_sampling & params) {
    const llama_vocab * vocab = llama_model_get_vocab(model);

    llama_sampler_chain_params lparams = llama_sampler_chain_default_params();

    lparams.no_perf = params.no_perf;

    struct llama_sampler * grmr;
    if (params.grammar.compare(0, 11, "%llguidance") == 0) {
#ifdef LLAMA_USE_LLGUIDANCE
        grmr = llama_sampler_init_llg(vocab, "lark", params.grammar.c_str());
#else
        GGML_ABORT("llguidance (cmake -DLLAMA_LLGUIDANCE=ON) is not enabled");
#endif // LLAMA_USE_LLGUIDANCE
    } else {
        std::vector<const char *> trigger_words;
        trigger_words.reserve(params.grammar_trigger_words.size());
        for (const auto & str : params.grammar_trigger_words) {
            trigger_words.push_back(str.word.c_str());
        }

        grmr = params.grammar_lazy
             ? llama_sampler_init_grammar_lazy(vocab, params.grammar.c_str(), "root",
                                               trigger_words.data(), trigger_words.size(),
                                               params.grammar_trigger_tokens.data(), params.grammar_trigger_tokens.size())
             : llama_sampler_init_grammar(vocab, params.grammar.c_str(), "root");
    }

    auto * result = new common_sampler {
        /* .params = */ params,
        /* .grmr   = */ grmr,
        /* .chain  = */ llama_sampler_chain_init(lparams),
        /* .prev   = */ ring_buffer<llama_token>(std::max(32, params.n_prev)),
        /* .cur    = */ {},
        /* .cur_p  = */ {},
    };

    // the logit bias sampler is always the first sampler in the chain
    llama_sampler_chain_add(result->chain,
            llama_sampler_init_logit_bias(
                llama_vocab_n_tokens(vocab),
                params.logit_bias.size(),
                params.logit_bias.data()));

    if (params.mirostat == 0) {
        // standard sampling: the configured constraint samplers, then the final dist sampler
        if (params.top_n_sigma >= 0) {
            llama_sampler_chain_add(result->chain, llama_sampler_init_top_k       (params.top_k));
            llama_sampler_chain_add(result->chain, llama_sampler_init_temp        (params.temp));
            llama_sampler_chain_add(result->chain, llama_sampler_init_top_n_sigma (params.top_n_sigma));
        } else {
            for (const auto & cnstr : params.samplers) {
                switch (cnstr) {
                    case COMMON_SAMPLER_TYPE_DRY:
                        {
                            std::vector<const char *> c_breakers;
                            c_breakers.reserve(params.dry_sequence_breakers.size());
                            for (const auto & str : params.dry_sequence_breakers) {
                                c_breakers.push_back(str.c_str());
                            }

                            llama_sampler_chain_add(result->chain, llama_sampler_init_dry (vocab, llama_model_n_ctx_train(model), params.dry_multiplier, params.dry_base, params.dry_allowed_length, params.dry_penalty_last_n, c_breakers.data(), c_breakers.size()));
                        }
                        break;
                    case COMMON_SAMPLER_TYPE_TOP_K:
                        llama_sampler_chain_add(result->chain, llama_sampler_init_top_k    (params.top_k));
                        break;
                    case COMMON_SAMPLER_TYPE_TOP_P:
                        llama_sampler_chain_add(result->chain, llama_sampler_init_top_p    (params.top_p, params.min_keep));
                        break;
                    case COMMON_SAMPLER_TYPE_MIN_P:
                        llama_sampler_chain_add(result->chain, llama_sampler_init_min_p    (params.min_p, params.min_keep));
                        break;
                    case COMMON_SAMPLER_TYPE_XTC:
                        llama_sampler_chain_add(result->chain, llama_sampler_init_xtc      (params.xtc_probability, params.xtc_threshold, params.min_keep, params.seed));
                        break;
                    case COMMON_SAMPLER_TYPE_TYPICAL_P:
                        llama_sampler_chain_add(result->chain, llama_sampler_init_typical  (params.typ_p, params.min_keep));
                        break;
                    case COMMON_SAMPLER_TYPE_TEMPERATURE:
                        llama_sampler_chain_add(result->chain, llama_sampler_init_temp_ext (params.temp, params.dynatemp_range, params.dynatemp_exponent));
                        break;
                    case COMMON_SAMPLER_TYPE_INFILL:
                        llama_sampler_chain_add(result->chain, llama_sampler_init_infill   (vocab));
                        break;
                    case COMMON_SAMPLER_TYPE_PENALTIES:
                        llama_sampler_chain_add(result->chain, llama_sampler_init_penalties(params.penalty_last_n, params.penalty_repeat, params.penalty_freq, params.penalty_present));
                        break;
                    default:
                        GGML_ASSERT(false && "unknown sampler type");
                }
            }
        }
        llama_sampler_chain_add(result->chain, llama_sampler_init_dist(params.seed));
    } else if (params.mirostat == 1) {
        // mirostat samplers select the token themselves, so no dist sampler is added
        llama_sampler_chain_add(result->chain, llama_sampler_init_temp(params.temp));
        llama_sampler_chain_add(result->chain, llama_sampler_init_mirostat(llama_vocab_n_tokens(vocab), params.seed, params.mirostat_tau, params.mirostat_eta, 100));
    } else if (params.mirostat == 2) {
        llama_sampler_chain_add(result->chain, llama_sampler_init_temp(params.temp));
        llama_sampler_chain_add(result->chain, llama_sampler_init_mirostat_v2(params.seed, params.mirostat_tau, params.mirostat_eta));
    } else {
        GGML_ASSERT(false && "unknown mirostat version");
    }

    return result;
}

void common_sampler_free(struct common_sampler * gsmpl) {
    if (gsmpl) {
        llama_sampler_free(gsmpl->grmr);
        llama_sampler_free(gsmpl->chain);

        delete gsmpl;
    }
}

void common_sampler_accept(struct common_sampler * gsmpl, llama_token token, bool accept_grammar) {
    if (accept_grammar) {
        llama_sampler_accept(gsmpl->grmr, token);
    }

    llama_sampler_accept(gsmpl->chain, token);

    gsmpl->prev.push_back(token);
}

void common_sampler_reset(struct common_sampler * gsmpl) {
    llama_sampler_reset(gsmpl->grmr);
    llama_sampler_reset(gsmpl->chain);
}

struct common_sampler * common_sampler_clone(common_sampler * gsmpl) {
    return new common_sampler {
        /* .params = */ gsmpl->params,
        /* .grmr   = */ llama_sampler_clone(gsmpl->grmr),
        /* .chain  = */ llama_sampler_clone(gsmpl->chain),
        /* .prev   = */ gsmpl->prev,
        /* .cur    = */ gsmpl->cur,
        // note: the copied cur_p still points into the source sampler's cur buffer;
        // it is re-initialized by set_logits() before every sample, so this is safe
        /* .cur_p  = */ gsmpl->cur_p,
    };
}

void common_perf_print(const struct llama_context * ctx, const struct common_sampler * gsmpl) {
    // TODO: measure grammar performance

    if (gsmpl) {
        llama_perf_sampler_print(gsmpl->chain);
    }
    if (ctx) {
        llama_perf_context_print(ctx);
    }
}
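
// common_sampler_sample: summary of the flow implemented below (derived from the
// code itself, not additional upstream documentation):
//   1. load the logits for output index `idx` into the candidate array
//   2. if `grammar_first`, apply the grammar sampler before the chain and return
//      the selected token directly
//   3. otherwise, sample with the chain only, then test the sampled token against
//      the grammar; if the grammar masks it out (logit == -INFINITY), reload the
//      logits and resample with the grammar applied before the chain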
llama_token common_sampler_sample(struct common_sampler * gsmpl, struct llama_context * ctx, int idx, bool grammar_first) {
    gsmpl->set_logits(ctx, idx);

    auto & grmr  = gsmpl->grmr;
    auto & chain = gsmpl->chain;
    auto & cur_p = gsmpl->cur_p; // initialized by set_logits

    if (grammar_first) {
        llama_sampler_apply(grmr, &cur_p);
    }

    llama_sampler_apply(chain, &cur_p);

    GGML_ASSERT(cur_p.selected != -1 && "no selected token during sampling - check your sampling configuration");

    const llama_token id = cur_p.data[cur_p.selected].id;

    if (grammar_first) {
        return id;
    }

    // check if the sampled token fits the grammar
    {
        llama_token_data       single_token_data       = { id, 1.0f, 0.0f };
        llama_token_data_array single_token_data_array = { &single_token_data, 1, -1, false };

        llama_sampler_apply(grmr, &single_token_data_array);

        const bool is_valid = single_token_data_array.data[0].logit != -INFINITY;
        if (is_valid) {
            return id;
        }
    }

    // resampling:
    // if the token is not valid, sample again, but first apply the grammar sampler and then the sampling chain
    gsmpl->set_logits(ctx, idx);

    llama_sampler_apply(grmr,  &cur_p);
    llama_sampler_apply(chain, &cur_p);

    GGML_ASSERT(cur_p.selected != -1 && "no selected token during re-sampling - check your sampling configuration");

    return cur_p.data[cur_p.selected].id;
}
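
// Speculative-decoding helpers (summary derived from the code below): each drafted
// token is sampled at its output index and accepted; the loop stops at the first
// mismatch between the sampled token and the draft. If the whole draft is accepted,
// one extra token is sampled past the end, so the result holds between 1 and
// draft.size() + 1 tokens.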
std::vector<llama_token> common_sampler_sample_and_accept_n(struct common_sampler * gsmpl, struct llama_context * ctx, const std::vector<int> & idxs, const llama_tokens & draft, bool grammar_first) {
    GGML_ASSERT(idxs.size() == draft.size() + 1 && "idxs.size() must be draft.size() + 1");

    std::vector<llama_token> result;
    result.reserve(idxs.size());

    size_t i = 0;
    for (; i < draft.size(); i++) {
        const llama_token id = common_sampler_sample(gsmpl, ctx, idxs[i], grammar_first);

        common_sampler_accept(gsmpl, id, true);

        result.push_back(id);

        if (draft[i] != id) {
            break;
        }
    }

    if (i == draft.size()) {
        const llama_token id = common_sampler_sample(gsmpl, ctx, idxs[i], grammar_first);

        common_sampler_accept(gsmpl, id, true);

        result.push_back(id);
    }

    return result;
}

std::vector<llama_token> common_sampler_sample_and_accept_n(struct common_sampler * gsmpl, struct llama_context * ctx, const llama_tokens & draft, bool grammar_first) {
    std::vector<int> idxs(draft.size() + 1);
    for (size_t i = 0; i < idxs.size(); ++i) {
        idxs[i] = i;
    }

    return common_sampler_sample_and_accept_n(gsmpl, ctx, idxs, draft, grammar_first);
}

uint32_t common_sampler_get_seed(const struct common_sampler * gsmpl) {
    return llama_sampler_get_seed(gsmpl->chain);
}

// helpers

llama_token_data_array * common_sampler_get_candidates(struct common_sampler * gsmpl) {
    return &gsmpl->cur_p;
}

llama_token common_sampler_last(const struct common_sampler * gsmpl) {
    return gsmpl->prev.rat(0);
}

std::string common_sampler_print(const struct common_sampler * gsmpl) {
    std::string result = "logits ";

    for (int i = 0; i < llama_sampler_chain_n(gsmpl->chain); i++) {
        const auto * smpl = llama_sampler_chain_get(gsmpl->chain, i);
        result += std::string("-> ") + llama_sampler_name(smpl) + " ";
    }

    return result;
}

std::string common_sampler_prev_str(common_sampler * gsmpl, llama_context * ctx_main, int n) {
    n = std::min(n, (int) gsmpl->prev.size());

    if (n <= 0) {
        return "";
    }

    std::string result;
    result.reserve(8*n); // 8 is the average length of a token [citation needed], TODO: compute this from the vocab

    for (int i = n - 1; i >= 0; i--) {
        const llama_token id = gsmpl->prev.rat(i);

        GGML_ASSERT(id != LLAMA_TOKEN_NULL && "null token in the sampling history - should not happen");

        result += common_token_to_piece(ctx_main, id);
    }

    return result;
}

char common_sampler_type_to_chr(enum common_sampler_type cnstr) {
    switch (cnstr) {
        case COMMON_SAMPLER_TYPE_DRY:         return 'd';
        case COMMON_SAMPLER_TYPE_TOP_K:       return 'k';
        case COMMON_SAMPLER_TYPE_TYPICAL_P:   return 'y';
        case COMMON_SAMPLER_TYPE_TOP_P:       return 'p';
        case COMMON_SAMPLER_TYPE_MIN_P:       return 'm';
        case COMMON_SAMPLER_TYPE_TEMPERATURE: return 't';
        case COMMON_SAMPLER_TYPE_XTC:         return 'x';
        case COMMON_SAMPLER_TYPE_INFILL:      return 'i';
        case COMMON_SAMPLER_TYPE_PENALTIES:   return 'e';
        default:                              return '?';
    }
}

std::string common_sampler_type_to_str(enum common_sampler_type cnstr) {
    switch (cnstr) {
        case COMMON_SAMPLER_TYPE_DRY:         return "dry";
        case COMMON_SAMPLER_TYPE_TOP_K:       return "top_k";
        case COMMON_SAMPLER_TYPE_TYPICAL_P:   return "typ_p";
        case COMMON_SAMPLER_TYPE_TOP_P:       return "top_p";
        case COMMON_SAMPLER_TYPE_MIN_P:       return "min_p";
        case COMMON_SAMPLER_TYPE_TEMPERATURE: return "temperature";
        case COMMON_SAMPLER_TYPE_XTC:         return "xtc";
        case COMMON_SAMPLER_TYPE_INFILL:      return "infill";
        case COMMON_SAMPLER_TYPE_PENALTIES:   return "penalties";
        default:                              return "";
    }
}

std::vector<common_sampler_type> common_sampler_types_from_names(const std::vector<std::string> & names, bool allow_alt_names) {
    std::unordered_map<std::string, common_sampler_type> sampler_canonical_name_map {
        { "dry",         COMMON_SAMPLER_TYPE_DRY },
        { "top_k",       COMMON_SAMPLER_TYPE_TOP_K },
        { "top_p",       COMMON_SAMPLER_TYPE_TOP_P },
        { "typ_p",       COMMON_SAMPLER_TYPE_TYPICAL_P },
        { "min_p",       COMMON_SAMPLER_TYPE_MIN_P },
        { "temperature", COMMON_SAMPLER_TYPE_TEMPERATURE },
        { "xtc",         COMMON_SAMPLER_TYPE_XTC },
        { "infill",      COMMON_SAMPLER_TYPE_INFILL },
        { "penalties",   COMMON_SAMPLER_TYPE_PENALTIES },
    };

    // sampler names are written in multiple ways, so in addition to the canonical
    // names above, also accept common alternative spellings from user input
    std::unordered_map<std::string, common_sampler_type> sampler_alt_name_map {
        { "top-k",     COMMON_SAMPLER_TYPE_TOP_K },
        { "top-p",     COMMON_SAMPLER_TYPE_TOP_P },
        { "nucleus",   COMMON_SAMPLER_TYPE_TOP_P },
        { "typical-p", COMMON_SAMPLER_TYPE_TYPICAL_P },
        { "typical",   COMMON_SAMPLER_TYPE_TYPICAL_P },
        { "typ-p",     COMMON_SAMPLER_TYPE_TYPICAL_P },
        { "typ",       COMMON_SAMPLER_TYPE_TYPICAL_P },
        { "min-p",     COMMON_SAMPLER_TYPE_MIN_P },
        { "temp",      COMMON_SAMPLER_TYPE_TEMPERATURE },
    };

    std::vector<common_sampler_type> samplers;
    samplers.reserve(names.size());

    for (const auto & name : names) {
        auto sampler = sampler_canonical_name_map.find(name);
        if (sampler != sampler_canonical_name_map.end()) {
            samplers.push_back(sampler->second);
        } else {
            if (allow_alt_names) {
                sampler = sampler_alt_name_map.find(name);
                if (sampler != sampler_alt_name_map.end()) {
                    samplers.push_back(sampler->second);
                }
            }
        }
    }

    return samplers;
}

std::vector<common_sampler_type> common_sampler_types_from_chars(const std::string & chars) {
    std::unordered_map<char, common_sampler_type> sampler_name_map = {
        { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_DRY),         COMMON_SAMPLER_TYPE_DRY },
        { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TOP_K),       COMMON_SAMPLER_TYPE_TOP_K },
        { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TYPICAL_P),   COMMON_SAMPLER_TYPE_TYPICAL_P },
        { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TOP_P),       COMMON_SAMPLER_TYPE_TOP_P },
        { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_MIN_P),       COMMON_SAMPLER_TYPE_MIN_P },
        { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TEMPERATURE), COMMON_SAMPLER_TYPE_TEMPERATURE },
        { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_XTC),         COMMON_SAMPLER_TYPE_XTC },
        { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_INFILL),      COMMON_SAMPLER_TYPE_INFILL },
        { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_PENALTIES),   COMMON_SAMPLER_TYPE_PENALTIES },
    };

    std::vector<common_sampler_type> samplers;
    samplers.reserve(chars.size());

    for (const auto & c : chars) {
        const auto sampler = sampler_name_map.find(c);
        if (sampler != sampler_name_map.end()) {
            samplers.push_back(sampler->second);
        }
    }

    return samplers;
}
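
// Illustrative end-to-end usage (a sketch, not part of the original file; assumes
// `model` and `ctx` are an already loaded llama_model / llama_context and that
// llama_decode() has produced logits for output index 0):
//
//   common_params_sampling sparams;   // library defaults
//   common_sampler * smpl = common_sampler_init(model, sparams);
//
//   const llama_token id = common_sampler_sample(smpl, ctx, /* idx */ 0, /* grammar_first */ false);
//   common_sampler_accept(smpl, id, /* accept_grammar */ true);
//
//   common_sampler_free(smpl);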